//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

static cl::opt<bool>
ForceStackAlign("force-align-stack",
                cl::desc("Force align the stack to the minimum alignment"
                         " needed for the function."),
                cl::init(false), cl::Hidden);

static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));

X86RegisterInfo::X86RegisterInfo(const Triple &TT)
    : X86GenRegisterInfo((TT.isArch64Bit() ? X86::RIP : X86::EIP),
                         X86_MC::getDwarfRegFlavour(TT, false),
                         X86_MC::getDwarfRegFlavour(TT, true),
                         (TT.isArch64Bit() ? X86::RIP : X86::EIP)) {
  X86_MC::InitLLVM2SEHRegisterMapping(this);

  // Cache some information.
  Is64Bit = TT.isArch64Bit();
  IsWin64 = Is64Bit && TT.isOSWindows();

  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit mode PIC
  // requires GOT in the EBX register before function calls via PLT GOT pointer.
  if (Is64Bit) {
    SlotSize = 8;
    // This matches the simplified 32-bit pointer code in the data layout
    // computation.
    // FIXME: Should use the data layout?
    bool Use64BitReg = TT.getEnvironment() != Triple::GNUX32;
    StackPtr = Use64BitReg ? X86::RSP : X86::ESP;
    FramePtr = Use64BitReg ? X86::RBP : X86::EBP;
    BasePtr = Use64BitReg ? X86::RBX : X86::EBX;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
    BasePtr = X86::ESI;
  }
}

bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // ExeDepsFixer and PostRAScheduler require liveness.
  return true;
}

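// getSEHRegNum - Map an LLVM register number to the number used in Win64 SEH
// unwind information; for x86 this is simply the register's hardware encoding
// value.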
int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return nullptr;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass *
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                           const MachineFunction &MF) const {
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow the
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const TargetRegisterClass *Super = RC;
  TargetRegisterInfo::regclass_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                    unsigned Kind) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    const Function *F = MF.getFunction();
    if (IsWin64 || (F && F->getCallingConv() == CallingConv::X86_64_Win64))
      return &X86::GR64_TCW64RegClass;
    else if (Is64Bit)
      return &X86::GR64_TCRegClass;

    bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
    if (hasHipeCC)
      return &X86::GR32RegClass;
    return &X86::GR32_TCRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

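// Rough register-pressure limits for a few common register classes. When the
// function keeps a frame pointer, one GPR (EBP/RBP) becomes unavailable, which
// is what the FPDiff adjustment below accounts for.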
unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const X86FrameLowering *TFI = getFrameLowering(MF);

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return Is64Bit ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

const MCPhysReg *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();
  bool CallsEHReturn = MF->getMMI().callsEHReturn();

  assert(MF && "MachineFunction required");
  switch (MF->getFunction()->getCallingConv()) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_SaveList;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_SaveList;
    return CSR_64_AllRegs_SaveList;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_SaveList;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_SaveList;
    return CSR_64_RT_AllRegs_SaveList;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
    break;
  }
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_SaveList;
    break;
  case CallingConv::X86_64_Win64:
    return CSR_Win64_SaveList;
  case CallingConv::X86_64_SysV:
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  default:
    break;
  }

  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_SaveList;
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }
  if (CallsEHReturn)
    return CSR_32EHRet_SaveList;
  return CSR_32_SaveList;
}

const uint32_t *
X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                      CallingConv::ID CC) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();

  switch (CC) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_RegMask;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_RegMask;
    return CSR_64_AllRegs_RegMask;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_RegMask;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_RegMask;
    return CSR_64_RT_AllRegs_RegMask;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
    break;
  }
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_RegMask;
    break;
  default:
    break;
  case CallingConv::X86_64_Win64:
    return CSR_Win64_RegMask;
  case CallingConv::X86_64_SysV:
    return CSR_64_RegMask;
  }

  // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
  // callsEHReturn().
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_RegMask;
    return CSR_64_RegMask;
  }
  return CSR_32_RegMask;
}

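// getNoPreservedMask - A register mask with no bits set, i.e. a call site
// using it preserves no registers at all.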
const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const X86FrameLowering *TFI = getFrameLowering(MF);

  // Set the stack-pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RSP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the instruction pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RIP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    for (MCSubRegIterator I(X86::RBP, this, /*IncludeSelf=*/true); I.isValid();
         ++I)
      Reserved.set(*I);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction()->getCallingConv();
    const uint32_t *RegMask = getCallPreservedMask(MF, CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
          "Stack realignment in presence of dynamic allocas is not supported "
          "with this calling convention.");

    unsigned BasePtr = getX86SubSuperRegister(getBaseRegister(), MVT::i64,
                                              false);
    for (MCSubRegIterator I(BasePtr, this, /*IncludeSelf=*/true);
         I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  for (unsigned n = 0; n != 8; ++n)
    Reserved.set(X86::ST0 + n);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though their
    // super-registers are the old 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }
  if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasAVX512()) {
    for (unsigned n = 16; n != 32; ++n) {
      for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  return Reserved;
}
421 
422 void X86RegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
423  // Check if the EFLAGS register is marked as live-out. This shouldn't happen,
424  // because the calling convention defines the EFLAGS register as NOT
425  // preserved.
426  //
427  // Unfortunatelly the EFLAGS show up as live-out after branch folding. Adding
428  // an assert to track this and clear the register afterwards to avoid
429  // unnecessary crashes during release builds.
430  assert(!(Mask[X86::EFLAGS / 32] & (1U << (X86::EFLAGS % 32))) &&
431  "EFLAGS are not live-out from a patchpoint.");
432 
433  // Also clean other registers that don't need preserving (IP).
434  for (auto Reg : {X86::EFLAGS, X86::RIP, X86::EIP, X86::IP})
435  Mask[Reg / 32] &= ~(1U << (Reg % 32));
436 }
437 
438 //===----------------------------------------------------------------------===//
439 // Stack Frame Processing methods
440 //===----------------------------------------------------------------------===//
441 
443  const MachineFrameInfo *MFI = MF.getFrameInfo();
444 
445  if (!EnableBasePointer)
446  return false;
447 
448  // When we need stack realignment, we can't address the stack from the frame
449  // pointer. When we have dynamic allocas or stack-adjusting inline asm, we
450  // can't address variables from the stack pointer. MS inline asm can
451  // reference locals while also adjusting the stack pointer. When we can't
452  // use both the SP and the FP, we need a separate base pointer register.
453  bool CantUseFP = needsStackRealignment(MF);
454  bool CantUseSP =
455  MFI->hasVarSizedObjects() || MFI->hasOpaqueSPAdjustment();
456  return CantUseFP && CantUseSP;
457 }

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  if (MF.getFunction()->hasFnAttribute("no-realign-stack"))
    return false;

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (MFI->hasVarSizedObjects())
    return MRI->canReserveReg(BasePtr);
  return true;
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const X86FrameLowering *TFI = getFrameLowering(MF);
  const Function *F = MF.getFunction();
  unsigned StackAlign = TFI->getStackAlignment();
  bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
                              F->hasFnAttribute(Attribute::StackAlignment));

  // If we've requested that we force align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  // Since X86 defines assignCalleeSavedSpillSlots, which always returns true,
  // this function is neither used nor tested.
  llvm_unreachable("Unused function on X86. Otherwise need a test case.");
}

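// eliminateFrameIndex - Rewrite the abstract frame-index operand of the given
// instruction into a concrete [base register + displacement] memory reference,
// where the base is the frame pointer, stack pointer, or base pointer chosen
// below, and the displacement is the object's offset within the stack frame.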
void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const X86FrameLowering *TFI = getFrameLowering(MF);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm ||
                    Opc == X86::TCRETURNmi || Opc == X86::TCRETURNmi64;
  if (hasBasePointer(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
  else if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // LOCAL_ESCAPE uses a single offset, with no register. It only works in the
  // simple FP case, and doesn't work with stack realignment. On 32-bit, the
  // offset is from the traditional base pointer location. On 64-bit, the
  // offset is from the SP at the end of the prologue, not the FP location.
  // This matches the behavior of llvm.frameaddress.
  if (Opc == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    bool IsWinEH = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
    int Offset;
    if (IsWinEH)
      Offset = TFI->getFrameIndexOffsetFromSP(MF, FrameIndex);
    else
      Offset = TFI->getFrameIndexOffset(MF, FrameIndex);
    FI.ChangeToImmediate(Offset);
    return;
  }

  // For LEA64_32r when BasePtr is 32-bits (X32) we can use the full-size
  // 64-bit register as the source operand; the semantics are the same and the
  // destination is 32-bits. It saves one byte per lea in code since the 0x67
  // prefix is avoided.
  if (Opc == X86::LEA64_32r && X86::GR32RegClass.contains(BasePtr))
    BasePtr = getX86SubSuperRegister(BasePtr, MVT::i64, false);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register chosen above (EBP in the simple case),
  // and add the frame object's offset to the existing offset operand.
  MI.getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  if (BasePtr == StackPtr)
    FIOffset += SPAdj;

  // The frame index format for stackmaps and patchpoints is different from the
  // X86 format. It only has a FI and an offset.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    assert(BasePtr == FramePtr && "Expected the FP as base register");
    int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  if (MI.getOperand(FIOperandNum+3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
        (uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const X86FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

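// getPtrSizedFrameRegister - Same as getFrameRegister, but narrowed to the
// 32-bit sub-register on x32 (x86-64 ILP32) targets, where pointers are only
// 32 bits wide.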
unsigned
X86RegisterInfo::getPtrSizedFrameRegister(const MachineFunction &MF) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  unsigned FrameReg = getFrameRegister(MF);
  if (Subtarget.isTarget64BitILP32())
    FrameReg = getX86SubSuperRegister(FrameReg, MVT::i32, false);
  return FrameReg;
}

namespace llvm {
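// getX86SubSuperRegisterOrZero - Return the sub- or super-register of Reg with
// the given value type, or 0 if there is none. For example, X86::RAX with
// MVT::i32 yields X86::EAX, and with MVT::i8 and High == true it yields
// X86::AH.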
unsigned getX86SubSuperRegisterOrZero(unsigned Reg, MVT::SimpleValueType VT,
                                      bool High) {
  switch (VT) {
  default: return 0;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return getX86SubSuperRegister(Reg, MVT::i64);
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SI;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DI;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BP;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SP;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return 0;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return 0;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return 0;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }
}

unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
                                bool High) {
  unsigned Res = getX86SubSuperRegisterOrZero(Reg, VT, High);
  if (Res == 0)
    llvm_unreachable("Unexpected register or VT");
  return Res;
}

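// get512BitSuperRegister - Map an XMM or YMM register to the 512-bit ZMM
// register that contains it; ZMM registers are returned unchanged.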
unsigned get512BitSuperRegister(unsigned Reg) {
  if (Reg >= X86::XMM0 && Reg <= X86::XMM31)
    return X86::ZMM0 + (Reg - X86::XMM0);
  if (Reg >= X86::YMM0 && Reg <= X86::YMM31)
    return X86::ZMM0 + (Reg - X86::YMM0);
  if (Reg >= X86::ZMM0 && Reg <= X86::ZMM31)
    return Reg;
  llvm_unreachable("Unexpected SIMD register");
}

} // namespace llvm