LLVM  4.0.0
X86MCCodeEmitter.cpp
1 //===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the X86MCCodeEmitter class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "MCTargetDesc/X86MCTargetDesc.h"
15 #include "MCTargetDesc/X86BaseInfo.h"
16 #include "MCTargetDesc/X86FixupKinds.h"
17 #include "llvm/MC/MCCodeEmitter.h"
18 #include "llvm/MC/MCContext.h"
19 #include "llvm/MC/MCExpr.h"
20 #include "llvm/MC/MCInst.h"
21 #include "llvm/MC/MCInstrInfo.h"
22 #include "llvm/MC/MCRegisterInfo.h"
23 #include "llvm/MC/MCSubtargetInfo.h"
24 #include "llvm/MC/MCSymbol.h"
25 #include "llvm/Support/raw_ostream.h"
26 
27 using namespace llvm;
28 
29 #define DEBUG_TYPE "mccodeemitter"
30 
31 namespace {
32 class X86MCCodeEmitter : public MCCodeEmitter {
33  X86MCCodeEmitter(const X86MCCodeEmitter &) = delete;
34  void operator=(const X86MCCodeEmitter &) = delete;
35  const MCInstrInfo &MCII;
36  MCContext &Ctx;
37 public:
38  X86MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
39  : MCII(mcii), Ctx(ctx) {
40  }
41 
42  ~X86MCCodeEmitter() override {}
43 
44  bool is64BitMode(const MCSubtargetInfo &STI) const {
45  return STI.getFeatureBits()[X86::Mode64Bit];
46  }
47 
48  bool is32BitMode(const MCSubtargetInfo &STI) const {
49  return STI.getFeatureBits()[X86::Mode32Bit];
50  }
51 
52  bool is16BitMode(const MCSubtargetInfo &STI) const {
53  return STI.getFeatureBits()[X86::Mode16Bit];
54  }
55 
56  /// Is16BitMemOperand - Return true if the specified instruction has
57  /// a 16-bit memory operand. Op specifies the operand # of the memoperand.
58  bool Is16BitMemOperand(const MCInst &MI, unsigned Op,
59  const MCSubtargetInfo &STI) const {
60  const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
61  const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
62  const MCOperand &Disp = MI.getOperand(Op+X86::AddrDisp);
63 
64  if (is16BitMode(STI) && BaseReg.getReg() == 0 &&
65  Disp.isImm() && Disp.getImm() < 0x10000)
66  return true;
67  if ((BaseReg.getReg() != 0 &&
68  X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg.getReg())) ||
69  (IndexReg.getReg() != 0 &&
70  X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg.getReg())))
71  return true;
72  return false;
73  }
74 
75  unsigned GetX86RegNum(const MCOperand &MO) const {
76  return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
77  }
78 
79  unsigned getX86RegEncoding(const MCInst &MI, unsigned OpNum) const {
80  return Ctx.getRegisterInfo()->getEncodingValue(
81  MI.getOperand(OpNum).getReg());
82  }
83 
84  // Does this register require a bit to be set in the REX prefix?
85  bool isREXExtendedReg(const MCInst &MI, unsigned OpNum) const {
86  return (getX86RegEncoding(MI, OpNum) >> 3) & 1;
87  }
88 
89  void EmitByte(uint8_t C, unsigned &CurByte, raw_ostream &OS) const {
90  OS << (char)C;
91  ++CurByte;
92  }
93 
94  void EmitConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
95  raw_ostream &OS) const {
96  // Output the constant in little endian byte order.
97  for (unsigned i = 0; i != Size; ++i) {
98  EmitByte(Val & 255, CurByte, OS);
99  Val >>= 8;
100  }
101  }
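 // For example, EmitConstant(0x12345678, 4, CurByte, OS) writes the bytes
 // 78 56 34 12 to the stream, least-significant byte first.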
102 
103  void EmitImmediate(const MCOperand &Disp, SMLoc Loc,
104  unsigned ImmSize, MCFixupKind FixupKind,
105  unsigned &CurByte, raw_ostream &OS,
106  SmallVectorImpl<MCFixup> &Fixups,
107  int ImmOffset = 0) const;
108 
109  inline static uint8_t ModRMByte(unsigned Mod, unsigned RegOpcode,
110  unsigned RM) {
111  assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
112  return RM | (RegOpcode << 3) | (Mod << 6);
113  }
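 // For example, ModRMByte(3, 1, 0) yields 0xC8, the ModR/M byte of
 // "movl %ecx, %eax" (89 C8): mod=11 (register direct), reg=ECX, r/m=EAX.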
114 
115  void EmitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
116  unsigned &CurByte, raw_ostream &OS) const {
117  EmitByte(ModRMByte(3, RegOpcodeFld, GetX86RegNum(ModRMReg)), CurByte, OS);
118  }
119 
120  void EmitSIBByte(unsigned SS, unsigned Index, unsigned Base,
121  unsigned &CurByte, raw_ostream &OS) const {
122  // SIB byte is in the same format as the ModRMByte.
123  EmitByte(ModRMByte(SS, Index, Base), CurByte, OS);
124  }
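 // For example, the SIB byte for [eax + ecx*4] is EmitSIBByte(2, 1, 0),
 // i.e. 0x88: scale factor 4 (SS=2), index=ECX, base=EAX.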
125 
126  void emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField,
127  uint64_t TSFlags, bool Rex, unsigned &CurByte,
128  raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups,
129  const MCSubtargetInfo &STI) const;
130 
131  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
132  SmallVectorImpl<MCFixup> &Fixups,
133  const MCSubtargetInfo &STI) const override;
134 
135  void EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
136  const MCInst &MI, const MCInstrDesc &Desc,
137  raw_ostream &OS) const;
138 
139  void EmitSegmentOverridePrefix(unsigned &CurByte, unsigned SegOperand,
140  const MCInst &MI, raw_ostream &OS) const;
141 
142  bool emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
143  const MCInst &MI, const MCInstrDesc &Desc,
144  const MCSubtargetInfo &STI, raw_ostream &OS) const;
145 
146  uint8_t DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
147  int MemOperand, const MCInstrDesc &Desc) const;
148 };
149 
150 } // end anonymous namespace
151 
152 MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
153  const MCRegisterInfo &MRI,
154  MCContext &Ctx) {
155  return new X86MCCodeEmitter(MCII, Ctx);
156 }
157 
158 /// isDisp8 - Return true if this signed displacement fits in an 8-bit
159 /// sign-extended field.
160 static bool isDisp8(int Value) {
161  return Value == (int8_t)Value;
162 }
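// For example, isDisp8(-128) and isDisp8(127) return true, isDisp8(128) does not.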
163 
164 /// isCDisp8 - Return true if this signed displacement fits in an 8-bit
165 /// compressed displacement field.
166 static bool isCDisp8(uint64_t TSFlags, int Value, int& CValue) {
167  assert(((TSFlags & X86II::EncodingMask) == X86II::EVEX) &&
168  "Compressed 8-bit displacement is only valid for EVEX inst.");
169 
170  unsigned CD8_Scale =
171  (TSFlags & X86II::CD8_Scale_Mask) >> X86II::CD8_Scale_Shift;
172  if (CD8_Scale == 0) {
173  CValue = Value;
174  return isDisp8(Value);
175  }
176 
177  unsigned Mask = CD8_Scale - 1;
178  assert((CD8_Scale & Mask) == 0 && "Invalid memory object size.");
179  if (Value & Mask) // Unaligned offset
180  return false;
181  Value /= (int)CD8_Scale;
182  bool Ret = (Value == (int8_t)Value);
183 
184  if (Ret)
185  CValue = Value;
186  return Ret;
187 }
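// For example, isCDisp8 with a 64-byte memory object (CD8_Scale == 64) accepts a
// displacement of 128 and compresses it to a CValue of 2, but rejects 100 because
// it is not a multiple of the scale.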
188 
189 /// getImmFixupKind - Return the appropriate fixup kind to use for an immediate
190 /// in an instruction with the specified TSFlags.
191 static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
192  unsigned Size = X86II::getSizeOfImm(TSFlags);
193  bool isPCRel = X86II::isImmPCRel(TSFlags);
194 
195  if (X86II::isImmSigned(TSFlags)) {
196  switch (Size) {
197  default: llvm_unreachable("Unsupported signed fixup size!");
198  case 4: return MCFixupKind(X86::reloc_signed_4byte);
199  }
200  }
201  return MCFixup::getKindForSize(Size, isPCRel);
202 }
203 
204 /// Is32BitMemOperand - Return true if the specified instruction has
205 /// a 32-bit memory operand. Op specifies the operand # of the memoperand.
206 static bool Is32BitMemOperand(const MCInst &MI, unsigned Op) {
207  const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
208  const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
209 
210  if ((BaseReg.getReg() != 0 &&
211  X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg.getReg())) ||
212  (IndexReg.getReg() != 0 &&
213  X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg())))
214  return true;
215  if (BaseReg.getReg() == X86::EIP) {
216  assert(IndexReg.getReg() == 0 && "Invalid eip-based address.");
217  return true;
218  }
219  return false;
220 }
221 
222 /// Is64BitMemOperand - Return true if the specified instruction has
223 /// a 64-bit memory operand. Op specifies the operand # of the memoperand.
224 #ifndef NDEBUG
225 static bool Is64BitMemOperand(const MCInst &MI, unsigned Op) {
226  const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
227  const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
228 
229  if ((BaseReg.getReg() != 0 &&
230  X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg.getReg())) ||
231  (IndexReg.getReg() != 0 &&
232  X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg.getReg())))
233  return true;
234  return false;
235 }
236 #endif
237 
238 /// StartsWithGlobalOffsetTable - Check if this expression starts with
239 /// _GLOBAL_OFFSET_TABLE_ and if it is of the form
240 /// _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on ELF
241 /// i386 as _GLOBAL_OFFSET_TABLE_ is magical. We check only simple cases that
242 /// are known to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the start
243 /// of a binary expression.
244 enum GlobalOffsetTableExprKind {
245  GOT_None,
246  GOT_Normal,
247  GOT_SymDiff
248 };
249 static GlobalOffsetTableExprKind
250 StartsWithGlobalOffsetTable(const MCExpr *Expr) {
251  const MCExpr *RHS = nullptr;
252  if (Expr->getKind() == MCExpr::Binary) {
253  const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
254  Expr = BE->getLHS();
255  RHS = BE->getRHS();
256  }
257 
258  if (Expr->getKind() != MCExpr::SymbolRef)
259  return GOT_None;
260 
261  const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
262  const MCSymbol &S = Ref->getSymbol();
263  if (S.getName() != "_GLOBAL_OFFSET_TABLE_")
264  return GOT_None;
265  if (RHS && RHS->getKind() == MCExpr::SymbolRef)
266  return GOT_SymDiff;
267  return GOT_Normal;
268 }
269 
270 static bool HasSecRelSymbolRef(const MCExpr *Expr) {
271  if (Expr->getKind() == MCExpr::SymbolRef) {
272  const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
273  return Ref->getKind() == MCSymbolRefExpr::VK_SECREL;
274  }
275  return false;
276 }
277 
278 void X86MCCodeEmitter::
279 EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
280  MCFixupKind FixupKind, unsigned &CurByte, raw_ostream &OS,
281  SmallVectorImpl<MCFixup> &Fixups, int ImmOffset) const {
282  const MCExpr *Expr = nullptr;
283  if (DispOp.isImm()) {
284  // If this is a simple integer displacement that doesn't require a
285  // relocation, emit it now.
286  if (FixupKind != FK_PCRel_1 &&
287  FixupKind != FK_PCRel_2 &&
288  FixupKind != FK_PCRel_4) {
289  EmitConstant(DispOp.getImm()+ImmOffset, Size, CurByte, OS);
290  return;
291  }
292  Expr = MCConstantExpr::create(DispOp.getImm(), Ctx);
293  } else {
294  Expr = DispOp.getExpr();
295  }
296 
297  // If we have an immoffset, add it to the expression.
298  if ((FixupKind == FK_Data_4 ||
299  FixupKind == FK_Data_8 ||
300  FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
301  GlobalOffsetTableExprKind Kind = StartsWithGlobalOffsetTable(Expr);
302  if (Kind != GOT_None) {
303  assert(ImmOffset == 0);
304 
305  if (Size == 8) {
306  FixupKind = MCFixupKind(X86::reloc_global_offset_table8);
307  } else {
308  assert(Size == 4);
309  FixupKind = MCFixupKind(X86::reloc_global_offset_table);
310  }
311 
312  if (Kind == GOT_Normal)
313  ImmOffset = CurByte;
314  } else if (Expr->getKind() == MCExpr::SymbolRef) {
315  if (HasSecRelSymbolRef(Expr)) {
316  FixupKind = MCFixupKind(FK_SecRel_4);
317  }
318  } else if (Expr->getKind() == MCExpr::Binary) {
319  const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr*>(Expr);
320  if (HasSecRelSymbolRef(Bin->getLHS())
321  || HasSecRelSymbolRef(Bin->getRHS())) {
322  FixupKind = MCFixupKind(FK_SecRel_4);
323  }
324  }
325  }
326 
327  // If the fixup is pc-relative, we need to bias the value to be relative to
328  // the start of the field, not the end of the field.
329  if (FixupKind == FK_PCRel_4 ||
330  FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
331  FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load) ||
332  FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax) ||
333  FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex))
334  ImmOffset -= 4;
335  if (FixupKind == FK_PCRel_2)
336  ImmOffset -= 2;
337  if (FixupKind == FK_PCRel_1)
338  ImmOffset -= 1;
339 
340  if (ImmOffset)
341  Expr = MCBinaryExpr::createAdd(Expr, MCConstantExpr::create(ImmOffset, Ctx),
342  Ctx);
343 
344  // Emit a symbolic constant as a fixup and 4 zeros.
345  Fixups.push_back(MCFixup::create(CurByte, Expr, FixupKind, Loc));
346  EmitConstant(0, Size, CurByte, OS);
347 }
348 
349 void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
350  unsigned RegOpcodeField,
351  uint64_t TSFlags, bool Rex,
352  unsigned &CurByte, raw_ostream &OS,
353  SmallVectorImpl<MCFixup> &Fixups,
354  const MCSubtargetInfo &STI) const {
355  const MCOperand &Disp = MI.getOperand(Op+X86::AddrDisp);
356  const MCOperand &Base = MI.getOperand(Op+X86::AddrBaseReg);
357  const MCOperand &Scale = MI.getOperand(Op+X86::AddrScaleAmt);
358  const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
359  unsigned BaseReg = Base.getReg();
360  bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;
361 
362  // Handle %rip relative addressing.
363  if (BaseReg == X86::RIP ||
364  BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode
365  assert(is64BitMode(STI) && "Rip-relative addressing requires 64-bit mode");
366  assert(IndexReg.getReg() == 0 && "Invalid rip-relative address");
367  EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
368 
369  unsigned Opcode = MI.getOpcode();
370  // movq loads are handled with a special relocation form which allows the
371  // linker to eliminate some loads for GOT references which end up in the
372  // same linkage unit.
373  unsigned FixupKind = [=]() {
374  switch (Opcode) {
375  default:
376  return X86::reloc_riprel_4byte;
377  case X86::MOV64rm:
378  assert(Rex);
379  return X86::reloc_riprel_4byte_movq_load;
380  case X86::CALL64m:
381  case X86::JMP64m:
382  case X86::TEST64rm:
383  case X86::ADC64rm:
384  case X86::ADD64rm:
385  case X86::AND64rm:
386  case X86::CMP64rm:
387  case X86::OR64rm:
388  case X86::SBB64rm:
389  case X86::SUB64rm:
390  case X86::XOR64rm:
391  return Rex ? X86::reloc_riprel_4byte_relax_rex
392  : X86::reloc_riprel_4byte_relax;
393  }
394  }();
395 
396  // rip-relative addressing is actually relative to the *next* instruction.
397  // Since an immediate can follow the mod/rm byte for an instruction, this
398  // means that we need to bias the immediate field of the instruction with
399  // the size of the immediate field. If we have this case, add it into the
400  // expression to emit.
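 // For example, in "movl $5, foo(%rip)" the 4-byte immediate follows the
 // disp32 field, so the displacement fixup is biased by an extra -4.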
401  int ImmSize = X86II::hasImm(TSFlags) ? X86II::getSizeOfImm(TSFlags) : 0;
402 
403  EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind),
404  CurByte, OS, Fixups, -ImmSize);
405  return;
406  }
407 
408  unsigned BaseRegNo = BaseReg ? GetX86RegNum(Base) : -1U;
409 
410  // 16-bit addressing forms of the ModR/M byte have a different encoding for
411  // the R/M field and are far more limited in which registers can be used.
412  if (Is16BitMemOperand(MI, Op, STI)) {
413  if (BaseReg) {
414  // For 32-bit addressing, the row and column values in Table 2-2 are
415  // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with
416  // some special cases. And GetX86RegNum reflects that numbering.
417  // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A,
418  // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only
419  // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order,
420  // while values 0-3 indicate the allowed combinations (base+index) of
421  // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI.
422  //
423  // R16Table[] is a lookup from the normal RegNo, to the row values from
424  // Table 2-1 for 16-bit addressing modes. Where zero means disallowed.
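 // For example, BX (RegNo 3) maps to row 7 and SI (RegNo 6) to row 4, so a
 // plain [bx] or [si] base uses R/M 7 or 4, while the pair [bx+si] folds to
 // R/M 0 in the combination step below.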
425  static const unsigned R16Table[] = { 0, 0, 0, 7, 0, 6, 4, 5 };
426  unsigned RMfield = R16Table[BaseRegNo];
427 
428  assert(RMfield && "invalid 16-bit base register");
429 
430  if (IndexReg.getReg()) {
431  unsigned IndexReg16 = R16Table[GetX86RegNum(IndexReg)];
432 
433  assert(IndexReg16 && "invalid 16-bit index register");
434  // We must have one of SI/DI (4,5), and one of BP/BX (6,7).
435  assert(((IndexReg16 ^ RMfield) & 2) &&
436  "invalid 16-bit base/index register combination");
437  assert(Scale.getImm() == 1 &&
438  "invalid scale for 16-bit memory reference");
439 
440  // Allow base/index to appear in either order (although GAS doesn't).
441  if (IndexReg16 & 2)
442  RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1);
443  else
444  RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1);
445  }
446 
447  if (Disp.isImm() && isDisp8(Disp.getImm())) {
448  if (Disp.getImm() == 0 && BaseRegNo != N86::EBP) {
449  // There is no displacement; just the register.
450  EmitByte(ModRMByte(0, RegOpcodeField, RMfield), CurByte, OS);
451  return;
452  }
453  // Use the [REG]+disp8 form, including for [BP] which cannot be encoded.
454  EmitByte(ModRMByte(1, RegOpcodeField, RMfield), CurByte, OS);
455  EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
456  return;
457  }
458  // This is the [REG]+disp16 case.
459  EmitByte(ModRMByte(2, RegOpcodeField, RMfield), CurByte, OS);
460  } else {
461  // There is no BaseReg; this is the plain [disp16] case.
462  EmitByte(ModRMByte(0, RegOpcodeField, 6), CurByte, OS);
463  }
464 
465  // Emit 16-bit displacement for plain disp16 or [REG]+disp16 cases.
466  EmitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, CurByte, OS, Fixups);
467  return;
468  }
469 
470  // Determine whether a SIB byte is needed.
471  // If no BaseReg, issue a RIP relative instruction only if the MCE can
472  // resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
473  // 2-7) and absolute references.
474 
475  if (// The SIB byte must be used if there is an index register.
476  IndexReg.getReg() == 0 &&
477  // The SIB byte must be used if the base is ESP/RSP/R12, all of which
478  // encode to an R/M value of 4, which indicates that a SIB byte is
479  // present.
480  BaseRegNo != N86::ESP &&
481  // If there is no base register and we're in 64-bit mode, we need a SIB
482  // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
483  (!is64BitMode(STI) || BaseReg != 0)) {
484 
485  if (BaseReg == 0) { // [disp32] in X86-32 mode
486  EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
487  EmitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, CurByte, OS, Fixups);
488  return;
489  }
490 
491  // If the base is not EBP/ESP and there is no displacement, use simple
492  // indirect register encoding, this handles addresses like [EAX]. The
493  // encoding for [EBP] with no displacement means [disp32] so we handle it
494  // by emitting a displacement of 0 below.
495  if (Disp.isImm() && Disp.getImm() == 0 && BaseRegNo != N86::EBP) {
496  EmitByte(ModRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
497  return;
498  }
499 
500  // Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
501  if (Disp.isImm()) {
502  if (!HasEVEX && isDisp8(Disp.getImm())) {
503  EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
504  EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
505  return;
506  }
507  // Try EVEX compressed 8-bit displacement first; if failed, fall back to
508  // 32-bit displacement.
509  int CDisp8 = 0;
510  if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
511  EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
512  EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
513  CDisp8 - Disp.getImm());
514  return;
515  }
516  }
517 
518  // Otherwise, emit the most general non-SIB encoding: [REG+disp32]
519  EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
520  unsigned Opcode = MI.getOpcode();
521  unsigned FixupKind = Opcode == X86::MOV32rm ? X86::reloc_signed_4byte_relax
522  : X86::reloc_signed_4byte;
523  EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), CurByte, OS,
524  Fixups);
525  return;
526  }
527 
528  // We need a SIB byte, so start by outputting the ModR/M byte first
529  assert(IndexReg.getReg() != X86::ESP &&
530  IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");
531 
532  bool ForceDisp32 = false;
533  bool ForceDisp8 = false;
534  int CDisp8 = 0;
535  int ImmOffset = 0;
536  if (BaseReg == 0) {
537  // If there is no base register, we emit the special case SIB byte with
538  // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
539  EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
540  ForceDisp32 = true;
541  } else if (!Disp.isImm()) {
542  // Emit the normal disp32 encoding.
543  EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
544  ForceDisp32 = true;
545  } else if (Disp.getImm() == 0 &&
546  // Base reg can't be anything that ends up with '5' as the base
547  // reg, it is the magic [*] nomenclature that indicates no base.
548  BaseRegNo != N86::EBP) {
549  // Emit no displacement ModR/M byte
550  EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
551  } else if (!HasEVEX && isDisp8(Disp.getImm())) {
552  // Emit the disp8 encoding.
553  EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
554  ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
555  } else if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
556  // Emit the disp8 encoding.
557  EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
558  ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
559  ImmOffset = CDisp8 - Disp.getImm();
560  } else {
561  // Emit the normal disp32 encoding.
562  EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
563  }
564 
565  // Calculate what the SS field value should be...
566  static const unsigned SSTable[] = { ~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3 };
567  unsigned SS = SSTable[Scale.getImm()];
568 
569  if (BaseReg == 0) {
570  // Handle the SIB byte for the case where there is no base, see Intel
571  // Manual 2A, table 2-7. The displacement has already been output.
572  unsigned IndexRegNo;
573  if (IndexReg.getReg())
574  IndexRegNo = GetX86RegNum(IndexReg);
575  else // Examples: [ESP+1*<noreg>+4] or [scaled idx]+disp32 (MOD=0,BASE=5)
576  IndexRegNo = 4;
577  EmitSIBByte(SS, IndexRegNo, 5, CurByte, OS);
578  } else {
579  unsigned IndexRegNo;
580  if (IndexReg.getReg())
581  IndexRegNo = GetX86RegNum(IndexReg);
582  else
583  IndexRegNo = 4; // For example [ESP+1*<noreg>+4]
584  EmitSIBByte(SS, IndexRegNo, GetX86RegNum(Base), CurByte, OS);
585  }
586 
587  // Do we need to output a displacement?
588  if (ForceDisp8)
589  EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups, ImmOffset);
590  else if (ForceDisp32 || Disp.getImm() != 0)
591  EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
592  CurByte, OS, Fixups);
593 }
594 
595 /// EmitVEXOpcodePrefix - AVX instructions are encoded using an opcode prefix
596 /// called VEX.
597 void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
598  int MemOperand, const MCInst &MI,
599  const MCInstrDesc &Desc,
600  raw_ostream &OS) const {
601  assert(!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX.");
602 
603  uint64_t Encoding = TSFlags & X86II::EncodingMask;
604  bool HasEVEX_K = TSFlags & X86II::EVEX_K;
605  bool HasVEX_4V = TSFlags & X86II::VEX_4V;
606  bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
607 
608  // VEX_R: opcode extension equivalent to REX.R in
609  // 1's complement (inverted) form
610  //
611  // 1: Same as REX_R=0 (must be 1 in 32-bit mode)
612  // 0: Same as REX_R=1 (64 bit mode only)
613  //
614  uint8_t VEX_R = 0x1;
615  uint8_t EVEX_R2 = 0x1;
616 
617  // VEX_X: equivalent to REX.X, only used when a
618  // register is used for index in SIB Byte.
619  //
620  // 1: Same as REX.X=0 (must be 1 in 32-bit mode)
621  // 0: Same as REX.X=1 (64-bit mode only)
622  uint8_t VEX_X = 0x1;
623 
624  // VEX_B:
625  //
626  // 1: Same as REX_B=0 (ignored in 32-bit mode)
627  // 0: Same as REX_B=1 (64 bit mode only)
628  //
629  uint8_t VEX_B = 0x1;
630 
631  // VEX_W: opcode specific (use like REX.W, or used for
632  // opcode extension, or ignored, depending on the opcode byte)
633  uint8_t VEX_W = (TSFlags & X86II::VEX_W) ? 1 : 0;
634 
635  // VEX_5M (VEX m-mmmmm field):
636  //
637  // 0b00000: Reserved for future use
638  // 0b00001: implied 0F leading opcode
639  // 0b00010: implied 0F 38 leading opcode bytes
640  // 0b00011: implied 0F 3A leading opcode bytes
641  // 0b00100-0b11111: Reserved for future use
642  // 0b01000: XOP map select - 08h instructions with imm byte
643  // 0b01001: XOP map select - 09h instructions with no imm byte
644  // 0b01010: XOP map select - 0Ah instructions with imm dword
645  uint8_t VEX_5M;
646  switch (TSFlags & X86II::OpMapMask) {
647  default: llvm_unreachable("Invalid prefix!");
648  case X86II::TB: VEX_5M = 0x1; break; // 0F
649  case X86II::T8: VEX_5M = 0x2; break; // 0F 38
650  case X86II::TA: VEX_5M = 0x3; break; // 0F 3A
651  case X86II::XOP8: VEX_5M = 0x8; break;
652  case X86II::XOP9: VEX_5M = 0x9; break;
653  case X86II::XOPA: VEX_5M = 0xA; break;
654  }
655 
656  // VEX_4V (VEX vvvv field): a register specifier
657  // (in 1's complement form) or 1111 if unused.
658  uint8_t VEX_4V = 0xf;
659  uint8_t EVEX_V2 = 0x1;
660 
661  // EVEX_L2/VEX_L (Vector Length):
662  //
663  // L2 L
664  // 0 0: scalar or 128-bit vector
665  // 0 1: 256-bit vector
666  // 1 0: 512-bit vector
667  //
668  uint8_t VEX_L = (TSFlags & X86II::VEX_L) ? 1 : 0;
669  uint8_t EVEX_L2 = (TSFlags & X86II::EVEX_L2) ? 1 : 0;
670 
671  // VEX_PP: opcode extension providing equivalent
672  // functionality of a SIMD prefix
673  //
674  // 0b00: None
675  // 0b01: 66
676  // 0b10: F3
677  // 0b11: F2
678  //
679  uint8_t VEX_PP;
680  switch (TSFlags & X86II::OpPrefixMask) {
681  default: llvm_unreachable("Invalid op prefix!");
682  case X86II::PS: VEX_PP = 0x0; break; // none
683  case X86II::PD: VEX_PP = 0x1; break; // 66
684  case X86II::XS: VEX_PP = 0x2; break; // F3
685  case X86II::XD: VEX_PP = 0x3; break; // F2
686  }
687 
688  // EVEX_U
689  uint8_t EVEX_U = 1; // Always '1' so far
690 
691  // EVEX_z
692  uint8_t EVEX_z = (HasEVEX_K && (TSFlags & X86II::EVEX_Z)) ? 1 : 0;
693 
694  // EVEX_b
695  uint8_t EVEX_b = (TSFlags & X86II::EVEX_B) ? 1 : 0;
696 
697  // EVEX_rc
698  uint8_t EVEX_rc = 0;
699 
700  // EVEX_aaa
701  uint8_t EVEX_aaa = 0;
702 
703  bool EncodeRC = false;
704 
705  // Classify VEX_B, VEX_4V, VEX_R, VEX_X
706  unsigned NumOps = Desc.getNumOperands();
707  unsigned CurOp = X86II::getOperandBias(Desc);
708 
709  switch (TSFlags & X86II::FormMask) {
710  default: llvm_unreachable("Unexpected form in EmitVEXOpcodePrefix!");
711  case X86II::RawFrm:
712  break;
713  case X86II::MRMDestMem: {
714  // MRMDestMem instructions forms:
715  // MemAddr, src1(ModR/M)
716  // MemAddr, src1(VEX_4V), src2(ModR/M)
717  // MemAddr, src1(ModR/M), imm8
718  //
719  unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
720  VEX_B = ~(BaseRegEnc >> 3) & 1;
721  unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
722  VEX_X = ~(IndexRegEnc >> 3) & 1;
723  if (!HasVEX_4V) // Only needed with VSIB, which doesn't use VVVV.
724  EVEX_V2 = ~(IndexRegEnc >> 4) & 1;
725 
726  CurOp += X86::AddrNumOperands;
727 
728  if (HasEVEX_K)
729  EVEX_aaa = getX86RegEncoding(MI, CurOp++);
730 
731  if (HasVEX_4V) {
732  unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
733  VEX_4V = ~VRegEnc & 0xf;
734  EVEX_V2 = ~(VRegEnc >> 4) & 1;
735  }
736 
737  unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
738  VEX_R = ~(RegEnc >> 3) & 1;
739  EVEX_R2 = ~(RegEnc >> 4) & 1;
740  break;
741  }
742  case X86II::MRMSrcMem: {
743  // MRMSrcMem instructions forms:
744  // src1(ModR/M), MemAddr
745  // src1(ModR/M), src2(VEX_4V), MemAddr
746  // src1(ModR/M), MemAddr, imm8
747  // src1(ModR/M), MemAddr, src2(Imm[7:4])
748  //
749  // FMA4:
750  // dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
751  unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
752  VEX_R = ~(RegEnc >> 3) & 1;
753  EVEX_R2 = ~(RegEnc >> 4) & 1;
754 
755  if (HasEVEX_K)
756  EVEX_aaa = getX86RegEncoding(MI, CurOp++);
757 
758  if (HasVEX_4V) {
759  unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
760  VEX_4V = ~VRegEnc & 0xf;
761  EVEX_V2 = ~(VRegEnc >> 4) & 1;
762  }
763 
764  unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
765  VEX_B = ~(BaseRegEnc >> 3) & 1;
766  unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
767  VEX_X = ~(IndexRegEnc >> 3) & 1;
768  if (!HasVEX_4V) // Only needed with VSIB, which doesn't use VVVV.
769  EVEX_V2 = ~(IndexRegEnc >> 4) & 1;
770 
771  break;
772  }
773  case X86II::MRMSrcMem4VOp3: {
774  // Instruction format for 4VOp3:
775  // src1(ModR/M), MemAddr, src3(VEX_4V)
776  unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
777  VEX_R = ~(RegEnc >> 3) & 1;
778 
779  unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
780  VEX_B = ~(BaseRegEnc >> 3) & 1;
781  unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
782  VEX_X = ~(IndexRegEnc >> 3) & 1;
783 
784  VEX_4V = ~getX86RegEncoding(MI, CurOp + X86::AddrNumOperands) & 0xf;
785  break;
786  }
787  case X86II::MRMSrcMemOp4: {
788  // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
789  unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
790  VEX_R = ~(RegEnc >> 3) & 1;
791 
792  unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
793  VEX_4V = ~VRegEnc & 0xf;
794 
795  unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
796  VEX_B = ~(BaseRegEnc >> 3) & 1;
797  unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
798  VEX_X = ~(IndexRegEnc >> 3) & 1;
799  break;
800  }
801  case X86II::MRM0m: case X86II::MRM1m:
802  case X86II::MRM2m: case X86II::MRM3m:
803  case X86II::MRM4m: case X86II::MRM5m:
804  case X86II::MRM6m: case X86II::MRM7m: {
805  // MRM[0-9]m instructions forms:
806  // MemAddr
807  // src1(VEX_4V), MemAddr
808  if (HasVEX_4V) {
809  unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
810  VEX_4V = ~VRegEnc & 0xf;
811  EVEX_V2 = ~(VRegEnc >> 4) & 1;
812  }
813 
814  if (HasEVEX_K)
815  EVEX_aaa = getX86RegEncoding(MI, CurOp++);
816 
817  unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
818  VEX_B = ~(BaseRegEnc >> 3) & 1;
819  unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
820  VEX_X = ~(IndexRegEnc >> 3) & 1;
821  break;
822  }
823  case X86II::MRMSrcReg: {
824  // MRMSrcReg instructions forms:
825  // dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
826  // dst(ModR/M), src1(ModR/M)
827  // dst(ModR/M), src1(ModR/M), imm8
828  //
829  // FMA4:
830  // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
831  unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
832  VEX_R = ~(RegEnc >> 3) & 1;
833  EVEX_R2 = ~(RegEnc >> 4) & 1;
834 
835  if (HasEVEX_K)
836  EVEX_aaa = getX86RegEncoding(MI, CurOp++);
837 
838  if (HasVEX_4V) {
839  unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
840  VEX_4V = ~VRegEnc & 0xf;
841  EVEX_V2 = ~(VRegEnc >> 4) & 1;
842  }
843 
844  RegEnc = getX86RegEncoding(MI, CurOp++);
845  VEX_B = ~(RegEnc >> 3) & 1;
846  VEX_X = ~(RegEnc >> 4) & 1;
847 
848  if (EVEX_b) {
849  if (HasEVEX_RC) {
850  unsigned RcOperand = NumOps-1;
851  assert(RcOperand >= CurOp);
852  EVEX_rc = MI.getOperand(RcOperand).getImm() & 0x3;
853  }
854  EncodeRC = true;
855  }
856  break;
857  }
858  case X86II::MRMSrcReg4VOp3: {
859  // Instruction format for 4VOp3:
860  // src1(ModR/M), src2(ModR/M), src3(VEX_4V)
861  unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
862  VEX_R = ~(RegEnc >> 3) & 1;
863 
864  RegEnc = getX86RegEncoding(MI, CurOp++);
865  VEX_B = ~(RegEnc >> 3) & 1;
866 
867  VEX_4V = ~getX86RegEncoding(MI, CurOp++) & 0xf;
868  break;
869  }
870  case X86II::MRMSrcRegOp4: {
871  // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
872  unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
873  VEX_R = ~(RegEnc >> 3) & 1;
874 
875  unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
876  VEX_4V = ~VRegEnc & 0xf;
877 
878  // Skip second register source (encoded in Imm[7:4])
879  ++CurOp;
880 
881  RegEnc = getX86RegEncoding(MI, CurOp++);
882  VEX_B = ~(RegEnc >> 3) & 1;
883  VEX_X = ~(RegEnc >> 4) & 1;
884  break;
885  }
886  case X86II::MRMDestReg: {
887  // MRMDestReg instructions forms:
888  // dst(ModR/M), src(ModR/M)
889  // dst(ModR/M), src(ModR/M), imm8
890  // dst(ModR/M), src1(VEX_4V), src2(ModR/M)
891  unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
892  VEX_B = ~(RegEnc >> 3) & 1;
893  VEX_X = ~(RegEnc >> 4) & 1;
894 
895  if (HasEVEX_K)
896  EVEX_aaa = getX86RegEncoding(MI, CurOp++);
897 
898  if (HasVEX_4V) {
899  unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
900  VEX_4V = ~VRegEnc & 0xf;
901  EVEX_V2 = ~(VRegEnc >> 4) & 1;
902  }
903 
904  RegEnc = getX86RegEncoding(MI, CurOp++);
905  VEX_R = ~(RegEnc >> 3) & 1;
906  EVEX_R2 = ~(RegEnc >> 4) & 1;
907  if (EVEX_b)
908  EncodeRC = true;
909  break;
910  }
911  case X86II::MRM0r: case X86II::MRM1r:
912  case X86II::MRM2r: case X86II::MRM3r:
913  case X86II::MRM4r: case X86II::MRM5r:
914  case X86II::MRM6r: case X86II::MRM7r: {
915  // MRM0r-MRM7r instructions forms:
916  // dst(VEX_4V), src(ModR/M), imm8
917  if (HasVEX_4V) {
918  unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
919  VEX_4V = ~VRegEnc & 0xf;
920  EVEX_V2 = ~(VRegEnc >> 4) & 1;
921  }
922  if (HasEVEX_K)
923  EVEX_aaa = getX86RegEncoding(MI, CurOp++);
924 
925  unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
926  VEX_B = ~(RegEnc >> 3) & 1;
927  VEX_X = ~(RegEnc >> 4) & 1;
928  break;
929  }
930  }
931 
932  if (Encoding == X86II::VEX || Encoding == X86II::XOP) {
933  // VEX opcode prefix can have 2 or 3 bytes
934  //
935  // 3 bytes:
936  // +-----+ +--------------+ +-------------------+
937  // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
938  // +-----+ +--------------+ +-------------------+
939  // 2 bytes:
940  // +-----+ +-------------------+
941  // | C5h | | R | vvvv | L | pp |
942  // +-----+ +-------------------+
943  //
944  // XOP uses a similar prefix:
945  // +-----+ +--------------+ +-------------------+
946  // | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
947  // +-----+ +--------------+ +-------------------+
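 // For example, "vaddps %xmm2, %xmm1, %xmm0" qualifies for the 2-byte form
 // below and encodes as C5 F0 58 C2, where 0xF0 packs R=1, vvvv=~0b0001,
 // L=0 and pp=00.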
948  uint8_t LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
949 
950  // Can we use the 2 byte VEX prefix?
951  if (Encoding == X86II::VEX && VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) {
952  EmitByte(0xC5, CurByte, OS);
953  EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
954  return;
955  }
956 
957  // 3 byte VEX prefix
958  EmitByte(Encoding == X86II::XOP ? 0x8F : 0xC4, CurByte, OS);
959  EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
960  EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
961  } else {
962  assert(Encoding == X86II::EVEX && "unknown encoding!");
963  // EVEX opcode prefix can have 4 bytes
964  //
965  // +-----+ +--------------+ +-------------------+ +------------------------+
966  // | 62h | | RXBR' | 00mm | | W | vvvv | U | pp | | z | L'L | b | v' | aaa |
967  // +-----+ +--------------+ +-------------------+ +------------------------+
968  assert((VEX_5M & 0x3) == VEX_5M
969  && "More than 2 significant bits in VEX.m-mmmm fields for EVEX!");
970 
971  EmitByte(0x62, CurByte, OS);
972  EmitByte((VEX_R << 7) |
973  (VEX_X << 6) |
974  (VEX_B << 5) |
975  (EVEX_R2 << 4) |
976  VEX_5M, CurByte, OS);
977  EmitByte((VEX_W << 7) |
978  (VEX_4V << 3) |
979  (EVEX_U << 2) |
980  VEX_PP, CurByte, OS);
981  if (EncodeRC)
982  EmitByte((EVEX_z << 7) |
983  (EVEX_rc << 5) |
984  (EVEX_b << 4) |
985  (EVEX_V2 << 3) |
986  EVEX_aaa, CurByte, OS);
987  else
988  EmitByte((EVEX_z << 7) |
989  (EVEX_L2 << 6) |
990  (VEX_L << 5) |
991  (EVEX_b << 4) |
992  (EVEX_V2 << 3) |
993  EVEX_aaa, CurByte, OS);
994  }
995 }
996 
997 /// DetermineREXPrefix - Determine if the MCInst has to be encoded with an X86-64
998 /// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
999 /// size, and 3) use of X86-64 extended registers.
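/// For example, "addq %rax, %r8" needs REX.W for the 64-bit operation and REX.B
/// for the extended destination register, giving the prefix byte 0x49.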
1000 uint8_t X86MCCodeEmitter::DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
1001  int MemOperand,
1002  const MCInstrDesc &Desc) const {
1003  uint8_t REX = 0;
1004  bool UsesHighByteReg = false;
1005 
1006  if (TSFlags & X86II::REX_W)
1007  REX |= 1 << 3; // set REX.W
1008 
1009  if (MI.getNumOperands() == 0) return REX;
1010 
1011  unsigned NumOps = MI.getNumOperands();
1012  unsigned CurOp = X86II::getOperandBias(Desc);
1013 
1014  // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
1015  for (unsigned i = CurOp; i != NumOps; ++i) {
1016  const MCOperand &MO = MI.getOperand(i);
1017  if (!MO.isReg()) continue;
1018  unsigned Reg = MO.getReg();
1019  if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
1020  UsesHighByteReg = true;
1021  if (X86II::isX86_64NonExtLowByteReg(Reg))
1022  // FIXME: The caller of DetermineREXPrefix slaps this prefix onto anything
1023  // that returns non-zero.
1024  REX |= 0x40; // REX fixed encoding prefix
1025  }
1026 
1027  switch (TSFlags & X86II::FormMask) {
1028  case X86II::AddRegFrm:
1029  REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
1030  break;
1031  case X86II::MRMSrcReg:
1032  REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
1033  REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
1034  break;
1035  case X86II::MRMSrcMem: {
1036  REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
1037  REX |= isREXExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
1038  REX |= isREXExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
1039  CurOp += X86::AddrNumOperands;
1040  break;
1041  }
1042  case X86II::MRMDestReg:
1043  REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
1044  REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
1045  break;
1046  case X86II::MRMDestMem:
1047  REX |= isREXExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
1048  REX |= isREXExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
1049  CurOp += X86::AddrNumOperands;
1050  REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
1051  break;
1052  case X86II::MRMXm:
1053  case X86II::MRM0m: case X86II::MRM1m:
1054  case X86II::MRM2m: case X86II::MRM3m:
1055  case X86II::MRM4m: case X86II::MRM5m:
1056  case X86II::MRM6m: case X86II::MRM7m:
1057  REX |= isREXExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
1058  REX |= isREXExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
1059  break;
1060  case X86II::MRMXr:
1061  case X86II::MRM0r: case X86II::MRM1r:
1062  case X86II::MRM2r: case X86II::MRM3r:
1063  case X86II::MRM4r: case X86II::MRM5r:
1064  case X86II::MRM6r: case X86II::MRM7r:
1065  REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
1066  break;
1067  }
1068  if (REX && UsesHighByteReg)
1069  report_fatal_error("Cannot encode high byte register in REX-prefixed instruction");
1070 
1071  return REX;
1072 }
1073 
1074 /// EmitSegmentOverridePrefix - Emit segment override opcode prefix as needed
1075 void X86MCCodeEmitter::EmitSegmentOverridePrefix(unsigned &CurByte,
1076  unsigned SegOperand,
1077  const MCInst &MI,
1078  raw_ostream &OS) const {
1079  // Check for explicit segment override on memory operand.
1080  switch (MI.getOperand(SegOperand).getReg()) {
1081  default: llvm_unreachable("Unknown segment register!");
1082  case 0: break;
1083  case X86::CS: EmitByte(0x2E, CurByte, OS); break;
1084  case X86::SS: EmitByte(0x36, CurByte, OS); break;
1085  case X86::DS: EmitByte(0x3E, CurByte, OS); break;
1086  case X86::ES: EmitByte(0x26, CurByte, OS); break;
1087  case X86::FS: EmitByte(0x64, CurByte, OS); break;
1088  case X86::GS: EmitByte(0x65, CurByte, OS); break;
1089  }
1090 }
1091 
1092 /// Emit all instruction prefixes prior to the opcode.
1093 ///
1094 /// MemOperand is the operand # of the start of a memory operand if present. If
1095 /// Not present, it is -1.
1096 ///
1097 /// Returns true if a REX prefix was used.
1098 bool X86MCCodeEmitter::emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
1099  int MemOperand, const MCInst &MI,
1100  const MCInstrDesc &Desc,
1101  const MCSubtargetInfo &STI,
1102  raw_ostream &OS) const {
1103  bool Ret = false;
1104  // Emit the operand size opcode prefix as needed.
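 // The 0x66 prefix toggles between the 16-bit and 32-bit operand sizes, so an
 // OpSize16 instruction needs it outside of 16-bit mode and an OpSize32
 // instruction needs it inside 16-bit mode.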
1105  if ((TSFlags & X86II::OpSizeMask) == (is16BitMode(STI) ? X86II::OpSize32
1106  : X86II::OpSize16))
1107  EmitByte(0x66, CurByte, OS);
1108 
1109  // Emit the LOCK opcode prefix.
1110  if (TSFlags & X86II::LOCK)
1111  EmitByte(0xF0, CurByte, OS);
1112 
1113  switch (TSFlags & X86II::OpPrefixMask) {
1114  case X86II::PD: // 66
1115  EmitByte(0x66, CurByte, OS);
1116  break;
1117  case X86II::XS: // F3
1118  EmitByte(0xF3, CurByte, OS);
1119  break;
1120  case X86II::XD: // F2
1121  EmitByte(0xF2, CurByte, OS);
1122  break;
1123  }
1124 
1125  // Handle REX prefix.
1126  // FIXME: Can this come before F2 etc to simplify emission?
1127  if (is64BitMode(STI)) {
1128  if (uint8_t REX = DetermineREXPrefix(MI, TSFlags, MemOperand, Desc)) {
1129  EmitByte(0x40 | REX, CurByte, OS);
1130  Ret = true;
1131  }
1132  }
1133 
1134  // 0x0F escape code must be emitted just before the opcode.
1135  switch (TSFlags & X86II::OpMapMask) {
1136  case X86II::TB: // Two-byte opcode map
1137  case X86II::T8: // 0F 38
1138  case X86II::TA: // 0F 3A
1139  EmitByte(0x0F, CurByte, OS);
1140  break;
1141  }
1142 
1143  switch (TSFlags & X86II::OpMapMask) {
1144  case X86II::T8: // 0F 38
1145  EmitByte(0x38, CurByte, OS);
1146  break;
1147  case X86II::TA: // 0F 3A
1148  EmitByte(0x3A, CurByte, OS);
1149  break;
1150  }
1151  return Ret;
1152 }
1153 
1154 void X86MCCodeEmitter::
1155 encodeInstruction(const MCInst &MI, raw_ostream &OS,
1156  SmallVectorImpl<MCFixup> &Fixups,
1157  const MCSubtargetInfo &STI) const {
1158  unsigned Opcode = MI.getOpcode();
1159  const MCInstrDesc &Desc = MCII.get(Opcode);
1160  uint64_t TSFlags = Desc.TSFlags;
1161 
1162  // Pseudo instructions don't get encoded.
1163  if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
1164  return;
1165 
1166  unsigned NumOps = Desc.getNumOperands();
1167  unsigned CurOp = X86II::getOperandBias(Desc);
1168 
1169  // Keep track of the current byte being emitted.
1170  unsigned CurByte = 0;
1171 
1172  // Encoding type for this instruction.
1173  uint64_t Encoding = TSFlags & X86II::EncodingMask;
1174 
1175  // Does it use the VEX.VVVV field?
1176  bool HasVEX_4V = TSFlags & X86II::VEX_4V;
1177  bool HasVEX_I8Reg = (TSFlags & X86II::ImmMask) == X86II::Imm8Reg;
1178 
1179  // Does it use the EVEX.aaa field?
1180  bool HasEVEX_K = TSFlags & X86II::EVEX_K;
1181  bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
1182 
1183  // Used if a register is encoded in 7:4 of immediate.
1184  unsigned I8RegNum = 0;
1185 
1186  // Determine where the memory operand starts, if present.
1187  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
1188  if (MemoryOperand != -1) MemoryOperand += CurOp;
1189 
1190  // Emit segment override opcode prefix as needed.
1191  if (MemoryOperand >= 0)
1192  EmitSegmentOverridePrefix(CurByte, MemoryOperand+X86::AddrSegmentReg,
1193  MI, OS);
1194 
1195  // Emit the repeat opcode prefix as needed.
1196  if (TSFlags & X86II::REP)
1197  EmitByte(0xF3, CurByte, OS);
1198 
1199  // Emit the address size opcode prefix as needed.
1200  bool need_address_override;
1201  uint64_t AdSize = TSFlags & X86II::AdSizeMask;
1202  if ((is16BitMode(STI) && AdSize == X86II::AdSize32) ||
1203  (is32BitMode(STI) && AdSize == X86II::AdSize16) ||
1204  (is64BitMode(STI) && AdSize == X86II::AdSize32)) {
1205  need_address_override = true;
1206  } else if (MemoryOperand < 0) {
1207  need_address_override = false;
1208  } else if (is64BitMode(STI)) {
1209  assert(!Is16BitMemOperand(MI, MemoryOperand, STI));
1210  need_address_override = Is32BitMemOperand(MI, MemoryOperand);
1211  } else if (is32BitMode(STI)) {
1212  assert(!Is64BitMemOperand(MI, MemoryOperand));
1213  need_address_override = Is16BitMemOperand(MI, MemoryOperand, STI);
1214  } else {
1215  assert(is16BitMode(STI));
1216  assert(!Is64BitMemOperand(MI, MemoryOperand));
1217  need_address_override = !Is16BitMemOperand(MI, MemoryOperand, STI);
1218  }
1219 
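 // For example, in 64-bit mode "movl (%ebx), %eax" uses a 32-bit address and
 // therefore needs the 0x67 address-size override emitted below.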
1220  if (need_address_override)
1221  EmitByte(0x67, CurByte, OS);
1222 
1223  bool Rex = false;
1224  if (Encoding == 0)
1225  Rex = emitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, STI, OS);
1226  else
1227  EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
1228 
1229  uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
1230 
1231  if (TSFlags & X86II::Has3DNow0F0FOpcode)
1232  BaseOpcode = 0x0F; // Weird 3DNow! encoding.
1233 
1234  uint64_t Form = TSFlags & X86II::FormMask;
1235  switch (Form) {
1236  default: errs() << "FORM: " << Form << "\n";
1237  llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
1238  case X86II::Pseudo:
1239  llvm_unreachable("Pseudo instruction shouldn't be emitted");
1240  case X86II::RawFrmDstSrc: {
1241  unsigned siReg = MI.getOperand(1).getReg();
1242  assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||
1243  (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||
1244  (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&
1245  "SI and DI register sizes do not match");
1246  // Emit segment override opcode prefix as needed (not for %ds).
1247  if (MI.getOperand(2).getReg() != X86::DS)
1248  EmitSegmentOverridePrefix(CurByte, 2, MI, OS);
1249  // Emit AdSize prefix as needed.
1250  if ((!is32BitMode(STI) && siReg == X86::ESI) ||
1251  (is32BitMode(STI) && siReg == X86::SI))
1252  EmitByte(0x67, CurByte, OS);
1253  CurOp += 3; // Consume operands.
1254  EmitByte(BaseOpcode, CurByte, OS);
1255  break;
1256  }
1257  case X86II::RawFrmSrc: {
1258  unsigned siReg = MI.getOperand(0).getReg();
1259  // Emit segment override opcode prefix as needed (not for %ds).
1260  if (MI.getOperand(1).getReg() != X86::DS)
1261  EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
1262  // Emit AdSize prefix as needed.
1263  if ((!is32BitMode(STI) && siReg == X86::ESI) ||
1264  (is32BitMode(STI) && siReg == X86::SI))
1265  EmitByte(0x67, CurByte, OS);
1266  CurOp += 2; // Consume operands.
1267  EmitByte(BaseOpcode, CurByte, OS);
1268  break;
1269  }
1270  case X86II::RawFrmDst: {
1271  unsigned siReg = MI.getOperand(0).getReg();
1272  // Emit AdSize prefix as needed.
1273  if ((!is32BitMode(STI) && siReg == X86::EDI) ||
1274  (is32BitMode(STI) && siReg == X86::DI))
1275  EmitByte(0x67, CurByte, OS);
1276  ++CurOp; // Consume operand.
1277  EmitByte(BaseOpcode, CurByte, OS);
1278  break;
1279  }
1280  case X86II::RawFrm:
1281  EmitByte(BaseOpcode, CurByte, OS);
1282  break;
1283  case X86II::RawFrmMemOffs:
1284  // Emit segment override opcode prefix as needed.
1285  EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
1286  EmitByte(BaseOpcode, CurByte, OS);
1287  EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1288  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1289  CurByte, OS, Fixups);
1290  ++CurOp; // skip segment operand
1291  break;
1292  case X86II::RawFrmImm8:
1293  EmitByte(BaseOpcode, CurByte, OS);
1294  EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1295  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1296  CurByte, OS, Fixups);
1297  EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, CurByte,
1298  OS, Fixups);
1299  break;
1300  case X86II::RawFrmImm16:
1301  EmitByte(BaseOpcode, CurByte, OS);
1302  EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1303  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1304  CurByte, OS, Fixups);
1305  EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, CurByte,
1306  OS, Fixups);
1307  break;
1308 
1309  case X86II::AddRegFrm:
1310  EmitByte(BaseOpcode + GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS);
1311  break;
1312 
1313  case X86II::MRMDestReg: {
1314  EmitByte(BaseOpcode, CurByte, OS);
1315  unsigned SrcRegNum = CurOp + 1;
1316 
1317  if (HasEVEX_K) // Skip writemask
1318  ++SrcRegNum;
1319 
1320  if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1321  ++SrcRegNum;
1322 
1323  EmitRegModRMByte(MI.getOperand(CurOp),
1324  GetX86RegNum(MI.getOperand(SrcRegNum)), CurByte, OS);
1325  CurOp = SrcRegNum + 1;
1326  break;
1327  }
1328  case X86II::MRMDestMem: {
1329  EmitByte(BaseOpcode, CurByte, OS);
1330  unsigned SrcRegNum = CurOp + X86::AddrNumOperands;
1331 
1332  if (HasEVEX_K) // Skip writemask
1333  ++SrcRegNum;
1334 
1335  if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1336  ++SrcRegNum;
1337 
1338  emitMemModRMByte(MI, CurOp, GetX86RegNum(MI.getOperand(SrcRegNum)), TSFlags,
1339  Rex, CurByte, OS, Fixups, STI);
1340  CurOp = SrcRegNum + 1;
1341  break;
1342  }
1343  case X86II::MRMSrcReg: {
1344  EmitByte(BaseOpcode, CurByte, OS);
1345  unsigned SrcRegNum = CurOp + 1;
1346 
1347  if (HasEVEX_K) // Skip writemask
1348  ++SrcRegNum;
1349 
1350  if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1351  ++SrcRegNum;
1352 
1353  EmitRegModRMByte(MI.getOperand(SrcRegNum),
1354  GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
1355  CurOp = SrcRegNum + 1;
1356  if (HasVEX_I8Reg)
1357  I8RegNum = getX86RegEncoding(MI, CurOp++);
1358  // do not count the rounding control operand
1359  if (HasEVEX_RC)
1360  --NumOps;
1361  break;
1362  }
1363  case X86II::MRMSrcReg4VOp3: {
1364  EmitByte(BaseOpcode, CurByte, OS);
1365  unsigned SrcRegNum = CurOp + 1;
1366 
1367  EmitRegModRMByte(MI.getOperand(SrcRegNum),
1368  GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
1369  CurOp = SrcRegNum + 1;
1370  ++CurOp; // Encoded in VEX.VVVV
1371  break;
1372  }
1373  case X86II::MRMSrcRegOp4: {
1374  EmitByte(BaseOpcode, CurByte, OS);
1375  unsigned SrcRegNum = CurOp + 1;
1376 
1377  // Skip 1st src (which is encoded in VEX_VVVV)
1378  ++SrcRegNum;
1379 
1380  // Capture 2nd src (which is encoded in Imm[7:4])
1381  assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg");
1382  I8RegNum = getX86RegEncoding(MI, SrcRegNum++);
1383 
1384  EmitRegModRMByte(MI.getOperand(SrcRegNum),
1385  GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
1386  CurOp = SrcRegNum + 1;
1387  break;
1388  }
1389  case X86II::MRMSrcMem: {
1390  unsigned FirstMemOp = CurOp+1;
1391 
1392  if (HasEVEX_K) // Skip writemask
1393  ++FirstMemOp;
1394 
1395  if (HasVEX_4V)
1396  ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
1397 
1398  EmitByte(BaseOpcode, CurByte, OS);
1399 
1400  emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
1401  TSFlags, Rex, CurByte, OS, Fixups, STI);
1402  CurOp = FirstMemOp + X86::AddrNumOperands;
1403  if (HasVEX_I8Reg)
1404  I8RegNum = getX86RegEncoding(MI, CurOp++);
1405  break;
1406  }
1407  case X86II::MRMSrcMem4VOp3: {
1408  unsigned FirstMemOp = CurOp+1;
1409 
1410  EmitByte(BaseOpcode, CurByte, OS);
1411 
1412  emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
1413  TSFlags, Rex, CurByte, OS, Fixups, STI);
1414  CurOp = FirstMemOp + X86::AddrNumOperands;
1415  ++CurOp; // Encoded in VEX.VVVV.
1416  break;
1417  }
1418  case X86II::MRMSrcMemOp4: {
1419  unsigned FirstMemOp = CurOp+1;
1420 
1421  ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
1422 
1423  // Capture second register source (encoded in Imm[7:4])
1424  assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg");
1425  I8RegNum = getX86RegEncoding(MI, FirstMemOp++);
1426 
1427  EmitByte(BaseOpcode, CurByte, OS);
1428 
1429  emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
1430  TSFlags, Rex, CurByte, OS, Fixups, STI);
1431  CurOp = FirstMemOp + X86::AddrNumOperands;
1432  break;
1433  }
1434 
1435  case X86II::MRMXr:
1436  case X86II::MRM0r: case X86II::MRM1r:
1437  case X86II::MRM2r: case X86II::MRM3r:
1438  case X86II::MRM4r: case X86II::MRM5r:
1439  case X86II::MRM6r: case X86II::MRM7r: {
1440  if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
1441  ++CurOp;
1442  if (HasEVEX_K) // Skip writemask
1443  ++CurOp;
1444  EmitByte(BaseOpcode, CurByte, OS);
1445  EmitRegModRMByte(MI.getOperand(CurOp++),
1446  (Form == X86II::MRMXr) ? 0 : Form-X86II::MRM0r,
1447  CurByte, OS);
1448  break;
1449  }
1450 
1451  case X86II::MRMXm:
1452  case X86II::MRM0m: case X86II::MRM1m:
1453  case X86II::MRM2m: case X86II::MRM3m:
1454  case X86II::MRM4m: case X86II::MRM5m:
1455  case X86II::MRM6m: case X86II::MRM7m: {
1456  if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
1457  ++CurOp;
1458  if (HasEVEX_K) // Skip writemask
1459  ++CurOp;
1460  EmitByte(BaseOpcode, CurByte, OS);
1461  emitMemModRMByte(MI, CurOp,
1462  (Form == X86II::MRMXm) ? 0 : Form - X86II::MRM0m, TSFlags,
1463  Rex, CurByte, OS, Fixups, STI);
1464  CurOp += X86::AddrNumOperands;
1465  break;
1466  }
1467  case X86II::MRM_C0: case X86II::MRM_C1: case X86II::MRM_C2:
1468  case X86II::MRM_C3: case X86II::MRM_C4: case X86II::MRM_C5:
1469  case X86II::MRM_C6: case X86II::MRM_C7: case X86II::MRM_C8:
1470  case X86II::MRM_C9: case X86II::MRM_CA: case X86II::MRM_CB:
1471  case X86II::MRM_CC: case X86II::MRM_CD: case X86II::MRM_CE:
1472  case X86II::MRM_CF: case X86II::MRM_D0: case X86II::MRM_D1:
1473  case X86II::MRM_D2: case X86II::MRM_D3: case X86II::MRM_D4:
1474  case X86II::MRM_D5: case X86II::MRM_D6: case X86II::MRM_D7:
1475  case X86II::MRM_D8: case X86II::MRM_D9: case X86II::MRM_DA:
1476  case X86II::MRM_DB: case X86II::MRM_DC: case X86II::MRM_DD:
1477  case X86II::MRM_DE: case X86II::MRM_DF: case X86II::MRM_E0:
1478  case X86II::MRM_E1: case X86II::MRM_E2: case X86II::MRM_E3:
1479  case X86II::MRM_E4: case X86II::MRM_E5: case X86II::MRM_E6:
1480  case X86II::MRM_E7: case X86II::MRM_E8: case X86II::MRM_E9:
1481  case X86II::MRM_EA: case X86II::MRM_EB: case X86II::MRM_EC:
1482  case X86II::MRM_ED: case X86II::MRM_EE: case X86II::MRM_EF:
1483  case X86II::MRM_F0: case X86II::MRM_F1: case X86II::MRM_F2:
1484  case X86II::MRM_F3: case X86II::MRM_F4: case X86II::MRM_F5:
1485  case X86II::MRM_F6: case X86II::MRM_F7: case X86II::MRM_F8:
1486  case X86II::MRM_F9: case X86II::MRM_FA: case X86II::MRM_FB:
1487  case X86II::MRM_FC: case X86II::MRM_FD: case X86II::MRM_FE:
1488  case X86II::MRM_FF:
1489  EmitByte(BaseOpcode, CurByte, OS);
1490  EmitByte(0xC0 + Form - X86II::MRM_C0, CurByte, OS);
1491  break;
1492  }
1493 
1494  if (HasVEX_I8Reg) {
1495  // The last source register of a 4 operand instruction in AVX is encoded
1496  // in bits[7:4] of an immediate byte.
1497  assert(I8RegNum < 16 && "Register encoding out of range");
1498  I8RegNum <<= 4;
1499  if (CurOp != NumOps) {
1500  unsigned Val = MI.getOperand(CurOp++).getImm();
1501  assert(Val < 16 && "Immediate operand value out of range");
1502  I8RegNum |= Val;
1503  }
1504  EmitImmediate(MCOperand::createImm(I8RegNum), MI.getLoc(), 1, FK_Data_1,
1505  CurByte, OS, Fixups);
1506  } else {
1507  // If there is a remaining operand, it must be a trailing immediate. Emit it
1508  // according to the right size for the instruction. Some instructions
1509  // (SSE4a extrq and insertq) have two trailing immediates.
1510  while (CurOp != NumOps && NumOps - CurOp <= 2) {
1511  EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1512  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1513  CurByte, OS, Fixups);
1514  }
1515  }
1516 
1517  if (TSFlags & X86II::Has3DNow0F0FOpcode)
1518  EmitByte(X86II::getBaseOpcodeFor(TSFlags), CurByte, OS);
1519 
1520 #ifndef NDEBUG
1521  // FIXME: Verify.
1522  if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
1523  errs() << "Cannot encode all operands of: ";
1524  MI.dump();
1525  errs() << '\n';
1526  abort();
1527  }
1528 #endif
1529 }
Definition: X86BaseInfo.h:263
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
A one-byte fixup.
Definition: MCFixup.h:24
static bool Is32BitMemOperand(const MCInst &MI, unsigned Op)
Is32BitMemOperand - Return true if the specified instruction has a 32-bit memory operand.
MRM[0-7][rm] - These forms are used to represent instructions that use a Mod/RM byte, and use the middle field to hold extended opcode information.
Definition: X86BaseInfo.h:273
A two-byte pc relative fixup.
Definition: MCFixup.h:29
A four-byte pc relative fixup.
Definition: MCFixup.h:30
MRMXr - This form is used for instructions that use the Mod/RM byte to specify a register source...
Definition: X86BaseInfo.h:322
MRMSrcReg4VOp3 - This form is used for instructions that encode operand 3 with VEX.VVVV and do not load from memory.
Definition: X86BaseInfo.h:312
unsigned char getBaseOpcodeFor(uint64_t TSFlags)
Definition: X86BaseInfo.h:562
const FeatureBitset & getFeatureBits() const
getFeatureBits - Return the feature bits.
static GCRegistry::Add< ShadowStackGC > C("shadow-stack","Very portable GC for uncooperative code generators")
unsigned getOpcode() const
Definition: MCInst.h:159
int64_t getImm() const
Definition: MCInst.h:74
const MCExpr * getRHS() const
Get the right-hand side expression of the binary operator.
Definition: MCExpr.h:517
StringRef getName() const
getName - Get the symbol name.
Definition: MCSymbol.h:199
unsigned getSizeOfImm(uint64_t TSFlags)
getSizeOfImm - Decode the "size of immediate" field from the TSFlags field of the specified instructi...
Definition: X86BaseInfo.h:572
unsigned getNumOperands() const
Definition: MCInst.h:166
MCSubtargetInfo - Generic base class for all target subtargets.
A eight-byte fixup.
Definition: MCFixup.h:27
References to labels and assigned expressions.
Definition: MCExpr.h:39
const unsigned Kind
VariantKind getKind() const
Definition: MCExpr.h:313
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
RawFrmDst - This form is for instructions that use the destination index register DI/EDI/ESI...
Definition: X86BaseInfo.h:247
MRMSrcRegOp4 - This form is used for instructions that use the Mod/RM byte to specify the fourth sour...
Definition: X86BaseInfo.h:317
RawFrmSrc - This form is for instructions that use the source index register SI/ESI/ERI with a possib...
Definition: X86BaseInfo.h:252
LLVM Value Representation.
Definition: Value.h:71
Binary expressions.
Definition: MCExpr.h:37
RawFrmSrc - This form is for instructions that use the source index register SI/ESI/RSI with a possib...
Definition: X86BaseInfo.h:243
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:210
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:81
This class implements an extremely fast bulk output stream that can only output to a stream...
Definition: raw_ostream.h:44
void dump() const
Definition: MCInst.cpp:66
GlobalOffsetTableExprKind
StartsWithGlobalOffsetTable - Check if this expression starts with GLOBAL_OFFSET_TABLE and if it is o...
IRTranslator LLVM IR MI
Represents a location in source code.
Definition: SMLoc.h:24
Instances of this class represent operands of the MCInst class.
Definition: MCInst.h:33
static MCOperand createImm(int64_t Val)
Definition: MCInst.h:117
A two-byte fixup.
Definition: MCFixup.h:25
static const MCConstantExpr * create(int64_t Value, MCContext &Ctx)
Definition: MCExpr.cpp:149
Raw - This form is for instructions that don't have any operands, so they are just a fixed opcode val...
Definition: X86BaseInfo.h:231
const MCOperand & getOperand(unsigned i) const
Definition: MCInst.h:164
MRMDestReg - This form is used for instructions that use the Mod/RM byte to specify a destination...
Definition: X86BaseInfo.h:302
int getMemoryOperandNo(uint64_t TSFlags)
getMemoryOperandNo - The function returns the MCInst operand # for the first field of the memory oper...
Definition: X86BaseInfo.h:659
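The Has3DNow0F0FOpcode check near the end of the function reflects the unusual 3DNow! layout: the 0F 0F escape bytes and the ModRM/operand bytes come first, and the instruction's real opcode is appended as the final byte. A rough sketch of that byte ordering, assuming PFADD's trailing opcode 0x9E and a register-register ModRM byte of 0xC1 for mm0, mm1:

// Hedged sketch of 3DNow! byte ordering (plain C++, not the LLVM emitter):
// escape bytes first, then ModRM/operands, then the actual opcode byte last.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<uint8_t> Bytes;
  Bytes.push_back(0x0F); // first escape byte
  Bytes.push_back(0x0F); // second escape byte (3DNow! family marker)
  Bytes.push_back(0xC1); // ModRM: mod=11, reg=mm0, rm=mm1
  Bytes.push_back(0x9E); // trailing opcode byte, assumed here to be PFADD
  for (uint8_t B : Bytes)
    std::printf("%02X ", (unsigned)B); // expected output: 0F 0F C1 9E
  std::printf("\n");
  return 0;
}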