Bug Summary

File: lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
Warning: line 1040, column 5 — value stored to 'CurOp' is never read

Annotated Source Code

1//===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the X86MCCodeEmitter class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "MCTargetDesc/X86BaseInfo.h"
15#include "MCTargetDesc/X86FixupKinds.h"
16#include "MCTargetDesc/X86MCTargetDesc.h"
17#include "llvm/ADT/SmallVector.h"
18#include "llvm/MC/MCCodeEmitter.h"
19#include "llvm/MC/MCContext.h"
20#include "llvm/MC/MCExpr.h"
21#include "llvm/MC/MCFixup.h"
22#include "llvm/MC/MCInst.h"
23#include "llvm/MC/MCInstrDesc.h"
24#include "llvm/MC/MCInstrInfo.h"
25#include "llvm/MC/MCRegisterInfo.h"
26#include "llvm/MC/MCSubtargetInfo.h"
27#include "llvm/MC/MCSymbol.h"
28#include "llvm/Support/ErrorHandling.h"
29#include "llvm/Support/raw_ostream.h"
30#include <cassert>
31#include <cstdint>
32#include <cstdlib>
33
34using namespace llvm;
35
36#define DEBUG_TYPE"mccodeemitter" "mccodeemitter"
37
38namespace {
39
40class X86MCCodeEmitter : public MCCodeEmitter {
41 const MCInstrInfo &MCII;
42 MCContext &Ctx;
43
44public:
45 X86MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
46 : MCII(mcii), Ctx(ctx) {
47 }
48 X86MCCodeEmitter(const X86MCCodeEmitter &) = delete;
49 X86MCCodeEmitter &operator=(const X86MCCodeEmitter &) = delete;
50 ~X86MCCodeEmitter() override = default;
51
52 bool is64BitMode(const MCSubtargetInfo &STI) const {
53 return STI.getFeatureBits()[X86::Mode64Bit];
54 }
55
56 bool is32BitMode(const MCSubtargetInfo &STI) const {
57 return STI.getFeatureBits()[X86::Mode32Bit];
58 }
59
60 bool is16BitMode(const MCSubtargetInfo &STI) const {
61 return STI.getFeatureBits()[X86::Mode16Bit];
62 }
63
64 /// Is16BitMemOperand - Return true if the specified instruction has
65 /// a 16-bit memory operand. Op specifies the operand # of the memoperand.
66 bool Is16BitMemOperand(const MCInst &MI, unsigned Op,
67 const MCSubtargetInfo &STI) const {
68 const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
69 const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
70 const MCOperand &Disp = MI.getOperand(Op+X86::AddrDisp);
71
72 if (is16BitMode(STI) && BaseReg.getReg() == 0 &&
73 Disp.isImm() && Disp.getImm() < 0x10000)
74 return true;
75 if ((BaseReg.getReg() != 0 &&
76 X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg.getReg())) ||
77 (IndexReg.getReg() != 0 &&
78 X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg.getReg())))
79 return true;
80 return false;
81 }
82
83 unsigned GetX86RegNum(const MCOperand &MO) const {
84 return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
85 }
86
87 unsigned getX86RegEncoding(const MCInst &MI, unsigned OpNum) const {
88 return Ctx.getRegisterInfo()->getEncodingValue(
89 MI.getOperand(OpNum).getReg());
90 }
91
92 // Does this register require a bit to be set in REX prefix.
93 bool isREXExtendedReg(const MCInst &MI, unsigned OpNum) const {
94 return (getX86RegEncoding(MI, OpNum) >> 3) & 1;
95 }
96
97 void EmitByte(uint8_t C, unsigned &CurByte, raw_ostream &OS) const {
98 OS << (char)C;
99 ++CurByte;
100 }
101
102 void EmitConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
103 raw_ostream &OS) const {
104 // Output the constant in little endian byte order.
105 for (unsigned i = 0; i != Size; ++i) {
106 EmitByte(Val & 255, CurByte, OS);
107 Val >>= 8;
108 }
109 }
110
111 void EmitImmediate(const MCOperand &Disp, SMLoc Loc,
112 unsigned ImmSize, MCFixupKind FixupKind,
113 unsigned &CurByte, raw_ostream &OS,
114 SmallVectorImpl<MCFixup> &Fixups,
115 int ImmOffset = 0) const;
116
117 static uint8_t ModRMByte(unsigned Mod, unsigned RegOpcode, unsigned RM) {
118 assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!")((Mod < 4 && RegOpcode < 8 && RM < 8
&& "ModRM Fields out of range!") ? static_cast<void
> (0) : __assert_fail ("Mod < 4 && RegOpcode < 8 && RM < 8 && \"ModRM Fields out of range!\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 118, __PRETTY_FUNCTION__))
;
119 return RM | (RegOpcode << 3) | (Mod << 6);
120 }
121
122 void EmitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
123 unsigned &CurByte, raw_ostream &OS) const {
124 EmitByte(ModRMByte(3, RegOpcodeFld, GetX86RegNum(ModRMReg)), CurByte, OS);
125 }
126
127 void EmitSIBByte(unsigned SS, unsigned Index, unsigned Base,
128 unsigned &CurByte, raw_ostream &OS) const {
129 // SIB byte is in the same format as the ModRMByte.
130 EmitByte(ModRMByte(SS, Index, Base), CurByte, OS);
131 }
132
133 void emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField,
134 uint64_t TSFlags, bool Rex, unsigned &CurByte,
135 raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups,
136 const MCSubtargetInfo &STI) const;
137
138 void encodeInstruction(const MCInst &MI, raw_ostream &OS,
139 SmallVectorImpl<MCFixup> &Fixups,
140 const MCSubtargetInfo &STI) const override;
141
142 void EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
143 const MCInst &MI, const MCInstrDesc &Desc,
144 raw_ostream &OS) const;
145
146 void EmitSegmentOverridePrefix(unsigned &CurByte, unsigned SegOperand,
147 const MCInst &MI, raw_ostream &OS) const;
148
149 bool emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
150 const MCInst &MI, const MCInstrDesc &Desc,
151 const MCSubtargetInfo &STI, raw_ostream &OS) const;
152
153 uint8_t DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
154 int MemOperand, const MCInstrDesc &Desc) const;
155};
156
157} // end anonymous namespace
158
/// isDisp8 - Return true if this signed displacement fits in a 8-bit
/// sign-extended field.
static bool isDisp8(int Value) {
  // Equivalent to checking that the value survives an int8_t round-trip.
  return Value >= -128 && Value <= 127;
}
164
165/// isCDisp8 - Return true if this signed displacement fits in a 8-bit
166/// compressed dispacement field.
167static bool isCDisp8(uint64_t TSFlags, int Value, int& CValue) {
168 assert(((TSFlags & X86II::EncodingMask) == X86II::EVEX) &&((((TSFlags & X86II::EncodingMask) == X86II::EVEX) &&
"Compressed 8-bit displacement is only valid for EVEX inst."
) ? static_cast<void> (0) : __assert_fail ("((TSFlags & X86II::EncodingMask) == X86II::EVEX) && \"Compressed 8-bit displacement is only valid for EVEX inst.\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 169, __PRETTY_FUNCTION__))
169 "Compressed 8-bit displacement is only valid for EVEX inst.")((((TSFlags & X86II::EncodingMask) == X86II::EVEX) &&
"Compressed 8-bit displacement is only valid for EVEX inst."
) ? static_cast<void> (0) : __assert_fail ("((TSFlags & X86II::EncodingMask) == X86II::EVEX) && \"Compressed 8-bit displacement is only valid for EVEX inst.\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 169, __PRETTY_FUNCTION__))
;
170
171 unsigned CD8_Scale =
172 (TSFlags & X86II::CD8_Scale_Mask) >> X86II::CD8_Scale_Shift;
173 if (CD8_Scale == 0) {
174 CValue = Value;
175 return isDisp8(Value);
176 }
177
178 unsigned Mask = CD8_Scale - 1;
179 assert((CD8_Scale & Mask) == 0 && "Invalid memory object size.")(((CD8_Scale & Mask) == 0 && "Invalid memory object size."
) ? static_cast<void> (0) : __assert_fail ("(CD8_Scale & Mask) == 0 && \"Invalid memory object size.\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 179, __PRETTY_FUNCTION__))
;
180 if (Value & Mask) // Unaligned offset
181 return false;
182 Value /= (int)CD8_Scale;
183 bool Ret = (Value == (int8_t)Value);
184
185 if (Ret)
186 CValue = Value;
187 return Ret;
188}
189
190/// getImmFixupKind - Return the appropriate fixup kind to use for an immediate
191/// in an instruction with the specified TSFlags.
192static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
193 unsigned Size = X86II::getSizeOfImm(TSFlags);
194 bool isPCRel = X86II::isImmPCRel(TSFlags);
195
196 if (X86II::isImmSigned(TSFlags)) {
197 switch (Size) {
198 default: llvm_unreachable("Unsupported signed fixup size!")::llvm::llvm_unreachable_internal("Unsupported signed fixup size!"
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 198)
;
199 case 4: return MCFixupKind(X86::reloc_signed_4byte);
200 }
201 }
202 return MCFixup::getKindForSize(Size, isPCRel);
203}
204
205/// Is32BitMemOperand - Return true if the specified instruction has
206/// a 32-bit memory operand. Op specifies the operand # of the memoperand.
207static bool Is32BitMemOperand(const MCInst &MI, unsigned Op) {
208 const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
209 const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
210
211 if ((BaseReg.getReg() != 0 &&
212 X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg.getReg())) ||
213 (IndexReg.getReg() != 0 &&
214 X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg())))
215 return true;
216 if (BaseReg.getReg() == X86::EIP) {
217 assert(IndexReg.getReg() == 0 && "Invalid eip-based address.")((IndexReg.getReg() == 0 && "Invalid eip-based address."
) ? static_cast<void> (0) : __assert_fail ("IndexReg.getReg() == 0 && \"Invalid eip-based address.\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 217, __PRETTY_FUNCTION__))
;
218 return true;
219 }
220 return false;
221}
222
223/// Is64BitMemOperand - Return true if the specified instruction has
224/// a 64-bit memory operand. Op specifies the operand # of the memoperand.
225#ifndef NDEBUG
226static bool Is64BitMemOperand(const MCInst &MI, unsigned Op) {
227 const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
228 const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
229
230 if ((BaseReg.getReg() != 0 &&
231 X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg.getReg())) ||
232 (IndexReg.getReg() != 0 &&
233 X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg.getReg())))
234 return true;
235 return false;
236}
237#endif
238
239/// StartsWithGlobalOffsetTable - Check if this expression starts with
240/// _GLOBAL_OFFSET_TABLE_ and if it is of the form
241/// _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on ELF
242/// i386 as _GLOBAL_OFFSET_TABLE_ is magical. We check only simple case that
243/// are know to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the start
244/// of a binary expression.
245enum GlobalOffsetTableExprKind {
246 GOT_None,
247 GOT_Normal,
248 GOT_SymDiff
249};
250static GlobalOffsetTableExprKind
251StartsWithGlobalOffsetTable(const MCExpr *Expr) {
252 const MCExpr *RHS = nullptr;
253 if (Expr->getKind() == MCExpr::Binary) {
254 const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
255 Expr = BE->getLHS();
256 RHS = BE->getRHS();
257 }
258
259 if (Expr->getKind() != MCExpr::SymbolRef)
260 return GOT_None;
261
262 const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
263 const MCSymbol &S = Ref->getSymbol();
264 if (S.getName() != "_GLOBAL_OFFSET_TABLE_")
265 return GOT_None;
266 if (RHS && RHS->getKind() == MCExpr::SymbolRef)
267 return GOT_SymDiff;
268 return GOT_Normal;
269}
270
271static bool HasSecRelSymbolRef(const MCExpr *Expr) {
272 if (Expr->getKind() == MCExpr::SymbolRef) {
273 const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
274 return Ref->getKind() == MCSymbolRefExpr::VK_SECREL;
275 }
276 return false;
277}
278
279void X86MCCodeEmitter::
280EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
281 MCFixupKind FixupKind, unsigned &CurByte, raw_ostream &OS,
282 SmallVectorImpl<MCFixup> &Fixups, int ImmOffset) const {
283 const MCExpr *Expr = nullptr;
284 if (DispOp.isImm()) {
285 // If this is a simple integer displacement that doesn't require a
286 // relocation, emit it now.
287 if (FixupKind != FK_PCRel_1 &&
288 FixupKind != FK_PCRel_2 &&
289 FixupKind != FK_PCRel_4) {
290 EmitConstant(DispOp.getImm()+ImmOffset, Size, CurByte, OS);
291 return;
292 }
293 Expr = MCConstantExpr::create(DispOp.getImm(), Ctx);
294 } else {
295 Expr = DispOp.getExpr();
296 }
297
298 // If we have an immoffset, add it to the expression.
299 if ((FixupKind == FK_Data_4 ||
300 FixupKind == FK_Data_8 ||
301 FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
302 GlobalOffsetTableExprKind Kind = StartsWithGlobalOffsetTable(Expr);
303 if (Kind != GOT_None) {
304 assert(ImmOffset == 0)((ImmOffset == 0) ? static_cast<void> (0) : __assert_fail
("ImmOffset == 0", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 304, __PRETTY_FUNCTION__))
;
305
306 if (Size == 8) {
307 FixupKind = MCFixupKind(X86::reloc_global_offset_table8);
308 } else {
309 assert(Size == 4)((Size == 4) ? static_cast<void> (0) : __assert_fail ("Size == 4"
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 309, __PRETTY_FUNCTION__))
;
310 FixupKind = MCFixupKind(X86::reloc_global_offset_table);
311 }
312
313 if (Kind == GOT_Normal)
314 ImmOffset = CurByte;
315 } else if (Expr->getKind() == MCExpr::SymbolRef) {
316 if (HasSecRelSymbolRef(Expr)) {
317 FixupKind = MCFixupKind(FK_SecRel_4);
318 }
319 } else if (Expr->getKind() == MCExpr::Binary) {
320 const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr*>(Expr);
321 if (HasSecRelSymbolRef(Bin->getLHS())
322 || HasSecRelSymbolRef(Bin->getRHS())) {
323 FixupKind = MCFixupKind(FK_SecRel_4);
324 }
325 }
326 }
327
328 // If the fixup is pc-relative, we need to bias the value to be relative to
329 // the start of the field, not the end of the field.
330 if (FixupKind == FK_PCRel_4 ||
331 FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
332 FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load) ||
333 FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax) ||
334 FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex))
335 ImmOffset -= 4;
336 if (FixupKind == FK_PCRel_2)
337 ImmOffset -= 2;
338 if (FixupKind == FK_PCRel_1)
339 ImmOffset -= 1;
340
341 if (ImmOffset)
342 Expr = MCBinaryExpr::createAdd(Expr, MCConstantExpr::create(ImmOffset, Ctx),
343 Ctx);
344
345 // Emit a symbolic constant as a fixup and 4 zeros.
346 Fixups.push_back(MCFixup::create(CurByte, Expr, FixupKind, Loc));
347 EmitConstant(0, Size, CurByte, OS);
348}
349
350void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
351 unsigned RegOpcodeField,
352 uint64_t TSFlags, bool Rex,
353 unsigned &CurByte, raw_ostream &OS,
354 SmallVectorImpl<MCFixup> &Fixups,
355 const MCSubtargetInfo &STI) const {
356 const MCOperand &Disp = MI.getOperand(Op+X86::AddrDisp);
357 const MCOperand &Base = MI.getOperand(Op+X86::AddrBaseReg);
358 const MCOperand &Scale = MI.getOperand(Op+X86::AddrScaleAmt);
359 const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
360 unsigned BaseReg = Base.getReg();
361 bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;
362
363 // Handle %rip relative addressing.
364 if (BaseReg == X86::RIP ||
365 BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode
366 assert(is64BitMode(STI) && "Rip-relative addressing requires 64-bit mode")((is64BitMode(STI) && "Rip-relative addressing requires 64-bit mode"
) ? static_cast<void> (0) : __assert_fail ("is64BitMode(STI) && \"Rip-relative addressing requires 64-bit mode\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 366, __PRETTY_FUNCTION__))
;
367 assert(IndexReg.getReg() == 0 && "Invalid rip-relative address")((IndexReg.getReg() == 0 && "Invalid rip-relative address"
) ? static_cast<void> (0) : __assert_fail ("IndexReg.getReg() == 0 && \"Invalid rip-relative address\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 367, __PRETTY_FUNCTION__))
;
368 EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
369
370 unsigned Opcode = MI.getOpcode();
371 // movq loads are handled with a special relocation form which allows the
372 // linker to eliminate some loads for GOT references which end up in the
373 // same linkage unit.
374 unsigned FixupKind = [=]() {
375 switch (Opcode) {
376 default:
377 return X86::reloc_riprel_4byte;
378 case X86::MOV64rm:
379 assert(Rex)((Rex) ? static_cast<void> (0) : __assert_fail ("Rex", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 379, __PRETTY_FUNCTION__))
;
380 return X86::reloc_riprel_4byte_movq_load;
381 case X86::CALL64m:
382 case X86::JMP64m:
383 case X86::TEST64rm:
384 case X86::ADC64rm:
385 case X86::ADD64rm:
386 case X86::AND64rm:
387 case X86::CMP64rm:
388 case X86::OR64rm:
389 case X86::SBB64rm:
390 case X86::SUB64rm:
391 case X86::XOR64rm:
392 return Rex ? X86::reloc_riprel_4byte_relax_rex
393 : X86::reloc_riprel_4byte_relax;
394 }
395 }();
396
397 // rip-relative addressing is actually relative to the *next* instruction.
398 // Since an immediate can follow the mod/rm byte for an instruction, this
399 // means that we need to bias the immediate field of the instruction with
400 // the size of the immediate field. If we have this case, add it into the
401 // expression to emit.
402 int ImmSize = X86II::hasImm(TSFlags) ? X86II::getSizeOfImm(TSFlags) : 0;
403
404 EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind),
405 CurByte, OS, Fixups, -ImmSize);
406 return;
407 }
408
409 unsigned BaseRegNo = BaseReg ? GetX86RegNum(Base) : -1U;
410
411 // 16-bit addressing forms of the ModR/M byte have a different encoding for
412 // the R/M field and are far more limited in which registers can be used.
413 if (Is16BitMemOperand(MI, Op, STI)) {
414 if (BaseReg) {
415 // For 32-bit addressing, the row and column values in Table 2-2 are
416 // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with
417 // some special cases. And GetX86RegNum reflects that numbering.
418 // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A,
419 // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only
420 // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order,
421 // while values 0-3 indicate the allowed combinations (base+index) of
422 // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI.
423 //
424 // R16Table[] is a lookup from the normal RegNo, to the row values from
425 // Table 2-1 for 16-bit addressing modes. Where zero means disallowed.
426 static const unsigned R16Table[] = { 0, 0, 0, 7, 0, 6, 4, 5 };
427 unsigned RMfield = R16Table[BaseRegNo];
428
429 assert(RMfield && "invalid 16-bit base register")((RMfield && "invalid 16-bit base register") ? static_cast
<void> (0) : __assert_fail ("RMfield && \"invalid 16-bit base register\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 429, __PRETTY_FUNCTION__))
;
430
431 if (IndexReg.getReg()) {
432 unsigned IndexReg16 = R16Table[GetX86RegNum(IndexReg)];
433
434 assert(IndexReg16 && "invalid 16-bit index register")((IndexReg16 && "invalid 16-bit index register") ? static_cast
<void> (0) : __assert_fail ("IndexReg16 && \"invalid 16-bit index register\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 434, __PRETTY_FUNCTION__))
;
435 // We must have one of SI/DI (4,5), and one of BP/BX (6,7).
436 assert(((IndexReg16 ^ RMfield) & 2) &&((((IndexReg16 ^ RMfield) & 2) && "invalid 16-bit base/index register combination"
) ? static_cast<void> (0) : __assert_fail ("((IndexReg16 ^ RMfield) & 2) && \"invalid 16-bit base/index register combination\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 437, __PRETTY_FUNCTION__))
437 "invalid 16-bit base/index register combination")((((IndexReg16 ^ RMfield) & 2) && "invalid 16-bit base/index register combination"
) ? static_cast<void> (0) : __assert_fail ("((IndexReg16 ^ RMfield) & 2) && \"invalid 16-bit base/index register combination\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 437, __PRETTY_FUNCTION__))
;
438 assert(Scale.getImm() == 1 &&((Scale.getImm() == 1 && "invalid scale for 16-bit memory reference"
) ? static_cast<void> (0) : __assert_fail ("Scale.getImm() == 1 && \"invalid scale for 16-bit memory reference\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 439, __PRETTY_FUNCTION__))
439 "invalid scale for 16-bit memory reference")((Scale.getImm() == 1 && "invalid scale for 16-bit memory reference"
) ? static_cast<void> (0) : __assert_fail ("Scale.getImm() == 1 && \"invalid scale for 16-bit memory reference\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 439, __PRETTY_FUNCTION__))
;
440
441 // Allow base/index to appear in either order (although GAS doesn't).
442 if (IndexReg16 & 2)
443 RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1);
444 else
445 RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1);
446 }
447
448 if (Disp.isImm() && isDisp8(Disp.getImm())) {
449 if (Disp.getImm() == 0 && BaseRegNo != N86::EBP) {
450 // There is no displacement; just the register.
451 EmitByte(ModRMByte(0, RegOpcodeField, RMfield), CurByte, OS);
452 return;
453 }
454 // Use the [REG]+disp8 form, including for [BP] which cannot be encoded.
455 EmitByte(ModRMByte(1, RegOpcodeField, RMfield), CurByte, OS);
456 EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
457 return;
458 }
459 // This is the [REG]+disp16 case.
460 EmitByte(ModRMByte(2, RegOpcodeField, RMfield), CurByte, OS);
461 } else {
462 // There is no BaseReg; this is the plain [disp16] case.
463 EmitByte(ModRMByte(0, RegOpcodeField, 6), CurByte, OS);
464 }
465
466 // Emit 16-bit displacement for plain disp16 or [REG]+disp16 cases.
467 EmitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, CurByte, OS, Fixups);
468 return;
469 }
470
471 // Determine whether a SIB byte is needed.
472 // If no BaseReg, issue a RIP relative instruction only if the MCE can
473 // resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
474 // 2-7) and absolute references.
475
476 if (// The SIB byte must be used if there is an index register.
477 IndexReg.getReg() == 0 &&
478 // The SIB byte must be used if the base is ESP/RSP/R12, all of which
479 // encode to an R/M value of 4, which indicates that a SIB byte is
480 // present.
481 BaseRegNo != N86::ESP &&
482 // If there is no base register and we're in 64-bit mode, we need a SIB
483 // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
484 (!is64BitMode(STI) || BaseReg != 0)) {
485
486 if (BaseReg == 0) { // [disp32] in X86-32 mode
487 EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
488 EmitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, CurByte, OS, Fixups);
489 return;
490 }
491
492 // If the base is not EBP/ESP and there is no displacement, use simple
493 // indirect register encoding, this handles addresses like [EAX]. The
494 // encoding for [EBP] with no displacement means [disp32] so we handle it
495 // by emitting a displacement of 0 below.
496 if (Disp.isImm() && Disp.getImm() == 0 && BaseRegNo != N86::EBP) {
497 EmitByte(ModRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
498 return;
499 }
500
501 // Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
502 if (Disp.isImm()) {
503 if (!HasEVEX && isDisp8(Disp.getImm())) {
504 EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
505 EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
506 return;
507 }
508 // Try EVEX compressed 8-bit displacement first; if failed, fall back to
509 // 32-bit displacement.
510 int CDisp8 = 0;
511 if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
512 EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
513 EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
514 CDisp8 - Disp.getImm());
515 return;
516 }
517 }
518
519 // Otherwise, emit the most general non-SIB encoding: [REG+disp32]
520 EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
521 unsigned Opcode = MI.getOpcode();
522 unsigned FixupKind = Opcode == X86::MOV32rm ? X86::reloc_signed_4byte_relax
523 : X86::reloc_signed_4byte;
524 EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), CurByte, OS,
525 Fixups);
526 return;
527 }
528
529 // We need a SIB byte, so start by outputting the ModR/M byte first
530 assert(IndexReg.getReg() != X86::ESP &&((IndexReg.getReg() != X86::ESP && IndexReg.getReg() !=
X86::RSP && "Cannot use ESP as index reg!") ? static_cast
<void> (0) : __assert_fail ("IndexReg.getReg() != X86::ESP && IndexReg.getReg() != X86::RSP && \"Cannot use ESP as index reg!\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 531, __PRETTY_FUNCTION__))
531 IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!")((IndexReg.getReg() != X86::ESP && IndexReg.getReg() !=
X86::RSP && "Cannot use ESP as index reg!") ? static_cast
<void> (0) : __assert_fail ("IndexReg.getReg() != X86::ESP && IndexReg.getReg() != X86::RSP && \"Cannot use ESP as index reg!\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 531, __PRETTY_FUNCTION__))
;
532
533 bool ForceDisp32 = false;
534 bool ForceDisp8 = false;
535 int CDisp8 = 0;
536 int ImmOffset = 0;
537 if (BaseReg == 0) {
538 // If there is no base register, we emit the special case SIB byte with
539 // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
540 EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
541 ForceDisp32 = true;
542 } else if (!Disp.isImm()) {
543 // Emit the normal disp32 encoding.
544 EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
545 ForceDisp32 = true;
546 } else if (Disp.getImm() == 0 &&
547 // Base reg can't be anything that ends up with '5' as the base
548 // reg, it is the magic [*] nomenclature that indicates no base.
549 BaseRegNo != N86::EBP) {
550 // Emit no displacement ModR/M byte
551 EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
552 } else if (!HasEVEX && isDisp8(Disp.getImm())) {
553 // Emit the disp8 encoding.
554 EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
555 ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
556 } else if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
557 // Emit the disp8 encoding.
558 EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
559 ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
560 ImmOffset = CDisp8 - Disp.getImm();
561 } else {
562 // Emit the normal disp32 encoding.
563 EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
564 }
565
566 // Calculate what the SS field value should be...
567 static const unsigned SSTable[] = { ~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3 };
568 unsigned SS = SSTable[Scale.getImm()];
569
570 if (BaseReg == 0) {
571 // Handle the SIB byte for the case where there is no base, see Intel
572 // Manual 2A, table 2-7. The displacement has already been output.
573 unsigned IndexRegNo;
574 if (IndexReg.getReg())
575 IndexRegNo = GetX86RegNum(IndexReg);
576 else // Examples: [ESP+1*<noreg>+4] or [scaled idx]+disp32 (MOD=0,BASE=5)
577 IndexRegNo = 4;
578 EmitSIBByte(SS, IndexRegNo, 5, CurByte, OS);
579 } else {
580 unsigned IndexRegNo;
581 if (IndexReg.getReg())
582 IndexRegNo = GetX86RegNum(IndexReg);
583 else
584 IndexRegNo = 4; // For example [ESP+1*<noreg>+4]
585 EmitSIBByte(SS, IndexRegNo, GetX86RegNum(Base), CurByte, OS);
586 }
587
588 // Do we need to output a displacement?
589 if (ForceDisp8)
590 EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups, ImmOffset);
591 else if (ForceDisp32 || Disp.getImm() != 0)
592 EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
593 CurByte, OS, Fixups);
594}
595
596/// EmitVEXOpcodePrefix - AVX instructions are encoded using a opcode prefix
597/// called VEX.
598void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
599 int MemOperand, const MCInst &MI,
600 const MCInstrDesc &Desc,
601 raw_ostream &OS) const {
602 assert(!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX.")((!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX."
) ? static_cast<void> (0) : __assert_fail ("!(TSFlags & X86II::LOCK) && \"Can't have LOCK VEX.\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 602, __PRETTY_FUNCTION__))
;
603
604 uint64_t Encoding = TSFlags & X86II::EncodingMask;
605 bool HasEVEX_K = TSFlags & X86II::EVEX_K;
606 bool HasVEX_4V = TSFlags & X86II::VEX_4V;
607 bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
608
609 // VEX_R: opcode externsion equivalent to REX.R in
610 // 1's complement (inverted) form
611 //
612 // 1: Same as REX_R=0 (must be 1 in 32-bit mode)
613 // 0: Same as REX_R=1 (64 bit mode only)
614 //
615 uint8_t VEX_R = 0x1;
616 uint8_t EVEX_R2 = 0x1;
617
618 // VEX_X: equivalent to REX.X, only used when a
619 // register is used for index in SIB Byte.
620 //
621 // 1: Same as REX.X=0 (must be 1 in 32-bit mode)
622 // 0: Same as REX.X=1 (64-bit mode only)
623 uint8_t VEX_X = 0x1;
624
625 // VEX_B:
626 //
627 // 1: Same as REX_B=0 (ignored in 32-bit mode)
628 // 0: Same as REX_B=1 (64 bit mode only)
629 //
630 uint8_t VEX_B = 0x1;
631
632 // VEX_W: opcode specific (use like REX.W, or used for
633 // opcode extension, or ignored, depending on the opcode byte)
634 uint8_t VEX_W = (TSFlags & X86II::VEX_W) ? 1 : 0;
635
636 // VEX_5M (VEX m-mmmmm field):
637 //
638 // 0b00000: Reserved for future use
639 // 0b00001: implied 0F leading opcode
640 // 0b00010: implied 0F 38 leading opcode bytes
641 // 0b00011: implied 0F 3A leading opcode bytes
642 // 0b00100-0b11111: Reserved for future use
643 // 0b01000: XOP map select - 08h instructions with imm byte
644 // 0b01001: XOP map select - 09h instructions with no imm byte
645 // 0b01010: XOP map select - 0Ah instructions with imm dword
646 uint8_t VEX_5M;
647 switch (TSFlags & X86II::OpMapMask) {
648 default: llvm_unreachable("Invalid prefix!")::llvm::llvm_unreachable_internal("Invalid prefix!", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 648)
;
649 case X86II::TB: VEX_5M = 0x1; break; // 0F
650 case X86II::T8: VEX_5M = 0x2; break; // 0F 38
651 case X86II::TA: VEX_5M = 0x3; break; // 0F 3A
652 case X86II::XOP8: VEX_5M = 0x8; break;
653 case X86II::XOP9: VEX_5M = 0x9; break;
654 case X86II::XOPA: VEX_5M = 0xA; break;
655 }
656
657 // VEX_4V (VEX vvvv field): a register specifier
658 // (in 1's complement form) or 1111 if unused.
659 uint8_t VEX_4V = 0xf;
660 uint8_t EVEX_V2 = 0x1;
661
662 // EVEX_L2/VEX_L (Vector Length):
663 //
664 // L2 L
665 // 0 0: scalar or 128-bit vector
666 // 0 1: 256-bit vector
667 // 1 0: 512-bit vector
668 //
669 uint8_t VEX_L = (TSFlags & X86II::VEX_L) ? 1 : 0;
670 uint8_t EVEX_L2 = (TSFlags & X86II::EVEX_L2) ? 1 : 0;
671
672 // VEX_PP: opcode extension providing equivalent
673 // functionality of a SIMD prefix
674 //
675 // 0b00: None
676 // 0b01: 66
677 // 0b10: F3
678 // 0b11: F2
679 //
680 uint8_t VEX_PP;
681 switch (TSFlags & X86II::OpPrefixMask) {
682 default: llvm_unreachable("Invalid op prefix!")::llvm::llvm_unreachable_internal("Invalid op prefix!", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 682)
;
683 case X86II::PS: VEX_PP = 0x0; break; // none
684 case X86II::PD: VEX_PP = 0x1; break; // 66
685 case X86II::XS: VEX_PP = 0x2; break; // F3
686 case X86II::XD: VEX_PP = 0x3; break; // F2
687 }
688
689 // EVEX_U
690 uint8_t EVEX_U = 1; // Always '1' so far
691
692 // EVEX_z
693 uint8_t EVEX_z = (HasEVEX_K && (TSFlags & X86II::EVEX_Z)) ? 1 : 0;
694
695 // EVEX_b
696 uint8_t EVEX_b = (TSFlags & X86II::EVEX_B) ? 1 : 0;
697
698 // EVEX_rc
699 uint8_t EVEX_rc = 0;
700
701 // EVEX_aaa
702 uint8_t EVEX_aaa = 0;
703
704 bool EncodeRC = false;
705
706 // Classify VEX_B, VEX_4V, VEX_R, VEX_X
707 unsigned NumOps = Desc.getNumOperands();
708 unsigned CurOp = X86II::getOperandBias(Desc);
709
710 switch (TSFlags & X86II::FormMask) {
711 default: llvm_unreachable("Unexpected form in EmitVEXOpcodePrefix!")::llvm::llvm_unreachable_internal("Unexpected form in EmitVEXOpcodePrefix!"
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 711)
;
712 case X86II::RawFrm:
713 break;
714 case X86II::MRMDestMem: {
715 // MRMDestMem instructions forms:
716 // MemAddr, src1(ModR/M)
717 // MemAddr, src1(VEX_4V), src2(ModR/M)
718 // MemAddr, src1(ModR/M), imm8
719 //
720 unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
721 VEX_B = ~(BaseRegEnc >> 3) & 1;
722 unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
723 VEX_X = ~(IndexRegEnc >> 3) & 1;
724 if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV.
725 EVEX_V2 = ~(IndexRegEnc >> 4) & 1;
726
727 CurOp += X86::AddrNumOperands;
728
729 if (HasEVEX_K)
730 EVEX_aaa = getX86RegEncoding(MI, CurOp++);
731
732 if (HasVEX_4V) {
733 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
734 VEX_4V = ~VRegEnc & 0xf;
735 EVEX_V2 = ~(VRegEnc >> 4) & 1;
736 }
737
738 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
739 VEX_R = ~(RegEnc >> 3) & 1;
740 EVEX_R2 = ~(RegEnc >> 4) & 1;
741 break;
742 }
743 case X86II::MRMSrcMem: {
744 // MRMSrcMem instructions forms:
745 // src1(ModR/M), MemAddr
746 // src1(ModR/M), src2(VEX_4V), MemAddr
747 // src1(ModR/M), MemAddr, imm8
748 // src1(ModR/M), MemAddr, src2(Imm[7:4])
749 //
750 // FMA4:
751 // dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
752 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
753 VEX_R = ~(RegEnc >> 3) & 1;
754 EVEX_R2 = ~(RegEnc >> 4) & 1;
755
756 if (HasEVEX_K)
757 EVEX_aaa = getX86RegEncoding(MI, CurOp++);
758
759 if (HasVEX_4V) {
760 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
761 VEX_4V = ~VRegEnc & 0xf;
762 EVEX_V2 = ~(VRegEnc >> 4) & 1;
763 }
764
765 unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
766 VEX_B = ~(BaseRegEnc >> 3) & 1;
767 unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
768 VEX_X = ~(IndexRegEnc >> 3) & 1;
769 if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV.
770 EVEX_V2 = ~(IndexRegEnc >> 4) & 1;
771
772 break;
773 }
774 case X86II::MRMSrcMem4VOp3: {
775 // Instruction format for 4VOp3:
776 // src1(ModR/M), MemAddr, src3(VEX_4V)
777 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
778 VEX_R = ~(RegEnc >> 3) & 1;
779
780 unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
781 VEX_B = ~(BaseRegEnc >> 3) & 1;
782 unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
783 VEX_X = ~(IndexRegEnc >> 3) & 1;
784
785 VEX_4V = ~getX86RegEncoding(MI, CurOp + X86::AddrNumOperands) & 0xf;
786 break;
787 }
788 case X86II::MRMSrcMemOp4: {
789 // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
790 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
791 VEX_R = ~(RegEnc >> 3) & 1;
792
793 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
794 VEX_4V = ~VRegEnc & 0xf;
795
796 unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
797 VEX_B = ~(BaseRegEnc >> 3) & 1;
798 unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
799 VEX_X = ~(IndexRegEnc >> 3) & 1;
800 break;
801 }
802 case X86II::MRM0m: case X86II::MRM1m:
803 case X86II::MRM2m: case X86II::MRM3m:
804 case X86II::MRM4m: case X86II::MRM5m:
805 case X86II::MRM6m: case X86II::MRM7m: {
806 // MRM[0-9]m instructions forms:
807 // MemAddr
808 // src1(VEX_4V), MemAddr
809 if (HasVEX_4V) {
810 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
811 VEX_4V = ~VRegEnc & 0xf;
812 EVEX_V2 = ~(VRegEnc >> 4) & 1;
813 }
814
815 if (HasEVEX_K)
816 EVEX_aaa = getX86RegEncoding(MI, CurOp++);
817
818 unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
819 VEX_B = ~(BaseRegEnc >> 3) & 1;
820 unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
821 VEX_X = ~(IndexRegEnc >> 3) & 1;
822 break;
823 }
824 case X86II::MRMSrcReg: {
825 // MRMSrcReg instructions forms:
826 // dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
827 // dst(ModR/M), src1(ModR/M)
828 // dst(ModR/M), src1(ModR/M), imm8
829 //
830 // FMA4:
831 // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
832 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
833 VEX_R = ~(RegEnc >> 3) & 1;
834 EVEX_R2 = ~(RegEnc >> 4) & 1;
835
836 if (HasEVEX_K)
837 EVEX_aaa = getX86RegEncoding(MI, CurOp++);
838
839 if (HasVEX_4V) {
840 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
841 VEX_4V = ~VRegEnc & 0xf;
842 EVEX_V2 = ~(VRegEnc >> 4) & 1;
843 }
844
845 RegEnc = getX86RegEncoding(MI, CurOp++);
846 VEX_B = ~(RegEnc >> 3) & 1;
847 VEX_X = ~(RegEnc >> 4) & 1;
848
849 if (EVEX_b) {
850 if (HasEVEX_RC) {
851 unsigned RcOperand = NumOps-1;
852 assert(RcOperand >= CurOp)((RcOperand >= CurOp) ? static_cast<void> (0) : __assert_fail
("RcOperand >= CurOp", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 852, __PRETTY_FUNCTION__))
;
853 EVEX_rc = MI.getOperand(RcOperand).getImm() & 0x3;
854 }
855 EncodeRC = true;
856 }
857 break;
858 }
859 case X86II::MRMSrcReg4VOp3: {
860 // Instruction format for 4VOp3:
861 // src1(ModR/M), src2(ModR/M), src3(VEX_4V)
862 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
863 VEX_R = ~(RegEnc >> 3) & 1;
864
865 RegEnc = getX86RegEncoding(MI, CurOp++);
866 VEX_B = ~(RegEnc >> 3) & 1;
867
868 VEX_4V = ~getX86RegEncoding(MI, CurOp++) & 0xf;
869 break;
870 }
871 case X86II::MRMSrcRegOp4: {
872 // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
873 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
874 VEX_R = ~(RegEnc >> 3) & 1;
875
876 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
877 VEX_4V = ~VRegEnc & 0xf;
878
879 // Skip second register source (encoded in Imm[7:4])
880 ++CurOp;
881
882 RegEnc = getX86RegEncoding(MI, CurOp++);
883 VEX_B = ~(RegEnc >> 3) & 1;
884 VEX_X = ~(RegEnc >> 4) & 1;
885 break;
886 }
887 case X86II::MRMDestReg: {
888 // MRMDestReg instructions forms:
889 // dst(ModR/M), src(ModR/M)
890 // dst(ModR/M), src(ModR/M), imm8
891 // dst(ModR/M), src1(VEX_4V), src2(ModR/M)
892 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
893 VEX_B = ~(RegEnc >> 3) & 1;
894 VEX_X = ~(RegEnc >> 4) & 1;
895
896 if (HasEVEX_K)
897 EVEX_aaa = getX86RegEncoding(MI, CurOp++);
898
899 if (HasVEX_4V) {
900 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
901 VEX_4V = ~VRegEnc & 0xf;
902 EVEX_V2 = ~(VRegEnc >> 4) & 1;
903 }
904
905 RegEnc = getX86RegEncoding(MI, CurOp++);
906 VEX_R = ~(RegEnc >> 3) & 1;
907 EVEX_R2 = ~(RegEnc >> 4) & 1;
908 if (EVEX_b)
909 EncodeRC = true;
910 break;
911 }
912 case X86II::MRM0r: case X86II::MRM1r:
913 case X86II::MRM2r: case X86II::MRM3r:
914 case X86II::MRM4r: case X86II::MRM5r:
915 case X86II::MRM6r: case X86II::MRM7r: {
916 // MRM0r-MRM7r instructions forms:
917 // dst(VEX_4V), src(ModR/M), imm8
918 if (HasVEX_4V) {
919 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
920 VEX_4V = ~VRegEnc & 0xf;
921 EVEX_V2 = ~(VRegEnc >> 4) & 1;
922 }
923 if (HasEVEX_K)
924 EVEX_aaa = getX86RegEncoding(MI, CurOp++);
925
926 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
927 VEX_B = ~(RegEnc >> 3) & 1;
928 VEX_X = ~(RegEnc >> 4) & 1;
929 break;
930 }
931 }
932
933 if (Encoding == X86II::VEX || Encoding == X86II::XOP) {
934 // VEX opcode prefix can have 2 or 3 bytes
935 //
936 // 3 bytes:
937 // +-----+ +--------------+ +-------------------+
938 // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
939 // +-----+ +--------------+ +-------------------+
940 // 2 bytes:
941 // +-----+ +-------------------+
942 // | C5h | | R | vvvv | L | pp |
943 // +-----+ +-------------------+
944 //
945 // XOP uses a similar prefix:
946 // +-----+ +--------------+ +-------------------+
947 // | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
948 // +-----+ +--------------+ +-------------------+
949 uint8_t LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
950
951 // Can we use the 2 byte VEX prefix?
952 if (Encoding == X86II::VEX && VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) {
953 EmitByte(0xC5, CurByte, OS);
954 EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
955 return;
956 }
957
958 // 3 byte VEX prefix
959 EmitByte(Encoding == X86II::XOP ? 0x8F : 0xC4, CurByte, OS);
960 EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
961 EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
962 } else {
963 assert(Encoding == X86II::EVEX && "unknown encoding!")((Encoding == X86II::EVEX && "unknown encoding!") ? static_cast
<void> (0) : __assert_fail ("Encoding == X86II::EVEX && \"unknown encoding!\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 963, __PRETTY_FUNCTION__))
;
964 // EVEX opcode prefix can have 4 bytes
965 //
966 // +-----+ +--------------+ +-------------------+ +------------------------+
967 // | 62h | | RXBR' | 00mm | | W | vvvv | U | pp | | z | L'L | b | v' | aaa |
968 // +-----+ +--------------+ +-------------------+ +------------------------+
969 assert((VEX_5M & 0x3) == VEX_5M(((VEX_5M & 0x3) == VEX_5M && "More than 2 significant bits in VEX.m-mmmm fields for EVEX!"
) ? static_cast<void> (0) : __assert_fail ("(VEX_5M & 0x3) == VEX_5M && \"More than 2 significant bits in VEX.m-mmmm fields for EVEX!\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 970, __PRETTY_FUNCTION__))
970 && "More than 2 significant bits in VEX.m-mmmm fields for EVEX!")(((VEX_5M & 0x3) == VEX_5M && "More than 2 significant bits in VEX.m-mmmm fields for EVEX!"
) ? static_cast<void> (0) : __assert_fail ("(VEX_5M & 0x3) == VEX_5M && \"More than 2 significant bits in VEX.m-mmmm fields for EVEX!\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 970, __PRETTY_FUNCTION__))
;
971
972 EmitByte(0x62, CurByte, OS);
973 EmitByte((VEX_R << 7) |
974 (VEX_X << 6) |
975 (VEX_B << 5) |
976 (EVEX_R2 << 4) |
977 VEX_5M, CurByte, OS);
978 EmitByte((VEX_W << 7) |
979 (VEX_4V << 3) |
980 (EVEX_U << 2) |
981 VEX_PP, CurByte, OS);
982 if (EncodeRC)
983 EmitByte((EVEX_z << 7) |
984 (EVEX_rc << 5) |
985 (EVEX_b << 4) |
986 (EVEX_V2 << 3) |
987 EVEX_aaa, CurByte, OS);
988 else
989 EmitByte((EVEX_z << 7) |
990 (EVEX_L2 << 6) |
991 (VEX_L << 5) |
992 (EVEX_b << 4) |
993 (EVEX_V2 << 3) |
994 EVEX_aaa, CurByte, OS);
995 }
996}
997
998/// DetermineREXPrefix - Determine if the MCInst has to be encoded with a X86-64
999/// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
1000/// size, and 3) use of X86-64 extended registers.
1001uint8_t X86MCCodeEmitter::DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
1002 int MemOperand,
1003 const MCInstrDesc &Desc) const {
1004 uint8_t REX = 0;
1005 bool UsesHighByteReg = false;
1006
1007 if (TSFlags & X86II::REX_W)
1008 REX |= 1 << 3; // set REX.W
1009
1010 if (MI.getNumOperands() == 0) return REX;
1011
1012 unsigned NumOps = MI.getNumOperands();
1013 unsigned CurOp = X86II::getOperandBias(Desc);
1014
1015 // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
1016 for (unsigned i = CurOp; i != NumOps; ++i) {
1017 const MCOperand &MO = MI.getOperand(i);
1018 if (!MO.isReg()) continue;
1019 unsigned Reg = MO.getReg();
1020 if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
1021 UsesHighByteReg = true;
1022 if (X86II::isX86_64NonExtLowByteReg(Reg))
1023 // FIXME: The caller of DetermineREXPrefix slaps this prefix onto anything
1024 // that returns non-zero.
1025 REX |= 0x40; // REX fixed encoding prefix
1026 }
1027
1028 switch (TSFlags & X86II::FormMask) {
1029 case X86II::AddRegFrm:
1030 REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
1031 break;
1032 case X86II::MRMSrcReg:
1033 REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
1034 REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
1035 break;
1036 case X86II::MRMSrcMem: {
1037 REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
1038 REX |= isREXExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
1039 REX |= isREXExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
1040 CurOp += X86::AddrNumOperands;
Value stored to 'CurOp' is never read
1041 break;
1042 }
1043 case X86II::MRMDestReg:
1044 REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
1045 REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
1046 break;
1047 case X86II::MRMDestMem:
1048 REX |= isREXExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
1049 REX |= isREXExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
1050 CurOp += X86::AddrNumOperands;
1051 REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
1052 break;
1053 case X86II::MRMXm:
1054 case X86II::MRM0m: case X86II::MRM1m:
1055 case X86II::MRM2m: case X86II::MRM3m:
1056 case X86II::MRM4m: case X86II::MRM5m:
1057 case X86II::MRM6m: case X86II::MRM7m:
1058 REX |= isREXExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
1059 REX |= isREXExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
1060 break;
1061 case X86II::MRMXr:
1062 case X86II::MRM0r: case X86II::MRM1r:
1063 case X86II::MRM2r: case X86II::MRM3r:
1064 case X86II::MRM4r: case X86II::MRM5r:
1065 case X86II::MRM6r: case X86II::MRM7r:
1066 REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
1067 break;
1068 }
1069 if (REX && UsesHighByteReg)
1070 report_fatal_error("Cannot encode high byte register in REX-prefixed instruction");
1071
1072 return REX;
1073}
1074
1075/// EmitSegmentOverridePrefix - Emit segment override opcode prefix as needed
1076void X86MCCodeEmitter::EmitSegmentOverridePrefix(unsigned &CurByte,
1077 unsigned SegOperand,
1078 const MCInst &MI,
1079 raw_ostream &OS) const {
1080 // Check for explicit segment override on memory operand.
1081 switch (MI.getOperand(SegOperand).getReg()) {
1082 default: llvm_unreachable("Unknown segment register!")::llvm::llvm_unreachable_internal("Unknown segment register!"
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1082)
;
1083 case 0: break;
1084 case X86::CS: EmitByte(0x2E, CurByte, OS); break;
1085 case X86::SS: EmitByte(0x36, CurByte, OS); break;
1086 case X86::DS: EmitByte(0x3E, CurByte, OS); break;
1087 case X86::ES: EmitByte(0x26, CurByte, OS); break;
1088 case X86::FS: EmitByte(0x64, CurByte, OS); break;
1089 case X86::GS: EmitByte(0x65, CurByte, OS); break;
1090 }
1091}
1092
1093/// Emit all instruction prefixes prior to the opcode.
1094///
1095/// MemOperand is the operand # of the start of a memory operand if present. If
1096/// Not present, it is -1.
1097///
1098/// Returns true if a REX prefix was used.
1099bool X86MCCodeEmitter::emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
1100 int MemOperand, const MCInst &MI,
1101 const MCInstrDesc &Desc,
1102 const MCSubtargetInfo &STI,
1103 raw_ostream &OS) const {
1104 bool Ret = false;
1105 // Emit the operand size opcode prefix as needed.
1106 if ((TSFlags & X86II::OpSizeMask) == (is16BitMode(STI) ? X86II::OpSize32
1107 : X86II::OpSize16))
1108 EmitByte(0x66, CurByte, OS);
1109
1110 // Emit the LOCK opcode prefix.
1111 if (TSFlags & X86II::LOCK)
1112 EmitByte(0xF0, CurByte, OS);
1113
1114 switch (TSFlags & X86II::OpPrefixMask) {
1115 case X86II::PD: // 66
1116 EmitByte(0x66, CurByte, OS);
1117 break;
1118 case X86II::XS: // F3
1119 EmitByte(0xF3, CurByte, OS);
1120 break;
1121 case X86II::XD: // F2
1122 EmitByte(0xF2, CurByte, OS);
1123 break;
1124 }
1125
1126 // Handle REX prefix.
1127 // FIXME: Can this come before F2 etc to simplify emission?
1128 if (is64BitMode(STI)) {
1129 if (uint8_t REX = DetermineREXPrefix(MI, TSFlags, MemOperand, Desc)) {
1130 EmitByte(0x40 | REX, CurByte, OS);
1131 Ret = true;
1132 }
1133 }
1134
1135 // 0x0F escape code must be emitted just before the opcode.
1136 switch (TSFlags & X86II::OpMapMask) {
1137 case X86II::TB: // Two-byte opcode map
1138 case X86II::T8: // 0F 38
1139 case X86II::TA: // 0F 3A
1140 EmitByte(0x0F, CurByte, OS);
1141 break;
1142 }
1143
1144 switch (TSFlags & X86II::OpMapMask) {
1145 case X86II::T8: // 0F 38
1146 EmitByte(0x38, CurByte, OS);
1147 break;
1148 case X86II::TA: // 0F 3A
1149 EmitByte(0x3A, CurByte, OS);
1150 break;
1151 }
1152 return Ret;
1153}
1154
1155void X86MCCodeEmitter::
1156encodeInstruction(const MCInst &MI, raw_ostream &OS,
1157 SmallVectorImpl<MCFixup> &Fixups,
1158 const MCSubtargetInfo &STI) const {
1159 unsigned Opcode = MI.getOpcode();
1160 const MCInstrDesc &Desc = MCII.get(Opcode);
1161 uint64_t TSFlags = Desc.TSFlags;
1162
1163 // Pseudo instructions don't get encoded.
1164 if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
1165 return;
1166
1167 unsigned NumOps = Desc.getNumOperands();
1168 unsigned CurOp = X86II::getOperandBias(Desc);
1169
1170 // Keep track of the current byte being emitted.
1171 unsigned CurByte = 0;
1172
1173 // Encoding type for this instruction.
1174 uint64_t Encoding = TSFlags & X86II::EncodingMask;
1175
1176 // It uses the VEX.VVVV field?
1177 bool HasVEX_4V = TSFlags & X86II::VEX_4V;
1178 bool HasVEX_I8Reg = (TSFlags & X86II::ImmMask) == X86II::Imm8Reg;
1179
1180 // It uses the EVEX.aaa field?
1181 bool HasEVEX_K = TSFlags & X86II::EVEX_K;
1182 bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
1183
1184 // Used if a register is encoded in 7:4 of immediate.
1185 unsigned I8RegNum = 0;
1186
1187 // Determine where the memory operand starts, if present.
1188 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
1189 if (MemoryOperand != -1) MemoryOperand += CurOp;
1190
1191 // Emit segment override opcode prefix as needed.
1192 if (MemoryOperand >= 0)
1193 EmitSegmentOverridePrefix(CurByte, MemoryOperand+X86::AddrSegmentReg,
1194 MI, OS);
1195
1196 // Emit the repeat opcode prefix as needed.
1197 if (TSFlags & X86II::REP)
1198 EmitByte(0xF3, CurByte, OS);
1199
1200 // Emit the address size opcode prefix as needed.
1201 bool need_address_override;
1202 uint64_t AdSize = TSFlags & X86II::AdSizeMask;
1203 if ((is16BitMode(STI) && AdSize == X86II::AdSize32) ||
1204 (is32BitMode(STI) && AdSize == X86II::AdSize16) ||
1205 (is64BitMode(STI) && AdSize == X86II::AdSize32)) {
1206 need_address_override = true;
1207 } else if (MemoryOperand < 0) {
1208 need_address_override = false;
1209 } else if (is64BitMode(STI)) {
1210 assert(!Is16BitMemOperand(MI, MemoryOperand, STI))((!Is16BitMemOperand(MI, MemoryOperand, STI)) ? static_cast<
void> (0) : __assert_fail ("!Is16BitMemOperand(MI, MemoryOperand, STI)"
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1210, __PRETTY_FUNCTION__))
;
1211 need_address_override = Is32BitMemOperand(MI, MemoryOperand);
1212 } else if (is32BitMode(STI)) {
1213 assert(!Is64BitMemOperand(MI, MemoryOperand))((!Is64BitMemOperand(MI, MemoryOperand)) ? static_cast<void
> (0) : __assert_fail ("!Is64BitMemOperand(MI, MemoryOperand)"
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1213, __PRETTY_FUNCTION__))
;
1214 need_address_override = Is16BitMemOperand(MI, MemoryOperand, STI);
1215 } else {
1216 assert(is16BitMode(STI))((is16BitMode(STI)) ? static_cast<void> (0) : __assert_fail
("is16BitMode(STI)", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1216, __PRETTY_FUNCTION__))
;
1217 assert(!Is64BitMemOperand(MI, MemoryOperand))((!Is64BitMemOperand(MI, MemoryOperand)) ? static_cast<void
> (0) : __assert_fail ("!Is64BitMemOperand(MI, MemoryOperand)"
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1217, __PRETTY_FUNCTION__))
;
1218 need_address_override = !Is16BitMemOperand(MI, MemoryOperand, STI);
1219 }
1220
1221 if (need_address_override)
1222 EmitByte(0x67, CurByte, OS);
1223
1224 bool Rex = false;
1225 if (Encoding == 0)
1226 Rex = emitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, STI, OS);
1227 else
1228 EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
1229
1230 uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
1231
1232 if (TSFlags & X86II::Has3DNow0F0FOpcode)
1233 BaseOpcode = 0x0F; // Weird 3DNow! encoding.
1234
1235 uint64_t Form = TSFlags & X86II::FormMask;
1236 switch (Form) {
1237 default: errs() << "FORM: " << Form << "\n";
1238 llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!")::llvm::llvm_unreachable_internal("Unknown FormMask value in X86MCCodeEmitter!"
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1238)
;
1239 case X86II::Pseudo:
1240 llvm_unreachable("Pseudo instruction shouldn't be emitted")::llvm::llvm_unreachable_internal("Pseudo instruction shouldn't be emitted"
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1240)
;
1241 case X86II::RawFrmDstSrc: {
1242 unsigned siReg = MI.getOperand(1).getReg();
1243 assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||((((siReg == X86::SI && MI.getOperand(0).getReg() == X86
::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg
() == X86::EDI) || (siReg == X86::RSI && MI.getOperand
(0).getReg() == X86::RDI)) && "SI and DI register sizes do not match"
) ? static_cast<void> (0) : __assert_fail ("((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) || (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) && \"SI and DI register sizes do not match\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1246, __PRETTY_FUNCTION__))
1244 (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||((((siReg == X86::SI && MI.getOperand(0).getReg() == X86
::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg
() == X86::EDI) || (siReg == X86::RSI && MI.getOperand
(0).getReg() == X86::RDI)) && "SI and DI register sizes do not match"
) ? static_cast<void> (0) : __assert_fail ("((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) || (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) && \"SI and DI register sizes do not match\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1246, __PRETTY_FUNCTION__))
1245 (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&((((siReg == X86::SI && MI.getOperand(0).getReg() == X86
::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg
() == X86::EDI) || (siReg == X86::RSI && MI.getOperand
(0).getReg() == X86::RDI)) && "SI and DI register sizes do not match"
) ? static_cast<void> (0) : __assert_fail ("((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) || (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) && \"SI and DI register sizes do not match\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1246, __PRETTY_FUNCTION__))
1246 "SI and DI register sizes do not match")((((siReg == X86::SI && MI.getOperand(0).getReg() == X86
::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg
() == X86::EDI) || (siReg == X86::RSI && MI.getOperand
(0).getReg() == X86::RDI)) && "SI and DI register sizes do not match"
) ? static_cast<void> (0) : __assert_fail ("((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) || (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) && \"SI and DI register sizes do not match\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1246, __PRETTY_FUNCTION__))
;
1247 // Emit segment override opcode prefix as needed (not for %ds).
1248 if (MI.getOperand(2).getReg() != X86::DS)
1249 EmitSegmentOverridePrefix(CurByte, 2, MI, OS);
1250 // Emit AdSize prefix as needed.
1251 if ((!is32BitMode(STI) && siReg == X86::ESI) ||
1252 (is32BitMode(STI) && siReg == X86::SI))
1253 EmitByte(0x67, CurByte, OS);
1254 CurOp += 3; // Consume operands.
1255 EmitByte(BaseOpcode, CurByte, OS);
1256 break;
1257 }
1258 case X86II::RawFrmSrc: {
1259 unsigned siReg = MI.getOperand(0).getReg();
1260 // Emit segment override opcode prefix as needed (not for %ds).
1261 if (MI.getOperand(1).getReg() != X86::DS)
1262 EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
1263 // Emit AdSize prefix as needed.
1264 if ((!is32BitMode(STI) && siReg == X86::ESI) ||
1265 (is32BitMode(STI) && siReg == X86::SI))
1266 EmitByte(0x67, CurByte, OS);
1267 CurOp += 2; // Consume operands.
1268 EmitByte(BaseOpcode, CurByte, OS);
1269 break;
1270 }
1271 case X86II::RawFrmDst: {
1272 unsigned siReg = MI.getOperand(0).getReg();
1273 // Emit AdSize prefix as needed.
1274 if ((!is32BitMode(STI) && siReg == X86::EDI) ||
1275 (is32BitMode(STI) && siReg == X86::DI))
1276 EmitByte(0x67, CurByte, OS);
1277 ++CurOp; // Consume operand.
1278 EmitByte(BaseOpcode, CurByte, OS);
1279 break;
1280 }
1281 case X86II::RawFrm:
1282 EmitByte(BaseOpcode, CurByte, OS);
1283 break;
1284 case X86II::RawFrmMemOffs:
1285 // Emit segment override opcode prefix as needed.
1286 EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
1287 EmitByte(BaseOpcode, CurByte, OS);
1288 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1289 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1290 CurByte, OS, Fixups);
1291 ++CurOp; // skip segment operand
1292 break;
1293 case X86II::RawFrmImm8:
1294 EmitByte(BaseOpcode, CurByte, OS);
1295 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1296 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1297 CurByte, OS, Fixups);
1298 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, CurByte,
1299 OS, Fixups);
1300 break;
1301 case X86II::RawFrmImm16:
1302 EmitByte(BaseOpcode, CurByte, OS);
1303 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1304 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1305 CurByte, OS, Fixups);
1306 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, CurByte,
1307 OS, Fixups);
1308 break;
1309
1310 case X86II::AddRegFrm:
1311 EmitByte(BaseOpcode + GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS);
1312 break;
1313
1314 case X86II::MRMDestReg: {
1315 EmitByte(BaseOpcode, CurByte, OS);
1316 unsigned SrcRegNum = CurOp + 1;
1317
1318 if (HasEVEX_K) // Skip writemask
1319 ++SrcRegNum;
1320
1321 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1322 ++SrcRegNum;
1323
1324 EmitRegModRMByte(MI.getOperand(CurOp),
1325 GetX86RegNum(MI.getOperand(SrcRegNum)), CurByte, OS);
1326 CurOp = SrcRegNum + 1;
1327 break;
1328 }
1329 case X86II::MRMDestMem: {
1330 EmitByte(BaseOpcode, CurByte, OS);
1331 unsigned SrcRegNum = CurOp + X86::AddrNumOperands;
1332
1333 if (HasEVEX_K) // Skip writemask
1334 ++SrcRegNum;
1335
1336 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1337 ++SrcRegNum;
1338
1339 emitMemModRMByte(MI, CurOp, GetX86RegNum(MI.getOperand(SrcRegNum)), TSFlags,
1340 Rex, CurByte, OS, Fixups, STI);
1341 CurOp = SrcRegNum + 1;
1342 break;
1343 }
1344 case X86II::MRMSrcReg: {
1345 EmitByte(BaseOpcode, CurByte, OS);
1346 unsigned SrcRegNum = CurOp + 1;
1347
1348 if (HasEVEX_K) // Skip writemask
1349 ++SrcRegNum;
1350
1351 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1352 ++SrcRegNum;
1353
1354 EmitRegModRMByte(MI.getOperand(SrcRegNum),
1355 GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
1356 CurOp = SrcRegNum + 1;
1357 if (HasVEX_I8Reg)
1358 I8RegNum = getX86RegEncoding(MI, CurOp++);
1359 // do not count the rounding control operand
1360 if (HasEVEX_RC)
1361 --NumOps;
1362 break;
1363 }
1364 case X86II::MRMSrcReg4VOp3: {
1365 EmitByte(BaseOpcode, CurByte, OS);
1366 unsigned SrcRegNum = CurOp + 1;
1367
1368 EmitRegModRMByte(MI.getOperand(SrcRegNum),
1369 GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
1370 CurOp = SrcRegNum + 1;
1371 ++CurOp; // Encoded in VEX.VVVV
1372 break;
1373 }
1374 case X86II::MRMSrcRegOp4: {
1375 EmitByte(BaseOpcode, CurByte, OS);
1376 unsigned SrcRegNum = CurOp + 1;
1377
1378 // Skip 1st src (which is encoded in VEX_VVVV)
1379 ++SrcRegNum;
1380
1381 // Capture 2nd src (which is encoded in Imm[7:4])
1382 assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg")((HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg"
) ? static_cast<void> (0) : __assert_fail ("HasVEX_I8Reg && \"MRMSrcRegOp4 should imply VEX_I8Reg\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1382, __PRETTY_FUNCTION__))
;
1383 I8RegNum = getX86RegEncoding(MI, SrcRegNum++);
1384
1385 EmitRegModRMByte(MI.getOperand(SrcRegNum),
1386 GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
1387 CurOp = SrcRegNum + 1;
1388 break;
1389 }
1390 case X86II::MRMSrcMem: {
1391 unsigned FirstMemOp = CurOp+1;
1392
1393 if (HasEVEX_K) // Skip writemask
1394 ++FirstMemOp;
1395
1396 if (HasVEX_4V)
1397 ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
1398
1399 EmitByte(BaseOpcode, CurByte, OS);
1400
1401 emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
1402 TSFlags, Rex, CurByte, OS, Fixups, STI);
1403 CurOp = FirstMemOp + X86::AddrNumOperands;
1404 if (HasVEX_I8Reg)
1405 I8RegNum = getX86RegEncoding(MI, CurOp++);
1406 break;
1407 }
1408 case X86II::MRMSrcMem4VOp3: {
1409 unsigned FirstMemOp = CurOp+1;
1410
1411 EmitByte(BaseOpcode, CurByte, OS);
1412
1413 emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
1414 TSFlags, Rex, CurByte, OS, Fixups, STI);
1415 CurOp = FirstMemOp + X86::AddrNumOperands;
1416 ++CurOp; // Encoded in VEX.VVVV.
1417 break;
1418 }
1419 case X86II::MRMSrcMemOp4: {
1420 unsigned FirstMemOp = CurOp+1;
1421
1422 ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
1423
1424 // Capture second register source (encoded in Imm[7:4])
1425 assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg")((HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg"
) ? static_cast<void> (0) : __assert_fail ("HasVEX_I8Reg && \"MRMSrcRegOp4 should imply VEX_I8Reg\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1425, __PRETTY_FUNCTION__))
;
1426 I8RegNum = getX86RegEncoding(MI, FirstMemOp++);
1427
1428 EmitByte(BaseOpcode, CurByte, OS);
1429
1430 emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
1431 TSFlags, Rex, CurByte, OS, Fixups, STI);
1432 CurOp = FirstMemOp + X86::AddrNumOperands;
1433 break;
1434 }
1435
1436 case X86II::MRMXr:
1437 case X86II::MRM0r: case X86II::MRM1r:
1438 case X86II::MRM2r: case X86II::MRM3r:
1439 case X86II::MRM4r: case X86II::MRM5r:
1440 case X86II::MRM6r: case X86II::MRM7r:
1441 if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
1442 ++CurOp;
1443 if (HasEVEX_K) // Skip writemask
1444 ++CurOp;
1445 EmitByte(BaseOpcode, CurByte, OS);
1446 EmitRegModRMByte(MI.getOperand(CurOp++),
1447 (Form == X86II::MRMXr) ? 0 : Form-X86II::MRM0r,
1448 CurByte, OS);
1449 break;
1450
1451 case X86II::MRMXm:
1452 case X86II::MRM0m: case X86II::MRM1m:
1453 case X86II::MRM2m: case X86II::MRM3m:
1454 case X86II::MRM4m: case X86II::MRM5m:
1455 case X86II::MRM6m: case X86II::MRM7m:
1456 if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
1457 ++CurOp;
1458 if (HasEVEX_K) // Skip writemask
1459 ++CurOp;
1460 EmitByte(BaseOpcode, CurByte, OS);
1461 emitMemModRMByte(MI, CurOp,
1462 (Form == X86II::MRMXm) ? 0 : Form - X86II::MRM0m, TSFlags,
1463 Rex, CurByte, OS, Fixups, STI);
1464 CurOp += X86::AddrNumOperands;
1465 break;
1466
1467 case X86II::MRM_C0: case X86II::MRM_C1: case X86II::MRM_C2:
1468 case X86II::MRM_C3: case X86II::MRM_C4: case X86II::MRM_C5:
1469 case X86II::MRM_C6: case X86II::MRM_C7: case X86II::MRM_C8:
1470 case X86II::MRM_C9: case X86II::MRM_CA: case X86II::MRM_CB:
1471 case X86II::MRM_CC: case X86II::MRM_CD: case X86II::MRM_CE:
1472 case X86II::MRM_CF: case X86II::MRM_D0: case X86II::MRM_D1:
1473 case X86II::MRM_D2: case X86II::MRM_D3: case X86II::MRM_D4:
1474 case X86II::MRM_D5: case X86II::MRM_D6: case X86II::MRM_D7:
1475 case X86II::MRM_D8: case X86II::MRM_D9: case X86II::MRM_DA:
1476 case X86II::MRM_DB: case X86II::MRM_DC: case X86II::MRM_DD:
1477 case X86II::MRM_DE: case X86II::MRM_DF: case X86II::MRM_E0:
1478 case X86II::MRM_E1: case X86II::MRM_E2: case X86II::MRM_E3:
1479 case X86II::MRM_E4: case X86II::MRM_E5: case X86II::MRM_E6:
1480 case X86II::MRM_E7: case X86II::MRM_E8: case X86II::MRM_E9:
1481 case X86II::MRM_EA: case X86II::MRM_EB: case X86II::MRM_EC:
1482 case X86II::MRM_ED: case X86II::MRM_EE: case X86II::MRM_EF:
1483 case X86II::MRM_F0: case X86II::MRM_F1: case X86II::MRM_F2:
1484 case X86II::MRM_F3: case X86II::MRM_F4: case X86II::MRM_F5:
1485 case X86II::MRM_F6: case X86II::MRM_F7: case X86II::MRM_F8:
1486 case X86II::MRM_F9: case X86II::MRM_FA: case X86II::MRM_FB:
1487 case X86II::MRM_FC: case X86II::MRM_FD: case X86II::MRM_FE:
1488 case X86II::MRM_FF:
1489 EmitByte(BaseOpcode, CurByte, OS);
1490 EmitByte(0xC0 + Form - X86II::MRM_C0, CurByte, OS);
1491 break;
1492 }
1493
1494 if (HasVEX_I8Reg) {
1495 // The last source register of a 4 operand instruction in AVX is encoded
1496 // in bits[7:4] of a immediate byte.
1497 assert(I8RegNum < 16 && "Register encoding out of range")((I8RegNum < 16 && "Register encoding out of range"
) ? static_cast<void> (0) : __assert_fail ("I8RegNum < 16 && \"Register encoding out of range\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1497, __PRETTY_FUNCTION__))
;
1498 I8RegNum <<= 4;
1499 if (CurOp != NumOps) {
1500 unsigned Val = MI.getOperand(CurOp++).getImm();
1501 assert(Val < 16 && "Immediate operand value out of range")((Val < 16 && "Immediate operand value out of range"
) ? static_cast<void> (0) : __assert_fail ("Val < 16 && \"Immediate operand value out of range\""
, "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1501, __PRETTY_FUNCTION__))
;
1502 I8RegNum |= Val;
1503 }
1504 EmitImmediate(MCOperand::createImm(I8RegNum), MI.getLoc(), 1, FK_Data_1,
1505 CurByte, OS, Fixups);
1506 } else {
1507 // If there is a remaining operand, it must be a trailing immediate. Emit it
1508 // according to the right size for the instruction. Some instructions
1509 // (SSE4a extrq and insertq) have two trailing immediates.
1510 while (CurOp != NumOps && NumOps - CurOp <= 2) {
1511 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1512 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1513 CurByte, OS, Fixups);
1514 }
1515 }
1516
1517 if (TSFlags & X86II::Has3DNow0F0FOpcode)
1518 EmitByte(X86II::getBaseOpcodeFor(TSFlags), CurByte, OS);
1519
1520#ifndef NDEBUG
1521 // FIXME: Verify.
1522 if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
1523 errs() << "Cannot encode all operands of: ";
1524 MI.dump();
1525 errs() << '\n';
1526 abort();
1527 }
1528#endif
1529}
1530
1531MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
1532 const MCRegisterInfo &MRI,
1533 MCContext &Ctx) {
1534 return new X86MCCodeEmitter(MCII, Ctx);
1535}