Bug Summary

File: lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
Warning: line 1061, column 5
Value stored to 'CurOp' is never read

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name X86MCCodeEmitter.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Target/X86/MCTargetDesc -I /build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc -I /build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86 -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn329677/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/backward -internal-isystem 
/usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Target/X86/MCTargetDesc -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-04-11-031539-24776-1 -x c++ /build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
//===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the X86MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//
13
14#include "MCTargetDesc/X86BaseInfo.h"
15#include "MCTargetDesc/X86FixupKinds.h"
16#include "MCTargetDesc/X86MCTargetDesc.h"
17#include "llvm/ADT/SmallVector.h"
18#include "llvm/MC/MCCodeEmitter.h"
19#include "llvm/MC/MCContext.h"
20#include "llvm/MC/MCExpr.h"
21#include "llvm/MC/MCFixup.h"
22#include "llvm/MC/MCInst.h"
23#include "llvm/MC/MCInstrDesc.h"
24#include "llvm/MC/MCInstrInfo.h"
25#include "llvm/MC/MCRegisterInfo.h"
26#include "llvm/MC/MCSubtargetInfo.h"
27#include "llvm/MC/MCSymbol.h"
28#include "llvm/Support/ErrorHandling.h"
29#include "llvm/Support/raw_ostream.h"
30#include <cassert>
31#include <cstdint>
32#include <cstdlib>
33
34using namespace llvm;
35
36#define DEBUG_TYPE"mccodeemitter" "mccodeemitter"
37
38namespace {
39
40class X86MCCodeEmitter : public MCCodeEmitter {
41 const MCInstrInfo &MCII;
42 MCContext &Ctx;
43
44public:
45 X86MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
46 : MCII(mcii), Ctx(ctx) {
47 }
48 X86MCCodeEmitter(const X86MCCodeEmitter &) = delete;
49 X86MCCodeEmitter &operator=(const X86MCCodeEmitter &) = delete;
50 ~X86MCCodeEmitter() override = default;
51
52 bool is64BitMode(const MCSubtargetInfo &STI) const {
53 return STI.getFeatureBits()[X86::Mode64Bit];
54 }
55
56 bool is32BitMode(const MCSubtargetInfo &STI) const {
57 return STI.getFeatureBits()[X86::Mode32Bit];
58 }
59
60 bool is16BitMode(const MCSubtargetInfo &STI) const {
61 return STI.getFeatureBits()[X86::Mode16Bit];
62 }
63
64 /// Is16BitMemOperand - Return true if the specified instruction has
65 /// a 16-bit memory operand. Op specifies the operand # of the memoperand.
66 bool Is16BitMemOperand(const MCInst &MI, unsigned Op,
67 const MCSubtargetInfo &STI) const {
68 const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
69 const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
70 const MCOperand &Disp = MI.getOperand(Op+X86::AddrDisp);
71
72 if (is16BitMode(STI) && BaseReg.getReg() == 0 &&
73 Disp.isImm() && Disp.getImm() < 0x10000)
74 return true;
75 if ((BaseReg.getReg() != 0 &&
76 X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg.getReg())) ||
77 (IndexReg.getReg() != 0 &&
78 X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg.getReg())))
79 return true;
80 return false;
81 }
82
83 unsigned GetX86RegNum(const MCOperand &MO) const {
84 return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
85 }
86
87 unsigned getX86RegEncoding(const MCInst &MI, unsigned OpNum) const {
88 return Ctx.getRegisterInfo()->getEncodingValue(
89 MI.getOperand(OpNum).getReg());
90 }
91
92 // Does this register require a bit to be set in REX prefix.
93 bool isREXExtendedReg(const MCInst &MI, unsigned OpNum) const {
94 return (getX86RegEncoding(MI, OpNum) >> 3) & 1;
95 }
96
97 void EmitByte(uint8_t C, unsigned &CurByte, raw_ostream &OS) const {
98 OS << (char)C;
99 ++CurByte;
100 }
101
102 void EmitConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
103 raw_ostream &OS) const {
104 // Output the constant in little endian byte order.
105 for (unsigned i = 0; i != Size; ++i) {
106 EmitByte(Val & 255, CurByte, OS);
107 Val >>= 8;
108 }
109 }
110
111 void EmitImmediate(const MCOperand &Disp, SMLoc Loc,
112 unsigned ImmSize, MCFixupKind FixupKind,
113 unsigned &CurByte, raw_ostream &OS,
114 SmallVectorImpl<MCFixup> &Fixups,
115 int ImmOffset = 0) const;
116
117 static uint8_t ModRMByte(unsigned Mod, unsigned RegOpcode, unsigned RM) {
118 assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!")(static_cast <bool> (Mod < 4 && RegOpcode <
8 && RM < 8 && "ModRM Fields out of range!"
) ? void (0) : __assert_fail ("Mod < 4 && RegOpcode < 8 && RM < 8 && \"ModRM Fields out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 118, __extension__ __PRETTY_FUNCTION__))
;
119 return RM | (RegOpcode << 3) | (Mod << 6);
120 }
121
122 void EmitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
123 unsigned &CurByte, raw_ostream &OS) const {
124 EmitByte(ModRMByte(3, RegOpcodeFld, GetX86RegNum(ModRMReg)), CurByte, OS);
125 }
126
127 void EmitSIBByte(unsigned SS, unsigned Index, unsigned Base,
128 unsigned &CurByte, raw_ostream &OS) const {
129 // SIB byte is in the same format as the ModRMByte.
130 EmitByte(ModRMByte(SS, Index, Base), CurByte, OS);
131 }
132
133 void emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField,
134 uint64_t TSFlags, bool Rex, unsigned &CurByte,
135 raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups,
136 const MCSubtargetInfo &STI) const;
137
138 void encodeInstruction(const MCInst &MI, raw_ostream &OS,
139 SmallVectorImpl<MCFixup> &Fixups,
140 const MCSubtargetInfo &STI) const override;
141
142 void EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
143 const MCInst &MI, const MCInstrDesc &Desc,
144 raw_ostream &OS) const;
145
146 void EmitSegmentOverridePrefix(unsigned &CurByte, unsigned SegOperand,
147 const MCInst &MI, raw_ostream &OS) const;
148
149 bool emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
150 const MCInst &MI, const MCInstrDesc &Desc,
151 const MCSubtargetInfo &STI, raw_ostream &OS) const;
152
153 uint8_t DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
154 int MemOperand, const MCInstrDesc &Desc) const;
155
156 bool isPCRel32Branch(const MCInst &MI) const;
157};
158
159} // end anonymous namespace
160
/// isDisp8 - Return true if this signed displacement fits in a 8-bit
/// sign-extended field.
static bool isDisp8(int Value) {
  // Equivalent to checking that truncation to int8_t round-trips.
  return Value >= -128 && Value <= 127;
}
166
167/// isCDisp8 - Return true if this signed displacement fits in a 8-bit
168/// compressed dispacement field.
169static bool isCDisp8(uint64_t TSFlags, int Value, int& CValue) {
170 assert(((TSFlags & X86II::EncodingMask) == X86II::EVEX) &&(static_cast <bool> (((TSFlags & X86II::EncodingMask
) == X86II::EVEX) && "Compressed 8-bit displacement is only valid for EVEX inst."
) ? void (0) : __assert_fail ("((TSFlags & X86II::EncodingMask) == X86II::EVEX) && \"Compressed 8-bit displacement is only valid for EVEX inst.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 171, __extension__ __PRETTY_FUNCTION__))
171 "Compressed 8-bit displacement is only valid for EVEX inst.")(static_cast <bool> (((TSFlags & X86II::EncodingMask
) == X86II::EVEX) && "Compressed 8-bit displacement is only valid for EVEX inst."
) ? void (0) : __assert_fail ("((TSFlags & X86II::EncodingMask) == X86II::EVEX) && \"Compressed 8-bit displacement is only valid for EVEX inst.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 171, __extension__ __PRETTY_FUNCTION__))
;
172
173 unsigned CD8_Scale =
174 (TSFlags & X86II::CD8_Scale_Mask) >> X86II::CD8_Scale_Shift;
175 if (CD8_Scale == 0) {
176 CValue = Value;
177 return isDisp8(Value);
178 }
179
180 unsigned Mask = CD8_Scale - 1;
181 assert((CD8_Scale & Mask) == 0 && "Invalid memory object size.")(static_cast <bool> ((CD8_Scale & Mask) == 0 &&
"Invalid memory object size.") ? void (0) : __assert_fail ("(CD8_Scale & Mask) == 0 && \"Invalid memory object size.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 181, __extension__ __PRETTY_FUNCTION__))
;
182 if (Value & Mask) // Unaligned offset
183 return false;
184 Value /= (int)CD8_Scale;
185 bool Ret = (Value == (int8_t)Value);
186
187 if (Ret)
188 CValue = Value;
189 return Ret;
190}
191
192/// getImmFixupKind - Return the appropriate fixup kind to use for an immediate
193/// in an instruction with the specified TSFlags.
194static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
195 unsigned Size = X86II::getSizeOfImm(TSFlags);
196 bool isPCRel = X86II::isImmPCRel(TSFlags);
197
198 if (X86II::isImmSigned(TSFlags)) {
199 switch (Size) {
200 default: llvm_unreachable("Unsupported signed fixup size!")::llvm::llvm_unreachable_internal("Unsupported signed fixup size!"
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 200)
;
201 case 4: return MCFixupKind(X86::reloc_signed_4byte);
202 }
203 }
204 return MCFixup::getKindForSize(Size, isPCRel);
205}
206
207/// Is32BitMemOperand - Return true if the specified instruction has
208/// a 32-bit memory operand. Op specifies the operand # of the memoperand.
209static bool Is32BitMemOperand(const MCInst &MI, unsigned Op) {
210 const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
211 const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
212
213 if ((BaseReg.getReg() != 0 &&
214 X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg.getReg())) ||
215 (IndexReg.getReg() != 0 &&
216 X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg())))
217 return true;
218 if (BaseReg.getReg() == X86::EIP) {
219 assert(IndexReg.getReg() == 0 && "Invalid eip-based address.")(static_cast <bool> (IndexReg.getReg() == 0 && "Invalid eip-based address."
) ? void (0) : __assert_fail ("IndexReg.getReg() == 0 && \"Invalid eip-based address.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 219, __extension__ __PRETTY_FUNCTION__))
;
220 return true;
221 }
222 return false;
223}
224
225/// Is64BitMemOperand - Return true if the specified instruction has
226/// a 64-bit memory operand. Op specifies the operand # of the memoperand.
227#ifndef NDEBUG
228static bool Is64BitMemOperand(const MCInst &MI, unsigned Op) {
229 const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
230 const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
231
232 if ((BaseReg.getReg() != 0 &&
233 X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg.getReg())) ||
234 (IndexReg.getReg() != 0 &&
235 X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg.getReg())))
236 return true;
237 return false;
238}
239#endif
240
241/// StartsWithGlobalOffsetTable - Check if this expression starts with
242/// _GLOBAL_OFFSET_TABLE_ and if it is of the form
243/// _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on ELF
244/// i386 as _GLOBAL_OFFSET_TABLE_ is magical. We check only simple case that
245/// are know to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the start
246/// of a binary expression.
247enum GlobalOffsetTableExprKind {
248 GOT_None,
249 GOT_Normal,
250 GOT_SymDiff
251};
252static GlobalOffsetTableExprKind
253StartsWithGlobalOffsetTable(const MCExpr *Expr) {
254 const MCExpr *RHS = nullptr;
255 if (Expr->getKind() == MCExpr::Binary) {
256 const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
257 Expr = BE->getLHS();
258 RHS = BE->getRHS();
259 }
260
261 if (Expr->getKind() != MCExpr::SymbolRef)
262 return GOT_None;
263
264 const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
265 const MCSymbol &S = Ref->getSymbol();
266 if (S.getName() != "_GLOBAL_OFFSET_TABLE_")
267 return GOT_None;
268 if (RHS && RHS->getKind() == MCExpr::SymbolRef)
269 return GOT_SymDiff;
270 return GOT_Normal;
271}
272
273static bool HasSecRelSymbolRef(const MCExpr *Expr) {
274 if (Expr->getKind() == MCExpr::SymbolRef) {
275 const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
276 return Ref->getKind() == MCSymbolRefExpr::VK_SECREL;
277 }
278 return false;
279}
280
281bool X86MCCodeEmitter::isPCRel32Branch(const MCInst &MI) const {
282 unsigned Opcode = MI.getOpcode();
283 const MCInstrDesc &Desc = MCII.get(Opcode);
284 if ((Opcode != X86::CALL64pcrel32 && Opcode != X86::JMP_4) ||
285 getImmFixupKind(Desc.TSFlags) != FK_PCRel_4)
286 return false;
287
288 unsigned CurOp = X86II::getOperandBias(Desc);
289 const MCOperand &Op = MI.getOperand(CurOp);
290 if (!Op.isExpr())
291 return false;
292
293 const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(Op.getExpr());
294 return Ref && Ref->getKind() == MCSymbolRefExpr::VK_None;
295}
296
297void X86MCCodeEmitter::
298EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
299 MCFixupKind FixupKind, unsigned &CurByte, raw_ostream &OS,
300 SmallVectorImpl<MCFixup> &Fixups, int ImmOffset) const {
301 const MCExpr *Expr = nullptr;
302 if (DispOp.isImm()) {
303 // If this is a simple integer displacement that doesn't require a
304 // relocation, emit it now.
305 if (FixupKind != FK_PCRel_1 &&
306 FixupKind != FK_PCRel_2 &&
307 FixupKind != FK_PCRel_4) {
308 EmitConstant(DispOp.getImm()+ImmOffset, Size, CurByte, OS);
309 return;
310 }
311 Expr = MCConstantExpr::create(DispOp.getImm(), Ctx);
312 } else {
313 Expr = DispOp.getExpr();
314 }
315
316 // If we have an immoffset, add it to the expression.
317 if ((FixupKind == FK_Data_4 ||
318 FixupKind == FK_Data_8 ||
319 FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
320 GlobalOffsetTableExprKind Kind = StartsWithGlobalOffsetTable(Expr);
321 if (Kind != GOT_None) {
322 assert(ImmOffset == 0)(static_cast <bool> (ImmOffset == 0) ? void (0) : __assert_fail
("ImmOffset == 0", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 322, __extension__ __PRETTY_FUNCTION__))
;
323
324 if (Size == 8) {
325 FixupKind = MCFixupKind(X86::reloc_global_offset_table8);
326 } else {
327 assert(Size == 4)(static_cast <bool> (Size == 4) ? void (0) : __assert_fail
("Size == 4", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 327, __extension__ __PRETTY_FUNCTION__))
;
328 FixupKind = MCFixupKind(X86::reloc_global_offset_table);
329 }
330
331 if (Kind == GOT_Normal)
332 ImmOffset = CurByte;
333 } else if (Expr->getKind() == MCExpr::SymbolRef) {
334 if (HasSecRelSymbolRef(Expr)) {
335 FixupKind = MCFixupKind(FK_SecRel_4);
336 }
337 } else if (Expr->getKind() == MCExpr::Binary) {
338 const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr*>(Expr);
339 if (HasSecRelSymbolRef(Bin->getLHS())
340 || HasSecRelSymbolRef(Bin->getRHS())) {
341 FixupKind = MCFixupKind(FK_SecRel_4);
342 }
343 }
344 }
345
346 // If the fixup is pc-relative, we need to bias the value to be relative to
347 // the start of the field, not the end of the field.
348 if (FixupKind == FK_PCRel_4 ||
349 FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
350 FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load) ||
351 FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax) ||
352 FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex) ||
353 FixupKind == MCFixupKind(X86::reloc_branch_4byte_pcrel))
354 ImmOffset -= 4;
355 if (FixupKind == FK_PCRel_2)
356 ImmOffset -= 2;
357 if (FixupKind == FK_PCRel_1)
358 ImmOffset -= 1;
359
360 if (ImmOffset)
361 Expr = MCBinaryExpr::createAdd(Expr, MCConstantExpr::create(ImmOffset, Ctx),
362 Ctx);
363
364 // Emit a symbolic constant as a fixup and 4 zeros.
365 Fixups.push_back(MCFixup::create(CurByte, Expr, FixupKind, Loc));
366 EmitConstant(0, Size, CurByte, OS);
367}
368
369void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
370 unsigned RegOpcodeField,
371 uint64_t TSFlags, bool Rex,
372 unsigned &CurByte, raw_ostream &OS,
373 SmallVectorImpl<MCFixup> &Fixups,
374 const MCSubtargetInfo &STI) const {
375 const MCOperand &Disp = MI.getOperand(Op+X86::AddrDisp);
376 const MCOperand &Base = MI.getOperand(Op+X86::AddrBaseReg);
377 const MCOperand &Scale = MI.getOperand(Op+X86::AddrScaleAmt);
378 const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
379 unsigned BaseReg = Base.getReg();
380 bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;
381
382 // Handle %rip relative addressing.
383 if (BaseReg == X86::RIP ||
384 BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode
385 assert(is64BitMode(STI) && "Rip-relative addressing requires 64-bit mode")(static_cast <bool> (is64BitMode(STI) && "Rip-relative addressing requires 64-bit mode"
) ? void (0) : __assert_fail ("is64BitMode(STI) && \"Rip-relative addressing requires 64-bit mode\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 385, __extension__ __PRETTY_FUNCTION__))
;
386 assert(IndexReg.getReg() == 0 && "Invalid rip-relative address")(static_cast <bool> (IndexReg.getReg() == 0 && "Invalid rip-relative address"
) ? void (0) : __assert_fail ("IndexReg.getReg() == 0 && \"Invalid rip-relative address\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 386, __extension__ __PRETTY_FUNCTION__))
;
387 EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
388
389 unsigned Opcode = MI.getOpcode();
390 // movq loads are handled with a special relocation form which allows the
391 // linker to eliminate some loads for GOT references which end up in the
392 // same linkage unit.
393 unsigned FixupKind = [=]() {
394 switch (Opcode) {
395 default:
396 return X86::reloc_riprel_4byte;
397 case X86::MOV64rm:
398 assert(Rex)(static_cast <bool> (Rex) ? void (0) : __assert_fail ("Rex"
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 398, __extension__ __PRETTY_FUNCTION__))
;
399 return X86::reloc_riprel_4byte_movq_load;
400 case X86::CALL64m:
401 case X86::JMP64m:
402 case X86::TEST64mr:
403 case X86::ADC64rm:
404 case X86::ADD64rm:
405 case X86::AND64rm:
406 case X86::CMP64rm:
407 case X86::OR64rm:
408 case X86::SBB64rm:
409 case X86::SUB64rm:
410 case X86::XOR64rm:
411 return Rex ? X86::reloc_riprel_4byte_relax_rex
412 : X86::reloc_riprel_4byte_relax;
413 }
414 }();
415
416 // rip-relative addressing is actually relative to the *next* instruction.
417 // Since an immediate can follow the mod/rm byte for an instruction, this
418 // means that we need to bias the displacement field of the instruction with
419 // the size of the immediate field. If we have this case, add it into the
420 // expression to emit.
421 // Note: rip-relative addressing using immediate displacement values should
422 // not be adjusted, assuming it was the user's intent.
423 int ImmSize = !Disp.isImm() && X86II::hasImm(TSFlags)
424 ? X86II::getSizeOfImm(TSFlags)
425 : 0;
426
427 EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind),
428 CurByte, OS, Fixups, -ImmSize);
429 return;
430 }
431
432 unsigned BaseRegNo = BaseReg ? GetX86RegNum(Base) : -1U;
433
434 // 16-bit addressing forms of the ModR/M byte have a different encoding for
435 // the R/M field and are far more limited in which registers can be used.
436 if (Is16BitMemOperand(MI, Op, STI)) {
437 if (BaseReg) {
438 // For 32-bit addressing, the row and column values in Table 2-2 are
439 // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with
440 // some special cases. And GetX86RegNum reflects that numbering.
441 // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A,
442 // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only
443 // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order,
444 // while values 0-3 indicate the allowed combinations (base+index) of
445 // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI.
446 //
447 // R16Table[] is a lookup from the normal RegNo, to the row values from
448 // Table 2-1 for 16-bit addressing modes. Where zero means disallowed.
449 static const unsigned R16Table[] = { 0, 0, 0, 7, 0, 6, 4, 5 };
450 unsigned RMfield = R16Table[BaseRegNo];
451
452 assert(RMfield && "invalid 16-bit base register")(static_cast <bool> (RMfield && "invalid 16-bit base register"
) ? void (0) : __assert_fail ("RMfield && \"invalid 16-bit base register\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 452, __extension__ __PRETTY_FUNCTION__))
;
453
454 if (IndexReg.getReg()) {
455 unsigned IndexReg16 = R16Table[GetX86RegNum(IndexReg)];
456
457 assert(IndexReg16 && "invalid 16-bit index register")(static_cast <bool> (IndexReg16 && "invalid 16-bit index register"
) ? void (0) : __assert_fail ("IndexReg16 && \"invalid 16-bit index register\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 457, __extension__ __PRETTY_FUNCTION__))
;
458 // We must have one of SI/DI (4,5), and one of BP/BX (6,7).
459 assert(((IndexReg16 ^ RMfield) & 2) &&(static_cast <bool> (((IndexReg16 ^ RMfield) & 2) &&
"invalid 16-bit base/index register combination") ? void (0)
: __assert_fail ("((IndexReg16 ^ RMfield) & 2) && \"invalid 16-bit base/index register combination\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 460, __extension__ __PRETTY_FUNCTION__))
460 "invalid 16-bit base/index register combination")(static_cast <bool> (((IndexReg16 ^ RMfield) & 2) &&
"invalid 16-bit base/index register combination") ? void (0)
: __assert_fail ("((IndexReg16 ^ RMfield) & 2) && \"invalid 16-bit base/index register combination\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 460, __extension__ __PRETTY_FUNCTION__))
;
461 assert(Scale.getImm() == 1 &&(static_cast <bool> (Scale.getImm() == 1 && "invalid scale for 16-bit memory reference"
) ? void (0) : __assert_fail ("Scale.getImm() == 1 && \"invalid scale for 16-bit memory reference\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 462, __extension__ __PRETTY_FUNCTION__))
462 "invalid scale for 16-bit memory reference")(static_cast <bool> (Scale.getImm() == 1 && "invalid scale for 16-bit memory reference"
) ? void (0) : __assert_fail ("Scale.getImm() == 1 && \"invalid scale for 16-bit memory reference\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 462, __extension__ __PRETTY_FUNCTION__))
;
463
464 // Allow base/index to appear in either order (although GAS doesn't).
465 if (IndexReg16 & 2)
466 RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1);
467 else
468 RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1);
469 }
470
471 if (Disp.isImm() && isDisp8(Disp.getImm())) {
472 if (Disp.getImm() == 0 && BaseRegNo != N86::EBP) {
473 // There is no displacement; just the register.
474 EmitByte(ModRMByte(0, RegOpcodeField, RMfield), CurByte, OS);
475 return;
476 }
477 // Use the [REG]+disp8 form, including for [BP] which cannot be encoded.
478 EmitByte(ModRMByte(1, RegOpcodeField, RMfield), CurByte, OS);
479 EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
480 return;
481 }
482 // This is the [REG]+disp16 case.
483 EmitByte(ModRMByte(2, RegOpcodeField, RMfield), CurByte, OS);
484 } else {
485 // There is no BaseReg; this is the plain [disp16] case.
486 EmitByte(ModRMByte(0, RegOpcodeField, 6), CurByte, OS);
487 }
488
489 // Emit 16-bit displacement for plain disp16 or [REG]+disp16 cases.
490 EmitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, CurByte, OS, Fixups);
491 return;
492 }
493
494 // Determine whether a SIB byte is needed.
495 // If no BaseReg, issue a RIP relative instruction only if the MCE can
496 // resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
497 // 2-7) and absolute references.
498
499 if (// The SIB byte must be used if there is an index register.
500 IndexReg.getReg() == 0 &&
501 // The SIB byte must be used if the base is ESP/RSP/R12, all of which
502 // encode to an R/M value of 4, which indicates that a SIB byte is
503 // present.
504 BaseRegNo != N86::ESP &&
505 // If there is no base register and we're in 64-bit mode, we need a SIB
506 // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
507 (!is64BitMode(STI) || BaseReg != 0)) {
508
509 if (BaseReg == 0) { // [disp32] in X86-32 mode
510 EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
511 EmitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, CurByte, OS, Fixups);
512 return;
513 }
514
515 // If the base is not EBP/ESP and there is no displacement, use simple
516 // indirect register encoding, this handles addresses like [EAX]. The
517 // encoding for [EBP] with no displacement means [disp32] so we handle it
518 // by emitting a displacement of 0 below.
519 if (Disp.isImm() && Disp.getImm() == 0 && BaseRegNo != N86::EBP) {
520 EmitByte(ModRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
521 return;
522 }
523
524 // Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
525 if (Disp.isImm()) {
526 if (!HasEVEX && isDisp8(Disp.getImm())) {
527 EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
528 EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
529 return;
530 }
531 // Try EVEX compressed 8-bit displacement first; if failed, fall back to
532 // 32-bit displacement.
533 int CDisp8 = 0;
534 if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
535 EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
536 EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
537 CDisp8 - Disp.getImm());
538 return;
539 }
540 }
541
542 // Otherwise, emit the most general non-SIB encoding: [REG+disp32]
543 EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
544 unsigned Opcode = MI.getOpcode();
545 unsigned FixupKind = Opcode == X86::MOV32rm ? X86::reloc_signed_4byte_relax
546 : X86::reloc_signed_4byte;
547 EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), CurByte, OS,
548 Fixups);
549 return;
550 }
551
552 // We need a SIB byte, so start by outputting the ModR/M byte first
553 assert(IndexReg.getReg() != X86::ESP &&(static_cast <bool> (IndexReg.getReg() != X86::ESP &&
IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!"
) ? void (0) : __assert_fail ("IndexReg.getReg() != X86::ESP && IndexReg.getReg() != X86::RSP && \"Cannot use ESP as index reg!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 554, __extension__ __PRETTY_FUNCTION__))
554 IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!")(static_cast <bool> (IndexReg.getReg() != X86::ESP &&
IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!"
) ? void (0) : __assert_fail ("IndexReg.getReg() != X86::ESP && IndexReg.getReg() != X86::RSP && \"Cannot use ESP as index reg!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 554, __extension__ __PRETTY_FUNCTION__))
;
555
556 bool ForceDisp32 = false;
557 bool ForceDisp8 = false;
558 int CDisp8 = 0;
559 int ImmOffset = 0;
560 if (BaseReg == 0) {
561 // If there is no base register, we emit the special case SIB byte with
562 // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
563 EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
564 ForceDisp32 = true;
565 } else if (!Disp.isImm()) {
566 // Emit the normal disp32 encoding.
567 EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
568 ForceDisp32 = true;
569 } else if (Disp.getImm() == 0 &&
570 // Base reg can't be anything that ends up with '5' as the base
571 // reg, it is the magic [*] nomenclature that indicates no base.
572 BaseRegNo != N86::EBP) {
573 // Emit no displacement ModR/M byte
574 EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
575 } else if (!HasEVEX && isDisp8(Disp.getImm())) {
576 // Emit the disp8 encoding.
577 EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
578 ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
579 } else if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
580 // Emit the disp8 encoding.
581 EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
582 ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
583 ImmOffset = CDisp8 - Disp.getImm();
584 } else {
585 // Emit the normal disp32 encoding.
586 EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
587 }
588
589 // Calculate what the SS field value should be...
590 static const unsigned SSTable[] = { ~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3 };
591 unsigned SS = SSTable[Scale.getImm()];
592
593 if (BaseReg == 0) {
594 // Handle the SIB byte for the case where there is no base, see Intel
595 // Manual 2A, table 2-7. The displacement has already been output.
596 unsigned IndexRegNo;
597 if (IndexReg.getReg())
598 IndexRegNo = GetX86RegNum(IndexReg);
599 else // Examples: [ESP+1*<noreg>+4] or [scaled idx]+disp32 (MOD=0,BASE=5)
600 IndexRegNo = 4;
601 EmitSIBByte(SS, IndexRegNo, 5, CurByte, OS);
602 } else {
603 unsigned IndexRegNo;
604 if (IndexReg.getReg())
605 IndexRegNo = GetX86RegNum(IndexReg);
606 else
607 IndexRegNo = 4; // For example [ESP+1*<noreg>+4]
608 EmitSIBByte(SS, IndexRegNo, GetX86RegNum(Base), CurByte, OS);
609 }
610
611 // Do we need to output a displacement?
612 if (ForceDisp8)
613 EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups, ImmOffset);
614 else if (ForceDisp32 || Disp.getImm() != 0)
615 EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
616 CurByte, OS, Fixups);
617}
618
619/// EmitVEXOpcodePrefix - AVX instructions are encoded using a opcode prefix
620/// called VEX.
621void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
622 int MemOperand, const MCInst &MI,
623 const MCInstrDesc &Desc,
624 raw_ostream &OS) const {
625 assert(!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX.")(static_cast <bool> (!(TSFlags & X86II::LOCK) &&
"Can't have LOCK VEX.") ? void (0) : __assert_fail ("!(TSFlags & X86II::LOCK) && \"Can't have LOCK VEX.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 625, __extension__ __PRETTY_FUNCTION__))
;
626
627 uint64_t Encoding = TSFlags & X86II::EncodingMask;
628 bool HasEVEX_K = TSFlags & X86II::EVEX_K;
629 bool HasVEX_4V = TSFlags & X86II::VEX_4V;
630 bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
631
632 // VEX_R: opcode externsion equivalent to REX.R in
633 // 1's complement (inverted) form
634 //
635 // 1: Same as REX_R=0 (must be 1 in 32-bit mode)
636 // 0: Same as REX_R=1 (64 bit mode only)
637 //
638 uint8_t VEX_R = 0x1;
639 uint8_t EVEX_R2 = 0x1;
640
641 // VEX_X: equivalent to REX.X, only used when a
642 // register is used for index in SIB Byte.
643 //
644 // 1: Same as REX.X=0 (must be 1 in 32-bit mode)
645 // 0: Same as REX.X=1 (64-bit mode only)
646 uint8_t VEX_X = 0x1;
647
648 // VEX_B:
649 //
650 // 1: Same as REX_B=0 (ignored in 32-bit mode)
651 // 0: Same as REX_B=1 (64 bit mode only)
652 //
653 uint8_t VEX_B = 0x1;
654
655 // VEX_W: opcode specific (use like REX.W, or used for
656 // opcode extension, or ignored, depending on the opcode byte)
657 uint8_t VEX_W = (TSFlags & X86II::VEX_W) ? 1 : 0;
658
659 // VEX_5M (VEX m-mmmmm field):
660 //
661 // 0b00000: Reserved for future use
662 // 0b00001: implied 0F leading opcode
663 // 0b00010: implied 0F 38 leading opcode bytes
664 // 0b00011: implied 0F 3A leading opcode bytes
665 // 0b00100-0b11111: Reserved for future use
666 // 0b01000: XOP map select - 08h instructions with imm byte
667 // 0b01001: XOP map select - 09h instructions with no imm byte
668 // 0b01010: XOP map select - 0Ah instructions with imm dword
669 uint8_t VEX_5M;
670 switch (TSFlags & X86II::OpMapMask) {
671 default: llvm_unreachable("Invalid prefix!")::llvm::llvm_unreachable_internal("Invalid prefix!", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 671)
;
672 case X86II::TB: VEX_5M = 0x1; break; // 0F
673 case X86II::T8: VEX_5M = 0x2; break; // 0F 38
674 case X86II::TA: VEX_5M = 0x3; break; // 0F 3A
675 case X86II::XOP8: VEX_5M = 0x8; break;
676 case X86II::XOP9: VEX_5M = 0x9; break;
677 case X86II::XOPA: VEX_5M = 0xA; break;
678 }
679
680 // VEX_4V (VEX vvvv field): a register specifier
681 // (in 1's complement form) or 1111 if unused.
682 uint8_t VEX_4V = 0xf;
683 uint8_t EVEX_V2 = 0x1;
684
685 // EVEX_L2/VEX_L (Vector Length):
686 //
687 // L2 L
688 // 0 0: scalar or 128-bit vector
689 // 0 1: 256-bit vector
690 // 1 0: 512-bit vector
691 //
692 uint8_t VEX_L = (TSFlags & X86II::VEX_L) ? 1 : 0;
693 uint8_t EVEX_L2 = (TSFlags & X86II::EVEX_L2) ? 1 : 0;
694
695 // VEX_PP: opcode extension providing equivalent
696 // functionality of a SIMD prefix
697 //
698 // 0b00: None
699 // 0b01: 66
700 // 0b10: F3
701 // 0b11: F2
702 //
703 uint8_t VEX_PP = 0;
704 switch (TSFlags & X86II::OpPrefixMask) {
705 case X86II::PD: VEX_PP = 0x1; break; // 66
706 case X86II::XS: VEX_PP = 0x2; break; // F3
707 case X86II::XD: VEX_PP = 0x3; break; // F2
708 }
709
710 // EVEX_U
711 uint8_t EVEX_U = 1; // Always '1' so far
712
713 // EVEX_z
714 uint8_t EVEX_z = (HasEVEX_K && (TSFlags & X86II::EVEX_Z)) ? 1 : 0;
715
716 // EVEX_b
717 uint8_t EVEX_b = (TSFlags & X86II::EVEX_B) ? 1 : 0;
718
719 // EVEX_rc
720 uint8_t EVEX_rc = 0;
721
722 // EVEX_aaa
723 uint8_t EVEX_aaa = 0;
724
725 bool EncodeRC = false;
726
727 // Classify VEX_B, VEX_4V, VEX_R, VEX_X
728 unsigned NumOps = Desc.getNumOperands();
729 unsigned CurOp = X86II::getOperandBias(Desc);
730
731 switch (TSFlags & X86II::FormMask) {
732 default: llvm_unreachable("Unexpected form in EmitVEXOpcodePrefix!")::llvm::llvm_unreachable_internal("Unexpected form in EmitVEXOpcodePrefix!"
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 732)
;
733 case X86II::RawFrm:
734 break;
735 case X86II::MRMDestMem: {
736 // MRMDestMem instructions forms:
737 // MemAddr, src1(ModR/M)
738 // MemAddr, src1(VEX_4V), src2(ModR/M)
739 // MemAddr, src1(ModR/M), imm8
740 //
741 unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
742 VEX_B = ~(BaseRegEnc >> 3) & 1;
743 unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
744 VEX_X = ~(IndexRegEnc >> 3) & 1;
745 if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV.
746 EVEX_V2 = ~(IndexRegEnc >> 4) & 1;
747
748 CurOp += X86::AddrNumOperands;
749
750 if (HasEVEX_K)
751 EVEX_aaa = getX86RegEncoding(MI, CurOp++);
752
753 if (HasVEX_4V) {
754 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
755 VEX_4V = ~VRegEnc & 0xf;
756 EVEX_V2 = ~(VRegEnc >> 4) & 1;
757 }
758
759 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
760 VEX_R = ~(RegEnc >> 3) & 1;
761 EVEX_R2 = ~(RegEnc >> 4) & 1;
762 break;
763 }
764 case X86II::MRMSrcMem: {
765 // MRMSrcMem instructions forms:
766 // src1(ModR/M), MemAddr
767 // src1(ModR/M), src2(VEX_4V), MemAddr
768 // src1(ModR/M), MemAddr, imm8
769 // src1(ModR/M), MemAddr, src2(Imm[7:4])
770 //
771 // FMA4:
772 // dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
773 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
774 VEX_R = ~(RegEnc >> 3) & 1;
775 EVEX_R2 = ~(RegEnc >> 4) & 1;
776
777 if (HasEVEX_K)
778 EVEX_aaa = getX86RegEncoding(MI, CurOp++);
779
780 if (HasVEX_4V) {
781 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
782 VEX_4V = ~VRegEnc & 0xf;
783 EVEX_V2 = ~(VRegEnc >> 4) & 1;
784 }
785
786 unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
787 VEX_B = ~(BaseRegEnc >> 3) & 1;
788 unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
789 VEX_X = ~(IndexRegEnc >> 3) & 1;
790 if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV.
791 EVEX_V2 = ~(IndexRegEnc >> 4) & 1;
792
793 break;
794 }
795 case X86II::MRMSrcMem4VOp3: {
796 // Instruction format for 4VOp3:
797 // src1(ModR/M), MemAddr, src3(VEX_4V)
798 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
799 VEX_R = ~(RegEnc >> 3) & 1;
800
801 unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
802 VEX_B = ~(BaseRegEnc >> 3) & 1;
803 unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
804 VEX_X = ~(IndexRegEnc >> 3) & 1;
805
806 VEX_4V = ~getX86RegEncoding(MI, CurOp + X86::AddrNumOperands) & 0xf;
807 break;
808 }
809 case X86II::MRMSrcMemOp4: {
810 // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
811 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
812 VEX_R = ~(RegEnc >> 3) & 1;
813
814 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
815 VEX_4V = ~VRegEnc & 0xf;
816
817 unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
818 VEX_B = ~(BaseRegEnc >> 3) & 1;
819 unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
820 VEX_X = ~(IndexRegEnc >> 3) & 1;
821 break;
822 }
823 case X86II::MRM0m: case X86II::MRM1m:
824 case X86II::MRM2m: case X86II::MRM3m:
825 case X86II::MRM4m: case X86II::MRM5m:
826 case X86II::MRM6m: case X86II::MRM7m: {
827 // MRM[0-9]m instructions forms:
828 // MemAddr
829 // src1(VEX_4V), MemAddr
830 if (HasVEX_4V) {
831 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
832 VEX_4V = ~VRegEnc & 0xf;
833 EVEX_V2 = ~(VRegEnc >> 4) & 1;
834 }
835
836 if (HasEVEX_K)
837 EVEX_aaa = getX86RegEncoding(MI, CurOp++);
838
839 unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
840 VEX_B = ~(BaseRegEnc >> 3) & 1;
841 unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
842 VEX_X = ~(IndexRegEnc >> 3) & 1;
843 break;
844 }
845 case X86II::MRMSrcReg: {
846 // MRMSrcReg instructions forms:
847 // dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
848 // dst(ModR/M), src1(ModR/M)
849 // dst(ModR/M), src1(ModR/M), imm8
850 //
851 // FMA4:
852 // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
853 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
854 VEX_R = ~(RegEnc >> 3) & 1;
855 EVEX_R2 = ~(RegEnc >> 4) & 1;
856
857 if (HasEVEX_K)
858 EVEX_aaa = getX86RegEncoding(MI, CurOp++);
859
860 if (HasVEX_4V) {
861 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
862 VEX_4V = ~VRegEnc & 0xf;
863 EVEX_V2 = ~(VRegEnc >> 4) & 1;
864 }
865
866 RegEnc = getX86RegEncoding(MI, CurOp++);
867 VEX_B = ~(RegEnc >> 3) & 1;
868 VEX_X = ~(RegEnc >> 4) & 1;
869
870 if (EVEX_b) {
871 if (HasEVEX_RC) {
872 unsigned RcOperand = NumOps-1;
873 assert(RcOperand >= CurOp)(static_cast <bool> (RcOperand >= CurOp) ? void (0) :
__assert_fail ("RcOperand >= CurOp", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 873, __extension__ __PRETTY_FUNCTION__))
;
874 EVEX_rc = MI.getOperand(RcOperand).getImm() & 0x3;
875 }
876 EncodeRC = true;
877 }
878 break;
879 }
880 case X86II::MRMSrcReg4VOp3: {
881 // Instruction format for 4VOp3:
882 // src1(ModR/M), src2(ModR/M), src3(VEX_4V)
883 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
884 VEX_R = ~(RegEnc >> 3) & 1;
885
886 RegEnc = getX86RegEncoding(MI, CurOp++);
887 VEX_B = ~(RegEnc >> 3) & 1;
888
889 VEX_4V = ~getX86RegEncoding(MI, CurOp++) & 0xf;
890 break;
891 }
892 case X86II::MRMSrcRegOp4: {
893 // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
894 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
895 VEX_R = ~(RegEnc >> 3) & 1;
896
897 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
898 VEX_4V = ~VRegEnc & 0xf;
899
900 // Skip second register source (encoded in Imm[7:4])
901 ++CurOp;
902
903 RegEnc = getX86RegEncoding(MI, CurOp++);
904 VEX_B = ~(RegEnc >> 3) & 1;
905 VEX_X = ~(RegEnc >> 4) & 1;
906 break;
907 }
908 case X86II::MRMDestReg: {
909 // MRMDestReg instructions forms:
910 // dst(ModR/M), src(ModR/M)
911 // dst(ModR/M), src(ModR/M), imm8
912 // dst(ModR/M), src1(VEX_4V), src2(ModR/M)
913 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
914 VEX_B = ~(RegEnc >> 3) & 1;
915 VEX_X = ~(RegEnc >> 4) & 1;
916
917 if (HasEVEX_K)
918 EVEX_aaa = getX86RegEncoding(MI, CurOp++);
919
920 if (HasVEX_4V) {
921 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
922 VEX_4V = ~VRegEnc & 0xf;
923 EVEX_V2 = ~(VRegEnc >> 4) & 1;
924 }
925
926 RegEnc = getX86RegEncoding(MI, CurOp++);
927 VEX_R = ~(RegEnc >> 3) & 1;
928 EVEX_R2 = ~(RegEnc >> 4) & 1;
929 if (EVEX_b)
930 EncodeRC = true;
931 break;
932 }
933 case X86II::MRM0r: case X86II::MRM1r:
934 case X86II::MRM2r: case X86II::MRM3r:
935 case X86II::MRM4r: case X86II::MRM5r:
936 case X86II::MRM6r: case X86II::MRM7r: {
937 // MRM0r-MRM7r instructions forms:
938 // dst(VEX_4V), src(ModR/M), imm8
939 if (HasVEX_4V) {
940 unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
941 VEX_4V = ~VRegEnc & 0xf;
942 EVEX_V2 = ~(VRegEnc >> 4) & 1;
943 }
944 if (HasEVEX_K)
945 EVEX_aaa = getX86RegEncoding(MI, CurOp++);
946
947 unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
948 VEX_B = ~(RegEnc >> 3) & 1;
949 VEX_X = ~(RegEnc >> 4) & 1;
950 break;
951 }
952 }
953
954 if (Encoding == X86II::VEX || Encoding == X86II::XOP) {
955 // VEX opcode prefix can have 2 or 3 bytes
956 //
957 // 3 bytes:
958 // +-----+ +--------------+ +-------------------+
959 // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
960 // +-----+ +--------------+ +-------------------+
961 // 2 bytes:
962 // +-----+ +-------------------+
963 // | C5h | | R | vvvv | L | pp |
964 // +-----+ +-------------------+
965 //
966 // XOP uses a similar prefix:
967 // +-----+ +--------------+ +-------------------+
968 // | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
969 // +-----+ +--------------+ +-------------------+
970 uint8_t LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
971
972 // Can we use the 2 byte VEX prefix?
973 if (Encoding == X86II::VEX && VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) {
974 EmitByte(0xC5, CurByte, OS);
975 EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
976 return;
977 }
978
979 // 3 byte VEX prefix
980 EmitByte(Encoding == X86II::XOP ? 0x8F : 0xC4, CurByte, OS);
981 EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
982 EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
983 } else {
984 assert(Encoding == X86II::EVEX && "unknown encoding!")(static_cast <bool> (Encoding == X86II::EVEX &&
"unknown encoding!") ? void (0) : __assert_fail ("Encoding == X86II::EVEX && \"unknown encoding!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 984, __extension__ __PRETTY_FUNCTION__))
;
985 // EVEX opcode prefix can have 4 bytes
986 //
987 // +-----+ +--------------+ +-------------------+ +------------------------+
988 // | 62h | | RXBR' | 00mm | | W | vvvv | U | pp | | z | L'L | b | v' | aaa |
989 // +-----+ +--------------+ +-------------------+ +------------------------+
990 assert((VEX_5M & 0x3) == VEX_5M(static_cast <bool> ((VEX_5M & 0x3) == VEX_5M &&
"More than 2 significant bits in VEX.m-mmmm fields for EVEX!"
) ? void (0) : __assert_fail ("(VEX_5M & 0x3) == VEX_5M && \"More than 2 significant bits in VEX.m-mmmm fields for EVEX!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 991, __extension__ __PRETTY_FUNCTION__))
991 && "More than 2 significant bits in VEX.m-mmmm fields for EVEX!")(static_cast <bool> ((VEX_5M & 0x3) == VEX_5M &&
"More than 2 significant bits in VEX.m-mmmm fields for EVEX!"
) ? void (0) : __assert_fail ("(VEX_5M & 0x3) == VEX_5M && \"More than 2 significant bits in VEX.m-mmmm fields for EVEX!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 991, __extension__ __PRETTY_FUNCTION__))
;
992
993 EmitByte(0x62, CurByte, OS);
994 EmitByte((VEX_R << 7) |
995 (VEX_X << 6) |
996 (VEX_B << 5) |
997 (EVEX_R2 << 4) |
998 VEX_5M, CurByte, OS);
999 EmitByte((VEX_W << 7) |
1000 (VEX_4V << 3) |
1001 (EVEX_U << 2) |
1002 VEX_PP, CurByte, OS);
1003 if (EncodeRC)
1004 EmitByte((EVEX_z << 7) |
1005 (EVEX_rc << 5) |
1006 (EVEX_b << 4) |
1007 (EVEX_V2 << 3) |
1008 EVEX_aaa, CurByte, OS);
1009 else
1010 EmitByte((EVEX_z << 7) |
1011 (EVEX_L2 << 6) |
1012 (VEX_L << 5) |
1013 (EVEX_b << 4) |
1014 (EVEX_V2 << 3) |
1015 EVEX_aaa, CurByte, OS);
1016 }
1017}
1018
1019/// DetermineREXPrefix - Determine if the MCInst has to be encoded with a X86-64
1020/// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
1021/// size, and 3) use of X86-64 extended registers.
1022uint8_t X86MCCodeEmitter::DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
1023 int MemOperand,
1024 const MCInstrDesc &Desc) const {
1025 uint8_t REX = 0;
1026 bool UsesHighByteReg = false;
1027
1028 if (TSFlags & X86II::REX_W)
1029 REX |= 1 << 3; // set REX.W
1030
1031 if (MI.getNumOperands() == 0) return REX;
1032
1033 unsigned NumOps = MI.getNumOperands();
1034 unsigned CurOp = X86II::getOperandBias(Desc);
1035
1036 // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
1037 for (unsigned i = CurOp; i != NumOps; ++i) {
1038 const MCOperand &MO = MI.getOperand(i);
1039 if (!MO.isReg()) continue;
1040 unsigned Reg = MO.getReg();
1041 if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
1042 UsesHighByteReg = true;
1043 if (X86II::isX86_64NonExtLowByteReg(Reg))
1044 // FIXME: The caller of DetermineREXPrefix slaps this prefix onto anything
1045 // that returns non-zero.
1046 REX |= 0x40; // REX fixed encoding prefix
1047 }
1048
1049 switch (TSFlags & X86II::FormMask) {
1050 case X86II::AddRegFrm:
1051 REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
1052 break;
1053 case X86II::MRMSrcReg:
1054 REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
1055 REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
1056 break;
1057 case X86II::MRMSrcMem: {
1058 REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
1059 REX |= isREXExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
1060 REX |= isREXExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
1061 CurOp += X86::AddrNumOperands;
Value stored to 'CurOp' is never read
1062 break;
1063 }
1064 case X86II::MRMDestReg:
1065 REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
1066 REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
1067 break;
1068 case X86II::MRMDestMem:
1069 REX |= isREXExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
1070 REX |= isREXExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
1071 CurOp += X86::AddrNumOperands;
1072 REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
1073 break;
1074 case X86II::MRMXm:
1075 case X86II::MRM0m: case X86II::MRM1m:
1076 case X86II::MRM2m: case X86II::MRM3m:
1077 case X86II::MRM4m: case X86II::MRM5m:
1078 case X86II::MRM6m: case X86II::MRM7m:
1079 REX |= isREXExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
1080 REX |= isREXExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
1081 break;
1082 case X86II::MRMXr:
1083 case X86II::MRM0r: case X86II::MRM1r:
1084 case X86II::MRM2r: case X86II::MRM3r:
1085 case X86II::MRM4r: case X86II::MRM5r:
1086 case X86II::MRM6r: case X86II::MRM7r:
1087 REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
1088 break;
1089 }
1090 if (REX && UsesHighByteReg)
1091 report_fatal_error("Cannot encode high byte register in REX-prefixed instruction");
1092
1093 return REX;
1094}
1095
1096/// EmitSegmentOverridePrefix - Emit segment override opcode prefix as needed
1097void X86MCCodeEmitter::EmitSegmentOverridePrefix(unsigned &CurByte,
1098 unsigned SegOperand,
1099 const MCInst &MI,
1100 raw_ostream &OS) const {
1101 // Check for explicit segment override on memory operand.
1102 switch (MI.getOperand(SegOperand).getReg()) {
1103 default: llvm_unreachable("Unknown segment register!")::llvm::llvm_unreachable_internal("Unknown segment register!"
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1103)
;
1104 case 0: break;
1105 case X86::CS: EmitByte(0x2E, CurByte, OS); break;
1106 case X86::SS: EmitByte(0x36, CurByte, OS); break;
1107 case X86::DS: EmitByte(0x3E, CurByte, OS); break;
1108 case X86::ES: EmitByte(0x26, CurByte, OS); break;
1109 case X86::FS: EmitByte(0x64, CurByte, OS); break;
1110 case X86::GS: EmitByte(0x65, CurByte, OS); break;
1111 }
1112}
1113
1114/// Emit all instruction prefixes prior to the opcode.
1115///
1116/// MemOperand is the operand # of the start of a memory operand if present. If
1117/// Not present, it is -1.
1118///
1119/// Returns true if a REX prefix was used.
1120bool X86MCCodeEmitter::emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
1121 int MemOperand, const MCInst &MI,
1122 const MCInstrDesc &Desc,
1123 const MCSubtargetInfo &STI,
1124 raw_ostream &OS) const {
1125 bool Ret = false;
1126 // Emit the operand size opcode prefix as needed.
1127 if ((TSFlags & X86II::OpSizeMask) == (is16BitMode(STI) ? X86II::OpSize32
1128 : X86II::OpSize16))
1129 EmitByte(0x66, CurByte, OS);
1130
1131 // Emit the LOCK opcode prefix.
1132 if (TSFlags & X86II::LOCK || MI.getFlags() & X86::IP_HAS_LOCK)
1133 EmitByte(0xF0, CurByte, OS);
1134
1135 // Emit the NOTRACK opcode prefix.
1136 if (TSFlags & X86II::NOTRACK || MI.getFlags() & X86::IP_HAS_NOTRACK)
1137 EmitByte(0x3E, CurByte, OS);
1138
1139 switch (TSFlags & X86II::OpPrefixMask) {
1140 case X86II::PD: // 66
1141 EmitByte(0x66, CurByte, OS);
1142 break;
1143 case X86II::XS: // F3
1144 EmitByte(0xF3, CurByte, OS);
1145 break;
1146 case X86II::XD: // F2
1147 EmitByte(0xF2, CurByte, OS);
1148 break;
1149 }
1150
1151 // Handle REX prefix.
1152 // FIXME: Can this come before F2 etc to simplify emission?
1153 if (is64BitMode(STI)) {
1154 if (uint8_t REX = DetermineREXPrefix(MI, TSFlags, MemOperand, Desc)) {
1155 EmitByte(0x40 | REX, CurByte, OS);
1156 Ret = true;
1157 }
1158 } else {
1159 assert(!(TSFlags & X86II::REX_W) && "REX.W requires 64bit mode.")(static_cast <bool> (!(TSFlags & X86II::REX_W) &&
"REX.W requires 64bit mode.") ? void (0) : __assert_fail ("!(TSFlags & X86II::REX_W) && \"REX.W requires 64bit mode.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1159, __extension__ __PRETTY_FUNCTION__))
;
1160 }
1161
1162 // 0x0F escape code must be emitted just before the opcode.
1163 switch (TSFlags & X86II::OpMapMask) {
1164 case X86II::TB: // Two-byte opcode map
1165 case X86II::T8: // 0F 38
1166 case X86II::TA: // 0F 3A
1167 case X86II::ThreeDNow: // 0F 0F, second 0F emitted by caller.
1168 EmitByte(0x0F, CurByte, OS);
1169 break;
1170 }
1171
1172 switch (TSFlags & X86II::OpMapMask) {
1173 case X86II::T8: // 0F 38
1174 EmitByte(0x38, CurByte, OS);
1175 break;
1176 case X86II::TA: // 0F 3A
1177 EmitByte(0x3A, CurByte, OS);
1178 break;
1179 }
1180 return Ret;
1181}
1182
1183void X86MCCodeEmitter::
1184encodeInstruction(const MCInst &MI, raw_ostream &OS,
1185 SmallVectorImpl<MCFixup> &Fixups,
1186 const MCSubtargetInfo &STI) const {
1187 unsigned Opcode = MI.getOpcode();
1188 const MCInstrDesc &Desc = MCII.get(Opcode);
1189 uint64_t TSFlags = Desc.TSFlags;
1190 unsigned Flags = MI.getFlags();
1191
1192 // Pseudo instructions don't get encoded.
1193 if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
1194 return;
1195
1196 unsigned NumOps = Desc.getNumOperands();
1197 unsigned CurOp = X86II::getOperandBias(Desc);
1198
1199 // Keep track of the current byte being emitted.
1200 unsigned CurByte = 0;
1201
1202 // Encoding type for this instruction.
1203 uint64_t Encoding = TSFlags & X86II::EncodingMask;
1204
1205 // It uses the VEX.VVVV field?
1206 bool HasVEX_4V = TSFlags & X86II::VEX_4V;
1207 bool HasVEX_I8Reg = (TSFlags & X86II::ImmMask) == X86II::Imm8Reg;
1208
1209 // It uses the EVEX.aaa field?
1210 bool HasEVEX_K = TSFlags & X86II::EVEX_K;
1211 bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
1212
1213 // Used if a register is encoded in 7:4 of immediate.
1214 unsigned I8RegNum = 0;
1215
1216 // Determine where the memory operand starts, if present.
1217 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
1218 if (MemoryOperand != -1) MemoryOperand += CurOp;
1219
1220 // Emit segment override opcode prefix as needed.
1221 if (MemoryOperand >= 0)
1222 EmitSegmentOverridePrefix(CurByte, MemoryOperand+X86::AddrSegmentReg,
1223 MI, OS);
1224
1225 // Emit the repeat opcode prefix as needed.
1226 if (TSFlags & X86II::REP || Flags & X86::IP_HAS_REPEAT)
1227 EmitByte(0xF3, CurByte, OS);
1228 if (Flags & X86::IP_HAS_REPEAT_NE)
1229 EmitByte(0xF2, CurByte, OS);
1230
1231 // Emit the address size opcode prefix as needed.
1232 bool need_address_override;
1233 uint64_t AdSize = TSFlags & X86II::AdSizeMask;
1234 if ((is16BitMode(STI) && AdSize == X86II::AdSize32) ||
1235 (is32BitMode(STI) && AdSize == X86II::AdSize16) ||
1236 (is64BitMode(STI) && AdSize == X86II::AdSize32)) {
1237 need_address_override = true;
1238 } else if (MemoryOperand < 0) {
1239 need_address_override = false;
1240 } else if (is64BitMode(STI)) {
1241 assert(!Is16BitMemOperand(MI, MemoryOperand, STI))(static_cast <bool> (!Is16BitMemOperand(MI, MemoryOperand
, STI)) ? void (0) : __assert_fail ("!Is16BitMemOperand(MI, MemoryOperand, STI)"
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1241, __extension__ __PRETTY_FUNCTION__))
;
1242 need_address_override = Is32BitMemOperand(MI, MemoryOperand);
1243 } else if (is32BitMode(STI)) {
1244 assert(!Is64BitMemOperand(MI, MemoryOperand))(static_cast <bool> (!Is64BitMemOperand(MI, MemoryOperand
)) ? void (0) : __assert_fail ("!Is64BitMemOperand(MI, MemoryOperand)"
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1244, __extension__ __PRETTY_FUNCTION__))
;
1245 need_address_override = Is16BitMemOperand(MI, MemoryOperand, STI);
1246 } else {
1247 assert(is16BitMode(STI))(static_cast <bool> (is16BitMode(STI)) ? void (0) : __assert_fail
("is16BitMode(STI)", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1247, __extension__ __PRETTY_FUNCTION__))
;
1248 assert(!Is64BitMemOperand(MI, MemoryOperand))(static_cast <bool> (!Is64BitMemOperand(MI, MemoryOperand
)) ? void (0) : __assert_fail ("!Is64BitMemOperand(MI, MemoryOperand)"
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1248, __extension__ __PRETTY_FUNCTION__))
;
1249 need_address_override = !Is16BitMemOperand(MI, MemoryOperand, STI);
1250 }
1251
1252 if (need_address_override)
1253 EmitByte(0x67, CurByte, OS);
1254
1255 bool Rex = false;
1256 if (Encoding == 0)
1257 Rex = emitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, STI, OS);
1258 else
1259 EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
1260
1261 uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
1262
1263 if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow)
1264 BaseOpcode = 0x0F; // Weird 3DNow! encoding.
1265
1266 uint64_t Form = TSFlags & X86II::FormMask;
1267 switch (Form) {
1268 default: errs() << "FORM: " << Form << "\n";
1269 llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!")::llvm::llvm_unreachable_internal("Unknown FormMask value in X86MCCodeEmitter!"
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1269)
;
1270 case X86II::Pseudo:
1271 llvm_unreachable("Pseudo instruction shouldn't be emitted")::llvm::llvm_unreachable_internal("Pseudo instruction shouldn't be emitted"
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1271)
;
1272 case X86II::RawFrmDstSrc: {
1273 unsigned siReg = MI.getOperand(1).getReg();
1274 assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||(static_cast <bool> (((siReg == X86::SI && MI.getOperand
(0).getReg() == X86::DI) || (siReg == X86::ESI && MI.
getOperand(0).getReg() == X86::EDI) || (siReg == X86::RSI &&
MI.getOperand(0).getReg() == X86::RDI)) && "SI and DI register sizes do not match"
) ? void (0) : __assert_fail ("((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) || (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) && \"SI and DI register sizes do not match\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1277, __extension__ __PRETTY_FUNCTION__))
1275 (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||(static_cast <bool> (((siReg == X86::SI && MI.getOperand
(0).getReg() == X86::DI) || (siReg == X86::ESI && MI.
getOperand(0).getReg() == X86::EDI) || (siReg == X86::RSI &&
MI.getOperand(0).getReg() == X86::RDI)) && "SI and DI register sizes do not match"
) ? void (0) : __assert_fail ("((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) || (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) && \"SI and DI register sizes do not match\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1277, __extension__ __PRETTY_FUNCTION__))
1276 (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&(static_cast <bool> (((siReg == X86::SI && MI.getOperand
(0).getReg() == X86::DI) || (siReg == X86::ESI && MI.
getOperand(0).getReg() == X86::EDI) || (siReg == X86::RSI &&
MI.getOperand(0).getReg() == X86::RDI)) && "SI and DI register sizes do not match"
) ? void (0) : __assert_fail ("((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) || (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) && \"SI and DI register sizes do not match\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1277, __extension__ __PRETTY_FUNCTION__))
1277 "SI and DI register sizes do not match")(static_cast <bool> (((siReg == X86::SI && MI.getOperand
(0).getReg() == X86::DI) || (siReg == X86::ESI && MI.
getOperand(0).getReg() == X86::EDI) || (siReg == X86::RSI &&
MI.getOperand(0).getReg() == X86::RDI)) && "SI and DI register sizes do not match"
) ? void (0) : __assert_fail ("((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) || (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) && \"SI and DI register sizes do not match\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1277, __extension__ __PRETTY_FUNCTION__))
;
1278 // Emit segment override opcode prefix as needed (not for %ds).
1279 if (MI.getOperand(2).getReg() != X86::DS)
1280 EmitSegmentOverridePrefix(CurByte, 2, MI, OS);
1281 // Emit AdSize prefix as needed.
1282 if ((!is32BitMode(STI) && siReg == X86::ESI) ||
1283 (is32BitMode(STI) && siReg == X86::SI))
1284 EmitByte(0x67, CurByte, OS);
1285 CurOp += 3; // Consume operands.
1286 EmitByte(BaseOpcode, CurByte, OS);
1287 break;
1288 }
1289 case X86II::RawFrmSrc: {
1290 unsigned siReg = MI.getOperand(0).getReg();
1291 // Emit segment override opcode prefix as needed (not for %ds).
1292 if (MI.getOperand(1).getReg() != X86::DS)
1293 EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
1294 // Emit AdSize prefix as needed.
1295 if ((!is32BitMode(STI) && siReg == X86::ESI) ||
1296 (is32BitMode(STI) && siReg == X86::SI))
1297 EmitByte(0x67, CurByte, OS);
1298 CurOp += 2; // Consume operands.
1299 EmitByte(BaseOpcode, CurByte, OS);
1300 break;
1301 }
1302 case X86II::RawFrmDst: {
1303 unsigned siReg = MI.getOperand(0).getReg();
1304 // Emit AdSize prefix as needed.
1305 if ((!is32BitMode(STI) && siReg == X86::EDI) ||
1306 (is32BitMode(STI) && siReg == X86::DI))
1307 EmitByte(0x67, CurByte, OS);
1308 ++CurOp; // Consume operand.
1309 EmitByte(BaseOpcode, CurByte, OS);
1310 break;
1311 }
1312 case X86II::RawFrm: {
1313 EmitByte(BaseOpcode, CurByte, OS);
1314
1315 if (!is64BitMode(STI) || !isPCRel32Branch(MI))
1316 break;
1317
1318 const MCOperand &Op = MI.getOperand(CurOp++);
1319 EmitImmediate(Op, MI.getLoc(), X86II::getSizeOfImm(TSFlags),
1320 MCFixupKind(X86::reloc_branch_4byte_pcrel), CurByte, OS,
1321 Fixups);
1322 break;
1323 }
1324 case X86II::RawFrmMemOffs:
1325 // Emit segment override opcode prefix as needed.
1326 EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
1327 EmitByte(BaseOpcode, CurByte, OS);
1328 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1329 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1330 CurByte, OS, Fixups);
1331 ++CurOp; // skip segment operand
1332 break;
1333 case X86II::RawFrmImm8:
1334 EmitByte(BaseOpcode, CurByte, OS);
1335 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1336 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1337 CurByte, OS, Fixups);
1338 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, CurByte,
1339 OS, Fixups);
1340 break;
1341 case X86II::RawFrmImm16:
1342 EmitByte(BaseOpcode, CurByte, OS);
1343 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1344 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1345 CurByte, OS, Fixups);
1346 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, CurByte,
1347 OS, Fixups);
1348 break;
1349
1350 case X86II::AddRegFrm:
1351 EmitByte(BaseOpcode + GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS);
1352 break;
1353
1354 case X86II::MRMDestReg: {
1355 EmitByte(BaseOpcode, CurByte, OS);
1356 unsigned SrcRegNum = CurOp + 1;
1357
1358 if (HasEVEX_K) // Skip writemask
1359 ++SrcRegNum;
1360
1361 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1362 ++SrcRegNum;
1363
1364 EmitRegModRMByte(MI.getOperand(CurOp),
1365 GetX86RegNum(MI.getOperand(SrcRegNum)), CurByte, OS);
1366 CurOp = SrcRegNum + 1;
1367 break;
1368 }
1369 case X86II::MRMDestMem: {
1370 EmitByte(BaseOpcode, CurByte, OS);
1371 unsigned SrcRegNum = CurOp + X86::AddrNumOperands;
1372
1373 if (HasEVEX_K) // Skip writemask
1374 ++SrcRegNum;
1375
1376 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1377 ++SrcRegNum;
1378
1379 emitMemModRMByte(MI, CurOp, GetX86RegNum(MI.getOperand(SrcRegNum)), TSFlags,
1380 Rex, CurByte, OS, Fixups, STI);
1381 CurOp = SrcRegNum + 1;
1382 break;
1383 }
1384 case X86II::MRMSrcReg: {
1385 EmitByte(BaseOpcode, CurByte, OS);
1386 unsigned SrcRegNum = CurOp + 1;
1387
1388 if (HasEVEX_K) // Skip writemask
1389 ++SrcRegNum;
1390
1391 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1392 ++SrcRegNum;
1393
1394 EmitRegModRMByte(MI.getOperand(SrcRegNum),
1395 GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
1396 CurOp = SrcRegNum + 1;
1397 if (HasVEX_I8Reg)
1398 I8RegNum = getX86RegEncoding(MI, CurOp++);
1399 // do not count the rounding control operand
1400 if (HasEVEX_RC)
1401 --NumOps;
1402 break;
1403 }
1404 case X86II::MRMSrcReg4VOp3: {
1405 EmitByte(BaseOpcode, CurByte, OS);
1406 unsigned SrcRegNum = CurOp + 1;
1407
1408 EmitRegModRMByte(MI.getOperand(SrcRegNum),
1409 GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
1410 CurOp = SrcRegNum + 1;
1411 ++CurOp; // Encoded in VEX.VVVV
1412 break;
1413 }
1414 case X86II::MRMSrcRegOp4: {
1415 EmitByte(BaseOpcode, CurByte, OS);
1416 unsigned SrcRegNum = CurOp + 1;
1417
1418 // Skip 1st src (which is encoded in VEX_VVVV)
1419 ++SrcRegNum;
1420
1421 // Capture 2nd src (which is encoded in Imm[7:4])
1422 assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg")(static_cast <bool> (HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg"
) ? void (0) : __assert_fail ("HasVEX_I8Reg && \"MRMSrcRegOp4 should imply VEX_I8Reg\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1422, __extension__ __PRETTY_FUNCTION__))
;
1423 I8RegNum = getX86RegEncoding(MI, SrcRegNum++);
1424
1425 EmitRegModRMByte(MI.getOperand(SrcRegNum),
1426 GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
1427 CurOp = SrcRegNum + 1;
1428 break;
1429 }
1430 case X86II::MRMSrcMem: {
1431 unsigned FirstMemOp = CurOp+1;
1432
1433 if (HasEVEX_K) // Skip writemask
1434 ++FirstMemOp;
1435
1436 if (HasVEX_4V)
1437 ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
1438
1439 EmitByte(BaseOpcode, CurByte, OS);
1440
1441 emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
1442 TSFlags, Rex, CurByte, OS, Fixups, STI);
1443 CurOp = FirstMemOp + X86::AddrNumOperands;
1444 if (HasVEX_I8Reg)
1445 I8RegNum = getX86RegEncoding(MI, CurOp++);
1446 break;
1447 }
1448 case X86II::MRMSrcMem4VOp3: {
1449 unsigned FirstMemOp = CurOp+1;
1450
1451 EmitByte(BaseOpcode, CurByte, OS);
1452
1453 emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
1454 TSFlags, Rex, CurByte, OS, Fixups, STI);
1455 CurOp = FirstMemOp + X86::AddrNumOperands;
1456 ++CurOp; // Encoded in VEX.VVVV.
1457 break;
1458 }
1459 case X86II::MRMSrcMemOp4: {
1460 unsigned FirstMemOp = CurOp+1;
1461
1462 ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
1463
1464 // Capture second register source (encoded in Imm[7:4])
1465 assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg")(static_cast <bool> (HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg"
) ? void (0) : __assert_fail ("HasVEX_I8Reg && \"MRMSrcRegOp4 should imply VEX_I8Reg\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1465, __extension__ __PRETTY_FUNCTION__))
;
1466 I8RegNum = getX86RegEncoding(MI, FirstMemOp++);
1467
1468 EmitByte(BaseOpcode, CurByte, OS);
1469
1470 emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
1471 TSFlags, Rex, CurByte, OS, Fixups, STI);
1472 CurOp = FirstMemOp + X86::AddrNumOperands;
1473 break;
1474 }
1475
1476 case X86II::MRMXr:
1477 case X86II::MRM0r: case X86II::MRM1r:
1478 case X86II::MRM2r: case X86II::MRM3r:
1479 case X86II::MRM4r: case X86II::MRM5r:
1480 case X86II::MRM6r: case X86II::MRM7r:
1481 if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
1482 ++CurOp;
1483 if (HasEVEX_K) // Skip writemask
1484 ++CurOp;
1485 EmitByte(BaseOpcode, CurByte, OS);
1486 EmitRegModRMByte(MI.getOperand(CurOp++),
1487 (Form == X86II::MRMXr) ? 0 : Form-X86II::MRM0r,
1488 CurByte, OS);
1489 break;
1490
1491 case X86II::MRMXm:
1492 case X86II::MRM0m: case X86II::MRM1m:
1493 case X86II::MRM2m: case X86II::MRM3m:
1494 case X86II::MRM4m: case X86II::MRM5m:
1495 case X86II::MRM6m: case X86II::MRM7m:
1496 if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
1497 ++CurOp;
1498 if (HasEVEX_K) // Skip writemask
1499 ++CurOp;
1500 EmitByte(BaseOpcode, CurByte, OS);
1501 emitMemModRMByte(MI, CurOp,
1502 (Form == X86II::MRMXm) ? 0 : Form - X86II::MRM0m, TSFlags,
1503 Rex, CurByte, OS, Fixups, STI);
1504 CurOp += X86::AddrNumOperands;
1505 break;
1506
1507 case X86II::MRM_C0: case X86II::MRM_C1: case X86II::MRM_C2:
1508 case X86II::MRM_C3: case X86II::MRM_C4: case X86II::MRM_C5:
1509 case X86II::MRM_C6: case X86II::MRM_C7: case X86II::MRM_C8:
1510 case X86II::MRM_C9: case X86II::MRM_CA: case X86II::MRM_CB:
1511 case X86II::MRM_CC: case X86II::MRM_CD: case X86II::MRM_CE:
1512 case X86II::MRM_CF: case X86II::MRM_D0: case X86II::MRM_D1:
1513 case X86II::MRM_D2: case X86II::MRM_D3: case X86II::MRM_D4:
1514 case X86II::MRM_D5: case X86II::MRM_D6: case X86II::MRM_D7:
1515 case X86II::MRM_D8: case X86II::MRM_D9: case X86II::MRM_DA:
1516 case X86II::MRM_DB: case X86II::MRM_DC: case X86II::MRM_DD:
1517 case X86II::MRM_DE: case X86II::MRM_DF: case X86II::MRM_E0:
1518 case X86II::MRM_E1: case X86II::MRM_E2: case X86II::MRM_E3:
1519 case X86II::MRM_E4: case X86II::MRM_E5: case X86II::MRM_E6:
1520 case X86II::MRM_E7: case X86II::MRM_E8: case X86II::MRM_E9:
1521 case X86II::MRM_EA: case X86II::MRM_EB: case X86II::MRM_EC:
1522 case X86II::MRM_ED: case X86II::MRM_EE: case X86II::MRM_EF:
1523 case X86II::MRM_F0: case X86II::MRM_F1: case X86II::MRM_F2:
1524 case X86II::MRM_F3: case X86II::MRM_F4: case X86II::MRM_F5:
1525 case X86II::MRM_F6: case X86II::MRM_F7: case X86II::MRM_F8:
1526 case X86II::MRM_F9: case X86II::MRM_FA: case X86II::MRM_FB:
1527 case X86II::MRM_FC: case X86II::MRM_FD: case X86II::MRM_FE:
1528 case X86II::MRM_FF:
1529 EmitByte(BaseOpcode, CurByte, OS);
1530 EmitByte(0xC0 + Form - X86II::MRM_C0, CurByte, OS);
1531 break;
1532 }
1533
1534 if (HasVEX_I8Reg) {
1535 // The last source register of a 4 operand instruction in AVX is encoded
1536 // in bits[7:4] of a immediate byte.
1537 assert(I8RegNum < 16 && "Register encoding out of range")(static_cast <bool> (I8RegNum < 16 && "Register encoding out of range"
) ? void (0) : __assert_fail ("I8RegNum < 16 && \"Register encoding out of range\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1537, __extension__ __PRETTY_FUNCTION__))
;
1538 I8RegNum <<= 4;
1539 if (CurOp != NumOps) {
1540 unsigned Val = MI.getOperand(CurOp++).getImm();
1541 assert(Val < 16 && "Immediate operand value out of range")(static_cast <bool> (Val < 16 && "Immediate operand value out of range"
) ? void (0) : __assert_fail ("Val < 16 && \"Immediate operand value out of range\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp"
, 1541, __extension__ __PRETTY_FUNCTION__))
;
1542 I8RegNum |= Val;
1543 }
1544 EmitImmediate(MCOperand::createImm(I8RegNum), MI.getLoc(), 1, FK_Data_1,
1545 CurByte, OS, Fixups);
1546 } else {
1547 // If there is a remaining operand, it must be a trailing immediate. Emit it
1548 // according to the right size for the instruction. Some instructions
1549 // (SSE4a extrq and insertq) have two trailing immediates.
1550 while (CurOp != NumOps && NumOps - CurOp <= 2) {
1551 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1552 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1553 CurByte, OS, Fixups);
1554 }
1555 }
1556
1557 if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow)
1558 EmitByte(X86II::getBaseOpcodeFor(TSFlags), CurByte, OS);
1559
1560#ifndef NDEBUG
1561 // FIXME: Verify.
1562 if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
1563 errs() << "Cannot encode all operands of: ";
1564 MI.dump();
1565 errs() << '\n';
1566 abort();
1567 }
1568#endif
1569}
1570
1571MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
1572 const MCRegisterInfo &MRI,
1573 MCContext &Ctx) {
1574 return new X86MCCodeEmitter(MCII, Ctx);
1575}