File: llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
Static-analyzer warning: line 1214, column 5 — value stored to 'CurOp' is never read
1 | //===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file implements the X86MCCodeEmitter class. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "MCTargetDesc/X86BaseInfo.h" |
14 | #include "MCTargetDesc/X86FixupKinds.h" |
15 | #include "MCTargetDesc/X86MCTargetDesc.h" |
16 | #include "llvm/ADT/SmallVector.h" |
17 | #include "llvm/MC/MCCodeEmitter.h" |
18 | #include "llvm/MC/MCContext.h" |
19 | #include "llvm/MC/MCExpr.h" |
20 | #include "llvm/MC/MCFixup.h" |
21 | #include "llvm/MC/MCInst.h" |
22 | #include "llvm/MC/MCInstrDesc.h" |
23 | #include "llvm/MC/MCInstrInfo.h" |
24 | #include "llvm/MC/MCRegisterInfo.h" |
25 | #include "llvm/MC/MCSubtargetInfo.h" |
26 | #include "llvm/MC/MCSymbol.h" |
27 | #include "llvm/Support/ErrorHandling.h" |
28 | #include "llvm/Support/raw_ostream.h" |
29 | #include <cassert> |
30 | #include <cstdint> |
31 | #include <cstdlib> |
32 | |
33 | using namespace llvm; |
34 | |
35 | #define DEBUG_TYPE"mccodeemitter" "mccodeemitter" |
36 | |
37 | namespace { |
38 | |
/// Machine-code emitter for the X86 target: encodes an MCInst into the raw
/// instruction bytes (prefixes, opcode, ModR/M, SIB, displacement, immediate),
/// recording MCFixups for operands that need relocations.
class X86MCCodeEmitter : public MCCodeEmitter {
  const MCInstrInfo &MCII; // Instruction descriptions (TSFlags, operand info).
  MCContext &Ctx;          // Used for register info and expression creation.

public:
  X86MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
      : MCII(mcii), Ctx(ctx) {}
  // Non-copyable: holds references to externally-owned MC state.
  X86MCCodeEmitter(const X86MCCodeEmitter &) = delete;
  X86MCCodeEmitter &operator=(const X86MCCodeEmitter &) = delete;
  ~X86MCCodeEmitter() override = default;

  /// Emit only the prefix bytes of \p MI (no opcode/operands).
  void emitPrefix(const MCInst &MI, raw_ostream &OS,
                  const MCSubtargetInfo &STI) const override;

  /// Encode the full instruction \p MI into \p OS, appending any required
  /// relocations to \p Fixups.
  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

private:
  /// \returns the low 3 bits of \p MO's register encoding — the value that
  /// goes in a ModR/M or SIB register field (high bits go in REX/VEX/EVEX).
  unsigned getX86RegNum(const MCOperand &MO) const {
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
  }

  /// \returns the full hardware encoding of the register in operand \p OpNum.
  unsigned getX86RegEncoding(const MCInst &MI, unsigned OpNum) const {
    return Ctx.getRegisterInfo()->getEncodingValue(
        MI.getOperand(OpNum).getReg());
  }

  /// \param MI a single low-level machine instruction.
  /// \param OpNum the operand #.
  /// \returns true if the OpNumth operand of MI require a bit to be set in
  /// REX prefix.
  bool isREXExtendedReg(const MCInst &MI, unsigned OpNum) const {
    // Bit 3 of the encoding selects the "extended" register bank (R8-R15 etc.)
    return (getX86RegEncoding(MI, OpNum) >> 3) & 1;
  }

  /// Emit one byte and advance the running byte counter \p CurByte.
  void emitByte(uint8_t C, unsigned &CurByte, raw_ostream &OS) const {
    OS << (char)C;
    ++CurByte;
  }

  /// Emit \p Size bytes of \p Val.
  void emitConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
                    raw_ostream &OS) const {
    // Output the constant in little endian byte order.
    for (unsigned i = 0; i != Size; ++i) {
      emitByte(Val & 255, CurByte, OS);
      Val >>= 8;
    }
  }

  /// Emit a displacement or immediate operand, creating a fixup when the
  /// value is symbolic or PC-relative. \p ImmOffset is added to the value.
  void emitImmediate(const MCOperand &Disp, SMLoc Loc, unsigned ImmSize,
                     MCFixupKind FixupKind, unsigned &CurByte, raw_ostream &OS,
                     SmallVectorImpl<MCFixup> &Fixups, int ImmOffset = 0) const;

  /// Pack the three ModR/M fields into one byte: mod[7:6] reg[5:3] r/m[2:0].
  static uint8_t modRMByte(unsigned Mod, unsigned RegOpcode, unsigned RM) {
    assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
    return RM | (RegOpcode << 3) | (Mod << 6);
  }

  /// Emit a register-direct ModR/M byte (mod == 3).
  void emitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
                        unsigned &CurByte, raw_ostream &OS) const {
    emitByte(modRMByte(3, RegOpcodeFld, getX86RegNum(ModRMReg)), CurByte, OS);
  }

  /// Emit a SIB byte: scale[7:6] index[5:3] base[2:0].
  void emitSIBByte(unsigned SS, unsigned Index, unsigned Base,
                   unsigned &CurByte, raw_ostream &OS) const {
    // SIB byte is in the same format as the modRMByte.
    emitByte(modRMByte(SS, Index, Base), CurByte, OS);
  }

  /// Emit the ModR/M byte (and SIB/displacement as needed) for the memory
  /// operand beginning at operand index \p Op.
  void emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField,
                        uint64_t TSFlags, bool Rex, unsigned &CurByte,
                        raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups,
                        const MCSubtargetInfo &STI) const;

  /// Emit all prefixes for \p MI; advances \p CurOp past any skipped operands
  /// and reports via \p Rex whether a REX prefix was emitted.
  void emitPrefixImpl(uint64_t TSFlags, unsigned &CurOp, unsigned &CurByte,
                      bool &Rex, const MCInst &MI, const MCInstrDesc &Desc,
                      const MCSubtargetInfo &STI, raw_ostream &OS) const;

  /// Emit a VEX/EVEX prefix for \p MI.
  void emitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
                           const MCInst &MI, const MCInstrDesc &Desc,
                           raw_ostream &OS) const;

  /// Emit a segment-override prefix byte if operand \p SegOperand names a
  /// non-default segment register.
  void emitSegmentOverridePrefix(unsigned &CurByte, unsigned SegOperand,
                                 const MCInst &MI, raw_ostream &OS) const;

  /// Emit legacy (non-VEX) prefixes, including REX. \returns whether a REX
  /// prefix was emitted.
  bool emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
                        const MCInst &MI, const MCInstrDesc &Desc,
                        const MCSubtargetInfo &STI, raw_ostream &OS) const;

  /// Compute the REX prefix byte value required by \p MI (0 if none needed).
  uint8_t determineREXPrefix(const MCInst &MI, uint64_t TSFlags, int MemOperand,
                             const MCInstrDesc &Desc) const;
};
132 | |
133 | } // end anonymous namespace |
134 | |
/// \returns true if this signed displacement fits in a 8-bit sign-extended
/// field.
static bool isDisp8(int Value) { return Value >= -128 && Value <= 127; }
138 | |
139 | /// \returns true if this signed displacement fits in a 8-bit compressed |
140 | /// dispacement field. |
141 | static bool isCDisp8(uint64_t TSFlags, int Value, int &CValue) { |
142 | assert(((TSFlags & X86II::EncodingMask) == X86II::EVEX) &&((((TSFlags & X86II::EncodingMask) == X86II::EVEX) && "Compressed 8-bit displacement is only valid for EVEX inst." ) ? static_cast<void> (0) : __assert_fail ("((TSFlags & X86II::EncodingMask) == X86II::EVEX) && \"Compressed 8-bit displacement is only valid for EVEX inst.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 143, __PRETTY_FUNCTION__)) |
143 | "Compressed 8-bit displacement is only valid for EVEX inst.")((((TSFlags & X86II::EncodingMask) == X86II::EVEX) && "Compressed 8-bit displacement is only valid for EVEX inst." ) ? static_cast<void> (0) : __assert_fail ("((TSFlags & X86II::EncodingMask) == X86II::EVEX) && \"Compressed 8-bit displacement is only valid for EVEX inst.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 143, __PRETTY_FUNCTION__)); |
144 | |
145 | unsigned CD8_Scale = |
146 | (TSFlags & X86II::CD8_Scale_Mask) >> X86II::CD8_Scale_Shift; |
147 | if (CD8_Scale == 0) { |
148 | CValue = Value; |
149 | return isDisp8(Value); |
150 | } |
151 | |
152 | unsigned Mask = CD8_Scale - 1; |
153 | assert((CD8_Scale & Mask) == 0 && "Invalid memory object size.")(((CD8_Scale & Mask) == 0 && "Invalid memory object size." ) ? static_cast<void> (0) : __assert_fail ("(CD8_Scale & Mask) == 0 && \"Invalid memory object size.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 153, __PRETTY_FUNCTION__)); |
154 | if (Value & Mask) // Unaligned offset |
155 | return false; |
156 | Value /= (int)CD8_Scale; |
157 | bool Ret = (Value == (int8_t)Value); |
158 | |
159 | if (Ret) |
160 | CValue = Value; |
161 | return Ret; |
162 | } |
163 | |
164 | /// \returns the appropriate fixup kind to use for an immediate in an |
165 | /// instruction with the specified TSFlags. |
166 | static MCFixupKind getImmFixupKind(uint64_t TSFlags) { |
167 | unsigned Size = X86II::getSizeOfImm(TSFlags); |
168 | bool isPCRel = X86II::isImmPCRel(TSFlags); |
169 | |
170 | if (X86II::isImmSigned(TSFlags)) { |
171 | switch (Size) { |
172 | default: |
173 | llvm_unreachable("Unsupported signed fixup size!")::llvm::llvm_unreachable_internal("Unsupported signed fixup size!" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 173); |
174 | case 4: |
175 | return MCFixupKind(X86::reloc_signed_4byte); |
176 | } |
177 | } |
178 | return MCFixup::getKindForSize(Size, isPCRel); |
179 | } |
180 | |
181 | /// \param Op operand # of the memory operand. |
182 | /// |
183 | /// \returns true if the specified instruction has a 16-bit memory operand. |
184 | static bool is16BitMemOperand(const MCInst &MI, unsigned Op, |
185 | const MCSubtargetInfo &STI) { |
186 | const MCOperand &BaseReg = MI.getOperand(Op + X86::AddrBaseReg); |
187 | const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg); |
188 | const MCOperand &Disp = MI.getOperand(Op + X86::AddrDisp); |
189 | |
190 | if (STI.hasFeature(X86::Mode16Bit) && BaseReg.getReg() == 0 && Disp.isImm() && |
191 | Disp.getImm() < 0x10000) |
192 | return true; |
193 | if ((BaseReg.getReg() != 0 && |
194 | X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg.getReg())) || |
195 | (IndexReg.getReg() != 0 && |
196 | X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg.getReg()))) |
197 | return true; |
198 | return false; |
199 | } |
200 | |
201 | /// \param Op operand # of the memory operand. |
202 | /// |
203 | /// \returns true if the specified instruction has a 32-bit memory operand. |
204 | static bool is32BitMemOperand(const MCInst &MI, unsigned Op) { |
205 | const MCOperand &BaseReg = MI.getOperand(Op + X86::AddrBaseReg); |
206 | const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg); |
207 | |
208 | if ((BaseReg.getReg() != 0 && |
209 | X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg.getReg())) || |
210 | (IndexReg.getReg() != 0 && |
211 | X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg()))) |
212 | return true; |
213 | if (BaseReg.getReg() == X86::EIP) { |
214 | assert(IndexReg.getReg() == 0 && "Invalid eip-based address.")((IndexReg.getReg() == 0 && "Invalid eip-based address." ) ? static_cast<void> (0) : __assert_fail ("IndexReg.getReg() == 0 && \"Invalid eip-based address.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 214, __PRETTY_FUNCTION__)); |
215 | return true; |
216 | } |
217 | if (IndexReg.getReg() == X86::EIZ) |
218 | return true; |
219 | return false; |
220 | } |
221 | |
222 | /// \param Op operand # of the memory operand. |
223 | /// |
224 | /// \returns true if the specified instruction has a 64-bit memory operand. |
225 | #ifndef NDEBUG |
226 | static bool is64BitMemOperand(const MCInst &MI, unsigned Op) { |
227 | const MCOperand &BaseReg = MI.getOperand(Op + X86::AddrBaseReg); |
228 | const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg); |
229 | |
230 | if ((BaseReg.getReg() != 0 && |
231 | X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg.getReg())) || |
232 | (IndexReg.getReg() != 0 && |
233 | X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg.getReg()))) |
234 | return true; |
235 | return false; |
236 | } |
237 | #endif |
238 | |
239 | enum GlobalOffsetTableExprKind { GOT_None, GOT_Normal, GOT_SymDiff }; |
240 | |
241 | /// Check if this expression starts with _GLOBAL_OFFSET_TABLE_ and if it is |
242 | /// of the form _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on |
243 | /// ELF i386 as _GLOBAL_OFFSET_TABLE_ is magical. We check only simple case that |
244 | /// are know to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the start of a |
245 | /// binary expression. |
246 | static GlobalOffsetTableExprKind |
247 | startsWithGlobalOffsetTable(const MCExpr *Expr) { |
248 | const MCExpr *RHS = nullptr; |
249 | if (Expr->getKind() == MCExpr::Binary) { |
250 | const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr); |
251 | Expr = BE->getLHS(); |
252 | RHS = BE->getRHS(); |
253 | } |
254 | |
255 | if (Expr->getKind() != MCExpr::SymbolRef) |
256 | return GOT_None; |
257 | |
258 | const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr); |
259 | const MCSymbol &S = Ref->getSymbol(); |
260 | if (S.getName() != "_GLOBAL_OFFSET_TABLE_") |
261 | return GOT_None; |
262 | if (RHS && RHS->getKind() == MCExpr::SymbolRef) |
263 | return GOT_SymDiff; |
264 | return GOT_Normal; |
265 | } |
266 | |
267 | static bool hasSecRelSymbolRef(const MCExpr *Expr) { |
268 | if (Expr->getKind() == MCExpr::SymbolRef) { |
269 | const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr); |
270 | return Ref->getKind() == MCSymbolRefExpr::VK_SECREL; |
271 | } |
272 | return false; |
273 | } |
274 | |
275 | static bool isPCRel32Branch(const MCInst &MI, const MCInstrInfo &MCII) { |
276 | unsigned Opcode = MI.getOpcode(); |
277 | const MCInstrDesc &Desc = MCII.get(Opcode); |
278 | if ((Opcode != X86::CALL64pcrel32 && Opcode != X86::JMP_4 && |
279 | Opcode != X86::JCC_4) || |
280 | getImmFixupKind(Desc.TSFlags) != FK_PCRel_4) |
281 | return false; |
282 | |
283 | unsigned CurOp = X86II::getOperandBias(Desc); |
284 | const MCOperand &Op = MI.getOperand(CurOp); |
285 | if (!Op.isExpr()) |
286 | return false; |
287 | |
288 | const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(Op.getExpr()); |
289 | return Ref && Ref->getKind() == MCSymbolRefExpr::VK_None; |
290 | } |
291 | |
/// Emit a displacement or immediate operand of \p Size bytes.
///
/// Immediate integers that need no relocation are emitted directly; symbolic
/// or PC-relative values are emitted as \p Size zero bytes plus an MCFixup of
/// kind \p FixupKind at the current offset. \p ImmOffset is an extra value to
/// fold into the emitted constant/expression (e.g. compressed-disp8 deltas or
/// PC-relative bias).
void X86MCCodeEmitter::emitImmediate(const MCOperand &DispOp, SMLoc Loc,
                                     unsigned Size, MCFixupKind FixupKind,
                                     unsigned &CurByte, raw_ostream &OS,
                                     SmallVectorImpl<MCFixup> &Fixups,
                                     int ImmOffset) const {
  const MCExpr *Expr = nullptr;
  if (DispOp.isImm()) {
    // If this is a simple integer displacement that doesn't require a
    // relocation, emit it now.
    if (FixupKind != FK_PCRel_1 && FixupKind != FK_PCRel_2 &&
        FixupKind != FK_PCRel_4) {
      emitConstant(DispOp.getImm() + ImmOffset, Size, CurByte, OS);
      return;
    }
    // PC-relative immediates still need a fixup; wrap the integer in a
    // constant expression so one can be created below.
    Expr = MCConstantExpr::create(DispOp.getImm(), Ctx);
  } else {
    Expr = DispOp.getExpr();
  }

  // If we have an immoffset, add it to the expression.
  if ((FixupKind == FK_Data_4 || FixupKind == FK_Data_8 ||
       FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
    // _GLOBAL_OFFSET_TABLE_ references get special GOT relocation kinds;
    // see startsWithGlobalOffsetTable for the forms recognized.
    GlobalOffsetTableExprKind Kind = startsWithGlobalOffsetTable(Expr);
    if (Kind != GOT_None) {
      assert(ImmOffset == 0);

      if (Size == 8) {
        FixupKind = MCFixupKind(X86::reloc_global_offset_table8);
      } else {
        assert(Size == 4);
        FixupKind = MCFixupKind(X86::reloc_global_offset_table);
      }

      // A plain GOT reference is biased by the current offset within the
      // instruction; a GOT-symbol difference is not.
      if (Kind == GOT_Normal)
        ImmOffset = CurByte;
    } else if (Expr->getKind() == MCExpr::SymbolRef) {
      // COFF section-relative references use a dedicated fixup kind.
      if (hasSecRelSymbolRef(Expr)) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    } else if (Expr->getKind() == MCExpr::Binary) {
      const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr *>(Expr);
      if (hasSecRelSymbolRef(Bin->getLHS()) ||
          hasSecRelSymbolRef(Bin->getRHS())) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    }
  }

  // If the fixup is pc-relative, we need to bias the value to be relative to
  // the start of the field, not the end of the field.
  if (FixupKind == FK_PCRel_4 ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex) ||
      FixupKind == MCFixupKind(X86::reloc_branch_4byte_pcrel)) {
    ImmOffset -= 4;
    // If this is a pc-relative load off _GLOBAL_OFFSET_TABLE_:
    // leaq _GLOBAL_OFFSET_TABLE_(%rip), %r15
    // this needs to be a GOTPC32 relocation.
    if (startsWithGlobalOffsetTable(Expr) != GOT_None)
      FixupKind = MCFixupKind(X86::reloc_global_offset_table);
  }
  if (FixupKind == FK_PCRel_2)
    ImmOffset -= 2;
  if (FixupKind == FK_PCRel_1)
    ImmOffset -= 1;

  // Fold any accumulated offset into the expression itself.
  if (ImmOffset)
    Expr = MCBinaryExpr::createAdd(Expr, MCConstantExpr::create(ImmOffset, Ctx),
                                   Ctx);

  // Emit a symbolic constant as a fixup and 4 zeros.
  Fixups.push_back(MCFixup::create(CurByte, Expr, FixupKind, Loc));
  emitConstant(0, Size, CurByte, OS);
}
368 | |
/// Emit the ModR/M byte, and any needed SIB byte and displacement, for the
/// memory operand group starting at operand index \p Op.
///
/// \p RegOpcodeField is the value for the reg field of the ModR/M byte
/// (either a register number or an opcode extension). \p Rex reports whether
/// a REX prefix was emitted, which selects between relaxable rip-relative
/// relocation kinds. \p CurByte tracks the running instruction length.
void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
                                        unsigned RegOpcodeField,
                                        uint64_t TSFlags, bool Rex,
                                        unsigned &CurByte, raw_ostream &OS,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  const MCOperand &Disp = MI.getOperand(Op + X86::AddrDisp);
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Scale = MI.getOperand(Op + X86::AddrScaleAmt);
  const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);
  unsigned BaseReg = Base.getReg();
  bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;

  // Handle %rip relative addressing.
  if (BaseReg == X86::RIP ||
      BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode
    assert(STI.hasFeature(X86::Mode64Bit) &&
           "Rip-relative addressing requires 64-bit mode");
    assert(IndexReg.getReg() == 0 && "Invalid rip-relative address");
    // mod=0, r/m=5 encodes [disp32+RIP] in 64-bit mode.
    emitByte(modRMByte(0, RegOpcodeField, 5), CurByte, OS);

    unsigned Opcode = MI.getOpcode();
    // movq loads are handled with a special relocation form which allows the
    // linker to eliminate some loads for GOT references which end up in the
    // same linkage unit.
    unsigned FixupKind = [=]() {
      switch (Opcode) {
      default:
        return X86::reloc_riprel_4byte;
      case X86::MOV64rm:
        // MOV64rm always carries a REX.W prefix.
        assert(Rex);
        return X86::reloc_riprel_4byte_movq_load;
      case X86::CALL64m:
      case X86::JMP64m:
      case X86::TAILJMPm64:
      case X86::TEST64mr:
      case X86::ADC64rm:
      case X86::ADD64rm:
      case X86::AND64rm:
      case X86::CMP64rm:
      case X86::OR64rm:
      case X86::SBB64rm:
      case X86::SUB64rm:
      case X86::XOR64rm:
        // These opcodes permit linker relaxation; pick the REX-aware variant.
        return Rex ? X86::reloc_riprel_4byte_relax_rex
                   : X86::reloc_riprel_4byte_relax;
      }
    }();

    // rip-relative addressing is actually relative to the *next* instruction.
    // Since an immediate can follow the mod/rm byte for an instruction, this
    // means that we need to bias the displacement field of the instruction with
    // the size of the immediate field. If we have this case, add it into the
    // expression to emit.
    // Note: rip-relative addressing using immediate displacement values should
    // not be adjusted, assuming it was the user's intent.
    int ImmSize = !Disp.isImm() && X86II::hasImm(TSFlags)
                      ? X86II::getSizeOfImm(TSFlags)
                      : 0;

    emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), CurByte, OS,
                  Fixups, -ImmSize);
    return;
  }

  unsigned BaseRegNo = BaseReg ? getX86RegNum(Base) : -1U;

  // 16-bit addressing forms of the ModR/M byte have a different encoding for
  // the R/M field and are far more limited in which registers can be used.
  if (is16BitMemOperand(MI, Op, STI)) {
    if (BaseReg) {
      // For 32-bit addressing, the row and column values in Table 2-2 are
      // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with
      // some special cases. And getX86RegNum reflects that numbering.
      // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A,
      // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only
      // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order,
      // while values 0-3 indicate the allowed combinations (base+index) of
      // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI.
      //
      // R16Table[] is a lookup from the normal RegNo, to the row values from
      // Table 2-1 for 16-bit addressing modes. Where zero means disallowed.
      static const unsigned R16Table[] = {0, 0, 0, 7, 0, 6, 4, 5};
      unsigned RMfield = R16Table[BaseRegNo];

      assert(RMfield && "invalid 16-bit base register");

      if (IndexReg.getReg()) {
        unsigned IndexReg16 = R16Table[getX86RegNum(IndexReg)];

        assert(IndexReg16 && "invalid 16-bit index register");
        // We must have one of SI/DI (4,5), and one of BP/BX (6,7).
        assert(((IndexReg16 ^ RMfield) & 2) &&
               "invalid 16-bit base/index register combination");
        assert(Scale.getImm() == 1 &&
               "invalid scale for 16-bit memory reference");

        // Allow base/index to appear in either order (although GAS doesn't).
        if (IndexReg16 & 2)
          RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1);
        else
          RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1);
      }

      if (Disp.isImm() && isDisp8(Disp.getImm())) {
        if (Disp.getImm() == 0 && RMfield != 6) {
          // There is no displacement; just the register.
          emitByte(modRMByte(0, RegOpcodeField, RMfield), CurByte, OS);
          return;
        }
        // Use the [REG]+disp8 form, including for [BP] which cannot be encoded.
        emitByte(modRMByte(1, RegOpcodeField, RMfield), CurByte, OS);
        emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
        return;
      }
      // This is the [REG]+disp16 case.
      emitByte(modRMByte(2, RegOpcodeField, RMfield), CurByte, OS);
    } else {
      // There is no BaseReg; this is the plain [disp16] case.
      emitByte(modRMByte(0, RegOpcodeField, 6), CurByte, OS);
    }

    // Emit 16-bit displacement for plain disp16 or [REG]+disp16 cases.
    emitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, CurByte, OS, Fixups);
    return;
  }

  // Determine whether a SIB byte is needed.
  // If no BaseReg, issue a RIP relative instruction only if the MCE can
  // resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
  // 2-7) and absolute references.

  if ( // The SIB byte must be used if there is an index register.
      IndexReg.getReg() == 0 &&
      // The SIB byte must be used if the base is ESP/RSP/R12, all of which
      // encode to an R/M value of 4, which indicates that a SIB byte is
      // present.
      BaseRegNo != N86::ESP &&
      // If there is no base register and we're in 64-bit mode, we need a SIB
      // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
      (!STI.hasFeature(X86::Mode64Bit) || BaseReg != 0)) {

    if (BaseReg == 0) { // [disp32] in X86-32 mode
      emitByte(modRMByte(0, RegOpcodeField, 5), CurByte, OS);
      emitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, CurByte, OS, Fixups);
      return;
    }

    // If the base is not EBP/ESP and there is no displacement, use simple
    // indirect register encoding, this handles addresses like [EAX]. The
    // encoding for [EBP] with no displacement means [disp32] so we handle it
    // by emitting a displacement of 0 below.
    if (BaseRegNo != N86::EBP) {
      if (Disp.isImm() && Disp.getImm() == 0) {
        emitByte(modRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
        return;
      }

      // If the displacement is @tlscall, treat it as a zero.
      if (Disp.isExpr()) {
        auto *Sym = dyn_cast<MCSymbolRefExpr>(Disp.getExpr());
        if (Sym && Sym->getKind() == MCSymbolRefExpr::VK_TLSCALL) {
          // This is exclusively used by call *a@tlscall(base). The relocation
          // (R_386_TLSCALL or R_X86_64_TLSCALL) applies to the beginning.
          Fixups.push_back(MCFixup::create(0, Sym, FK_NONE, MI.getLoc()));
          emitByte(modRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
          return;
        }
      }
    }

    // Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
    if (Disp.isImm()) {
      if (!HasEVEX && isDisp8(Disp.getImm())) {
        emitByte(modRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
        emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
        return;
      }
      // Try EVEX compressed 8-bit displacement first; if failed, fall back to
      // 32-bit displacement.
      int CDisp8 = 0;
      if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
        emitByte(modRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
        // The fixup value is the compressed byte; the offset passed here is
        // the delta needed so the emitted byte equals CDisp8.
        emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
                      CDisp8 - Disp.getImm());
        return;
      }
    }

    // Otherwise, emit the most general non-SIB encoding: [REG+disp32]
    emitByte(modRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
    unsigned Opcode = MI.getOpcode();
    unsigned FixupKind = Opcode == X86::MOV32rm ? X86::reloc_signed_4byte_relax
                                                : X86::reloc_signed_4byte;
    emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), CurByte, OS,
                  Fixups);
    return;
  }

  // We need a SIB byte, so start by outputting the ModR/M byte first
  assert(IndexReg.getReg() != X86::ESP && IndexReg.getReg() != X86::RSP &&
         "Cannot use ESP as index reg!");

  bool ForceDisp32 = false;
  bool ForceDisp8 = false;
  int CDisp8 = 0;
  int ImmOffset = 0;
  if (BaseReg == 0) {
    // If there is no base register, we emit the special case SIB byte with
    // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
    emitByte(modRMByte(0, RegOpcodeField, 4), CurByte, OS);
    ForceDisp32 = true;
  } else if (!Disp.isImm()) {
    // Emit the normal disp32 encoding.
    emitByte(modRMByte(2, RegOpcodeField, 4), CurByte, OS);
    ForceDisp32 = true;
  } else if (Disp.getImm() == 0 &&
             // Base reg can't be anything that ends up with '5' as the base
             // reg, it is the magic [*] nomenclature that indicates no base.
             BaseRegNo != N86::EBP) {
    // Emit no displacement ModR/M byte
    emitByte(modRMByte(0, RegOpcodeField, 4), CurByte, OS);
  } else if (!HasEVEX && isDisp8(Disp.getImm())) {
    // Emit the disp8 encoding.
    emitByte(modRMByte(1, RegOpcodeField, 4), CurByte, OS);
    ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
  } else if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
    // Emit the disp8 encoding.
    emitByte(modRMByte(1, RegOpcodeField, 4), CurByte, OS);
    ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
    ImmOffset = CDisp8 - Disp.getImm();
  } else {
    // Emit the normal disp32 encoding.
    emitByte(modRMByte(2, RegOpcodeField, 4), CurByte, OS);
  }

  // Calculate what the SS field value should be...
  static const unsigned SSTable[] = {~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3};
  unsigned SS = SSTable[Scale.getImm()];

  if (BaseReg == 0) {
    // Handle the SIB byte for the case where there is no base, see Intel
    // Manual 2A, table 2-7. The displacement has already been output.
    unsigned IndexRegNo;
    if (IndexReg.getReg())
      IndexRegNo = getX86RegNum(IndexReg);
    else // Examples: [ESP+1*<noreg>+4] or [scaled idx]+disp32 (MOD=0,BASE=5)
      IndexRegNo = 4;
    emitSIBByte(SS, IndexRegNo, 5, CurByte, OS);
  } else {
    unsigned IndexRegNo;
    if (IndexReg.getReg())
      IndexRegNo = getX86RegNum(IndexReg);
    else
      IndexRegNo = 4; // For example [ESP+1*<noreg>+4]
    emitSIBByte(SS, IndexRegNo, getX86RegNum(Base), CurByte, OS);
  }

  // Do we need to output a displacement?
  if (ForceDisp8)
    emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
                  ImmOffset);
  else if (ForceDisp32 || Disp.getImm() != 0)
    emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
                  CurByte, OS, Fixups);
}
635 | |
636 | void X86MCCodeEmitter::emitPrefixImpl(uint64_t TSFlags, unsigned &CurOp, |
637 | unsigned &CurByte, bool &Rex, |
638 | const MCInst &MI, const MCInstrDesc &Desc, |
639 | const MCSubtargetInfo &STI, |
640 | raw_ostream &OS) const { |
641 | // Determine where the memory operand starts, if present. |
642 | int MemoryOperand = X86II::getMemoryOperandNo(TSFlags); |
643 | if (MemoryOperand != -1) |
644 | MemoryOperand += CurOp; |
645 | |
646 | // Emit segment override opcode prefix as needed. |
647 | if (MemoryOperand >= 0) |
648 | emitSegmentOverridePrefix(CurByte, MemoryOperand + X86::AddrSegmentReg, MI, |
649 | OS); |
650 | |
651 | // Emit the repeat opcode prefix as needed. |
652 | unsigned Flags = MI.getFlags(); |
653 | if (TSFlags & X86II::REP || Flags & X86::IP_HAS_REPEAT) |
654 | emitByte(0xF3, CurByte, OS); |
655 | if (Flags & X86::IP_HAS_REPEAT_NE) |
656 | emitByte(0xF2, CurByte, OS); |
657 | |
658 | // Emit the address size opcode prefix as needed. |
659 | bool need_address_override; |
660 | uint64_t AdSize = TSFlags & X86II::AdSizeMask; |
661 | if ((STI.hasFeature(X86::Mode16Bit) && AdSize == X86II::AdSize32) || |
662 | (STI.hasFeature(X86::Mode32Bit) && AdSize == X86II::AdSize16) || |
663 | (STI.hasFeature(X86::Mode64Bit) && AdSize == X86II::AdSize32)) { |
664 | need_address_override = true; |
665 | } else if (MemoryOperand < 0) { |
666 | need_address_override = false; |
667 | } else if (STI.hasFeature(X86::Mode64Bit)) { |
668 | assert(!is16BitMemOperand(MI, MemoryOperand, STI))((!is16BitMemOperand(MI, MemoryOperand, STI)) ? static_cast< void> (0) : __assert_fail ("!is16BitMemOperand(MI, MemoryOperand, STI)" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 668, __PRETTY_FUNCTION__)); |
669 | need_address_override = is32BitMemOperand(MI, MemoryOperand); |
670 | } else if (STI.hasFeature(X86::Mode32Bit)) { |
671 | assert(!is64BitMemOperand(MI, MemoryOperand))((!is64BitMemOperand(MI, MemoryOperand)) ? static_cast<void > (0) : __assert_fail ("!is64BitMemOperand(MI, MemoryOperand)" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 671, __PRETTY_FUNCTION__)); |
672 | need_address_override = is16BitMemOperand(MI, MemoryOperand, STI); |
673 | } else { |
674 | assert(STI.hasFeature(X86::Mode16Bit))((STI.hasFeature(X86::Mode16Bit)) ? static_cast<void> ( 0) : __assert_fail ("STI.hasFeature(X86::Mode16Bit)", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 674, __PRETTY_FUNCTION__)); |
675 | assert(!is64BitMemOperand(MI, MemoryOperand))((!is64BitMemOperand(MI, MemoryOperand)) ? static_cast<void > (0) : __assert_fail ("!is64BitMemOperand(MI, MemoryOperand)" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 675, __PRETTY_FUNCTION__)); |
676 | need_address_override = !is16BitMemOperand(MI, MemoryOperand, STI); |
677 | } |
678 | |
679 | if (need_address_override) |
680 | emitByte(0x67, CurByte, OS); |
681 | |
682 | // Encoding type for this instruction. |
683 | uint64_t Encoding = TSFlags & X86II::EncodingMask; |
684 | if (Encoding == 0) |
685 | Rex = emitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, STI, OS); |
686 | else |
687 | emitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS); |
688 | |
689 | uint64_t Form = TSFlags & X86II::FormMask; |
690 | switch (Form) { |
691 | default: |
692 | break; |
693 | case X86II::RawFrmDstSrc: { |
694 | unsigned siReg = MI.getOperand(1).getReg(); |
695 | assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||((((siReg == X86::SI && MI.getOperand(0).getReg() == X86 ::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg () == X86::EDI) || (siReg == X86::RSI && MI.getOperand (0).getReg() == X86::RDI)) && "SI and DI register sizes do not match" ) ? static_cast<void> (0) : __assert_fail ("((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) || (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) && \"SI and DI register sizes do not match\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 698, __PRETTY_FUNCTION__)) |
696 | (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||((((siReg == X86::SI && MI.getOperand(0).getReg() == X86 ::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg () == X86::EDI) || (siReg == X86::RSI && MI.getOperand (0).getReg() == X86::RDI)) && "SI and DI register sizes do not match" ) ? static_cast<void> (0) : __assert_fail ("((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) || (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) && \"SI and DI register sizes do not match\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 698, __PRETTY_FUNCTION__)) |
697 | (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&((((siReg == X86::SI && MI.getOperand(0).getReg() == X86 ::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg () == X86::EDI) || (siReg == X86::RSI && MI.getOperand (0).getReg() == X86::RDI)) && "SI and DI register sizes do not match" ) ? static_cast<void> (0) : __assert_fail ("((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) || (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) && \"SI and DI register sizes do not match\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 698, __PRETTY_FUNCTION__)) |
698 | "SI and DI register sizes do not match")((((siReg == X86::SI && MI.getOperand(0).getReg() == X86 ::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg () == X86::EDI) || (siReg == X86::RSI && MI.getOperand (0).getReg() == X86::RDI)) && "SI and DI register sizes do not match" ) ? static_cast<void> (0) : __assert_fail ("((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) || (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) || (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) && \"SI and DI register sizes do not match\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 698, __PRETTY_FUNCTION__)); |
699 | // Emit segment override opcode prefix as needed (not for %ds). |
700 | if (MI.getOperand(2).getReg() != X86::DS) |
701 | emitSegmentOverridePrefix(CurByte, 2, MI, OS); |
702 | // Emit AdSize prefix as needed. |
703 | if ((!STI.hasFeature(X86::Mode32Bit) && siReg == X86::ESI) || |
704 | (STI.hasFeature(X86::Mode32Bit) && siReg == X86::SI)) |
705 | emitByte(0x67, CurByte, OS); |
706 | CurOp += 3; // Consume operands. |
707 | break; |
708 | } |
709 | case X86II::RawFrmSrc: { |
710 | unsigned siReg = MI.getOperand(0).getReg(); |
711 | // Emit segment override opcode prefix as needed (not for %ds). |
712 | if (MI.getOperand(1).getReg() != X86::DS) |
713 | emitSegmentOverridePrefix(CurByte, 1, MI, OS); |
714 | // Emit AdSize prefix as needed. |
715 | if ((!STI.hasFeature(X86::Mode32Bit) && siReg == X86::ESI) || |
716 | (STI.hasFeature(X86::Mode32Bit) && siReg == X86::SI)) |
717 | emitByte(0x67, CurByte, OS); |
718 | CurOp += 2; // Consume operands. |
719 | break; |
720 | } |
721 | case X86II::RawFrmDst: { |
722 | unsigned siReg = MI.getOperand(0).getReg(); |
723 | // Emit AdSize prefix as needed. |
724 | if ((!STI.hasFeature(X86::Mode32Bit) && siReg == X86::EDI) || |
725 | (STI.hasFeature(X86::Mode32Bit) && siReg == X86::DI)) |
726 | emitByte(0x67, CurByte, OS); |
727 | ++CurOp; // Consume operand. |
728 | break; |
729 | } |
730 | case X86II::RawFrmMemOffs: { |
731 | // Emit segment override opcode prefix as needed. |
732 | emitSegmentOverridePrefix(CurByte, 1, MI, OS); |
733 | break; |
734 | } |
735 | } |
736 | } |
737 | |
738 | /// emitVEXOpcodePrefix - AVX instructions are encoded using a opcode prefix |
739 | /// called VEX. |
740 | void X86MCCodeEmitter::emitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, |
741 | int MemOperand, const MCInst &MI, |
742 | const MCInstrDesc &Desc, |
743 | raw_ostream &OS) const { |
744 | assert(!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX.")((!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX." ) ? static_cast<void> (0) : __assert_fail ("!(TSFlags & X86II::LOCK) && \"Can't have LOCK VEX.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 744, __PRETTY_FUNCTION__)); |
745 | |
746 | uint64_t Encoding = TSFlags & X86II::EncodingMask; |
747 | bool HasEVEX_K = TSFlags & X86II::EVEX_K; |
748 | bool HasVEX_4V = TSFlags & X86II::VEX_4V; |
749 | bool HasEVEX_RC = TSFlags & X86II::EVEX_RC; |
750 | |
751 | // VEX_R: opcode externsion equivalent to REX.R in |
752 | // 1's complement (inverted) form |
753 | // |
754 | // 1: Same as REX_R=0 (must be 1 in 32-bit mode) |
755 | // 0: Same as REX_R=1 (64 bit mode only) |
756 | // |
757 | uint8_t VEX_R = 0x1; |
758 | uint8_t EVEX_R2 = 0x1; |
759 | |
760 | // VEX_X: equivalent to REX.X, only used when a |
761 | // register is used for index in SIB Byte. |
762 | // |
763 | // 1: Same as REX.X=0 (must be 1 in 32-bit mode) |
764 | // 0: Same as REX.X=1 (64-bit mode only) |
765 | uint8_t VEX_X = 0x1; |
766 | |
767 | // VEX_B: |
768 | // |
769 | // 1: Same as REX_B=0 (ignored in 32-bit mode) |
770 | // 0: Same as REX_B=1 (64 bit mode only) |
771 | // |
772 | uint8_t VEX_B = 0x1; |
773 | |
774 | // VEX_W: opcode specific (use like REX.W, or used for |
775 | // opcode extension, or ignored, depending on the opcode byte) |
776 | uint8_t VEX_W = (TSFlags & X86II::VEX_W) ? 1 : 0; |
777 | |
778 | // VEX_5M (VEX m-mmmmm field): |
779 | // |
780 | // 0b00000: Reserved for future use |
781 | // 0b00001: implied 0F leading opcode |
782 | // 0b00010: implied 0F 38 leading opcode bytes |
783 | // 0b00011: implied 0F 3A leading opcode bytes |
784 | // 0b00100-0b11111: Reserved for future use |
785 | // 0b01000: XOP map select - 08h instructions with imm byte |
786 | // 0b01001: XOP map select - 09h instructions with no imm byte |
787 | // 0b01010: XOP map select - 0Ah instructions with imm dword |
788 | uint8_t VEX_5M; |
789 | switch (TSFlags & X86II::OpMapMask) { |
790 | default: |
791 | llvm_unreachable("Invalid prefix!")::llvm::llvm_unreachable_internal("Invalid prefix!", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 791); |
792 | case X86II::TB: |
793 | VEX_5M = 0x1; |
794 | break; // 0F |
795 | case X86II::T8: |
796 | VEX_5M = 0x2; |
797 | break; // 0F 38 |
798 | case X86II::TA: |
799 | VEX_5M = 0x3; |
800 | break; // 0F 3A |
801 | case X86II::XOP8: |
802 | VEX_5M = 0x8; |
803 | break; |
804 | case X86II::XOP9: |
805 | VEX_5M = 0x9; |
806 | break; |
807 | case X86II::XOPA: |
808 | VEX_5M = 0xA; |
809 | break; |
810 | } |
811 | |
812 | // VEX_4V (VEX vvvv field): a register specifier |
813 | // (in 1's complement form) or 1111 if unused. |
814 | uint8_t VEX_4V = 0xf; |
815 | uint8_t EVEX_V2 = 0x1; |
816 | |
817 | // EVEX_L2/VEX_L (Vector Length): |
818 | // |
819 | // L2 L |
820 | // 0 0: scalar or 128-bit vector |
821 | // 0 1: 256-bit vector |
822 | // 1 0: 512-bit vector |
823 | // |
824 | uint8_t VEX_L = (TSFlags & X86II::VEX_L) ? 1 : 0; |
825 | uint8_t EVEX_L2 = (TSFlags & X86II::EVEX_L2) ? 1 : 0; |
826 | |
827 | // VEX_PP: opcode extension providing equivalent |
828 | // functionality of a SIMD prefix |
829 | // |
830 | // 0b00: None |
831 | // 0b01: 66 |
832 | // 0b10: F3 |
833 | // 0b11: F2 |
834 | // |
835 | uint8_t VEX_PP = 0; |
836 | switch (TSFlags & X86II::OpPrefixMask) { |
837 | case X86II::PD: |
838 | VEX_PP = 0x1; |
839 | break; // 66 |
840 | case X86II::XS: |
841 | VEX_PP = 0x2; |
842 | break; // F3 |
843 | case X86II::XD: |
844 | VEX_PP = 0x3; |
845 | break; // F2 |
846 | } |
847 | |
848 | // EVEX_U |
849 | uint8_t EVEX_U = 1; // Always '1' so far |
850 | |
851 | // EVEX_z |
852 | uint8_t EVEX_z = (HasEVEX_K && (TSFlags & X86II::EVEX_Z)) ? 1 : 0; |
853 | |
854 | // EVEX_b |
855 | uint8_t EVEX_b = (TSFlags & X86II::EVEX_B) ? 1 : 0; |
856 | |
857 | // EVEX_rc |
858 | uint8_t EVEX_rc = 0; |
859 | |
860 | // EVEX_aaa |
861 | uint8_t EVEX_aaa = 0; |
862 | |
863 | bool EncodeRC = false; |
864 | |
865 | // Classify VEX_B, VEX_4V, VEX_R, VEX_X |
866 | unsigned NumOps = Desc.getNumOperands(); |
867 | unsigned CurOp = X86II::getOperandBias(Desc); |
868 | |
869 | switch (TSFlags & X86II::FormMask) { |
870 | default: |
871 | llvm_unreachable("Unexpected form in emitVEXOpcodePrefix!")::llvm::llvm_unreachable_internal("Unexpected form in emitVEXOpcodePrefix!" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 871); |
872 | case X86II::RawFrm: |
873 | case X86II::PrefixByte: |
874 | break; |
875 | case X86II::MRMDestMem: { |
876 | // MRMDestMem instructions forms: |
877 | // MemAddr, src1(ModR/M) |
878 | // MemAddr, src1(VEX_4V), src2(ModR/M) |
879 | // MemAddr, src1(ModR/M), imm8 |
880 | // |
881 | unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg); |
882 | VEX_B = ~(BaseRegEnc >> 3) & 1; |
883 | unsigned IndexRegEnc = |
884 | getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg); |
885 | VEX_X = ~(IndexRegEnc >> 3) & 1; |
886 | if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV. |
887 | EVEX_V2 = ~(IndexRegEnc >> 4) & 1; |
888 | |
889 | CurOp += X86::AddrNumOperands; |
890 | |
891 | if (HasEVEX_K) |
892 | EVEX_aaa = getX86RegEncoding(MI, CurOp++); |
893 | |
894 | if (HasVEX_4V) { |
895 | unsigned VRegEnc = getX86RegEncoding(MI, CurOp++); |
896 | VEX_4V = ~VRegEnc & 0xf; |
897 | EVEX_V2 = ~(VRegEnc >> 4) & 1; |
898 | } |
899 | |
900 | unsigned RegEnc = getX86RegEncoding(MI, CurOp++); |
901 | VEX_R = ~(RegEnc >> 3) & 1; |
902 | EVEX_R2 = ~(RegEnc >> 4) & 1; |
903 | break; |
904 | } |
905 | case X86II::MRMSrcMem: { |
906 | // MRMSrcMem instructions forms: |
907 | // src1(ModR/M), MemAddr |
908 | // src1(ModR/M), src2(VEX_4V), MemAddr |
909 | // src1(ModR/M), MemAddr, imm8 |
910 | // src1(ModR/M), MemAddr, src2(Imm[7:4]) |
911 | // |
912 | // FMA4: |
913 | // dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4]) |
914 | unsigned RegEnc = getX86RegEncoding(MI, CurOp++); |
915 | VEX_R = ~(RegEnc >> 3) & 1; |
916 | EVEX_R2 = ~(RegEnc >> 4) & 1; |
917 | |
918 | if (HasEVEX_K) |
919 | EVEX_aaa = getX86RegEncoding(MI, CurOp++); |
920 | |
921 | if (HasVEX_4V) { |
922 | unsigned VRegEnc = getX86RegEncoding(MI, CurOp++); |
923 | VEX_4V = ~VRegEnc & 0xf; |
924 | EVEX_V2 = ~(VRegEnc >> 4) & 1; |
925 | } |
926 | |
927 | unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg); |
928 | VEX_B = ~(BaseRegEnc >> 3) & 1; |
929 | unsigned IndexRegEnc = |
930 | getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg); |
931 | VEX_X = ~(IndexRegEnc >> 3) & 1; |
932 | if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV. |
933 | EVEX_V2 = ~(IndexRegEnc >> 4) & 1; |
934 | |
935 | break; |
936 | } |
937 | case X86II::MRMSrcMem4VOp3: { |
938 | // Instruction format for 4VOp3: |
939 | // src1(ModR/M), MemAddr, src3(VEX_4V) |
940 | unsigned RegEnc = getX86RegEncoding(MI, CurOp++); |
941 | VEX_R = ~(RegEnc >> 3) & 1; |
942 | |
943 | unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg); |
944 | VEX_B = ~(BaseRegEnc >> 3) & 1; |
945 | unsigned IndexRegEnc = |
946 | getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg); |
947 | VEX_X = ~(IndexRegEnc >> 3) & 1; |
948 | |
949 | VEX_4V = ~getX86RegEncoding(MI, CurOp + X86::AddrNumOperands) & 0xf; |
950 | break; |
951 | } |
952 | case X86II::MRMSrcMemOp4: { |
953 | // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M), |
954 | unsigned RegEnc = getX86RegEncoding(MI, CurOp++); |
955 | VEX_R = ~(RegEnc >> 3) & 1; |
956 | |
957 | unsigned VRegEnc = getX86RegEncoding(MI, CurOp++); |
958 | VEX_4V = ~VRegEnc & 0xf; |
959 | |
960 | unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg); |
961 | VEX_B = ~(BaseRegEnc >> 3) & 1; |
962 | unsigned IndexRegEnc = |
963 | getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg); |
964 | VEX_X = ~(IndexRegEnc >> 3) & 1; |
965 | break; |
966 | } |
967 | case X86II::MRM0m: |
968 | case X86II::MRM1m: |
969 | case X86II::MRM2m: |
970 | case X86II::MRM3m: |
971 | case X86II::MRM4m: |
972 | case X86II::MRM5m: |
973 | case X86II::MRM6m: |
974 | case X86II::MRM7m: { |
975 | // MRM[0-9]m instructions forms: |
976 | // MemAddr |
977 | // src1(VEX_4V), MemAddr |
978 | if (HasVEX_4V) { |
979 | unsigned VRegEnc = getX86RegEncoding(MI, CurOp++); |
980 | VEX_4V = ~VRegEnc & 0xf; |
981 | EVEX_V2 = ~(VRegEnc >> 4) & 1; |
982 | } |
983 | |
984 | if (HasEVEX_K) |
985 | EVEX_aaa = getX86RegEncoding(MI, CurOp++); |
986 | |
987 | unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg); |
988 | VEX_B = ~(BaseRegEnc >> 3) & 1; |
989 | unsigned IndexRegEnc = |
990 | getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg); |
991 | VEX_X = ~(IndexRegEnc >> 3) & 1; |
992 | if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV. |
993 | EVEX_V2 = ~(IndexRegEnc >> 4) & 1; |
994 | |
995 | break; |
996 | } |
997 | case X86II::MRMSrcReg: { |
998 | // MRMSrcReg instructions forms: |
999 | // dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4]) |
1000 | // dst(ModR/M), src1(ModR/M) |
1001 | // dst(ModR/M), src1(ModR/M), imm8 |
1002 | // |
1003 | // FMA4: |
1004 | // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M), |
1005 | unsigned RegEnc = getX86RegEncoding(MI, CurOp++); |
1006 | VEX_R = ~(RegEnc >> 3) & 1; |
1007 | EVEX_R2 = ~(RegEnc >> 4) & 1; |
1008 | |
1009 | if (HasEVEX_K) |
1010 | EVEX_aaa = getX86RegEncoding(MI, CurOp++); |
1011 | |
1012 | if (HasVEX_4V) { |
1013 | unsigned VRegEnc = getX86RegEncoding(MI, CurOp++); |
1014 | VEX_4V = ~VRegEnc & 0xf; |
1015 | EVEX_V2 = ~(VRegEnc >> 4) & 1; |
1016 | } |
1017 | |
1018 | RegEnc = getX86RegEncoding(MI, CurOp++); |
1019 | VEX_B = ~(RegEnc >> 3) & 1; |
1020 | VEX_X = ~(RegEnc >> 4) & 1; |
1021 | |
1022 | if (EVEX_b) { |
1023 | if (HasEVEX_RC) { |
1024 | unsigned RcOperand = NumOps - 1; |
1025 | assert(RcOperand >= CurOp)((RcOperand >= CurOp) ? static_cast<void> (0) : __assert_fail ("RcOperand >= CurOp", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 1025, __PRETTY_FUNCTION__)); |
1026 | EVEX_rc = MI.getOperand(RcOperand).getImm(); |
1027 | assert(EVEX_rc <= 3 && "Invalid rounding control!")((EVEX_rc <= 3 && "Invalid rounding control!") ? static_cast <void> (0) : __assert_fail ("EVEX_rc <= 3 && \"Invalid rounding control!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 1027, __PRETTY_FUNCTION__)); |
1028 | } |
1029 | EncodeRC = true; |
1030 | } |
1031 | break; |
1032 | } |
1033 | case X86II::MRMSrcReg4VOp3: { |
1034 | // Instruction format for 4VOp3: |
1035 | // src1(ModR/M), src2(ModR/M), src3(VEX_4V) |
1036 | unsigned RegEnc = getX86RegEncoding(MI, CurOp++); |
1037 | VEX_R = ~(RegEnc >> 3) & 1; |
1038 | |
1039 | RegEnc = getX86RegEncoding(MI, CurOp++); |
1040 | VEX_B = ~(RegEnc >> 3) & 1; |
1041 | |
1042 | VEX_4V = ~getX86RegEncoding(MI, CurOp++) & 0xf; |
1043 | break; |
1044 | } |
1045 | case X86II::MRMSrcRegOp4: { |
1046 | // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M), |
1047 | unsigned RegEnc = getX86RegEncoding(MI, CurOp++); |
1048 | VEX_R = ~(RegEnc >> 3) & 1; |
1049 | |
1050 | unsigned VRegEnc = getX86RegEncoding(MI, CurOp++); |
1051 | VEX_4V = ~VRegEnc & 0xf; |
1052 | |
1053 | // Skip second register source (encoded in Imm[7:4]) |
1054 | ++CurOp; |
1055 | |
1056 | RegEnc = getX86RegEncoding(MI, CurOp++); |
1057 | VEX_B = ~(RegEnc >> 3) & 1; |
1058 | VEX_X = ~(RegEnc >> 4) & 1; |
1059 | break; |
1060 | } |
1061 | case X86II::MRMDestReg: { |
1062 | // MRMDestReg instructions forms: |
1063 | // dst(ModR/M), src(ModR/M) |
1064 | // dst(ModR/M), src(ModR/M), imm8 |
1065 | // dst(ModR/M), src1(VEX_4V), src2(ModR/M) |
1066 | unsigned RegEnc = getX86RegEncoding(MI, CurOp++); |
1067 | VEX_B = ~(RegEnc >> 3) & 1; |
1068 | VEX_X = ~(RegEnc >> 4) & 1; |
1069 | |
1070 | if (HasEVEX_K) |
1071 | EVEX_aaa = getX86RegEncoding(MI, CurOp++); |
1072 | |
1073 | if (HasVEX_4V) { |
1074 | unsigned VRegEnc = getX86RegEncoding(MI, CurOp++); |
1075 | VEX_4V = ~VRegEnc & 0xf; |
1076 | EVEX_V2 = ~(VRegEnc >> 4) & 1; |
1077 | } |
1078 | |
1079 | RegEnc = getX86RegEncoding(MI, CurOp++); |
1080 | VEX_R = ~(RegEnc >> 3) & 1; |
1081 | EVEX_R2 = ~(RegEnc >> 4) & 1; |
1082 | if (EVEX_b) |
1083 | EncodeRC = true; |
1084 | break; |
1085 | } |
1086 | case X86II::MRM0r: |
1087 | case X86II::MRM1r: |
1088 | case X86II::MRM2r: |
1089 | case X86II::MRM3r: |
1090 | case X86II::MRM4r: |
1091 | case X86II::MRM5r: |
1092 | case X86II::MRM6r: |
1093 | case X86II::MRM7r: { |
1094 | // MRM0r-MRM7r instructions forms: |
1095 | // dst(VEX_4V), src(ModR/M), imm8 |
1096 | if (HasVEX_4V) { |
1097 | unsigned VRegEnc = getX86RegEncoding(MI, CurOp++); |
1098 | VEX_4V = ~VRegEnc & 0xf; |
1099 | EVEX_V2 = ~(VRegEnc >> 4) & 1; |
1100 | } |
1101 | if (HasEVEX_K) |
1102 | EVEX_aaa = getX86RegEncoding(MI, CurOp++); |
1103 | |
1104 | unsigned RegEnc = getX86RegEncoding(MI, CurOp++); |
1105 | VEX_B = ~(RegEnc >> 3) & 1; |
1106 | VEX_X = ~(RegEnc >> 4) & 1; |
1107 | break; |
1108 | } |
1109 | } |
1110 | |
1111 | if (Encoding == X86II::VEX || Encoding == X86II::XOP) { |
1112 | // VEX opcode prefix can have 2 or 3 bytes |
1113 | // |
1114 | // 3 bytes: |
1115 | // +-----+ +--------------+ +-------------------+ |
1116 | // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp | |
1117 | // +-----+ +--------------+ +-------------------+ |
1118 | // 2 bytes: |
1119 | // +-----+ +-------------------+ |
1120 | // | C5h | | R | vvvv | L | pp | |
1121 | // +-----+ +-------------------+ |
1122 | // |
1123 | // XOP uses a similar prefix: |
1124 | // +-----+ +--------------+ +-------------------+ |
1125 | // | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp | |
1126 | // +-----+ +--------------+ +-------------------+ |
1127 | uint8_t LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3); |
1128 | |
1129 | // Can we use the 2 byte VEX prefix? |
1130 | if (!(MI.getFlags() & X86::IP_USE_VEX3) && Encoding == X86II::VEX && |
1131 | VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) { |
1132 | emitByte(0xC5, CurByte, OS); |
1133 | emitByte(LastByte | (VEX_R << 7), CurByte, OS); |
1134 | return; |
1135 | } |
1136 | |
1137 | // 3 byte VEX prefix |
1138 | emitByte(Encoding == X86II::XOP ? 0x8F : 0xC4, CurByte, OS); |
1139 | emitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS); |
1140 | emitByte(LastByte | (VEX_W << 7), CurByte, OS); |
1141 | } else { |
1142 | assert(Encoding == X86II::EVEX && "unknown encoding!")((Encoding == X86II::EVEX && "unknown encoding!") ? static_cast <void> (0) : __assert_fail ("Encoding == X86II::EVEX && \"unknown encoding!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 1142, __PRETTY_FUNCTION__)); |
1143 | // EVEX opcode prefix can have 4 bytes |
1144 | // |
1145 | // +-----+ +--------------+ +-------------------+ +------------------------+ |
1146 | // | 62h | | RXBR' | 00mm | | W | vvvv | U | pp | | z | L'L | b | v' | aaa | |
1147 | // +-----+ +--------------+ +-------------------+ +------------------------+ |
1148 | assert((VEX_5M & 0x3) == VEX_5M &&(((VEX_5M & 0x3) == VEX_5M && "More than 2 significant bits in VEX.m-mmmm fields for EVEX!" ) ? static_cast<void> (0) : __assert_fail ("(VEX_5M & 0x3) == VEX_5M && \"More than 2 significant bits in VEX.m-mmmm fields for EVEX!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 1149, __PRETTY_FUNCTION__)) |
1149 | "More than 2 significant bits in VEX.m-mmmm fields for EVEX!")(((VEX_5M & 0x3) == VEX_5M && "More than 2 significant bits in VEX.m-mmmm fields for EVEX!" ) ? static_cast<void> (0) : __assert_fail ("(VEX_5M & 0x3) == VEX_5M && \"More than 2 significant bits in VEX.m-mmmm fields for EVEX!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 1149, __PRETTY_FUNCTION__)); |
1150 | |
1151 | emitByte(0x62, CurByte, OS); |
1152 | emitByte((VEX_R << 7) | (VEX_X << 6) | (VEX_B << 5) | (EVEX_R2 << 4) | |
1153 | VEX_5M, |
1154 | CurByte, OS); |
1155 | emitByte((VEX_W << 7) | (VEX_4V << 3) | (EVEX_U << 2) | VEX_PP, CurByte, |
1156 | OS); |
1157 | if (EncodeRC) |
1158 | emitByte((EVEX_z << 7) | (EVEX_rc << 5) | (EVEX_b << 4) | (EVEX_V2 << 3) | |
1159 | EVEX_aaa, |
1160 | CurByte, OS); |
1161 | else |
1162 | emitByte((EVEX_z << 7) | (EVEX_L2 << 6) | (VEX_L << 5) | (EVEX_b << 4) | |
1163 | (EVEX_V2 << 3) | EVEX_aaa, |
1164 | CurByte, OS); |
1165 | } |
1166 | } |
1167 | |
1168 | /// Determine if the MCInst has to be encoded with a X86-64 REX prefix which |
1169 | /// specifies 1) 64-bit instructions, 2) non-default operand size, and 3) use |
1170 | /// of X86-64 extended registers. |
1171 | uint8_t X86MCCodeEmitter::determineREXPrefix(const MCInst &MI, uint64_t TSFlags, |
1172 | int MemOperand, |
1173 | const MCInstrDesc &Desc) const { |
1174 | uint8_t REX = 0; |
1175 | bool UsesHighByteReg = false; |
1176 | |
1177 | if (TSFlags & X86II::REX_W) |
1178 | REX |= 1 << 3; // set REX.W |
1179 | |
1180 | if (MI.getNumOperands() == 0) |
1181 | return REX; |
1182 | |
1183 | unsigned NumOps = MI.getNumOperands(); |
1184 | unsigned CurOp = X86II::getOperandBias(Desc); |
1185 | |
1186 | // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix. |
1187 | for (unsigned i = CurOp; i != NumOps; ++i) { |
1188 | const MCOperand &MO = MI.getOperand(i); |
1189 | if (!MO.isReg()) |
1190 | continue; |
1191 | unsigned Reg = MO.getReg(); |
1192 | if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH) |
1193 | UsesHighByteReg = true; |
1194 | if (X86II::isX86_64NonExtLowByteReg(Reg)) |
1195 | // FIXME: The caller of determineREXPrefix slaps this prefix onto anything |
1196 | // that returns non-zero. |
1197 | REX |= 0x40; // REX fixed encoding prefix |
1198 | } |
1199 | |
1200 | switch (TSFlags & X86II::FormMask) { |
1201 | case X86II::AddRegFrm: |
1202 | REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B |
1203 | break; |
1204 | case X86II::MRMSrcReg: |
1205 | case X86II::MRMSrcRegCC: |
1206 | REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R |
1207 | REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B |
1208 | break; |
1209 | case X86II::MRMSrcMem: |
1210 | case X86II::MRMSrcMemCC: |
1211 | REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R |
1212 | REX |= isREXExtendedReg(MI, MemOperand + X86::AddrBaseReg) << 0; // REX.B |
1213 | REX |= isREXExtendedReg(MI, MemOperand + X86::AddrIndexReg) << 1; // REX.X |
1214 | CurOp += X86::AddrNumOperands; |
Value stored to 'CurOp' is never read | |
1215 | break; |
1216 | case X86II::MRMDestReg: |
1217 | REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B |
1218 | REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R |
1219 | break; |
1220 | case X86II::MRMDestMem: |
1221 | REX |= isREXExtendedReg(MI, MemOperand + X86::AddrBaseReg) << 0; // REX.B |
1222 | REX |= isREXExtendedReg(MI, MemOperand + X86::AddrIndexReg) << 1; // REX.X |
1223 | CurOp += X86::AddrNumOperands; |
1224 | REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R |
1225 | break; |
1226 | case X86II::MRMXmCC: |
1227 | case X86II::MRMXm: |
1228 | case X86II::MRM0m: |
1229 | case X86II::MRM1m: |
1230 | case X86II::MRM2m: |
1231 | case X86II::MRM3m: |
1232 | case X86II::MRM4m: |
1233 | case X86II::MRM5m: |
1234 | case X86II::MRM6m: |
1235 | case X86II::MRM7m: |
1236 | REX |= isREXExtendedReg(MI, MemOperand + X86::AddrBaseReg) << 0; // REX.B |
1237 | REX |= isREXExtendedReg(MI, MemOperand + X86::AddrIndexReg) << 1; // REX.X |
1238 | break; |
1239 | case X86II::MRMXrCC: |
1240 | case X86II::MRMXr: |
1241 | case X86II::MRM0r: |
1242 | case X86II::MRM1r: |
1243 | case X86II::MRM2r: |
1244 | case X86II::MRM3r: |
1245 | case X86II::MRM4r: |
1246 | case X86II::MRM5r: |
1247 | case X86II::MRM6r: |
1248 | case X86II::MRM7r: |
1249 | REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B |
1250 | break; |
1251 | } |
1252 | if (REX && UsesHighByteReg) |
1253 | report_fatal_error( |
1254 | "Cannot encode high byte register in REX-prefixed instruction"); |
1255 | |
1256 | return REX; |
1257 | } |
1258 | |
1259 | /// Emit segment override opcode prefix as needed. |
1260 | void X86MCCodeEmitter::emitSegmentOverridePrefix(unsigned &CurByte, |
1261 | unsigned SegOperand, |
1262 | const MCInst &MI, |
1263 | raw_ostream &OS) const { |
1264 | // Check for explicit segment override on memory operand. |
1265 | if (unsigned Reg = MI.getOperand(SegOperand).getReg()) |
1266 | emitByte(X86::getSegmentOverridePrefixForReg(Reg), CurByte, OS); |
1267 | } |
1268 | |
1269 | /// Emit all instruction prefixes prior to the opcode. |
1270 | /// |
1271 | /// \param MemOperand the operand # of the start of a memory operand if present. |
1272 | /// If not present, it is -1. |
1273 | /// |
1274 | /// \returns true if a REX prefix was used. |
1275 | bool X86MCCodeEmitter::emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, |
1276 | int MemOperand, const MCInst &MI, |
1277 | const MCInstrDesc &Desc, |
1278 | const MCSubtargetInfo &STI, |
1279 | raw_ostream &OS) const { |
1280 | bool Ret = false; |
1281 | // Emit the operand size opcode prefix as needed. |
1282 | if ((TSFlags & X86II::OpSizeMask) == |
1283 | (STI.hasFeature(X86::Mode16Bit) ? X86II::OpSize32 : X86II::OpSize16)) |
1284 | emitByte(0x66, CurByte, OS); |
1285 | |
1286 | // Emit the LOCK opcode prefix. |
1287 | if (TSFlags & X86II::LOCK || MI.getFlags() & X86::IP_HAS_LOCK) |
1288 | emitByte(0xF0, CurByte, OS); |
1289 | |
1290 | // Emit the NOTRACK opcode prefix. |
1291 | if (TSFlags & X86II::NOTRACK || MI.getFlags() & X86::IP_HAS_NOTRACK) |
1292 | emitByte(0x3E, CurByte, OS); |
1293 | |
1294 | switch (TSFlags & X86II::OpPrefixMask) { |
1295 | case X86II::PD: // 66 |
1296 | emitByte(0x66, CurByte, OS); |
1297 | break; |
1298 | case X86II::XS: // F3 |
1299 | emitByte(0xF3, CurByte, OS); |
1300 | break; |
1301 | case X86II::XD: // F2 |
1302 | emitByte(0xF2, CurByte, OS); |
1303 | break; |
1304 | } |
1305 | |
1306 | // Handle REX prefix. |
1307 | // FIXME: Can this come before F2 etc to simplify emission? |
1308 | if (STI.hasFeature(X86::Mode64Bit)) { |
1309 | if (uint8_t REX = determineREXPrefix(MI, TSFlags, MemOperand, Desc)) { |
1310 | emitByte(0x40 | REX, CurByte, OS); |
1311 | Ret = true; |
1312 | } |
1313 | } else { |
1314 | assert(!(TSFlags & X86II::REX_W) && "REX.W requires 64bit mode.")((!(TSFlags & X86II::REX_W) && "REX.W requires 64bit mode." ) ? static_cast<void> (0) : __assert_fail ("!(TSFlags & X86II::REX_W) && \"REX.W requires 64bit mode.\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp" , 1314, __PRETTY_FUNCTION__)); |
1315 | } |
1316 | |
1317 | // 0x0F escape code must be emitted just before the opcode. |
1318 | switch (TSFlags & X86II::OpMapMask) { |
1319 | case X86II::TB: // Two-byte opcode map |
1320 | case X86II::T8: // 0F 38 |
1321 | case X86II::TA: // 0F 3A |
1322 | case X86II::ThreeDNow: // 0F 0F, second 0F emitted by caller. |
1323 | emitByte(0x0F, CurByte, OS); |
1324 | break; |
1325 | } |
1326 | |
1327 | switch (TSFlags & X86II::OpMapMask) { |
1328 | case X86II::T8: // 0F 38 |
1329 | emitByte(0x38, CurByte, OS); |
1330 | break; |
1331 | case X86II::TA: // 0F 3A |
1332 | emitByte(0x3A, CurByte, OS); |
1333 | break; |
1334 | } |
1335 | return Ret; |
1336 | } |
1337 | |
1338 | void X86MCCodeEmitter::emitPrefix(const MCInst &MI, raw_ostream &OS, |
1339 | const MCSubtargetInfo &STI) const { |
1340 | unsigned Opcode = MI.getOpcode(); |
1341 | const MCInstrDesc &Desc = MCII.get(Opcode); |
1342 | uint64_t TSFlags = Desc.TSFlags; |
1343 | |
1344 | // Pseudo instructions don't get encoded. |
1345 | if ((TSFlags & X86II::FormMask) == X86II::Pseudo) |
1346 | return; |
1347 | |
1348 | unsigned CurOp = X86II::getOperandBias(Desc); |
1349 | |
1350 | // Keep track of the current byte being emitted. |
1351 | unsigned CurByte = 0; |
1352 | |
1353 | bool Rex = false; |
1354 | emitPrefixImpl(TSFlags, CurOp, CurByte, Rex, MI, Desc, STI, OS); |
1355 | } |
1356 | |
/// Encode \p MI into \p OS: prefixes, opcode, ModR/M/SIB/displacement, and
/// trailing immediates, recording relocations in \p Fixups. The big switch
/// dispatches on the instruction's encoding form (TSFlags FormMask); each case
/// advances CurOp past the operands it consumes so the shared trailing
/// immediate logic below sees only what is left.
void X86MCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  uint64_t TSFlags = Desc.TSFlags;

  // Pseudo instructions don't get encoded.
  if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
    return;

  unsigned NumOps = Desc.getNumOperands();
  // Skip any leading implicit/tied operands the descriptor accounts for.
  unsigned CurOp = X86II::getOperandBias(Desc);

  // Keep track of the current byte being emitted.
  unsigned CurByte = 0;

  // Rex is set by emitPrefixImpl when a REX prefix was emitted; the memory
  // ModR/M emitter needs it to reject high-byte registers.
  bool Rex = false;
  emitPrefixImpl(TSFlags, CurOp, CurByte, Rex, MI, Desc, STI, OS);

  // It uses the VEX.VVVV field?
  bool HasVEX_4V = TSFlags & X86II::VEX_4V;
  bool HasVEX_I8Reg = (TSFlags & X86II::ImmMask) == X86II::Imm8Reg;

  // It uses the EVEX.aaa field?
  bool HasEVEX_K = TSFlags & X86II::EVEX_K;
  bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;

  // Used if a register is encoded in 7:4 of immediate.
  unsigned I8RegNum = 0;

  uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);

  if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow)
    BaseOpcode = 0x0F; // Weird 3DNow! encoding.

  unsigned OpcodeOffset = 0;

  uint64_t Form = TSFlags & X86II::FormMask;
  switch (Form) {
  default:
    errs() << "FORM: " << Form << "\n";
    llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
  case X86II::Pseudo:
    llvm_unreachable("Pseudo instruction shouldn't be emitted");
  case X86II::RawFrmDstSrc:
  case X86II::RawFrmSrc:
  case X86II::RawFrmDst:
  case X86II::PrefixByte:
    // Fixed-opcode forms; any implicit operands were consumed by the prefix
    // emitter above.
    emitByte(BaseOpcode, CurByte, OS);
    break;
  case X86II::AddCCFrm: {
    // This will be added to the opcode in the fallthrough.
    OpcodeOffset = MI.getOperand(NumOps - 1).getImm();
    assert(OpcodeOffset < 16 && "Unexpected opcode offset!");
    --NumOps; // Drop the operand from the end.
    LLVM_FALLTHROUGH;
  case X86II::RawFrm:
    emitByte(BaseOpcode + OpcodeOffset, CurByte, OS);

    // Only 64-bit 32-bit-pc-relative branches need the special fixup below;
    // everything else is done.
    if (!STI.hasFeature(X86::Mode64Bit) || !isPCRel32Branch(MI, MCII))
      break;

    const MCOperand &Op = MI.getOperand(CurOp++);
    emitImmediate(Op, MI.getLoc(), X86II::getSizeOfImm(TSFlags),
                  MCFixupKind(X86::reloc_branch_4byte_pcrel), CurByte, OS,
                  Fixups);
    break;
  }
  case X86II::RawFrmMemOffs:
    // Opcode followed by a raw memory offset (moffs) immediate.
    emitByte(BaseOpcode, CurByte, OS);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  CurByte, OS, Fixups);
    ++CurOp; // skip segment operand
    break;
  case X86II::RawFrmImm8:
    // Opcode, primary immediate, then a trailing 1-byte immediate.
    emitByte(BaseOpcode, CurByte, OS);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  CurByte, OS, Fixups);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, CurByte,
                  OS, Fixups);
    break;
  case X86II::RawFrmImm16:
    // Opcode, primary immediate, then a trailing 2-byte immediate.
    emitByte(BaseOpcode, CurByte, OS);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  CurByte, OS, Fixups);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, CurByte,
                  OS, Fixups);
    break;

  case X86II::AddRegFrm:
    // Register is encoded in the low 3 bits of the opcode byte itself.
    emitByte(BaseOpcode + getX86RegNum(MI.getOperand(CurOp++)), CurByte, OS);
    break;

  case X86II::MRMDestReg: {
    emitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    // Destination register in ModRM.rm, source in ModRM.reg.
    emitRegModRMByte(MI.getOperand(CurOp),
                     getX86RegNum(MI.getOperand(SrcRegNum)), CurByte, OS);
    CurOp = SrcRegNum + 1;
    break;
  }
  case X86II::MRMDestMem: {
    emitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + X86::AddrNumOperands;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    // Destination is the memory operand starting at CurOp.
    emitMemModRMByte(MI, CurOp, getX86RegNum(MI.getOperand(SrcRegNum)), TSFlags,
                     Rex, CurByte, OS, Fixups, STI);
    CurOp = SrcRegNum + 1;
    break;
  }
  case X86II::MRMSrcReg: {
    emitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    // Source register in ModRM.rm, destination in ModRM.reg.
    emitRegModRMByte(MI.getOperand(SrcRegNum),
                     getX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
    CurOp = SrcRegNum + 1;
    if (HasVEX_I8Reg)
      I8RegNum = getX86RegEncoding(MI, CurOp++);
    // do not count the rounding control operand
    if (HasEVEX_RC)
      --NumOps;
    break;
  }
  case X86II::MRMSrcReg4VOp3: {
    emitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + 1;

    emitRegModRMByte(MI.getOperand(SrcRegNum),
                     getX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
    CurOp = SrcRegNum + 1;
    ++CurOp; // Encoded in VEX.VVVV
    break;
  }
  case X86II::MRMSrcRegOp4: {
    emitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + 1;

    // Skip 1st src (which is encoded in VEX_VVVV)
    ++SrcRegNum;

    // Capture 2nd src (which is encoded in Imm[7:4])
    assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg");
    I8RegNum = getX86RegEncoding(MI, SrcRegNum++);

    emitRegModRMByte(MI.getOperand(SrcRegNum),
                     getX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
    CurOp = SrcRegNum + 1;
    break;
  }
  case X86II::MRMSrcRegCC: {
    // Condition code is an immediate operand added into the opcode byte.
    unsigned FirstOp = CurOp++;
    unsigned SecondOp = CurOp++;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    emitByte(BaseOpcode + CC, CurByte, OS);

    emitRegModRMByte(MI.getOperand(SecondOp),
                     getX86RegNum(MI.getOperand(FirstOp)), CurByte, OS);
    break;
  }
  case X86II::MRMSrcMem: {
    unsigned FirstMemOp = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      ++FirstMemOp;

    if (HasVEX_4V)
      ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).

    emitByte(BaseOpcode, CurByte, OS);

    emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
                     TSFlags, Rex, CurByte, OS, Fixups, STI);
    CurOp = FirstMemOp + X86::AddrNumOperands;
    if (HasVEX_I8Reg)
      I8RegNum = getX86RegEncoding(MI, CurOp++);
    break;
  }
  case X86II::MRMSrcMem4VOp3: {
    unsigned FirstMemOp = CurOp + 1;

    emitByte(BaseOpcode, CurByte, OS);

    emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
                     TSFlags, Rex, CurByte, OS, Fixups, STI);
    CurOp = FirstMemOp + X86::AddrNumOperands;
    ++CurOp; // Encoded in VEX.VVVV.
    break;
  }
  case X86II::MRMSrcMemOp4: {
    unsigned FirstMemOp = CurOp + 1;

    ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).

    // Capture second register source (encoded in Imm[7:4])
    assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg");
    I8RegNum = getX86RegEncoding(MI, FirstMemOp++);

    emitByte(BaseOpcode, CurByte, OS);

    emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
                     TSFlags, Rex, CurByte, OS, Fixups, STI);
    CurOp = FirstMemOp + X86::AddrNumOperands;
    break;
  }
  case X86II::MRMSrcMemCC: {
    // Register destination, memory source, condition code folded into opcode.
    unsigned RegOp = CurOp++;
    unsigned FirstMemOp = CurOp;
    CurOp = FirstMemOp + X86::AddrNumOperands;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    emitByte(BaseOpcode + CC, CurByte, OS);

    emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(RegOp)),
                     TSFlags, Rex, CurByte, OS, Fixups, STI);
    break;
  }

  case X86II::MRMXrCC: {
    unsigned RegOp = CurOp++;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    emitByte(BaseOpcode + CC, CurByte, OS);
    emitRegModRMByte(MI.getOperand(RegOp), 0, CurByte, OS);
    break;
  }

  case X86II::MRMXr:
  case X86II::MRM0r:
  case X86II::MRM1r:
  case X86II::MRM2r:
  case X86II::MRM3r:
  case X86II::MRM4r:
  case X86II::MRM5r:
  case X86II::MRM6r:
  case X86II::MRM7r:
    // MRMnr forms encode an opcode extension (0-7) in ModRM.reg.
    if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
      ++CurOp;
    if (HasEVEX_K) // Skip writemask
      ++CurOp;
    emitByte(BaseOpcode, CurByte, OS);
    emitRegModRMByte(MI.getOperand(CurOp++),
                     (Form == X86II::MRMXr) ? 0 : Form - X86II::MRM0r, CurByte,
                     OS);
    break;

  case X86II::MRMXmCC: {
    unsigned FirstMemOp = CurOp;
    CurOp = FirstMemOp + X86::AddrNumOperands;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    emitByte(BaseOpcode + CC, CurByte, OS);

    emitMemModRMByte(MI, FirstMemOp, 0, TSFlags, Rex, CurByte, OS, Fixups, STI);
    break;
  }

  case X86II::MRMXm:
  case X86II::MRM0m:
  case X86II::MRM1m:
  case X86II::MRM2m:
  case X86II::MRM3m:
  case X86II::MRM4m:
  case X86II::MRM5m:
  case X86II::MRM6m:
  case X86II::MRM7m:
    // MRMnm forms encode an opcode extension (0-7) in ModRM.reg with a memory
    // operand in ModRM.rm.
    if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
      ++CurOp;
    if (HasEVEX_K) // Skip writemask
      ++CurOp;
    emitByte(BaseOpcode, CurByte, OS);
    emitMemModRMByte(MI, CurOp,
                     (Form == X86II::MRMXm) ? 0 : Form - X86II::MRM0m, TSFlags,
                     Rex, CurByte, OS, Fixups, STI);
    CurOp += X86::AddrNumOperands;
    break;

  // MRM_C0..MRM_FF: fixed two-byte encodings; the second byte is the literal
  // ModRM value C0h+offset with no operands.
  case X86II::MRM_C0:
  case X86II::MRM_C1:
  case X86II::MRM_C2:
  case X86II::MRM_C3:
  case X86II::MRM_C4:
  case X86II::MRM_C5:
  case X86II::MRM_C6:
  case X86II::MRM_C7:
  case X86II::MRM_C8:
  case X86II::MRM_C9:
  case X86II::MRM_CA:
  case X86II::MRM_CB:
  case X86II::MRM_CC:
  case X86II::MRM_CD:
  case X86II::MRM_CE:
  case X86II::MRM_CF:
  case X86II::MRM_D0:
  case X86II::MRM_D1:
  case X86II::MRM_D2:
  case X86II::MRM_D3:
  case X86II::MRM_D4:
  case X86II::MRM_D5:
  case X86II::MRM_D6:
  case X86II::MRM_D7:
  case X86II::MRM_D8:
  case X86II::MRM_D9:
  case X86II::MRM_DA:
  case X86II::MRM_DB:
  case X86II::MRM_DC:
  case X86II::MRM_DD:
  case X86II::MRM_DE:
  case X86II::MRM_DF:
  case X86II::MRM_E0:
  case X86II::MRM_E1:
  case X86II::MRM_E2:
  case X86II::MRM_E3:
  case X86II::MRM_E4:
  case X86II::MRM_E5:
  case X86II::MRM_E6:
  case X86II::MRM_E7:
  case X86II::MRM_E8:
  case X86II::MRM_E9:
  case X86II::MRM_EA:
  case X86II::MRM_EB:
  case X86II::MRM_EC:
  case X86II::MRM_ED:
  case X86II::MRM_EE:
  case X86II::MRM_EF:
  case X86II::MRM_F0:
  case X86II::MRM_F1:
  case X86II::MRM_F2:
  case X86II::MRM_F3:
  case X86II::MRM_F4:
  case X86II::MRM_F5:
  case X86II::MRM_F6:
  case X86II::MRM_F7:
  case X86II::MRM_F8:
  case X86II::MRM_F9:
  case X86II::MRM_FA:
  case X86II::MRM_FB:
  case X86II::MRM_FC:
  case X86II::MRM_FD:
  case X86II::MRM_FE:
  case X86II::MRM_FF:
    emitByte(BaseOpcode, CurByte, OS);
    emitByte(0xC0 + Form - X86II::MRM_C0, CurByte, OS);
    break;
  }

  if (HasVEX_I8Reg) {
    // The last source register of a 4 operand instruction in AVX is encoded
    // in bits[7:4] of a immediate byte.
    assert(I8RegNum < 16 && "Register encoding out of range");
    I8RegNum <<= 4;
    if (CurOp != NumOps) {
      unsigned Val = MI.getOperand(CurOp++).getImm();
      assert(Val < 16 && "Immediate operand value out of range");
      I8RegNum |= Val;
    }
    emitImmediate(MCOperand::createImm(I8RegNum), MI.getLoc(), 1, FK_Data_1,
                  CurByte, OS, Fixups);
  } else {
    // If there is a remaining operand, it must be a trailing immediate. Emit it
    // according to the right size for the instruction. Some instructions
    // (SSE4a extrq and insertq) have two trailing immediates.
    while (CurOp != NumOps && NumOps - CurOp <= 2) {
      emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                    X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                    CurByte, OS, Fixups);
    }
  }

  // 3DNow! instructions put the real opcode after the operands.
  if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow)
    emitByte(X86II::getBaseOpcodeFor(TSFlags), CurByte, OS);

#ifndef NDEBUG
  // FIXME: Verify.
  if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
    errs() << "Cannot encode all operands of: ";
    MI.dump();
    errs() << '\n';
    abort();
  }
#endif
}
1763 | |
/// Factory for the X86 MC code emitter; the caller (the MC layer) takes
/// ownership of the returned object. \p MRI is unused but required by the
/// common MCCodeEmitter factory signature.
MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
                                            const MCRegisterInfo &MRI,
                                            MCContext &Ctx) {
  return new X86MCCodeEmitter(MCII, Ctx);
}