// File: lib/Target/X86/X86MCInstLower.cpp
// Static-analyzer report: line 1098, column 24 — "The right operand of '!='
// is a garbage value due to array index out of bounds". (That line is not
// part of this excerpt.)
1 | //===-- X86MCInstLower.cpp - Convert X86 MachineInstr to an MCInst --------===// | |||
2 | // | |||
3 | // The LLVM Compiler Infrastructure | |||
4 | // | |||
5 | // This file is distributed under the University of Illinois Open Source | |||
6 | // License. See LICENSE.TXT for details. | |||
7 | // | |||
8 | //===----------------------------------------------------------------------===// | |||
9 | // | |||
10 | // This file contains code to lower X86 MachineInstrs to their corresponding | |||
11 | // MCInst records. | |||
12 | // | |||
13 | //===----------------------------------------------------------------------===// | |||
14 | ||||
15 | #include "InstPrinter/X86ATTInstPrinter.h" | |||
16 | #include "InstPrinter/X86InstComments.h" | |||
17 | #include "MCTargetDesc/X86BaseInfo.h" | |||
18 | #include "MCTargetDesc/X86TargetStreamer.h" | |||
19 | #include "Utils/X86ShuffleDecode.h" | |||
20 | #include "X86AsmPrinter.h" | |||
21 | #include "X86RegisterInfo.h" | |||
22 | #include "X86ShuffleDecodeConstantPool.h" | |||
23 | #include "llvm/ADT/Optional.h" | |||
24 | #include "llvm/ADT/SmallString.h" | |||
25 | #include "llvm/ADT/iterator_range.h" | |||
26 | #include "llvm/CodeGen/MachineConstantPool.h" | |||
27 | #include "llvm/CodeGen/MachineFunction.h" | |||
28 | #include "llvm/CodeGen/MachineModuleInfoImpls.h" | |||
29 | #include "llvm/CodeGen/MachineOperand.h" | |||
30 | #include "llvm/CodeGen/StackMaps.h" | |||
31 | #include "llvm/CodeGen/TargetLoweringObjectFile.h" | |||
32 | #include "llvm/IR/DataLayout.h" | |||
33 | #include "llvm/IR/GlobalValue.h" | |||
34 | #include "llvm/IR/Mangler.h" | |||
35 | #include "llvm/MC/MCAsmInfo.h" | |||
36 | #include "llvm/MC/MCCodeEmitter.h" | |||
37 | #include "llvm/MC/MCContext.h" | |||
38 | #include "llvm/MC/MCExpr.h" | |||
39 | #include "llvm/MC/MCFixup.h" | |||
40 | #include "llvm/MC/MCInst.h" | |||
41 | #include "llvm/MC/MCInstBuilder.h" | |||
42 | #include "llvm/MC/MCSection.h" | |||
43 | #include "llvm/MC/MCSectionELF.h" | |||
44 | #include "llvm/MC/MCStreamer.h" | |||
45 | #include "llvm/MC/MCSymbol.h" | |||
46 | #include "llvm/MC/MCSymbolELF.h" | |||
47 | ||||
48 | using namespace llvm; | |||
49 | ||||
50 | namespace { | |||
51 | ||||
52 | /// X86MCInstLower - This class is used to lower an MachineInstr into an MCInst. | |||
53 | class X86MCInstLower { | |||
54 | MCContext &Ctx; | |||
55 | const MachineFunction &MF; | |||
56 | const TargetMachine &TM; | |||
57 | const MCAsmInfo &MAI; | |||
58 | X86AsmPrinter &AsmPrinter; | |||
59 | public: | |||
60 | X86MCInstLower(const MachineFunction &MF, X86AsmPrinter &asmprinter); | |||
61 | ||||
62 | Optional<MCOperand> LowerMachineOperand(const MachineInstr *MI, | |||
63 | const MachineOperand &MO) const; | |||
64 | void Lower(const MachineInstr *MI, MCInst &OutMI) const; | |||
65 | ||||
66 | MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const; | |||
67 | MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const; | |||
68 | ||||
69 | private: | |||
70 | MachineModuleInfoMachO &getMachOMMI() const; | |||
71 | }; | |||
72 | ||||
73 | } // end anonymous namespace | |||
74 | ||||
75 | // Emit a minimal sequence of nops spanning NumBytes bytes. | |||
76 | static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit, | |||
77 | const MCSubtargetInfo &STI); | |||
78 | ||||
79 | void X86AsmPrinter::StackMapShadowTracker::count(MCInst &Inst, | |||
80 | const MCSubtargetInfo &STI, | |||
81 | MCCodeEmitter *CodeEmitter) { | |||
82 | if (InShadow) { | |||
83 | SmallString<256> Code; | |||
84 | SmallVector<MCFixup, 4> Fixups; | |||
85 | raw_svector_ostream VecOS(Code); | |||
86 | CodeEmitter->encodeInstruction(Inst, VecOS, Fixups, STI); | |||
87 | CurrentShadowSize += Code.size(); | |||
88 | if (CurrentShadowSize >= RequiredShadowSize) | |||
89 | InShadow = false; // The shadow is big enough. Stop counting. | |||
90 | } | |||
91 | } | |||
92 | ||||
93 | void X86AsmPrinter::StackMapShadowTracker::emitShadowPadding( | |||
94 | MCStreamer &OutStreamer, const MCSubtargetInfo &STI) { | |||
95 | if (InShadow && CurrentShadowSize < RequiredShadowSize) { | |||
96 | InShadow = false; | |||
97 | EmitNops(OutStreamer, RequiredShadowSize - CurrentShadowSize, | |||
98 | MF->getSubtarget<X86Subtarget>().is64Bit(), STI); | |||
99 | } | |||
100 | } | |||
101 | ||||
102 | void X86AsmPrinter::EmitAndCountInstruction(MCInst &Inst) { | |||
103 | OutStreamer->EmitInstruction(Inst, getSubtargetInfo(), | |||
104 | EnablePrintSchedInfo && | |||
105 | !(Inst.getFlags() & X86::NO_SCHED_INFO)); | |||
106 | SMShadowTracker.count(Inst, getSubtargetInfo(), CodeEmitter.get()); | |||
107 | } | |||
108 | ||||
109 | X86MCInstLower::X86MCInstLower(const MachineFunction &mf, | |||
110 | X86AsmPrinter &asmprinter) | |||
111 | : Ctx(mf.getContext()), MF(mf), TM(mf.getTarget()), MAI(*TM.getMCAsmInfo()), | |||
112 | AsmPrinter(asmprinter) {} | |||
113 | ||||
114 | MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const { | |||
115 | return MF.getMMI().getObjFileInfo<MachineModuleInfoMachO>(); | |||
116 | } | |||
117 | ||||
118 | ||||
119 | /// GetSymbolFromOperand - Lower an MO_GlobalAddress or MO_ExternalSymbol | |||
120 | /// operand to an MCSymbol. | |||
121 | MCSymbol *X86MCInstLower:: | |||
122 | GetSymbolFromOperand(const MachineOperand &MO) const { | |||
123 | const DataLayout &DL = MF.getDataLayout(); | |||
124 | assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) && "Isn't a symbol reference")(static_cast <bool> ((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) && "Isn't a symbol reference") ? void (0 ) : __assert_fail ("(MO.isGlobal() || MO.isSymbol() || MO.isMBB()) && \"Isn't a symbol reference\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 124, __extension__ __PRETTY_FUNCTION__)); | |||
125 | ||||
126 | MCSymbol *Sym = nullptr; | |||
127 | SmallString<128> Name; | |||
128 | StringRef Suffix; | |||
129 | ||||
130 | switch (MO.getTargetFlags()) { | |||
131 | case X86II::MO_DLLIMPORT: | |||
132 | // Handle dllimport linkage. | |||
133 | Name += "__imp_"; | |||
134 | break; | |||
135 | case X86II::MO_DARWIN_NONLAZY: | |||
136 | case X86II::MO_DARWIN_NONLAZY_PIC_BASE: | |||
137 | Suffix = "$non_lazy_ptr"; | |||
138 | break; | |||
139 | } | |||
140 | ||||
141 | if (!Suffix.empty()) | |||
142 | Name += DL.getPrivateGlobalPrefix(); | |||
143 | ||||
144 | if (MO.isGlobal()) { | |||
145 | const GlobalValue *GV = MO.getGlobal(); | |||
146 | AsmPrinter.getNameWithPrefix(Name, GV); | |||
147 | } else if (MO.isSymbol()) { | |||
148 | Mangler::getNameWithPrefix(Name, MO.getSymbolName(), DL); | |||
149 | } else if (MO.isMBB()) { | |||
150 | assert(Suffix.empty())(static_cast <bool> (Suffix.empty()) ? void (0) : __assert_fail ("Suffix.empty()", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 150, __extension__ __PRETTY_FUNCTION__)); | |||
151 | Sym = MO.getMBB()->getSymbol(); | |||
152 | } | |||
153 | ||||
154 | Name += Suffix; | |||
155 | if (!Sym) | |||
156 | Sym = Ctx.getOrCreateSymbol(Name); | |||
157 | ||||
158 | // If the target flags on the operand changes the name of the symbol, do that | |||
159 | // before we return the symbol. | |||
160 | switch (MO.getTargetFlags()) { | |||
161 | default: break; | |||
162 | case X86II::MO_DARWIN_NONLAZY: | |||
163 | case X86II::MO_DARWIN_NONLAZY_PIC_BASE: { | |||
164 | MachineModuleInfoImpl::StubValueTy &StubSym = | |||
165 | getMachOMMI().getGVStubEntry(Sym); | |||
166 | if (!StubSym.getPointer()) { | |||
167 | assert(MO.isGlobal() && "Extern symbol not handled yet")(static_cast <bool> (MO.isGlobal() && "Extern symbol not handled yet" ) ? void (0) : __assert_fail ("MO.isGlobal() && \"Extern symbol not handled yet\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 167, __extension__ __PRETTY_FUNCTION__)); | |||
168 | StubSym = | |||
169 | MachineModuleInfoImpl:: | |||
170 | StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()), | |||
171 | !MO.getGlobal()->hasInternalLinkage()); | |||
172 | } | |||
173 | break; | |||
174 | } | |||
175 | } | |||
176 | ||||
177 | return Sym; | |||
178 | } | |||
179 | ||||
180 | MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO, | |||
181 | MCSymbol *Sym) const { | |||
182 | // FIXME: We would like an efficient form for this, so we don't have to do a | |||
183 | // lot of extra uniquing. | |||
184 | const MCExpr *Expr = nullptr; | |||
185 | MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None; | |||
186 | ||||
187 | switch (MO.getTargetFlags()) { | |||
188 | default: llvm_unreachable("Unknown target flag on GV operand")::llvm::llvm_unreachable_internal("Unknown target flag on GV operand" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 188); | |||
189 | case X86II::MO_NO_FLAG: // No flag. | |||
190 | // These affect the name of the symbol, not any suffix. | |||
191 | case X86II::MO_DARWIN_NONLAZY: | |||
192 | case X86II::MO_DLLIMPORT: | |||
193 | break; | |||
194 | ||||
195 | case X86II::MO_TLVP: RefKind = MCSymbolRefExpr::VK_TLVP; break; | |||
196 | case X86II::MO_TLVP_PIC_BASE: | |||
197 | Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx); | |||
198 | // Subtract the pic base. | |||
199 | Expr = MCBinaryExpr::createSub(Expr, | |||
200 | MCSymbolRefExpr::create(MF.getPICBaseSymbol(), | |||
201 | Ctx), | |||
202 | Ctx); | |||
203 | break; | |||
204 | case X86II::MO_SECREL: RefKind = MCSymbolRefExpr::VK_SECREL; break; | |||
205 | case X86II::MO_TLSGD: RefKind = MCSymbolRefExpr::VK_TLSGD; break; | |||
206 | case X86II::MO_TLSLD: RefKind = MCSymbolRefExpr::VK_TLSLD; break; | |||
207 | case X86II::MO_TLSLDM: RefKind = MCSymbolRefExpr::VK_TLSLDM; break; | |||
208 | case X86II::MO_GOTTPOFF: RefKind = MCSymbolRefExpr::VK_GOTTPOFF; break; | |||
209 | case X86II::MO_INDNTPOFF: RefKind = MCSymbolRefExpr::VK_INDNTPOFF; break; | |||
210 | case X86II::MO_TPOFF: RefKind = MCSymbolRefExpr::VK_TPOFF; break; | |||
211 | case X86II::MO_DTPOFF: RefKind = MCSymbolRefExpr::VK_DTPOFF; break; | |||
212 | case X86II::MO_NTPOFF: RefKind = MCSymbolRefExpr::VK_NTPOFF; break; | |||
213 | case X86II::MO_GOTNTPOFF: RefKind = MCSymbolRefExpr::VK_GOTNTPOFF; break; | |||
214 | case X86II::MO_GOTPCREL: RefKind = MCSymbolRefExpr::VK_GOTPCREL; break; | |||
215 | case X86II::MO_GOT: RefKind = MCSymbolRefExpr::VK_GOT; break; | |||
216 | case X86II::MO_GOTOFF: RefKind = MCSymbolRefExpr::VK_GOTOFF; break; | |||
217 | case X86II::MO_PLT: RefKind = MCSymbolRefExpr::VK_PLT; break; | |||
218 | case X86II::MO_ABS8: RefKind = MCSymbolRefExpr::VK_X86_ABS8; break; | |||
219 | case X86II::MO_PIC_BASE_OFFSET: | |||
220 | case X86II::MO_DARWIN_NONLAZY_PIC_BASE: | |||
221 | Expr = MCSymbolRefExpr::create(Sym, Ctx); | |||
222 | // Subtract the pic base. | |||
223 | Expr = MCBinaryExpr::createSub(Expr, | |||
224 | MCSymbolRefExpr::create(MF.getPICBaseSymbol(), Ctx), | |||
225 | Ctx); | |||
226 | if (MO.isJTI()) { | |||
227 | assert(MAI.doesSetDirectiveSuppressReloc())(static_cast <bool> (MAI.doesSetDirectiveSuppressReloc( )) ? void (0) : __assert_fail ("MAI.doesSetDirectiveSuppressReloc()" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 227, __extension__ __PRETTY_FUNCTION__)); | |||
228 | // If .set directive is supported, use it to reduce the number of | |||
229 | // relocations the assembler will generate for differences between | |||
230 | // local labels. This is only safe when the symbols are in the same | |||
231 | // section so we are restricting it to jumptable references. | |||
232 | MCSymbol *Label = Ctx.createTempSymbol(); | |||
233 | AsmPrinter.OutStreamer->EmitAssignment(Label, Expr); | |||
234 | Expr = MCSymbolRefExpr::create(Label, Ctx); | |||
235 | } | |||
236 | break; | |||
237 | } | |||
238 | ||||
239 | if (!Expr) | |||
240 | Expr = MCSymbolRefExpr::create(Sym, RefKind, Ctx); | |||
241 | ||||
242 | if (!MO.isJTI() && !MO.isMBB() && MO.getOffset()) | |||
243 | Expr = MCBinaryExpr::createAdd(Expr, | |||
244 | MCConstantExpr::create(MO.getOffset(), Ctx), | |||
245 | Ctx); | |||
246 | return MCOperand::createExpr(Expr); | |||
247 | } | |||
248 | ||||
249 | ||||
250 | /// \brief Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instruction with | |||
251 | /// a short fixed-register form. | |||
252 | static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) { | |||
253 | unsigned ImmOp = Inst.getNumOperands() - 1; | |||
254 | assert(Inst.getOperand(0).isReg() &&(static_cast <bool> (Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr ()) && ((Inst.getNumOperands() == 3 && Inst.getOperand (1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand (1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? void (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 258, __extension__ __PRETTY_FUNCTION__)) | |||
255 | (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) &&(static_cast <bool> (Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr ()) && ((Inst.getNumOperands() == 3 && Inst.getOperand (1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand (1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? void (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 258, __extension__ __PRETTY_FUNCTION__)) | |||
256 | ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() &&(static_cast <bool> (Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr ()) && ((Inst.getNumOperands() == 3 && Inst.getOperand (1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand (1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? void (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 258, __extension__ __PRETTY_FUNCTION__)) | |||
257 | Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) ||(static_cast <bool> (Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr ()) && ((Inst.getNumOperands() == 3 && Inst.getOperand (1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand (1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? void (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 258, __extension__ __PRETTY_FUNCTION__)) | |||
258 | Inst.getNumOperands() == 2) && "Unexpected instruction!")(static_cast <bool> (Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr ()) && ((Inst.getNumOperands() == 3 && Inst.getOperand (1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand (1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? void (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 258, __extension__ __PRETTY_FUNCTION__)); | |||
259 | ||||
260 | // Check whether the destination register can be fixed. | |||
261 | unsigned Reg = Inst.getOperand(0).getReg(); | |||
262 | if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX) | |||
263 | return; | |||
264 | ||||
265 | // If so, rewrite the instruction. | |||
266 | MCOperand Saved = Inst.getOperand(ImmOp); | |||
267 | Inst = MCInst(); | |||
268 | Inst.setOpcode(Opcode); | |||
269 | Inst.addOperand(Saved); | |||
270 | } | |||
271 | ||||
272 | /// \brief If a movsx instruction has a shorter encoding for the used register | |||
273 | /// simplify the instruction to use it instead. | |||
274 | static void SimplifyMOVSX(MCInst &Inst) { | |||
275 | unsigned NewOpcode = 0; | |||
276 | unsigned Op0 = Inst.getOperand(0).getReg(), Op1 = Inst.getOperand(1).getReg(); | |||
277 | switch (Inst.getOpcode()) { | |||
278 | default: | |||
279 | llvm_unreachable("Unexpected instruction!")::llvm::llvm_unreachable_internal("Unexpected instruction!", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 279); | |||
280 | case X86::MOVSX16rr8: // movsbw %al, %ax --> cbtw | |||
281 | if (Op0 == X86::AX && Op1 == X86::AL) | |||
282 | NewOpcode = X86::CBW; | |||
283 | break; | |||
284 | case X86::MOVSX32rr16: // movswl %ax, %eax --> cwtl | |||
285 | if (Op0 == X86::EAX && Op1 == X86::AX) | |||
286 | NewOpcode = X86::CWDE; | |||
287 | break; | |||
288 | case X86::MOVSX64rr32: // movslq %eax, %rax --> cltq | |||
289 | if (Op0 == X86::RAX && Op1 == X86::EAX) | |||
290 | NewOpcode = X86::CDQE; | |||
291 | break; | |||
292 | } | |||
293 | ||||
294 | if (NewOpcode != 0) { | |||
295 | Inst = MCInst(); | |||
296 | Inst.setOpcode(NewOpcode); | |||
297 | } | |||
298 | } | |||
299 | ||||
300 | /// \brief Simplify things like MOV32rm to MOV32o32a. | |||
301 | static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst, | |||
302 | unsigned Opcode) { | |||
303 | // Don't make these simplifications in 64-bit mode; other assemblers don't | |||
304 | // perform them because they make the code larger. | |||
305 | if (Printer.getSubtarget().is64Bit()) | |||
306 | return; | |||
307 | ||||
308 | bool IsStore = Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg(); | |||
309 | unsigned AddrBase = IsStore; | |||
310 | unsigned RegOp = IsStore ? 0 : 5; | |||
311 | unsigned AddrOp = AddrBase + 3; | |||
312 | assert(Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() &&(static_cast <bool> (Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp ).isExpr() || Inst.getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? void (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 319, __extension__ __PRETTY_FUNCTION__)) | |||
313 | Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() &&(static_cast <bool> (Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp ).isExpr() || Inst.getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? void (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 319, __extension__ __PRETTY_FUNCTION__)) | |||
314 | Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() &&(static_cast <bool> (Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp ).isExpr() || Inst.getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? void (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 319, __extension__ __PRETTY_FUNCTION__)) | |||
315 | Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() &&(static_cast <bool> (Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp ).isExpr() || Inst.getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? void (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 319, __extension__ __PRETTY_FUNCTION__)) | |||
316 | Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() &&(static_cast <bool> (Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp ).isExpr() || Inst.getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? void (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 319, __extension__ __PRETTY_FUNCTION__)) | |||
317 | (Inst.getOperand(AddrOp).isExpr() ||(static_cast <bool> (Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp ).isExpr() || Inst.getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? void (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 319, __extension__ __PRETTY_FUNCTION__)) | |||
318 | Inst.getOperand(AddrOp).isImm()) &&(static_cast <bool> (Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp ).isExpr() || Inst.getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? void (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 319, __extension__ __PRETTY_FUNCTION__)) | |||
319 | "Unexpected instruction!")(static_cast <bool> (Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp ).isExpr() || Inst.getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? void (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 319, __extension__ __PRETTY_FUNCTION__)); | |||
320 | ||||
321 | // Check whether the destination register can be fixed. | |||
322 | unsigned Reg = Inst.getOperand(RegOp).getReg(); | |||
323 | if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX) | |||
324 | return; | |||
325 | ||||
326 | // Check whether this is an absolute address. | |||
327 | // FIXME: We know TLVP symbol refs aren't, but there should be a better way | |||
328 | // to do this here. | |||
329 | bool Absolute = true; | |||
330 | if (Inst.getOperand(AddrOp).isExpr()) { | |||
331 | const MCExpr *MCE = Inst.getOperand(AddrOp).getExpr(); | |||
332 | if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE)) | |||
333 | if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP) | |||
334 | Absolute = false; | |||
335 | } | |||
336 | ||||
337 | if (Absolute && | |||
338 | (Inst.getOperand(AddrBase + X86::AddrBaseReg).getReg() != 0 || | |||
339 | Inst.getOperand(AddrBase + X86::AddrScaleAmt).getImm() != 1 || | |||
340 | Inst.getOperand(AddrBase + X86::AddrIndexReg).getReg() != 0)) | |||
341 | return; | |||
342 | ||||
343 | // If so, rewrite the instruction. | |||
344 | MCOperand Saved = Inst.getOperand(AddrOp); | |||
345 | MCOperand Seg = Inst.getOperand(AddrBase + X86::AddrSegmentReg); | |||
346 | Inst = MCInst(); | |||
347 | Inst.setOpcode(Opcode); | |||
348 | Inst.addOperand(Saved); | |||
349 | Inst.addOperand(Seg); | |||
350 | } | |||
351 | ||||
352 | static unsigned getRetOpcode(const X86Subtarget &Subtarget) { | |||
353 | return Subtarget.is64Bit() ? X86::RETQ : X86::RETL; | |||
354 | } | |||
355 | ||||
356 | Optional<MCOperand> | |||
357 | X86MCInstLower::LowerMachineOperand(const MachineInstr *MI, | |||
358 | const MachineOperand &MO) const { | |||
359 | switch (MO.getType()) { | |||
360 | default: | |||
361 | MI->print(errs()); | |||
362 | llvm_unreachable("unknown operand type")::llvm::llvm_unreachable_internal("unknown operand type", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 362); | |||
363 | case MachineOperand::MO_Register: | |||
364 | // Ignore all implicit register operands. | |||
365 | if (MO.isImplicit()) | |||
366 | return None; | |||
367 | return MCOperand::createReg(MO.getReg()); | |||
368 | case MachineOperand::MO_Immediate: | |||
369 | return MCOperand::createImm(MO.getImm()); | |||
370 | case MachineOperand::MO_MachineBasicBlock: | |||
371 | case MachineOperand::MO_GlobalAddress: | |||
372 | case MachineOperand::MO_ExternalSymbol: | |||
373 | return LowerSymbolOperand(MO, GetSymbolFromOperand(MO)); | |||
374 | case MachineOperand::MO_MCSymbol: | |||
375 | return LowerSymbolOperand(MO, MO.getMCSymbol()); | |||
376 | case MachineOperand::MO_JumpTableIndex: | |||
377 | return LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex())); | |||
378 | case MachineOperand::MO_ConstantPoolIndex: | |||
379 | return LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex())); | |||
380 | case MachineOperand::MO_BlockAddress: | |||
381 | return LowerSymbolOperand( | |||
382 | MO, AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress())); | |||
383 | case MachineOperand::MO_RegisterMask: | |||
384 | // Ignore call clobbers. | |||
385 | return None; | |||
386 | } | |||
387 | } | |||
388 | ||||
// Lower \p MI to an MCInst in \p OutMI.
//
// Operands are lowered one-for-one by LowerMachineOperand (operands that
// lower to nothing, e.g. call-clobber register masks, are dropped).  The
// switch below then rewrites a number of pseudo or suboptimally-selected
// opcodes into the form that should actually be encoded.  Cases that map one
// pseudo onto another real opcode jump back to the ReSimplify label so the
// replacement opcode gets a second round of simplification (e.g.
// ADD*_DB -> OR* -> short %eax immediate form).
void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
  OutMI.setOpcode(MI->getOpcode());

  for (const MachineOperand &MO : MI->operands())
    if (auto MaybeMCOp = LowerMachineOperand(MI, MO))
      OutMI.addOperand(MaybeMCOp.getValue());

  // Handle a few special cases to eliminate operand modifiers.
ReSimplify:
  switch (OutMI.getOpcode()) {
  case X86::LEA64_32r:
  case X86::LEA64r:
  case X86::LEA16r:
  case X86::LEA32r:
    // LEA should have a segment register, but it must be empty.
    assert(OutMI.getNumOperands() == 1+X86::AddrNumOperands &&
           "Unexpected # of LEA operands");
    assert(OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 &&
           "LEA has segment specified!");
    break;

  // Commute operands to get a smaller encoding by using VEX.R instead of VEX.B
  // if one of the registers is extended, but other isn't.
  case X86::VMOVZPQILo2PQIrr:
  case X86::VMOVAPDrr:
  case X86::VMOVAPDYrr:
  case X86::VMOVAPSrr:
  case X86::VMOVAPSYrr:
  case X86::VMOVDQArr:
  case X86::VMOVDQAYrr:
  case X86::VMOVDQUrr:
  case X86::VMOVDQUYrr:
  case X86::VMOVUPDrr:
  case X86::VMOVUPDYrr:
  case X86::VMOVUPSrr:
  case X86::VMOVUPSYrr: {
    // Only rewrite when the destination is a non-extended register and the
    // source is an extended one; the _REV forms swap which operand lands in
    // VEX.R vs VEX.B.
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(1).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::VMOVZPQILo2PQIrr: NewOpc = X86::VMOVPQI2QIrr;   break;
      case X86::VMOVAPDrr:        NewOpc = X86::VMOVAPDrr_REV;  break;
      case X86::VMOVAPDYrr:       NewOpc = X86::VMOVAPDYrr_REV; break;
      case X86::VMOVAPSrr:        NewOpc = X86::VMOVAPSrr_REV;  break;
      case X86::VMOVAPSYrr:       NewOpc = X86::VMOVAPSYrr_REV; break;
      case X86::VMOVDQArr:        NewOpc = X86::VMOVDQArr_REV;  break;
      case X86::VMOVDQAYrr:       NewOpc = X86::VMOVDQAYrr_REV; break;
      case X86::VMOVDQUrr:        NewOpc = X86::VMOVDQUrr_REV;  break;
      case X86::VMOVDQUYrr:       NewOpc = X86::VMOVDQUYrr_REV; break;
      case X86::VMOVUPDrr:        NewOpc = X86::VMOVUPDrr_REV;  break;
      case X86::VMOVUPDYrr:       NewOpc = X86::VMOVUPDYrr_REV; break;
      case X86::VMOVUPSrr:        NewOpc = X86::VMOVUPSrr_REV;  break;
      case X86::VMOVUPSYrr:       NewOpc = X86::VMOVUPSYrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }
  case X86::VMOVSDrr:
  case X86::VMOVSSrr: {
    // Same VEX.R/VEX.B commute as above, but VMOVSD/VMOVSS are three-operand
    // instructions, so the extended-register check is on operand 2.
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(2).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV; break;
      case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }

  // TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions have register
  // inputs modeled as normal uses instead of implicit uses.  As such, truncate
  // off all but the first operand (the callee).  FIXME: Change isel.
  case X86::TAILJMPr64:
  case X86::TAILJMPr64_REX:
  case X86::CALL64r:
  case X86::CALL64pcrel32: {
    unsigned Opcode = OutMI.getOpcode();
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
    break;
  }

  case X86::CLEANUPRET: {
    // Replace CLEANUPRET with the appropriate RET.
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
    break;
  }

  case X86::CATCHRET: {
    // Replace CATCHRET with the appropriate RET.
    const X86Subtarget &Subtarget = AsmPrinter.getSubtarget();
    unsigned ReturnReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(Subtarget));
    OutMI.addOperand(MCOperand::createReg(ReturnReg));
    break;
  }

  // TAILJMPd, TAILJMPd64, TailJMPd_cc - Lower to the correct jump instruction.
  // NOTE: the enclosing braces give `Opcode` a scope that spans several case
  // labels; each case sets it and jumps to SetTailJmpOpcode below.
  { unsigned Opcode;
  case X86::TAILJMPr: Opcode = X86::JMP32r; goto SetTailJmpOpcode;
  case X86::TAILJMPd:
  case X86::TAILJMPd64: Opcode = X86::JMP_1; goto SetTailJmpOpcode;
  case X86::TAILJMPd_CC:
  case X86::TAILJMPd64_CC:
    // The condition code lives in operand 1 of the original MachineInstr.
    Opcode = X86::GetCondBranchFromCond(
        static_cast<X86::CondCode>(MI->getOperand(1).getImm()));
    goto SetTailJmpOpcode;

  SetTailJmpOpcode:
    // Keep only the jump target (operand 0); drop everything else.
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::DEC16r:
  case X86::DEC32r:
  case X86::INC16r:
  case X86::INC32r:
    // If we aren't in 64-bit mode we can use the 1-byte inc/dec instructions.
    if (!AsmPrinter.getSubtarget().is64Bit()) {
      unsigned Opcode;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::DEC16r: Opcode = X86::DEC16r_alt; break;
      case X86::DEC32r: Opcode = X86::DEC32r_alt; break;
      case X86::INC16r: Opcode = X86::INC16r_alt; break;
      case X86::INC32r: Opcode = X86::INC32r_alt; break;
      }
      OutMI.setOpcode(Opcode);
    }
    break;

  // These are pseudo-ops for OR to help with the OR->ADD transformation.  We do
  // this with an ugly goto in case the resultant OR uses EAX and needs the
  // short form.
  case X86::ADD16rr_DB:   OutMI.setOpcode(X86::OR16rr);   goto ReSimplify;
  case X86::ADD32rr_DB:   OutMI.setOpcode(X86::OR32rr);   goto ReSimplify;
  case X86::ADD64rr_DB:   OutMI.setOpcode(X86::OR64rr);   goto ReSimplify;
  case X86::ADD16ri_DB:   OutMI.setOpcode(X86::OR16ri);   goto ReSimplify;
  case X86::ADD32ri_DB:   OutMI.setOpcode(X86::OR32ri);   goto ReSimplify;
  case X86::ADD64ri32_DB: OutMI.setOpcode(X86::OR64ri32); goto ReSimplify;
  case X86::ADD16ri8_DB:  OutMI.setOpcode(X86::OR16ri8);  goto ReSimplify;
  case X86::ADD32ri8_DB:  OutMI.setOpcode(X86::OR32ri8);  goto ReSimplify;
  case X86::ADD64ri8_DB:  OutMI.setOpcode(X86::OR64ri8);  goto ReSimplify;

  // Atomic load and store require a separate pseudo-inst because Acquire
  // implies mayStore and Release implies mayLoad; fix these to regular MOV
  // instructions here
  case X86::ACQUIRE_MOV8rm:    OutMI.setOpcode(X86::MOV8rm);    goto ReSimplify;
  case X86::ACQUIRE_MOV16rm:   OutMI.setOpcode(X86::MOV16rm);   goto ReSimplify;
  case X86::ACQUIRE_MOV32rm:   OutMI.setOpcode(X86::MOV32rm);   goto ReSimplify;
  case X86::ACQUIRE_MOV64rm:   OutMI.setOpcode(X86::MOV64rm);   goto ReSimplify;
  case X86::RELEASE_MOV8mr:    OutMI.setOpcode(X86::MOV8mr);    goto ReSimplify;
  case X86::RELEASE_MOV16mr:   OutMI.setOpcode(X86::MOV16mr);   goto ReSimplify;
  case X86::RELEASE_MOV32mr:   OutMI.setOpcode(X86::MOV32mr);   goto ReSimplify;
  case X86::RELEASE_MOV64mr:   OutMI.setOpcode(X86::MOV64mr);   goto ReSimplify;
  case X86::RELEASE_MOV8mi:    OutMI.setOpcode(X86::MOV8mi);    goto ReSimplify;
  case X86::RELEASE_MOV16mi:   OutMI.setOpcode(X86::MOV16mi);   goto ReSimplify;
  case X86::RELEASE_MOV32mi:   OutMI.setOpcode(X86::MOV32mi);   goto ReSimplify;
  case X86::RELEASE_MOV64mi32: OutMI.setOpcode(X86::MOV64mi32); goto ReSimplify;
  case X86::RELEASE_ADD8mi:    OutMI.setOpcode(X86::ADD8mi);    goto ReSimplify;
  case X86::RELEASE_ADD8mr:    OutMI.setOpcode(X86::ADD8mr);    goto ReSimplify;
  case X86::RELEASE_ADD32mi:   OutMI.setOpcode(X86::ADD32mi);   goto ReSimplify;
  case X86::RELEASE_ADD32mr:   OutMI.setOpcode(X86::ADD32mr);   goto ReSimplify;
  case X86::RELEASE_ADD64mi32: OutMI.setOpcode(X86::ADD64mi32); goto ReSimplify;
  case X86::RELEASE_ADD64mr:   OutMI.setOpcode(X86::ADD64mr);   goto ReSimplify;
  case X86::RELEASE_AND8mi:    OutMI.setOpcode(X86::AND8mi);    goto ReSimplify;
  case X86::RELEASE_AND8mr:    OutMI.setOpcode(X86::AND8mr);    goto ReSimplify;
  case X86::RELEASE_AND32mi:   OutMI.setOpcode(X86::AND32mi);   goto ReSimplify;
  case X86::RELEASE_AND32mr:   OutMI.setOpcode(X86::AND32mr);   goto ReSimplify;
  case X86::RELEASE_AND64mi32: OutMI.setOpcode(X86::AND64mi32); goto ReSimplify;
  case X86::RELEASE_AND64mr:   OutMI.setOpcode(X86::AND64mr);   goto ReSimplify;
  case X86::RELEASE_OR8mi:     OutMI.setOpcode(X86::OR8mi);     goto ReSimplify;
  case X86::RELEASE_OR8mr:     OutMI.setOpcode(X86::OR8mr);     goto ReSimplify;
  case X86::RELEASE_OR32mi:    OutMI.setOpcode(X86::OR32mi);    goto ReSimplify;
  case X86::RELEASE_OR32mr:    OutMI.setOpcode(X86::OR32mr);    goto ReSimplify;
  case X86::RELEASE_OR64mi32:  OutMI.setOpcode(X86::OR64mi32);  goto ReSimplify;
  case X86::RELEASE_OR64mr:    OutMI.setOpcode(X86::OR64mr);    goto ReSimplify;
  case X86::RELEASE_XOR8mi:    OutMI.setOpcode(X86::XOR8mi);    goto ReSimplify;
  case X86::RELEASE_XOR8mr:    OutMI.setOpcode(X86::XOR8mr);    goto ReSimplify;
  case X86::RELEASE_XOR32mi:   OutMI.setOpcode(X86::XOR32mi);   goto ReSimplify;
  case X86::RELEASE_XOR32mr:   OutMI.setOpcode(X86::XOR32mr);   goto ReSimplify;
  case X86::RELEASE_XOR64mi32: OutMI.setOpcode(X86::XOR64mi32); goto ReSimplify;
  case X86::RELEASE_XOR64mr:   OutMI.setOpcode(X86::XOR64mr);   goto ReSimplify;
  case X86::RELEASE_INC8m:     OutMI.setOpcode(X86::INC8m);     goto ReSimplify;
  case X86::RELEASE_INC16m:    OutMI.setOpcode(X86::INC16m);    goto ReSimplify;
  case X86::RELEASE_INC32m:    OutMI.setOpcode(X86::INC32m);    goto ReSimplify;
  case X86::RELEASE_INC64m:    OutMI.setOpcode(X86::INC64m);    goto ReSimplify;
  case X86::RELEASE_DEC8m:     OutMI.setOpcode(X86::DEC8m);     goto ReSimplify;
  case X86::RELEASE_DEC16m:    OutMI.setOpcode(X86::DEC16m);    goto ReSimplify;
  case X86::RELEASE_DEC32m:    OutMI.setOpcode(X86::DEC32m);    goto ReSimplify;
  case X86::RELEASE_DEC64m:    OutMI.setOpcode(X86::DEC64m);    goto ReSimplify;

  // We don't currently select the correct instruction form for instructions
  // which have a short %eax, etc. form.  Handle this by custom lowering, for
  // now.
  //
  // Note, we are currently not handling the following instructions:
  // MOV64ao8, MOV64o8a
  // XCHG16ar, XCHG32ar, XCHG64ar
  case X86::MOV8mr_NOREX:
  case X86::MOV8mr:
  case X86::MOV8rm_NOREX:
  case X86::MOV8rm:
  case X86::MOV16mr:
  case X86::MOV16rm:
  case X86::MOV32mr:
  case X86::MOV32rm: {
    unsigned NewOpc;
    switch (OutMI.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::MOV8mr_NOREX:
    case X86::MOV8mr:  NewOpc = X86::MOV8o32a;  break;
    case X86::MOV8rm_NOREX:
    case X86::MOV8rm:  NewOpc = X86::MOV8ao32;  break;
    case X86::MOV16mr: NewOpc = X86::MOV16o32a; break;
    case X86::MOV16rm: NewOpc = X86::MOV16ao32; break;
    case X86::MOV32mr: NewOpc = X86::MOV32o32a; break;
    case X86::MOV32rm: NewOpc = X86::MOV32ao32; break;
    }
    // SimplifyShortMoveForm only rewrites when the operands actually permit
    // the short accumulator/offset form.
    SimplifyShortMoveForm(AsmPrinter, OutMI, NewOpc);
    break;
  }

  case X86::ADC8ri: case X86::ADC16ri: case X86::ADC32ri: case X86::ADC64ri32:
  case X86::ADD8ri: case X86::ADD16ri: case X86::ADD32ri: case X86::ADD64ri32:
  case X86::AND8ri: case X86::AND16ri: case X86::AND32ri: case X86::AND64ri32:
  case X86::CMP8ri: case X86::CMP16ri: case X86::CMP32ri: case X86::CMP64ri32:
  case X86::OR8ri:  case X86::OR16ri:  case X86::OR32ri:  case X86::OR64ri32:
  case X86::SBB8ri: case X86::SBB16ri: case X86::SBB32ri: case X86::SBB64ri32:
  case X86::SUB8ri: case X86::SUB16ri: case X86::SUB32ri: case X86::SUB64ri32:
  case X86::TEST8ri:case X86::TEST16ri:case X86::TEST32ri:case X86::TEST64ri32:
  case X86::XOR8ri: case X86::XOR16ri: case X86::XOR32ri: case X86::XOR64ri32: {
    // ALU-with-immediate: map to the short "op %al/%ax/%eax/%rax, imm" form
    // when the destination is the accumulator.
    unsigned NewOpc;
    switch (OutMI.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::ADC8ri:     NewOpc = X86::ADC8i8;    break;
    case X86::ADC16ri:    NewOpc = X86::ADC16i16;  break;
    case X86::ADC32ri:    NewOpc = X86::ADC32i32;  break;
    case X86::ADC64ri32:  NewOpc = X86::ADC64i32;  break;
    case X86::ADD8ri:     NewOpc = X86::ADD8i8;    break;
    case X86::ADD16ri:    NewOpc = X86::ADD16i16;  break;
    case X86::ADD32ri:    NewOpc = X86::ADD32i32;  break;
    case X86::ADD64ri32:  NewOpc = X86::ADD64i32;  break;
    case X86::AND8ri:     NewOpc = X86::AND8i8;    break;
    case X86::AND16ri:    NewOpc = X86::AND16i16;  break;
    case X86::AND32ri:    NewOpc = X86::AND32i32;  break;
    case X86::AND64ri32:  NewOpc = X86::AND64i32;  break;
    case X86::CMP8ri:     NewOpc = X86::CMP8i8;    break;
    case X86::CMP16ri:    NewOpc = X86::CMP16i16;  break;
    case X86::CMP32ri:    NewOpc = X86::CMP32i32;  break;
    case X86::CMP64ri32:  NewOpc = X86::CMP64i32;  break;
    case X86::OR8ri:      NewOpc = X86::OR8i8;     break;
    case X86::OR16ri:     NewOpc = X86::OR16i16;   break;
    case X86::OR32ri:     NewOpc = X86::OR32i32;   break;
    case X86::OR64ri32:   NewOpc = X86::OR64i32;   break;
    case X86::SBB8ri:     NewOpc = X86::SBB8i8;    break;
    case X86::SBB16ri:    NewOpc = X86::SBB16i16;  break;
    case X86::SBB32ri:    NewOpc = X86::SBB32i32;  break;
    case X86::SBB64ri32:  NewOpc = X86::SBB64i32;  break;
    case X86::SUB8ri:     NewOpc = X86::SUB8i8;    break;
    case X86::SUB16ri:    NewOpc = X86::SUB16i16;  break;
    case X86::SUB32ri:    NewOpc = X86::SUB32i32;  break;
    case X86::SUB64ri32:  NewOpc = X86::SUB64i32;  break;
    case X86::TEST8ri:    NewOpc = X86::TEST8i8;   break;
    case X86::TEST16ri:   NewOpc = X86::TEST16i16; break;
    case X86::TEST32ri:   NewOpc = X86::TEST32i32; break;
    case X86::TEST64ri32: NewOpc = X86::TEST64i32; break;
    case X86::XOR8ri:     NewOpc = X86::XOR8i8;    break;
    case X86::XOR16ri:    NewOpc = X86::XOR16i16;  break;
    case X86::XOR32ri:    NewOpc = X86::XOR32i32;  break;
    case X86::XOR64ri32:  NewOpc = X86::XOR64i32;  break;
    }
    SimplifyShortImmForm(OutMI, NewOpc);
    break;
  }

  // Try to shrink some forms of movsx.
  case X86::MOVSX16rr8:
  case X86::MOVSX32rr16:
  case X86::MOVSX64rr32:
    SimplifyMOVSX(OutMI);
    break;
  }
}
692 | ||||
// Lower the TLS_addr32/64 and TLS_base_addr32/64 pseudos into the canonical
// LEA + call __tls_get_addr sequences.  The exact bytes emitted matter:
// linkers pattern-match these sequences when relaxing TLS models, so the
// padding prefixes around the 64-bit general-dynamic form must be preserved
// exactly.  (NOTE(review): the padding requirement follows from the emitted
// DATA16/REX prefixes below; see the psABI TLS sequences for confirmation.)
void X86AsmPrinter::LowerTlsAddr(X86MCInstLower &MCInstLowering,
                                 const MachineInstr &MI) {

  bool is64Bits = MI.getOpcode() == X86::TLS_addr64 ||
                  MI.getOpcode() == X86::TLS_base_addr64;

  // Only the 64-bit general-dynamic form (TLS_addr64) gets the size padding.
  bool needsPadding = MI.getOpcode() == X86::TLS_addr64;

  MCContext &context = OutStreamer->getContext();

  if (needsPadding)
    EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));

  // Pick the relocation variant for the symbol reference:
  //   TLSGD  - general dynamic, TLSLDM - 32-bit local dynamic,
  //   TLSLD  - 64-bit local dynamic.
  MCSymbolRefExpr::VariantKind SRVK;
  switch (MI.getOpcode()) {
  case X86::TLS_addr32:
  case X86::TLS_addr64:
    SRVK = MCSymbolRefExpr::VK_TLSGD;
    break;
  case X86::TLS_base_addr32:
    SRVK = MCSymbolRefExpr::VK_TLSLDM;
    break;
  case X86::TLS_base_addr64:
    SRVK = MCSymbolRefExpr::VK_TLSLD;
    break;
  default:
    llvm_unreachable("unexpected opcode");
  }

  // The TLS symbol is operand 3 of the pseudo.
  MCSymbol *sym = MCInstLowering.GetSymbolFromOperand(MI.getOperand(3));
  const MCSymbolRefExpr *symRef = MCSymbolRefExpr::create(sym, SRVK, context);

  // Build the address-materializing LEA.  Three shapes:
  //   64-bit:            leaq sym@<kind>(%rip), %rdi
  //   32-bit LD (TLSLDM): leal sym@tlsldm(%ebx), %eax
  //   32-bit GD:          leal sym@tlsgd(,%ebx,1), %eax
  MCInst LEA;
  if (is64Bits) {
    LEA.setOpcode(X86::LEA64r);
    LEA.addOperand(MCOperand::createReg(X86::RDI)); // dest
    LEA.addOperand(MCOperand::createReg(X86::RIP)); // base
    LEA.addOperand(MCOperand::createImm(1));        // scale
    LEA.addOperand(MCOperand::createReg(0));        // index
    LEA.addOperand(MCOperand::createExpr(symRef));  // disp
    LEA.addOperand(MCOperand::createReg(0));        // seg
  } else if (SRVK == MCSymbolRefExpr::VK_TLSLDM) {
    LEA.setOpcode(X86::LEA32r);
    LEA.addOperand(MCOperand::createReg(X86::EAX)); // dest
    LEA.addOperand(MCOperand::createReg(X86::EBX)); // base
    LEA.addOperand(MCOperand::createImm(1));        // scale
    LEA.addOperand(MCOperand::createReg(0));        // index
    LEA.addOperand(MCOperand::createExpr(symRef));  // disp
    LEA.addOperand(MCOperand::createReg(0));        // seg
  } else {
    LEA.setOpcode(X86::LEA32r);
    LEA.addOperand(MCOperand::createReg(X86::EAX)); // dest
    LEA.addOperand(MCOperand::createReg(0));        // base
    LEA.addOperand(MCOperand::createImm(1));        // scale
    LEA.addOperand(MCOperand::createReg(X86::EBX)); // index
    LEA.addOperand(MCOperand::createExpr(symRef));  // disp
    LEA.addOperand(MCOperand::createReg(0));        // seg
  }
  EmitAndCountInstruction(LEA);

  if (needsPadding) {
    EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
    EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
    EmitAndCountInstruction(MCInstBuilder(X86::REX64_PREFIX));
  }

  // 32-bit targets use the triple-underscore Darwin-style name here.
  StringRef name = is64Bits ? "__tls_get_addr" : "___tls_get_addr";
  MCSymbol *tlsGetAddr = context.getOrCreateSymbol(name);
  const MCSymbolRefExpr *tlsRef =
    MCSymbolRefExpr::create(tlsGetAddr,
                            MCSymbolRefExpr::VK_PLT,
                            context);

  EmitAndCountInstruction(MCInstBuilder(is64Bits ? X86::CALL64pcrel32
                                                 : X86::CALLpcrel32)
                            .addExpr(tlsRef));
}
770 | ||||
/// \brief Emit the largest nop instruction smaller than or equal to \p NumBytes
/// bytes. Return the size of nop emitted.
///
/// Nops of 3..10 bytes are built from NOOPL/NOOPW with a synthetic memory
/// operand whose addressing-mode complexity (index register, displacement
/// width, segment override) pads the encoding to the requested length; 0x66
/// prefixes (at most five) extend coverage beyond 10 bytes.
static unsigned EmitNop(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
                        const MCSubtargetInfo &STI) {
  // This works only for 64bit. For 32bit we have to do additional checking if
  // the CPU supports multi-byte nops.
  assert(Is64Bit && "EmitNops only supports X86-64");

  unsigned NopSize;
  unsigned Opc, BaseReg, ScaleVal, IndexReg, Displacement, SegmentReg;
  Opc = IndexReg = Displacement = SegmentReg = 0;
  BaseReg = X86::RAX;
  ScaleVal = 1;
  // Each case's operand choice yields exactly NopSize encoded bytes.
  switch (NumBytes) {
  case 0: llvm_unreachable("Zero nops?"); break;
  case 1: NopSize = 1; Opc = X86::NOOP; break;
  case 2: NopSize = 2; Opc = X86::XCHG16ar; break;
  case 3: NopSize = 3; Opc = X86::NOOPL; break;
  case 4: NopSize = 4; Opc = X86::NOOPL; Displacement = 8; break;
  case 5: NopSize = 5; Opc = X86::NOOPL; Displacement = 8;
          IndexReg = X86::RAX; break;
  case 6: NopSize = 6; Opc = X86::NOOPW; Displacement = 8;
          IndexReg = X86::RAX; break;
  case 7: NopSize = 7; Opc = X86::NOOPL; Displacement = 512; break;
  case 8: NopSize = 8; Opc = X86::NOOPL; Displacement = 512;
          IndexReg = X86::RAX; break;
  case 9: NopSize = 9; Opc = X86::NOOPW; Displacement = 512;
          IndexReg = X86::RAX; break;
  default: NopSize = 10; Opc = X86::NOOPW; Displacement = 512;
           IndexReg = X86::RAX; SegmentReg = X86::CS; break;
  }

  // Pad the 10-byte form with up to five 0x66 operand-size prefixes, so a
  // single call can cover up to 15 bytes.
  unsigned NumPrefixes = std::min(NumBytes - NopSize, 5U);
  NopSize += NumPrefixes;
  for (unsigned i = 0; i != NumPrefixes; ++i)
    OS.EmitBytes("\x66");

  switch (Opc) {
  default:
    llvm_unreachable("Unexpected opcode");
    break;
  case X86::NOOP:
    OS.EmitInstruction(MCInstBuilder(Opc), STI);
    break;
  case X86::XCHG16ar:
    OS.EmitInstruction(MCInstBuilder(Opc).addReg(X86::AX), STI);
    break;
  case X86::NOOPL:
  case X86::NOOPW:
    OS.EmitInstruction(MCInstBuilder(Opc)
                           .addReg(BaseReg)
                           .addImm(ScaleVal)
                           .addReg(IndexReg)
                           .addImm(Displacement)
                           .addReg(SegmentReg),
                       STI);
    break;
  }
  assert(NopSize <= NumBytes && "We overemitted?");
  return NopSize;
}
832 | ||||
833 | /// \brief Emit the optimal amount of multi-byte nops on X86. | |||
834 | static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit, | |||
835 | const MCSubtargetInfo &STI) { | |||
836 | unsigned NopsToEmit = NumBytes; | |||
837 | (void)NopsToEmit; | |||
838 | while (NumBytes) { | |||
839 | NumBytes -= EmitNop(OS, NumBytes, Is64Bit, STI); | |||
840 | assert(NopsToEmit >= NumBytes && "Emitted more than I asked for!")(static_cast <bool> (NopsToEmit >= NumBytes && "Emitted more than I asked for!") ? void (0) : __assert_fail ("NopsToEmit >= NumBytes && \"Emitted more than I asked for!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 840, __extension__ __PRETTY_FUNCTION__)); | |||
841 | } | |||
842 | } | |||
843 | ||||
// Lower a STATEPOINT pseudo: either emit a patchable nop sled of the
// requested size, or emit the actual call to the statepoint target, then
// record the statepoint in the stackmap section.
void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI,
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Statepoint currently only supports X86-64");

  StatepointOpers SOpers(&MI);
  if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
    // A nonzero patch-byte count means the call site is reserved as nops to
    // be patched later instead of emitting a call now.
    EmitNops(*OutStreamer, PatchBytes, Subtarget->is64Bit(),
             getSubtargetInfo());
  } else {
    // Lower call target and choose correct opcode
    const MachineOperand &CallTarget = SOpers.getCallTarget();
    MCOperand CallTargetMCOp;
    unsigned CallOpcode;
    switch (CallTarget.getType()) {
    case MachineOperand::MO_GlobalAddress:
    case MachineOperand::MO_ExternalSymbol:
      CallTargetMCOp = MCIL.LowerSymbolOperand(
          CallTarget, MCIL.GetSymbolFromOperand(CallTarget));
      CallOpcode = X86::CALL64pcrel32;
      // Currently, we only support relative addressing with statepoints.
      // Otherwise, we'll need a scratch register to hold the target
      // address.  You'll fail asserts during load & relocation if this
      // symbol is to far away. (TODO: support non-relative addressing)
      break;
    case MachineOperand::MO_Immediate:
      CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
      CallOpcode = X86::CALL64pcrel32;
      // Currently, we only support relative addressing with statepoints.
      // Otherwise, we'll need a scratch register to hold the target
      // immediate.  You'll fail asserts during load & relocation if this
      // address is to far away. (TODO: support non-relative addressing)
      break;
    case MachineOperand::MO_Register:
      // FIXME: Add retpoline support and remove this.
      if (Subtarget->useRetpoline())
        report_fatal_error("Lowering register statepoints with retpoline not "
                           "yet implemented.");
      CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
      CallOpcode = X86::CALL64r;
      break;
    default:
      llvm_unreachable("Unsupported operand type in statepoint call target");
      break;
    }

    // Emit call
    MCInst CallInst;
    CallInst.setOpcode(CallOpcode);
    CallInst.addOperand(CallTargetMCOp);
    OutStreamer->EmitInstruction(CallInst, getSubtargetInfo());
  }

  // Record our statepoint node in the same section used by STACKMAP
  // and PATCHPOINT
  SM.recordStatepoint(MI);
}
900 | ||||
901 | void X86AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI, | |||
902 | X86MCInstLower &MCIL) { | |||
903 | // FAULTING_LOAD_OP <def>, <faltinf type>, <MBB handler>, | |||
904 | // <opcode>, <operands> | |||
905 | ||||
906 | unsigned DefRegister = FaultingMI.getOperand(0).getReg(); | |||
907 | FaultMaps::FaultKind FK = | |||
908 | static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm()); | |||
909 | MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol(); | |||
910 | unsigned Opcode = FaultingMI.getOperand(3).getImm(); | |||
911 | unsigned OperandsBeginIdx = 4; | |||
912 | ||||
913 | assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!")(static_cast <bool> (FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!") ? void (0) : __assert_fail ("FK < FaultMaps::FaultKindMax && \"Invalid Faulting Kind!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 913, __extension__ __PRETTY_FUNCTION__)); | |||
914 | FM.recordFaultingOp(FK, HandlerLabel); | |||
915 | ||||
916 | MCInst MI; | |||
917 | MI.setOpcode(Opcode); | |||
918 | ||||
919 | if (DefRegister != X86::NoRegister) | |||
920 | MI.addOperand(MCOperand::createReg(DefRegister)); | |||
921 | ||||
922 | for (auto I = FaultingMI.operands_begin() + OperandsBeginIdx, | |||
923 | E = FaultingMI.operands_end(); | |||
924 | I != E; ++I) | |||
925 | if (auto MaybeOperand = MCIL.LowerMachineOperand(&FaultingMI, *I)) | |||
926 | MI.addOperand(MaybeOperand.getValue()); | |||
927 | ||||
928 | OutStreamer->EmitInstruction(MI, getSubtargetInfo()); | |||
929 | } | |||
930 | ||||
931 | void X86AsmPrinter::LowerFENTRY_CALL(const MachineInstr &MI, | |||
932 | X86MCInstLower &MCIL) { | |||
933 | bool Is64Bits = Subtarget->is64Bit(); | |||
934 | MCContext &Ctx = OutStreamer->getContext(); | |||
935 | MCSymbol *fentry = Ctx.getOrCreateSymbol("__fentry__"); | |||
936 | const MCSymbolRefExpr *Op = | |||
937 | MCSymbolRefExpr::create(fentry, MCSymbolRefExpr::VK_None, Ctx); | |||
938 | ||||
939 | EmitAndCountInstruction( | |||
940 | MCInstBuilder(Is64Bits ? X86::CALL64pcrel32 : X86::CALLpcrel32) | |||
941 | .addExpr(Op)); | |||
942 | } | |||
943 | ||||
944 | void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI, | |||
945 | X86MCInstLower &MCIL) { | |||
946 | // PATCHABLE_OP minsize, opcode, operands | |||
947 | ||||
948 | unsigned MinSize = MI.getOperand(0).getImm(); | |||
949 | unsigned Opcode = MI.getOperand(1).getImm(); | |||
950 | ||||
951 | MCInst MCI; | |||
952 | MCI.setOpcode(Opcode); | |||
953 | for (auto &MO : make_range(MI.operands_begin() + 2, MI.operands_end())) | |||
954 | if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO)) | |||
955 | MCI.addOperand(MaybeOperand.getValue()); | |||
956 | ||||
957 | SmallString<256> Code; | |||
958 | SmallVector<MCFixup, 4> Fixups; | |||
959 | raw_svector_ostream VecOS(Code); | |||
960 | CodeEmitter->encodeInstruction(MCI, VecOS, Fixups, getSubtargetInfo()); | |||
961 | ||||
962 | if (Code.size() < MinSize) { | |||
963 | if (MinSize == 2 && Opcode == X86::PUSH64r) { | |||
964 | // This is an optimization that lets us get away without emitting a nop in | |||
965 | // many cases. | |||
966 | // | |||
967 | // NB! In some cases the encoding for PUSH64r (e.g. PUSH64r %r9) takes two | |||
968 | // bytes too, so the check on MinSize is important. | |||
969 | MCI.setOpcode(X86::PUSH64rmr); | |||
970 | } else { | |||
971 | unsigned NopSize = EmitNop(*OutStreamer, MinSize, Subtarget->is64Bit(), | |||
972 | getSubtargetInfo()); | |||
973 | assert(NopSize == MinSize && "Could not implement MinSize!")(static_cast <bool> (NopSize == MinSize && "Could not implement MinSize!" ) ? void (0) : __assert_fail ("NopSize == MinSize && \"Could not implement MinSize!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 973, __extension__ __PRETTY_FUNCTION__)); | |||
974 | (void) NopSize; | |||
975 | } | |||
976 | } | |||
977 | ||||
978 | OutStreamer->EmitInstruction(MCI, getSubtargetInfo()); | |||
979 | } | |||
980 | ||||
981 | // Lower a stackmap of the form: | |||
982 | // <id>, <shadowBytes>, ... | |||
983 | void X86AsmPrinter::LowerSTACKMAP(const MachineInstr &MI) { | |||
984 | SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo()); | |||
985 | SM.recordStackMap(MI); | |||
986 | unsigned NumShadowBytes = MI.getOperand(1).getImm(); | |||
987 | SMShadowTracker.reset(NumShadowBytes); | |||
988 | } | |||
989 | ||||
// Lower a patchpoint of the form:
// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Patchpoint currently only supports X86-64");

  // Flush any pending stackmap-shadow nops before the patchpoint bytes.
  SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());

  SM.recordPatchPoint(MI);

  PatchPointOpers opers(&MI);
  unsigned ScratchIdx = opers.getNextScratchIdx();
  // Tracks how many bytes of real code (mov+call) we emit; the remainder up
  // to getNumPatchBytes() is filled with nops below.
  unsigned EncodedBytes = 0;
  const MachineOperand &CalleeMO = opers.getCallTarget();

  // Check for null target. If target is non-null (i.e. is non-zero or is
  // symbolic) then emit a call.
  if (!(CalleeMO.isImm() && !CalleeMO.getImm())) {
    MCOperand CalleeMCOp;
    switch (CalleeMO.getType()) {
    default:
      /// FIXME: Add a verifier check for bad callee types.
      llvm_unreachable("Unrecognized callee operand type.");
    case MachineOperand::MO_Immediate:
      if (CalleeMO.getImm())
        CalleeMCOp = MCOperand::createImm(CalleeMO.getImm());
      break;
    case MachineOperand::MO_ExternalSymbol:
    case MachineOperand::MO_GlobalAddress:
      CalleeMCOp =
        MCIL.LowerSymbolOperand(CalleeMO,
                                MCIL.GetSymbolFromOperand(CalleeMO));
      break;
    }

    // Emit MOV to materialize the target address and the CALL to target.
    // This is encoded with 12-13 bytes, depending on which register is used.
    unsigned ScratchReg = MI.getOperand(ScratchIdx).getReg();
    if (X86II::isX86_64ExtendedReg(ScratchReg))
      // r8-r15 need a REX.B prefix on the MOV, costing one extra byte.
      EncodedBytes = 13;
    else
      EncodedBytes = 12;

    EmitAndCountInstruction(
        MCInstBuilder(X86::MOV64ri).addReg(ScratchReg).addOperand(CalleeMCOp));
    // FIXME: Add retpoline support and remove this.
    if (Subtarget->useRetpoline())
      report_fatal_error(
          "Lowering patchpoint with retpoline not yet implemented.");
    EmitAndCountInstruction(MCInstBuilder(X86::CALL64r).addReg(ScratchReg));
  }

  // Emit padding.
  unsigned NumBytes = opers.getNumPatchBytes();
  assert(NumBytes >= EncodedBytes &&
         "Patchpoint can't request size less than the length of a call.");

  EmitNops(*OutStreamer, NumBytes - EncodedBytes, Subtarget->is64Bit(),
           getSubtargetInfo());
}
1050 | ||||
1051 | void X86AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, | |||
1052 | X86MCInstLower &MCIL) { | |||
1053 | assert(Subtarget->is64Bit() && "XRay custom events only supports X86-64")(static_cast <bool> (Subtarget->is64Bit() && "XRay custom events only supports X86-64") ? void (0) : __assert_fail ("Subtarget->is64Bit() && \"XRay custom events only supports X86-64\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 1053, __extension__ __PRETTY_FUNCTION__)); | |||
1054 | ||||
1055 | // We want to emit the following pattern, which follows the x86 calling | |||
1056 | // convention to prepare for the trampoline call to be patched in. | |||
1057 | // | |||
1058 | // .p2align 1, ... | |||
1059 | // .Lxray_event_sled_N: | |||
1060 | // jmp +N // jump across the instrumentation sled | |||
1061 | // ... // set up arguments in register | |||
1062 | // callq __xray_CustomEvent@plt // force dependency to symbol | |||
1063 | // ... | |||
1064 | // <jump here> | |||
1065 | // | |||
1066 | // After patching, it would look something like: | |||
1067 | // | |||
1068 | // nopw (2-byte nop) | |||
1069 | // ... | |||
1070 | // callq __xrayCustomEvent // already lowered | |||
1071 | // ... | |||
1072 | // | |||
1073 | // --- | |||
1074 | // First we emit the label and the jump. | |||
1075 | auto CurSled = OutContext.createTempSymbol("xray_event_sled_", true); | |||
1076 | OutStreamer->AddComment("# XRay Custom Event Log"); | |||
1077 | OutStreamer->EmitCodeAlignment(2); | |||
1078 | OutStreamer->EmitLabel(CurSled); | |||
1079 | ||||
1080 | // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as | |||
1081 | // an operand (computed as an offset from the jmp instruction). | |||
1082 | // FIXME: Find another less hacky way do force the relative jump. | |||
1083 | OutStreamer->EmitBinaryData("\xeb\x0f"); | |||
1084 | ||||
1085 | // The default C calling convention will place two arguments into %rcx and | |||
1086 | // %rdx -- so we only work with those. | |||
1087 | unsigned UsedRegs[] = {X86::RDI, X86::RSI}; | |||
1088 | bool UsedMask[] = {false, false}; | |||
1089 | ||||
1090 | // Then we put the operands in the %rdi and %rsi registers. We spill the | |||
1091 | // values in the register before we clobber them, and mark them as used in | |||
1092 | // UsedMask. In case the arguments are already in the correct register, we use | |||
1093 | // emit nops appropriately sized to keep the sled the same size in every | |||
1094 | // situation. | |||
1095 | for (unsigned I = 0; I < MI.getNumOperands(); ++I) | |||
| ||||
1096 | if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I))) { | |||
1097 | assert(Op->isReg() && "Only support arguments in registers")(static_cast <bool> (Op->isReg() && "Only support arguments in registers" ) ? void (0) : __assert_fail ("Op->isReg() && \"Only support arguments in registers\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 1097, __extension__ __PRETTY_FUNCTION__)); | |||
1098 | if (Op->getReg() != UsedRegs[I]) { | |||
| ||||
1099 | UsedMask[I] = true; | |||
1100 | EmitAndCountInstruction( | |||
1101 | MCInstBuilder(X86::PUSH64r).addReg(UsedRegs[I])); | |||
1102 | EmitAndCountInstruction(MCInstBuilder(X86::MOV64rr) | |||
1103 | .addReg(UsedRegs[I]) | |||
1104 | .addReg(Op->getReg())); | |||
1105 | } else { | |||
1106 | EmitNops(*OutStreamer, 4, Subtarget->is64Bit(), getSubtargetInfo()); | |||
1107 | } | |||
1108 | } | |||
1109 | ||||
1110 | // We emit a hard dependency on the __xray_CustomEvent symbol, which is the | |||
1111 | // name of the trampoline to be implemented by the XRay runtime. | |||
1112 | auto TSym = OutContext.getOrCreateSymbol("__xray_CustomEvent"); | |||
1113 | MachineOperand TOp = MachineOperand::CreateMCSymbol(TSym); | |||
1114 | if (isPositionIndependent()) | |||
1115 | TOp.setTargetFlags(X86II::MO_PLT); | |||
1116 | ||||
1117 | // Emit the call instruction. | |||
1118 | EmitAndCountInstruction(MCInstBuilder(X86::CALL64pcrel32) | |||
1119 | .addOperand(MCIL.LowerSymbolOperand(TOp, TSym))); | |||
1120 | ||||
1121 | // Restore caller-saved and used registers. | |||
1122 | for (unsigned I = sizeof UsedMask; I-- > 0;) | |||
1123 | if (UsedMask[I]) | |||
1124 | EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(UsedRegs[I])); | |||
1125 | else | |||
1126 | EmitNops(*OutStreamer, 1, Subtarget->is64Bit(), getSubtargetInfo()); | |||
1127 | ||||
1128 | OutStreamer->AddComment("xray custom event end."); | |||
1129 | ||||
1130 | // Record the sled version. Older versions of this sled were spelled | |||
1131 | // differently, so we let the runtime handle the different offsets we're | |||
1132 | // using. | |||
1133 | recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 1); | |||
1134 | } | |||
1135 | ||||
1136 | void X86AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI, | |||
1137 | X86MCInstLower &MCIL) { | |||
1138 | // We want to emit the following pattern: | |||
1139 | // | |||
1140 | // .p2align 1, ... | |||
1141 | // .Lxray_sled_N: | |||
1142 | // jmp .tmpN | |||
1143 | // # 9 bytes worth of noops | |||
1144 | // | |||
1145 | // We need the 9 bytes because at runtime, we'd be patching over the full 11 | |||
1146 | // bytes with the following pattern: | |||
1147 | // | |||
1148 | // mov %r10, <function id, 32-bit> // 6 bytes | |||
1149 | // call <relative offset, 32-bits> // 5 bytes | |||
1150 | // | |||
1151 | auto CurSled = OutContext.createTempSymbol("xray_sled_", true); | |||
1152 | OutStreamer->EmitCodeAlignment(2); | |||
1153 | OutStreamer->EmitLabel(CurSled); | |||
1154 | ||||
1155 | // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as | |||
1156 | // an operand (computed as an offset from the jmp instruction). | |||
1157 | // FIXME: Find another less hacky way do force the relative jump. | |||
1158 | OutStreamer->EmitBytes("\xeb\x09"); | |||
1159 | EmitNops(*OutStreamer, 9, Subtarget->is64Bit(), getSubtargetInfo()); | |||
1160 | recordSled(CurSled, MI, SledKind::FUNCTION_ENTER); | |||
1161 | } | |||
1162 | ||||
1163 | void X86AsmPrinter::LowerPATCHABLE_RET(const MachineInstr &MI, | |||
1164 | X86MCInstLower &MCIL) { | |||
1165 | // Since PATCHABLE_RET takes the opcode of the return statement as an | |||
1166 | // argument, we use that to emit the correct form of the RET that we want. | |||
1167 | // i.e. when we see this: | |||
1168 | // | |||
1169 | // PATCHABLE_RET X86::RET ... | |||
1170 | // | |||
1171 | // We should emit the RET followed by sleds. | |||
1172 | // | |||
1173 | // .p2align 1, ... | |||
1174 | // .Lxray_sled_N: | |||
1175 | // ret # or equivalent instruction | |||
1176 | // # 10 bytes worth of noops | |||
1177 | // | |||
1178 | // This just makes sure that the alignment for the next instruction is 2. | |||
1179 | auto CurSled = OutContext.createTempSymbol("xray_sled_", true); | |||
1180 | OutStreamer->EmitCodeAlignment(2); | |||
1181 | OutStreamer->EmitLabel(CurSled); | |||
1182 | unsigned OpCode = MI.getOperand(0).getImm(); | |||
1183 | MCInst Ret; | |||
1184 | Ret.setOpcode(OpCode); | |||
1185 | for (auto &MO : make_range(MI.operands_begin() + 1, MI.operands_end())) | |||
1186 | if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO)) | |||
1187 | Ret.addOperand(MaybeOperand.getValue()); | |||
1188 | OutStreamer->EmitInstruction(Ret, getSubtargetInfo()); | |||
1189 | EmitNops(*OutStreamer, 10, Subtarget->is64Bit(), getSubtargetInfo()); | |||
1190 | recordSled(CurSled, MI, SledKind::FUNCTION_EXIT); | |||
1191 | } | |||
1192 | ||||
1193 | void X86AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI, X86MCInstLower &MCIL) { | |||
1194 | // Like PATCHABLE_RET, we have the actual instruction in the operands to this | |||
1195 | // instruction so we lower that particular instruction and its operands. | |||
1196 | // Unlike PATCHABLE_RET though, we put the sled before the JMP, much like how | |||
1197 | // we do it for PATCHABLE_FUNCTION_ENTER. The sled should be very similar to | |||
1198 | // the PATCHABLE_FUNCTION_ENTER case, followed by the lowering of the actual | |||
1199 | // tail call much like how we have it in PATCHABLE_RET. | |||
1200 | auto CurSled = OutContext.createTempSymbol("xray_sled_", true); | |||
1201 | OutStreamer->EmitCodeAlignment(2); | |||
1202 | OutStreamer->EmitLabel(CurSled); | |||
1203 | auto Target = OutContext.createTempSymbol(); | |||
1204 | ||||
1205 | // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as | |||
1206 | // an operand (computed as an offset from the jmp instruction). | |||
1207 | // FIXME: Find another less hacky way do force the relative jump. | |||
1208 | OutStreamer->EmitBytes("\xeb\x09"); | |||
1209 | EmitNops(*OutStreamer, 9, Subtarget->is64Bit(), getSubtargetInfo()); | |||
1210 | OutStreamer->EmitLabel(Target); | |||
1211 | recordSled(CurSled, MI, SledKind::TAIL_CALL); | |||
1212 | ||||
1213 | unsigned OpCode = MI.getOperand(0).getImm(); | |||
1214 | MCInst TC; | |||
1215 | TC.setOpcode(OpCode); | |||
1216 | ||||
1217 | // Before emitting the instruction, add a comment to indicate that this is | |||
1218 | // indeed a tail call. | |||
1219 | OutStreamer->AddComment("TAILCALL"); | |||
1220 | for (auto &MO : make_range(MI.operands_begin() + 1, MI.operands_end())) | |||
1221 | if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO)) | |||
1222 | TC.addOperand(MaybeOperand.getValue()); | |||
1223 | OutStreamer->EmitInstruction(TC, getSubtargetInfo()); | |||
1224 | } | |||
1225 | ||||
1226 | // Returns instruction preceding MBBI in MachineFunction. | |||
1227 | // If MBBI is the first instruction of the first basic block, returns null. | |||
1228 | static MachineBasicBlock::const_iterator | |||
1229 | PrevCrossBBInst(MachineBasicBlock::const_iterator MBBI) { | |||
1230 | const MachineBasicBlock *MBB = MBBI->getParent(); | |||
1231 | while (MBBI == MBB->begin()) { | |||
1232 | if (MBB == &MBB->getParent()->front()) | |||
1233 | return MachineBasicBlock::const_iterator(); | |||
1234 | MBB = MBB->getPrevNode(); | |||
1235 | MBBI = MBB->end(); | |||
1236 | } | |||
1237 | return --MBBI; | |||
1238 | } | |||
1239 | ||||
1240 | static const Constant *getConstantFromPool(const MachineInstr &MI, | |||
1241 | const MachineOperand &Op) { | |||
1242 | if (!Op.isCPI()) | |||
1243 | return nullptr; | |||
1244 | ||||
1245 | ArrayRef<MachineConstantPoolEntry> Constants = | |||
1246 | MI.getParent()->getParent()->getConstantPool()->getConstants(); | |||
1247 | const MachineConstantPoolEntry &ConstantEntry = | |||
1248 | Constants[Op.getIndex()]; | |||
1249 | ||||
1250 | // Bail if this is a machine constant pool entry, we won't be able to dig out | |||
1251 | // anything useful. | |||
1252 | if (ConstantEntry.isMachineConstantPoolEntry()) | |||
1253 | return nullptr; | |||
1254 | ||||
1255 | auto *C = dyn_cast<Constant>(ConstantEntry.Val.ConstVal); | |||
1256 | assert((!C || ConstantEntry.getType() == C->getType()) &&(static_cast <bool> ((!C || ConstantEntry.getType() == C ->getType()) && "Expected a constant of the same type!" ) ? void (0) : __assert_fail ("(!C || ConstantEntry.getType() == C->getType()) && \"Expected a constant of the same type!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 1257, __extension__ __PRETTY_FUNCTION__)) | |||
1257 | "Expected a constant of the same type!")(static_cast <bool> ((!C || ConstantEntry.getType() == C ->getType()) && "Expected a constant of the same type!" ) ? void (0) : __assert_fail ("(!C || ConstantEntry.getType() == C->getType()) && \"Expected a constant of the same type!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 1257, __extension__ __PRETTY_FUNCTION__)); | |||
1258 | return C; | |||
1259 | } | |||
1260 | ||||
// Build a human-readable comment describing a vector shuffle, e.g.
// "xmm0 = xmm1[0,1],xmm2[2,3]". SrcOp1Idx/SrcOp2Idx are MI operand indices
// of the two sources; Mask is the decoded shuffle mask and may contain
// SM_SentinelZero / SM_SentinelUndef entries.
static std::string getShuffleComment(const MachineInstr *MI,
                                     unsigned SrcOp1Idx,
                                     unsigned SrcOp2Idx,
                                     ArrayRef<int> Mask) {
  std::string Comment;

  // Compute the name for a register. This is really goofy because we have
  // multiple instruction printers that could (in theory) use different
  // names. Fortunately most people use the ATT style (outside of Windows)
  // and they actually agree on register naming here. Ultimately, this is
  // a comment, and so its OK if it isn't perfect.
  auto GetRegisterName = [](unsigned RegNum) -> StringRef {
    return X86ATTInstPrinter::getRegisterName(RegNum);
  };

  const MachineOperand &DstOp = MI->getOperand(0);
  const MachineOperand &SrcOp1 = MI->getOperand(SrcOp1Idx);
  const MachineOperand &SrcOp2 = MI->getOperand(SrcOp2Idx);

  // Non-register operands (e.g. constant-pool loads) are shown as "mem".
  StringRef DstName = DstOp.isReg() ? GetRegisterName(DstOp.getReg()) : "mem";
  StringRef Src1Name =
      SrcOp1.isReg() ? GetRegisterName(SrcOp1.getReg()) : "mem";
  StringRef Src2Name =
      SrcOp2.isReg() ? GetRegisterName(SrcOp2.getReg()) : "mem";

  // One source operand, fix the mask to print all elements in one span.
  // (Indices >= e refer to the second source; fold them back into [0, e).)
  SmallVector<int, 8> ShuffleMask(Mask.begin(), Mask.end());
  if (Src1Name == Src2Name)
    for (int i = 0, e = ShuffleMask.size(); i != e; ++i)
      if (ShuffleMask[i] >= e)
        ShuffleMask[i] -= e;

  raw_string_ostream CS(Comment);
  CS << DstName;

  // Handle AVX512 MASK/MASXZ write mask comments.
  // MASK: zmmX {%kY}
  // MASKZ: zmmX {%kY} {z}
  if (SrcOp1Idx > 1) {
    assert((SrcOp1Idx == 2 || SrcOp1Idx == 3) && "Unexpected writemask");

    // The writemask register sits immediately before the first source.
    const MachineOperand &WriteMaskOp = MI->getOperand(SrcOp1Idx - 1);
    if (WriteMaskOp.isReg()) {
      CS << " {%" << GetRegisterName(WriteMaskOp.getReg()) << "}";

      if (SrcOp1Idx == 2) {
        CS << " {z}";
      }
    }
  }

  CS << " = ";

  for (int i = 0, e = ShuffleMask.size(); i != e; ++i) {
    if (i != 0)
      CS << ",";
    if (ShuffleMask[i] == SM_SentinelZero) {
      CS << "zero";
      continue;
    }

    // Otherwise, it must come from src1 or src2. Print the span of elements
    // that comes from this src.
    bool isSrc1 = ShuffleMask[i] < (int)e;
    CS << (isSrc1 ? Src1Name : Src2Name) << '[';

    bool IsFirst = true;
    // Consume the run of consecutive mask entries drawn from the same source
    // (stopping at a zero sentinel or a source switch) and print them as a
    // single bracketed span; undef entries print as "u".
    while (i != e && ShuffleMask[i] != SM_SentinelZero &&
           (ShuffleMask[i] < (int)e) == isSrc1) {
      if (!IsFirst)
        CS << ',';
      else
        IsFirst = false;
      if (ShuffleMask[i] == SM_SentinelUndef)
        CS << "u";
      else
        CS << ShuffleMask[i] % (int)e;
      ++i;
    }
    CS << ']';
    --i; // For loop increments element #.
  }
  CS.flush();

  return Comment;
}
1347 | ||||
1348 | static void printConstant(const Constant *COp, raw_ostream &CS) { | |||
1349 | if (isa<UndefValue>(COp)) { | |||
1350 | CS << "u"; | |||
1351 | } else if (auto *CI = dyn_cast<ConstantInt>(COp)) { | |||
1352 | if (CI->getBitWidth() <= 64) { | |||
1353 | CS << CI->getZExtValue(); | |||
1354 | } else { | |||
1355 | // print multi-word constant as (w0,w1) | |||
1356 | const auto &Val = CI->getValue(); | |||
1357 | CS << "("; | |||
1358 | for (int i = 0, N = Val.getNumWords(); i < N; ++i) { | |||
1359 | if (i > 0) | |||
1360 | CS << ","; | |||
1361 | CS << Val.getRawData()[i]; | |||
1362 | } | |||
1363 | CS << ")"; | |||
1364 | } | |||
1365 | } else if (auto *CF = dyn_cast<ConstantFP>(COp)) { | |||
1366 | SmallString<32> Str; | |||
1367 | CF->getValueAPF().toString(Str); | |||
1368 | CS << Str; | |||
1369 | } else { | |||
1370 | CS << "?"; | |||
1371 | } | |||
1372 | } | |||
1373 | ||||
// Emit the assembler directive corresponding to a single SEH_* pseudo
// instruction: CodeView .cv_fpo_* directives when emitting FPO data on
// 32-bit x86, otherwise the Win64 .seh_* unwind directives.
void X86AsmPrinter::EmitSEHInstruction(const MachineInstr *MI) {
  assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
  assert(getSubtarget().isOSWindows() && "SEH_ instruction Windows only");
  const X86RegisterInfo *RI =
      MF->getSubtarget<X86Subtarget>().getRegisterInfo();

  // Use the .cv_fpo directives if we're emitting CodeView on 32-bit x86.
  if (EmitFPOData) {
    X86TargetStreamer *XTS =
        static_cast<X86TargetStreamer *>(OutStreamer->getTargetStreamer());
    switch (MI->getOpcode()) {
    case X86::SEH_PushReg:
      XTS->emitFPOPushReg(MI->getOperand(0).getImm());
      break;
    case X86::SEH_StackAlloc:
      XTS->emitFPOStackAlloc(MI->getOperand(0).getImm());
      break;
    case X86::SEH_SetFrame:
      assert(MI->getOperand(1).getImm() == 0 &&
             ".cv_fpo_setframe takes no offset");
      XTS->emitFPOSetFrame(MI->getOperand(0).getImm());
      break;
    case X86::SEH_EndPrologue:
      XTS->emitFPOEndPrologue();
      break;
    case X86::SEH_SaveReg:
    case X86::SEH_SaveXMM:
    case X86::SEH_PushFrame:
      // These pseudos have no .cv_fpo_* equivalent and should never reach
      // here when emitting FPO data.
      llvm_unreachable("SEH_ directive incompatible with FPO");
      break;
    default:
      llvm_unreachable("expected SEH_ instruction");
    }
    return;
  }

  // Otherwise, use the .seh_ directives for all other Windows platforms.
  // Register operands are translated to SEH register numbers via
  // getSEHRegNum; immediate operands pass through unchanged.
  switch (MI->getOpcode()) {
  case X86::SEH_PushReg:
    OutStreamer->EmitWinCFIPushReg(
        RI->getSEHRegNum(MI->getOperand(0).getImm()));
    break;

  case X86::SEH_SaveReg:
    OutStreamer->EmitWinCFISaveReg(RI->getSEHRegNum(MI->getOperand(0).getImm()),
                                   MI->getOperand(1).getImm());
    break;

  case X86::SEH_SaveXMM:
    OutStreamer->EmitWinCFISaveXMM(RI->getSEHRegNum(MI->getOperand(0).getImm()),
                                   MI->getOperand(1).getImm());
    break;

  case X86::SEH_StackAlloc:
    OutStreamer->EmitWinCFIAllocStack(MI->getOperand(0).getImm());
    break;

  case X86::SEH_SetFrame:
    OutStreamer->EmitWinCFISetFrame(
        RI->getSEHRegNum(MI->getOperand(0).getImm()),
        MI->getOperand(1).getImm());
    break;

  case X86::SEH_PushFrame:
    OutStreamer->EmitWinCFIPushFrame(MI->getOperand(0).getImm());
    break;

  case X86::SEH_EndPrologue:
    OutStreamer->EmitWinCFIEndProlog();
    break;

  default:
    llvm_unreachable("expected SEH_ instruction");
  }
}
1449 | ||||
1450 | void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) { | |||
1451 | X86MCInstLower MCInstLowering(*MF, *this); | |||
1452 | const X86RegisterInfo *RI = MF->getSubtarget<X86Subtarget>().getRegisterInfo(); | |||
1453 | ||||
1454 | // Add a comment about EVEX-2-VEX compression for AVX-512 instrs that | |||
1455 | // are compressed from EVEX encoding to VEX encoding. | |||
1456 | if (TM.Options.MCOptions.ShowMCEncoding) { | |||
1457 | if (MI->getAsmPrinterFlags() & AC_EVEX_2_VEX) | |||
1458 | OutStreamer->AddComment("EVEX TO VEX Compression ", false); | |||
1459 | } | |||
1460 | ||||
1461 | switch (MI->getOpcode()) { | |||
1462 | case TargetOpcode::DBG_VALUE: | |||
1463 | llvm_unreachable("Should be handled target independently")::llvm::llvm_unreachable_internal("Should be handled target independently" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 1463); | |||
1464 | ||||
1465 | // Emit nothing here but a comment if we can. | |||
1466 | case X86::Int_MemBarrier: | |||
1467 | OutStreamer->emitRawComment("MEMBARRIER"); | |||
1468 | return; | |||
1469 | ||||
1470 | ||||
1471 | case X86::EH_RETURN: | |||
1472 | case X86::EH_RETURN64: { | |||
1473 | // Lower these as normal, but add some comments. | |||
1474 | unsigned Reg = MI->getOperand(0).getReg(); | |||
1475 | OutStreamer->AddComment(StringRef("eh_return, addr: %") + | |||
1476 | X86ATTInstPrinter::getRegisterName(Reg)); | |||
1477 | break; | |||
1478 | } | |||
1479 | case X86::CLEANUPRET: { | |||
1480 | // Lower these as normal, but add some comments. | |||
1481 | OutStreamer->AddComment("CLEANUPRET"); | |||
1482 | break; | |||
1483 | } | |||
1484 | ||||
1485 | case X86::CATCHRET: { | |||
1486 | // Lower these as normal, but add some comments. | |||
1487 | OutStreamer->AddComment("CATCHRET"); | |||
1488 | break; | |||
1489 | } | |||
1490 | ||||
1491 | case X86::TAILJMPr: | |||
1492 | case X86::TAILJMPm: | |||
1493 | case X86::TAILJMPd: | |||
1494 | case X86::TAILJMPd_CC: | |||
1495 | case X86::TAILJMPr64: | |||
1496 | case X86::TAILJMPm64: | |||
1497 | case X86::TAILJMPd64: | |||
1498 | case X86::TAILJMPd64_CC: | |||
1499 | case X86::TAILJMPr64_REX: | |||
1500 | case X86::TAILJMPm64_REX: | |||
1501 | // Lower these as normal, but add some comments. | |||
1502 | OutStreamer->AddComment("TAILCALL"); | |||
1503 | break; | |||
1504 | ||||
1505 | case X86::TLS_addr32: | |||
1506 | case X86::TLS_addr64: | |||
1507 | case X86::TLS_base_addr32: | |||
1508 | case X86::TLS_base_addr64: | |||
1509 | return LowerTlsAddr(MCInstLowering, *MI); | |||
1510 | ||||
1511 | case X86::MOVPC32r: { | |||
1512 | // This is a pseudo op for a two instruction sequence with a label, which | |||
1513 | // looks like: | |||
1514 | // call "L1$pb" | |||
1515 | // "L1$pb": | |||
1516 | // popl %esi | |||
1517 | ||||
1518 | // Emit the call. | |||
1519 | MCSymbol *PICBase = MF->getPICBaseSymbol(); | |||
1520 | // FIXME: We would like an efficient form for this, so we don't have to do a | |||
1521 | // lot of extra uniquing. | |||
1522 | EmitAndCountInstruction(MCInstBuilder(X86::CALLpcrel32) | |||
1523 | .addExpr(MCSymbolRefExpr::create(PICBase, OutContext))); | |||
1524 | ||||
1525 | const X86FrameLowering* FrameLowering = | |||
1526 | MF->getSubtarget<X86Subtarget>().getFrameLowering(); | |||
1527 | bool hasFP = FrameLowering->hasFP(*MF); | |||
1528 | ||||
1529 | // TODO: This is needed only if we require precise CFA. | |||
1530 | bool HasActiveDwarfFrame = OutStreamer->getNumFrameInfos() && | |||
1531 | !OutStreamer->getDwarfFrameInfos().back().End; | |||
1532 | ||||
1533 | int stackGrowth = -RI->getSlotSize(); | |||
1534 | ||||
1535 | if (HasActiveDwarfFrame && !hasFP) { | |||
1536 | OutStreamer->EmitCFIAdjustCfaOffset(-stackGrowth); | |||
1537 | } | |||
1538 | ||||
1539 | // Emit the label. | |||
1540 | OutStreamer->EmitLabel(PICBase); | |||
1541 | ||||
1542 | // popl $reg | |||
1543 | EmitAndCountInstruction(MCInstBuilder(X86::POP32r) | |||
1544 | .addReg(MI->getOperand(0).getReg())); | |||
1545 | ||||
1546 | if (HasActiveDwarfFrame && !hasFP) { | |||
1547 | OutStreamer->EmitCFIAdjustCfaOffset(stackGrowth); | |||
1548 | } | |||
1549 | return; | |||
1550 | } | |||
1551 | ||||
1552 | case X86::ADD32ri: { | |||
1553 | // Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri. | |||
1554 | if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS) | |||
1555 | break; | |||
1556 | ||||
1557 | // Okay, we have something like: | |||
1558 | // EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL) | |||
1559 | ||||
1560 | // For this, we want to print something like: | |||
1561 | // MYGLOBAL + (. - PICBASE) | |||
1562 | // However, we can't generate a ".", so just emit a new label here and refer | |||
1563 | // to it. | |||
1564 | MCSymbol *DotSym = OutContext.createTempSymbol(); | |||
1565 | OutStreamer->EmitLabel(DotSym); | |||
1566 | ||||
1567 | // Now that we have emitted the label, lower the complex operand expression. | |||
1568 | MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2)); | |||
1569 | ||||
1570 | const MCExpr *DotExpr = MCSymbolRefExpr::create(DotSym, OutContext); | |||
1571 | const MCExpr *PICBase = | |||
1572 | MCSymbolRefExpr::create(MF->getPICBaseSymbol(), OutContext); | |||
1573 | DotExpr = MCBinaryExpr::createSub(DotExpr, PICBase, OutContext); | |||
1574 | ||||
1575 | DotExpr = MCBinaryExpr::createAdd(MCSymbolRefExpr::create(OpSym,OutContext), | |||
1576 | DotExpr, OutContext); | |||
1577 | ||||
1578 | EmitAndCountInstruction(MCInstBuilder(X86::ADD32ri) | |||
1579 | .addReg(MI->getOperand(0).getReg()) | |||
1580 | .addReg(MI->getOperand(1).getReg()) | |||
1581 | .addExpr(DotExpr)); | |||
1582 | return; | |||
1583 | } | |||
1584 | case TargetOpcode::STATEPOINT: | |||
1585 | return LowerSTATEPOINT(*MI, MCInstLowering); | |||
1586 | ||||
1587 | case TargetOpcode::FAULTING_OP: | |||
1588 | return LowerFAULTING_OP(*MI, MCInstLowering); | |||
1589 | ||||
1590 | case TargetOpcode::FENTRY_CALL: | |||
1591 | return LowerFENTRY_CALL(*MI, MCInstLowering); | |||
1592 | ||||
1593 | case TargetOpcode::PATCHABLE_OP: | |||
1594 | return LowerPATCHABLE_OP(*MI, MCInstLowering); | |||
1595 | ||||
1596 | case TargetOpcode::STACKMAP: | |||
1597 | return LowerSTACKMAP(*MI); | |||
1598 | ||||
1599 | case TargetOpcode::PATCHPOINT: | |||
1600 | return LowerPATCHPOINT(*MI, MCInstLowering); | |||
1601 | ||||
1602 | case TargetOpcode::PATCHABLE_FUNCTION_ENTER: | |||
1603 | return LowerPATCHABLE_FUNCTION_ENTER(*MI, MCInstLowering); | |||
1604 | ||||
1605 | case TargetOpcode::PATCHABLE_RET: | |||
1606 | return LowerPATCHABLE_RET(*MI, MCInstLowering); | |||
1607 | ||||
1608 | case TargetOpcode::PATCHABLE_TAIL_CALL: | |||
1609 | return LowerPATCHABLE_TAIL_CALL(*MI, MCInstLowering); | |||
1610 | ||||
1611 | case TargetOpcode::PATCHABLE_EVENT_CALL: | |||
1612 | return LowerPATCHABLE_EVENT_CALL(*MI, MCInstLowering); | |||
1613 | ||||
1614 | case X86::MORESTACK_RET: | |||
1615 | EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget))); | |||
1616 | return; | |||
1617 | ||||
1618 | case X86::MORESTACK_RET_RESTORE_R10: | |||
1619 | // Return, then restore R10. | |||
1620 | EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget))); | |||
1621 | EmitAndCountInstruction(MCInstBuilder(X86::MOV64rr) | |||
1622 | .addReg(X86::R10) | |||
1623 | .addReg(X86::RAX)); | |||
1624 | return; | |||
1625 | ||||
1626 | case X86::SEH_PushReg: | |||
1627 | case X86::SEH_SaveReg: | |||
1628 | case X86::SEH_SaveXMM: | |||
1629 | case X86::SEH_StackAlloc: | |||
1630 | case X86::SEH_SetFrame: | |||
1631 | case X86::SEH_PushFrame: | |||
1632 | case X86::SEH_EndPrologue: | |||
1633 | EmitSEHInstruction(MI); | |||
1634 | return; | |||
1635 | ||||
1636 | case X86::SEH_Epilogue: { | |||
1637 | assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?")(static_cast <bool> (MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?" ) ? void (0) : __assert_fail ("MF->hasWinCFI() && \"SEH_ instruction in function without WinCFI?\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 1637, __extension__ __PRETTY_FUNCTION__)); | |||
1638 | MachineBasicBlock::const_iterator MBBI(MI); | |||
1639 | // Check if preceded by a call and emit nop if so. | |||
1640 | for (MBBI = PrevCrossBBInst(MBBI); | |||
1641 | MBBI != MachineBasicBlock::const_iterator(); | |||
1642 | MBBI = PrevCrossBBInst(MBBI)) { | |||
1643 | // Conservatively assume that pseudo instructions don't emit code and keep | |||
1644 | // looking for a call. We may emit an unnecessary nop in some cases. | |||
1645 | if (!MBBI->isPseudo()) { | |||
1646 | if (MBBI->isCall()) | |||
1647 | EmitAndCountInstruction(MCInstBuilder(X86::NOOP)); | |||
1648 | break; | |||
1649 | } | |||
1650 | } | |||
1651 | return; | |||
1652 | } | |||
1653 | ||||
1654 | // Lower PSHUFB and VPERMILP normally but add a comment if we can find | |||
1655 | // a constant shuffle mask. We won't be able to do this at the MC layer | |||
1656 | // because the mask isn't an immediate. | |||
1657 | case X86::PSHUFBrm: | |||
1658 | case X86::VPSHUFBrm: | |||
1659 | case X86::VPSHUFBYrm: | |||
1660 | case X86::VPSHUFBZ128rm: | |||
1661 | case X86::VPSHUFBZ128rmk: | |||
1662 | case X86::VPSHUFBZ128rmkz: | |||
1663 | case X86::VPSHUFBZ256rm: | |||
1664 | case X86::VPSHUFBZ256rmk: | |||
1665 | case X86::VPSHUFBZ256rmkz: | |||
1666 | case X86::VPSHUFBZrm: | |||
1667 | case X86::VPSHUFBZrmk: | |||
1668 | case X86::VPSHUFBZrmkz: { | |||
1669 | if (!OutStreamer->isVerboseAsm()) | |||
1670 | break; | |||
1671 | unsigned SrcIdx, MaskIdx; | |||
1672 | switch (MI->getOpcode()) { | |||
1673 | default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 1673); | |||
1674 | case X86::PSHUFBrm: | |||
1675 | case X86::VPSHUFBrm: | |||
1676 | case X86::VPSHUFBYrm: | |||
1677 | case X86::VPSHUFBZ128rm: | |||
1678 | case X86::VPSHUFBZ256rm: | |||
1679 | case X86::VPSHUFBZrm: | |||
1680 | SrcIdx = 1; MaskIdx = 5; break; | |||
1681 | case X86::VPSHUFBZ128rmkz: | |||
1682 | case X86::VPSHUFBZ256rmkz: | |||
1683 | case X86::VPSHUFBZrmkz: | |||
1684 | SrcIdx = 2; MaskIdx = 6; break; | |||
1685 | case X86::VPSHUFBZ128rmk: | |||
1686 | case X86::VPSHUFBZ256rmk: | |||
1687 | case X86::VPSHUFBZrmk: | |||
1688 | SrcIdx = 3; MaskIdx = 7; break; | |||
1689 | } | |||
1690 | ||||
1691 | assert(MI->getNumOperands() >= 6 &&(static_cast <bool> (MI->getNumOperands() >= 6 && "We should always have at least 6 operands!") ? void (0) : __assert_fail ("MI->getNumOperands() >= 6 && \"We should always have at least 6 operands!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 1692, __extension__ __PRETTY_FUNCTION__)) | |||
1692 | "We should always have at least 6 operands!")(static_cast <bool> (MI->getNumOperands() >= 6 && "We should always have at least 6 operands!") ? void (0) : __assert_fail ("MI->getNumOperands() >= 6 && \"We should always have at least 6 operands!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 1692, __extension__ __PRETTY_FUNCTION__)); | |||
1693 | ||||
1694 | const MachineOperand &MaskOp = MI->getOperand(MaskIdx); | |||
1695 | if (auto *C = getConstantFromPool(*MI, MaskOp)) { | |||
1696 | SmallVector<int, 64> Mask; | |||
1697 | DecodePSHUFBMask(C, Mask); | |||
1698 | if (!Mask.empty()) | |||
1699 | OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask), | |||
1700 | !EnablePrintSchedInfo); | |||
1701 | } | |||
1702 | break; | |||
1703 | } | |||
1704 | ||||
1705 | case X86::VPERMILPSrm: | |||
1706 | case X86::VPERMILPSYrm: | |||
1707 | case X86::VPERMILPSZ128rm: | |||
1708 | case X86::VPERMILPSZ128rmk: | |||
1709 | case X86::VPERMILPSZ128rmkz: | |||
1710 | case X86::VPERMILPSZ256rm: | |||
1711 | case X86::VPERMILPSZ256rmk: | |||
1712 | case X86::VPERMILPSZ256rmkz: | |||
1713 | case X86::VPERMILPSZrm: | |||
1714 | case X86::VPERMILPSZrmk: | |||
1715 | case X86::VPERMILPSZrmkz: | |||
1716 | case X86::VPERMILPDrm: | |||
1717 | case X86::VPERMILPDYrm: | |||
1718 | case X86::VPERMILPDZ128rm: | |||
1719 | case X86::VPERMILPDZ128rmk: | |||
1720 | case X86::VPERMILPDZ128rmkz: | |||
1721 | case X86::VPERMILPDZ256rm: | |||
1722 | case X86::VPERMILPDZ256rmk: | |||
1723 | case X86::VPERMILPDZ256rmkz: | |||
1724 | case X86::VPERMILPDZrm: | |||
1725 | case X86::VPERMILPDZrmk: | |||
1726 | case X86::VPERMILPDZrmkz: { | |||
1727 | if (!OutStreamer->isVerboseAsm()) | |||
1728 | break; | |||
1729 | unsigned SrcIdx, MaskIdx; | |||
1730 | unsigned ElSize; | |||
1731 | switch (MI->getOpcode()) { | |||
1732 | default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 1732); | |||
1733 | case X86::VPERMILPSrm: | |||
1734 | case X86::VPERMILPSYrm: | |||
1735 | case X86::VPERMILPSZ128rm: | |||
1736 | case X86::VPERMILPSZ256rm: | |||
1737 | case X86::VPERMILPSZrm: | |||
1738 | SrcIdx = 1; MaskIdx = 5; ElSize = 32; break; | |||
1739 | case X86::VPERMILPSZ128rmkz: | |||
1740 | case X86::VPERMILPSZ256rmkz: | |||
1741 | case X86::VPERMILPSZrmkz: | |||
1742 | SrcIdx = 2; MaskIdx = 6; ElSize = 32; break; | |||
1743 | case X86::VPERMILPSZ128rmk: | |||
1744 | case X86::VPERMILPSZ256rmk: | |||
1745 | case X86::VPERMILPSZrmk: | |||
1746 | SrcIdx = 3; MaskIdx = 7; ElSize = 32; break; | |||
1747 | case X86::VPERMILPDrm: | |||
1748 | case X86::VPERMILPDYrm: | |||
1749 | case X86::VPERMILPDZ128rm: | |||
1750 | case X86::VPERMILPDZ256rm: | |||
1751 | case X86::VPERMILPDZrm: | |||
1752 | SrcIdx = 1; MaskIdx = 5; ElSize = 64; break; | |||
1753 | case X86::VPERMILPDZ128rmkz: | |||
1754 | case X86::VPERMILPDZ256rmkz: | |||
1755 | case X86::VPERMILPDZrmkz: | |||
1756 | SrcIdx = 2; MaskIdx = 6; ElSize = 64; break; | |||
1757 | case X86::VPERMILPDZ128rmk: | |||
1758 | case X86::VPERMILPDZ256rmk: | |||
1759 | case X86::VPERMILPDZrmk: | |||
1760 | SrcIdx = 3; MaskIdx = 7; ElSize = 64; break; | |||
1761 | } | |||
1762 | ||||
1763 | assert(MI->getNumOperands() >= 6 &&(static_cast <bool> (MI->getNumOperands() >= 6 && "We should always have at least 6 operands!") ? void (0) : __assert_fail ("MI->getNumOperands() >= 6 && \"We should always have at least 6 operands!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 1764, __extension__ __PRETTY_FUNCTION__)) | |||
1764 | "We should always have at least 6 operands!")(static_cast <bool> (MI->getNumOperands() >= 6 && "We should always have at least 6 operands!") ? void (0) : __assert_fail ("MI->getNumOperands() >= 6 && \"We should always have at least 6 operands!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 1764, __extension__ __PRETTY_FUNCTION__)); | |||
1765 | ||||
1766 | const MachineOperand &MaskOp = MI->getOperand(MaskIdx); | |||
1767 | if (auto *C = getConstantFromPool(*MI, MaskOp)) { | |||
1768 | SmallVector<int, 16> Mask; | |||
1769 | DecodeVPERMILPMask(C, ElSize, Mask); | |||
1770 | if (!Mask.empty()) | |||
1771 | OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask), | |||
1772 | !EnablePrintSchedInfo); | |||
1773 | } | |||
1774 | break; | |||
1775 | } | |||
1776 | ||||
1777 | case X86::VPERMIL2PDrm: | |||
1778 | case X86::VPERMIL2PSrm: | |||
1779 | case X86::VPERMIL2PDYrm: | |||
1780 | case X86::VPERMIL2PSYrm: { | |||
1781 | if (!OutStreamer->isVerboseAsm()) | |||
1782 | break; | |||
1783 | assert(MI->getNumOperands() >= 8 &&(static_cast <bool> (MI->getNumOperands() >= 8 && "We should always have at least 8 operands!") ? void (0) : __assert_fail ("MI->getNumOperands() >= 8 && \"We should always have at least 8 operands!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 1784, __extension__ __PRETTY_FUNCTION__)) | |||
1784 | "We should always have at least 8 operands!")(static_cast <bool> (MI->getNumOperands() >= 8 && "We should always have at least 8 operands!") ? void (0) : __assert_fail ("MI->getNumOperands() >= 8 && \"We should always have at least 8 operands!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 1784, __extension__ __PRETTY_FUNCTION__)); | |||
1785 | ||||
1786 | const MachineOperand &CtrlOp = MI->getOperand(MI->getNumOperands() - 1); | |||
1787 | if (!CtrlOp.isImm()) | |||
1788 | break; | |||
1789 | ||||
1790 | unsigned ElSize; | |||
1791 | switch (MI->getOpcode()) { | |||
1792 | default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 1792); | |||
1793 | case X86::VPERMIL2PSrm: case X86::VPERMIL2PSYrm: ElSize = 32; break; | |||
1794 | case X86::VPERMIL2PDrm: case X86::VPERMIL2PDYrm: ElSize = 64; break; | |||
1795 | } | |||
1796 | ||||
1797 | const MachineOperand &MaskOp = MI->getOperand(6); | |||
1798 | if (auto *C = getConstantFromPool(*MI, MaskOp)) { | |||
1799 | SmallVector<int, 16> Mask; | |||
1800 | DecodeVPERMIL2PMask(C, (unsigned)CtrlOp.getImm(), ElSize, Mask); | |||
1801 | if (!Mask.empty()) | |||
1802 | OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask), | |||
1803 | !EnablePrintSchedInfo); | |||
1804 | } | |||
1805 | break; | |||
1806 | } | |||
1807 | ||||
1808 | case X86::VPPERMrrm: { | |||
1809 | if (!OutStreamer->isVerboseAsm()) | |||
1810 | break; | |||
1811 | assert(MI->getNumOperands() >= 7 &&(static_cast <bool> (MI->getNumOperands() >= 7 && "We should always have at least 7 operands!") ? void (0) : __assert_fail ("MI->getNumOperands() >= 7 && \"We should always have at least 7 operands!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 1812, __extension__ __PRETTY_FUNCTION__)) | |||
1812 | "We should always have at least 7 operands!")(static_cast <bool> (MI->getNumOperands() >= 7 && "We should always have at least 7 operands!") ? void (0) : __assert_fail ("MI->getNumOperands() >= 7 && \"We should always have at least 7 operands!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 1812, __extension__ __PRETTY_FUNCTION__)); | |||
1813 | ||||
1814 | const MachineOperand &MaskOp = MI->getOperand(6); | |||
1815 | if (auto *C = getConstantFromPool(*MI, MaskOp)) { | |||
1816 | SmallVector<int, 16> Mask; | |||
1817 | DecodeVPPERMMask(C, Mask); | |||
1818 | if (!Mask.empty()) | |||
1819 | OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask), | |||
1820 | !EnablePrintSchedInfo); | |||
1821 | } | |||
1822 | break; | |||
1823 | } | |||
1824 | ||||
1825 | #define MOV_CASE(Prefix, Suffix)case X86::PrefixMOVAPDSuffixrm: case X86::PrefixMOVAPSSuffixrm : case X86::PrefixMOVUPDSuffixrm: case X86::PrefixMOVUPSSuffixrm : case X86::PrefixMOVDQASuffixrm: case X86::PrefixMOVDQUSuffixrm : \ | |||
1826 | case X86::Prefix##MOVAPD##Suffix##rm: \ | |||
1827 | case X86::Prefix##MOVAPS##Suffix##rm: \ | |||
1828 | case X86::Prefix##MOVUPD##Suffix##rm: \ | |||
1829 | case X86::Prefix##MOVUPS##Suffix##rm: \ | |||
1830 | case X86::Prefix##MOVDQA##Suffix##rm: \ | |||
1831 | case X86::Prefix##MOVDQU##Suffix##rm: | |||
1832 | ||||
1833 | #define MOV_AVX512_CASE(Suffix)case X86::VMOVDQA64Suffixrm: case X86::VMOVDQA32Suffixrm: case X86::VMOVDQU64Suffixrm: case X86::VMOVDQU32Suffixrm: case X86 ::VMOVDQU16Suffixrm: case X86::VMOVDQU8Suffixrm: case X86::VMOVAPSSuffixrm : case X86::VMOVAPDSuffixrm: case X86::VMOVUPSSuffixrm: case X86 ::VMOVUPDSuffixrm: \ | |||
1834 | case X86::VMOVDQA64##Suffix##rm: \ | |||
1835 | case X86::VMOVDQA32##Suffix##rm: \ | |||
1836 | case X86::VMOVDQU64##Suffix##rm: \ | |||
1837 | case X86::VMOVDQU32##Suffix##rm: \ | |||
1838 | case X86::VMOVDQU16##Suffix##rm: \ | |||
1839 | case X86::VMOVDQU8##Suffix##rm: \ | |||
1840 | case X86::VMOVAPS##Suffix##rm: \ | |||
1841 | case X86::VMOVAPD##Suffix##rm: \ | |||
1842 | case X86::VMOVUPS##Suffix##rm: \ | |||
1843 | case X86::VMOVUPD##Suffix##rm: | |||
1844 | ||||
1845 | #define CASE_ALL_MOV_RM()case X86::MOVAPDrm: case X86::MOVAPSrm: case X86::MOVUPDrm: case X86::MOVUPSrm: case X86::MOVDQArm: case X86::MOVDQUrm: case X86 ::VMOVAPDrm: case X86::VMOVAPSrm: case X86::VMOVUPDrm: case X86 ::VMOVUPSrm: case X86::VMOVDQArm: case X86::VMOVDQUrm: case X86 ::VMOVAPDYrm: case X86::VMOVAPSYrm: case X86::VMOVUPDYrm: case X86::VMOVUPSYrm: case X86::VMOVDQAYrm: case X86::VMOVDQUYrm: case X86::VMOVDQA64Zrm: case X86::VMOVDQA32Zrm: case X86::VMOVDQU64Zrm : case X86::VMOVDQU32Zrm: case X86::VMOVDQU16Zrm: case X86::VMOVDQU8Zrm : case X86::VMOVAPSZrm: case X86::VMOVAPDZrm: case X86::VMOVUPSZrm : case X86::VMOVUPDZrm: case X86::VMOVDQA64Z256rm: case X86:: VMOVDQA32Z256rm: case X86::VMOVDQU64Z256rm: case X86::VMOVDQU32Z256rm : case X86::VMOVDQU16Z256rm: case X86::VMOVDQU8Z256rm: case X86 ::VMOVAPSZ256rm: case X86::VMOVAPDZ256rm: case X86::VMOVUPSZ256rm : case X86::VMOVUPDZ256rm: case X86::VMOVDQA64Z128rm: case X86 ::VMOVDQA32Z128rm: case X86::VMOVDQU64Z128rm: case X86::VMOVDQU32Z128rm : case X86::VMOVDQU16Z128rm: case X86::VMOVDQU8Z128rm: case X86 ::VMOVAPSZ128rm: case X86::VMOVAPDZ128rm: case X86::VMOVUPSZ128rm : case X86::VMOVUPDZ128rm: \ | |||
1846 | MOV_CASE(, )case X86::MOVAPDrm: case X86::MOVAPSrm: case X86::MOVUPDrm: case X86::MOVUPSrm: case X86::MOVDQArm: case X86::MOVDQUrm: /* SSE */ \ | |||
1847 | MOV_CASE(V, )case X86::VMOVAPDrm: case X86::VMOVAPSrm: case X86::VMOVUPDrm : case X86::VMOVUPSrm: case X86::VMOVDQArm: case X86::VMOVDQUrm : /* AVX-128 */ \ | |||
1848 | MOV_CASE(V, Y)case X86::VMOVAPDYrm: case X86::VMOVAPSYrm: case X86::VMOVUPDYrm : case X86::VMOVUPSYrm: case X86::VMOVDQAYrm: case X86::VMOVDQUYrm : /* AVX-256 */ \ | |||
1849 | MOV_AVX512_CASE(Z)case X86::VMOVDQA64Zrm: case X86::VMOVDQA32Zrm: case X86::VMOVDQU64Zrm : case X86::VMOVDQU32Zrm: case X86::VMOVDQU16Zrm: case X86::VMOVDQU8Zrm : case X86::VMOVAPSZrm: case X86::VMOVAPDZrm: case X86::VMOVUPSZrm : case X86::VMOVUPDZrm: \ | |||
1850 | MOV_AVX512_CASE(Z256)case X86::VMOVDQA64Z256rm: case X86::VMOVDQA32Z256rm: case X86 ::VMOVDQU64Z256rm: case X86::VMOVDQU32Z256rm: case X86::VMOVDQU16Z256rm : case X86::VMOVDQU8Z256rm: case X86::VMOVAPSZ256rm: case X86 ::VMOVAPDZ256rm: case X86::VMOVUPSZ256rm: case X86::VMOVUPDZ256rm : \ | |||
1851 | MOV_AVX512_CASE(Z128)case X86::VMOVDQA64Z128rm: case X86::VMOVDQA32Z128rm: case X86 ::VMOVDQU64Z128rm: case X86::VMOVDQU32Z128rm: case X86::VMOVDQU16Z128rm : case X86::VMOVDQU8Z128rm: case X86::VMOVAPSZ128rm: case X86 ::VMOVAPDZ128rm: case X86::VMOVUPSZ128rm: case X86::VMOVUPDZ128rm : | |||
1852 | ||||
1853 | // For loads from a constant pool to a vector register, print the constant | |||
1854 | // loaded. | |||
1855 | CASE_ALL_MOV_RM()case X86::MOVAPDrm: case X86::MOVAPSrm: case X86::MOVUPDrm: case X86::MOVUPSrm: case X86::MOVDQArm: case X86::MOVDQUrm: case X86 ::VMOVAPDrm: case X86::VMOVAPSrm: case X86::VMOVUPDrm: case X86 ::VMOVUPSrm: case X86::VMOVDQArm: case X86::VMOVDQUrm: case X86 ::VMOVAPDYrm: case X86::VMOVAPSYrm: case X86::VMOVUPDYrm: case X86::VMOVUPSYrm: case X86::VMOVDQAYrm: case X86::VMOVDQUYrm: case X86::VMOVDQA64Zrm: case X86::VMOVDQA32Zrm: case X86::VMOVDQU64Zrm : case X86::VMOVDQU32Zrm: case X86::VMOVDQU16Zrm: case X86::VMOVDQU8Zrm : case X86::VMOVAPSZrm: case X86::VMOVAPDZrm: case X86::VMOVUPSZrm : case X86::VMOVUPDZrm: case X86::VMOVDQA64Z256rm: case X86:: VMOVDQA32Z256rm: case X86::VMOVDQU64Z256rm: case X86::VMOVDQU32Z256rm : case X86::VMOVDQU16Z256rm: case X86::VMOVDQU8Z256rm: case X86 ::VMOVAPSZ256rm: case X86::VMOVAPDZ256rm: case X86::VMOVUPSZ256rm : case X86::VMOVUPDZ256rm: case X86::VMOVDQA64Z128rm: case X86 ::VMOVDQA32Z128rm: case X86::VMOVDQU64Z128rm: case X86::VMOVDQU32Z128rm : case X86::VMOVDQU16Z128rm: case X86::VMOVDQU8Z128rm: case X86 ::VMOVAPSZ128rm: case X86::VMOVAPDZ128rm: case X86::VMOVUPSZ128rm : case X86::VMOVUPDZ128rm: | |||
1856 | case X86::VBROADCASTF128: | |||
1857 | case X86::VBROADCASTI128: | |||
1858 | case X86::VBROADCASTF32X4Z256rm: | |||
1859 | case X86::VBROADCASTF32X4rm: | |||
1860 | case X86::VBROADCASTF32X8rm: | |||
1861 | case X86::VBROADCASTF64X2Z128rm: | |||
1862 | case X86::VBROADCASTF64X2rm: | |||
1863 | case X86::VBROADCASTF64X4rm: | |||
1864 | case X86::VBROADCASTI32X4Z256rm: | |||
1865 | case X86::VBROADCASTI32X4rm: | |||
1866 | case X86::VBROADCASTI32X8rm: | |||
1867 | case X86::VBROADCASTI64X2Z128rm: | |||
1868 | case X86::VBROADCASTI64X2rm: | |||
1869 | case X86::VBROADCASTI64X4rm: | |||
1870 | if (!OutStreamer->isVerboseAsm()) | |||
1871 | break; | |||
1872 | if (MI->getNumOperands() <= 4) | |||
1873 | break; | |||
1874 | if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) { | |||
1875 | int NumLanes = 1; | |||
1876 | // Override NumLanes for the broadcast instructions. | |||
1877 | switch (MI->getOpcode()) { | |||
1878 | case X86::VBROADCASTF128: NumLanes = 2; break; | |||
1879 | case X86::VBROADCASTI128: NumLanes = 2; break; | |||
1880 | case X86::VBROADCASTF32X4Z256rm: NumLanes = 2; break; | |||
1881 | case X86::VBROADCASTF32X4rm: NumLanes = 4; break; | |||
1882 | case X86::VBROADCASTF32X8rm: NumLanes = 2; break; | |||
1883 | case X86::VBROADCASTF64X2Z128rm: NumLanes = 2; break; | |||
1884 | case X86::VBROADCASTF64X2rm: NumLanes = 4; break; | |||
1885 | case X86::VBROADCASTF64X4rm: NumLanes = 2; break; | |||
1886 | case X86::VBROADCASTI32X4Z256rm: NumLanes = 2; break; | |||
1887 | case X86::VBROADCASTI32X4rm: NumLanes = 4; break; | |||
1888 | case X86::VBROADCASTI32X8rm: NumLanes = 2; break; | |||
1889 | case X86::VBROADCASTI64X2Z128rm: NumLanes = 2; break; | |||
1890 | case X86::VBROADCASTI64X2rm: NumLanes = 4; break; | |||
1891 | case X86::VBROADCASTI64X4rm: NumLanes = 2; break; | |||
1892 | } | |||
1893 | ||||
1894 | std::string Comment; | |||
1895 | raw_string_ostream CS(Comment); | |||
1896 | const MachineOperand &DstOp = MI->getOperand(0); | |||
1897 | CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = "; | |||
1898 | if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) { | |||
1899 | CS << "["; | |||
1900 | for (int l = 0; l != NumLanes; ++l) { | |||
1901 | for (int i = 0, NumElements = CDS->getNumElements(); i < NumElements; ++i) { | |||
1902 | if (i != 0 || l != 0) | |||
1903 | CS << ","; | |||
1904 | if (CDS->getElementType()->isIntegerTy()) | |||
1905 | CS << CDS->getElementAsInteger(i); | |||
1906 | else if (CDS->getElementType()->isFloatTy()) | |||
1907 | CS << CDS->getElementAsFloat(i); | |||
1908 | else if (CDS->getElementType()->isDoubleTy()) | |||
1909 | CS << CDS->getElementAsDouble(i); | |||
1910 | else | |||
1911 | CS << "?"; | |||
1912 | } | |||
1913 | } | |||
1914 | CS << "]"; | |||
1915 | OutStreamer->AddComment(CS.str(), !EnablePrintSchedInfo); | |||
1916 | } else if (auto *CV = dyn_cast<ConstantVector>(C)) { | |||
1917 | CS << "<"; | |||
1918 | for (int l = 0; l != NumLanes; ++l) { | |||
1919 | for (int i = 0, NumOperands = CV->getNumOperands(); i < NumOperands; ++i) { | |||
1920 | if (i != 0 || l != 0) | |||
1921 | CS << ","; | |||
1922 | printConstant(CV->getOperand(i), CS); | |||
1923 | } | |||
1924 | } | |||
1925 | CS << ">"; | |||
1926 | OutStreamer->AddComment(CS.str(), !EnablePrintSchedInfo); | |||
1927 | } | |||
1928 | } | |||
1929 | break; | |||
1930 | case X86::VBROADCASTSSrm: | |||
1931 | case X86::VBROADCASTSSYrm: | |||
1932 | case X86::VBROADCASTSSZ128m: | |||
1933 | case X86::VBROADCASTSSZ256m: | |||
1934 | case X86::VBROADCASTSSZm: | |||
1935 | case X86::VBROADCASTSDYrm: | |||
1936 | case X86::VBROADCASTSDZ256m: | |||
1937 | case X86::VBROADCASTSDZm: | |||
1938 | case X86::VPBROADCASTBrm: | |||
1939 | case X86::VPBROADCASTBYrm: | |||
1940 | case X86::VPBROADCASTBZ128m: | |||
1941 | case X86::VPBROADCASTBZ256m: | |||
1942 | case X86::VPBROADCASTBZm: | |||
1943 | case X86::VPBROADCASTDrm: | |||
1944 | case X86::VPBROADCASTDYrm: | |||
1945 | case X86::VPBROADCASTDZ128m: | |||
1946 | case X86::VPBROADCASTDZ256m: | |||
1947 | case X86::VPBROADCASTDZm: | |||
1948 | case X86::VPBROADCASTQrm: | |||
1949 | case X86::VPBROADCASTQYrm: | |||
1950 | case X86::VPBROADCASTQZ128m: | |||
1951 | case X86::VPBROADCASTQZ256m: | |||
1952 | case X86::VPBROADCASTQZm: | |||
1953 | case X86::VPBROADCASTWrm: | |||
1954 | case X86::VPBROADCASTWYrm: | |||
1955 | case X86::VPBROADCASTWZ128m: | |||
1956 | case X86::VPBROADCASTWZ256m: | |||
1957 | case X86::VPBROADCASTWZm: | |||
1958 | if (!OutStreamer->isVerboseAsm()) | |||
1959 | break; | |||
1960 | if (MI->getNumOperands() <= 4) | |||
1961 | break; | |||
1962 | if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) { | |||
1963 | int NumElts; | |||
1964 | switch (MI->getOpcode()) { | |||
1965 | default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Target/X86/X86MCInstLower.cpp" , 1965); | |||
1966 | case X86::VBROADCASTSSrm: NumElts = 4; break; | |||
1967 | case X86::VBROADCASTSSYrm: NumElts = 8; break; | |||
1968 | case X86::VBROADCASTSSZ128m: NumElts = 4; break; | |||
1969 | case X86::VBROADCASTSSZ256m: NumElts = 8; break; | |||
1970 | case X86::VBROADCASTSSZm: NumElts = 16; break; | |||
1971 | case X86::VBROADCASTSDYrm: NumElts = 4; break; | |||
1972 | case X86::VBROADCASTSDZ256m: NumElts = 4; break; | |||
1973 | case X86::VBROADCASTSDZm: NumElts = 8; break; | |||
1974 | case X86::VPBROADCASTBrm: NumElts = 16; break; | |||
1975 | case X86::VPBROADCASTBYrm: NumElts = 32; break; | |||
1976 | case X86::VPBROADCASTBZ128m: NumElts = 16; break; | |||
1977 | case X86::VPBROADCASTBZ256m: NumElts = 32; break; | |||
1978 | case X86::VPBROADCASTBZm: NumElts = 64; break; | |||
1979 | case X86::VPBROADCASTDrm: NumElts = 4; break; | |||
1980 | case X86::VPBROADCASTDYrm: NumElts = 8; break; | |||
1981 | case X86::VPBROADCASTDZ128m: NumElts = 4; break; | |||
1982 | case X86::VPBROADCASTDZ256m: NumElts = 8; break; | |||
1983 | case X86::VPBROADCASTDZm: NumElts = 16; break; | |||
1984 | case X86::VPBROADCASTQrm: NumElts = 2; break; | |||
1985 | case X86::VPBROADCASTQYrm: NumElts = 4; break; | |||
1986 | case X86::VPBROADCASTQZ128m: NumElts = 2; break; | |||
1987 | case X86::VPBROADCASTQZ256m: NumElts = 4; break; | |||
1988 | case X86::VPBROADCASTQZm: NumElts = 8; break; | |||
1989 | case X86::VPBROADCASTWrm: NumElts = 8; break; | |||
1990 | case X86::VPBROADCASTWYrm: NumElts = 16; break; | |||
1991 | case X86::VPBROADCASTWZ128m: NumElts = 8; break; | |||
1992 | case X86::VPBROADCASTWZ256m: NumElts = 16; break; | |||
1993 | case X86::VPBROADCASTWZm: NumElts = 32; break; | |||
1994 | } | |||
1995 | ||||
1996 | std::string Comment; | |||
1997 | raw_string_ostream CS(Comment); | |||
1998 | const MachineOperand &DstOp = MI->getOperand(0); | |||
1999 | CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = "; | |||
2000 | CS << "["; | |||
2001 | for (int i = 0; i != NumElts; ++i) { | |||
2002 | if (i != 0) | |||
2003 | CS << ","; | |||
2004 | printConstant(C, CS); | |||
2005 | } | |||
2006 | CS << "]"; | |||
2007 | OutStreamer->AddComment(CS.str(), !EnablePrintSchedInfo); | |||
2008 | } | |||
2009 | } | |||
2010 | ||||
2011 | MCInst TmpInst; | |||
2012 | MCInstLowering.Lower(MI, TmpInst); | |||
2013 | if (MI->getAsmPrinterFlag(MachineInstr::NoSchedComment)) | |||
2014 | TmpInst.setFlags(TmpInst.getFlags() | X86::NO_SCHED_INFO); | |||
2015 | ||||
2016 | // Stackmap shadows cannot include branch targets, so we can count the bytes | |||
2017 | // in a call towards the shadow, but must ensure that the no thread returns | |||
2018 | // in to the stackmap shadow. The only way to achieve this is if the call | |||
2019 | // is at the end of the shadow. | |||
2020 | if (MI->isCall()) { | |||
2021 | // Count then size of the call towards the shadow | |||
2022 | SMShadowTracker.count(TmpInst, getSubtargetInfo(), CodeEmitter.get()); | |||
2023 | // Then flush the shadow so that we fill with nops before the call, not | |||
2024 | // after it. | |||
2025 | SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo()); | |||
2026 | // Then emit the call | |||
2027 | OutStreamer->EmitInstruction(TmpInst, getSubtargetInfo()); | |||
2028 | return; | |||
2029 | } | |||
2030 | ||||
2031 | EmitAndCountInstruction(TmpInst); | |||
2032 | } |