// Static-analyzer report header for lib/Target/X86/X86MCInstLower.cpp.
// Warning reported at line 1088, column 33: "1st function call argument is an uninitialized value".
//===-- X86MCInstLower.cpp - Convert X86 MachineInstr to an MCInst --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains code to lower X86 MachineInstrs to their corresponding
// MCInst records.
//
//===----------------------------------------------------------------------===//
14 | ||||
15 | #include "InstPrinter/X86ATTInstPrinter.h" | |||
16 | #include "InstPrinter/X86InstComments.h" | |||
17 | #include "MCTargetDesc/X86BaseInfo.h" | |||
18 | #include "Utils/X86ShuffleDecode.h" | |||
19 | #include "X86AsmPrinter.h" | |||
20 | #include "X86RegisterInfo.h" | |||
21 | #include "X86ShuffleDecodeConstantPool.h" | |||
22 | #include "llvm/ADT/Optional.h" | |||
23 | #include "llvm/ADT/SmallString.h" | |||
24 | #include "llvm/ADT/iterator_range.h" | |||
25 | #include "llvm/BinaryFormat/ELF.h" | |||
26 | #include "llvm/CodeGen/MachineConstantPool.h" | |||
27 | #include "llvm/CodeGen/MachineFunction.h" | |||
28 | #include "llvm/CodeGen/MachineModuleInfoImpls.h" | |||
29 | #include "llvm/CodeGen/MachineOperand.h" | |||
30 | #include "llvm/CodeGen/StackMaps.h" | |||
31 | #include "llvm/IR/DataLayout.h" | |||
32 | #include "llvm/IR/GlobalValue.h" | |||
33 | #include "llvm/IR/Mangler.h" | |||
34 | #include "llvm/MC/MCAsmInfo.h" | |||
35 | #include "llvm/MC/MCCodeEmitter.h" | |||
36 | #include "llvm/MC/MCContext.h" | |||
37 | #include "llvm/MC/MCExpr.h" | |||
38 | #include "llvm/MC/MCFixup.h" | |||
39 | #include "llvm/MC/MCInst.h" | |||
40 | #include "llvm/MC/MCInstBuilder.h" | |||
41 | #include "llvm/MC/MCSection.h" | |||
42 | #include "llvm/MC/MCSectionELF.h" | |||
43 | #include "llvm/MC/MCSectionMachO.h" | |||
44 | #include "llvm/MC/MCStreamer.h" | |||
45 | #include "llvm/MC/MCSymbol.h" | |||
46 | #include "llvm/MC/MCSymbolELF.h" | |||
47 | #include "llvm/Support/TargetRegistry.h" | |||
48 | #include "llvm/Target/TargetLoweringObjectFile.h" | |||
49 | ||||
50 | using namespace llvm; | |||
51 | ||||
52 | namespace { | |||
53 | ||||
54 | /// X86MCInstLower - This class is used to lower an MachineInstr into an MCInst. | |||
55 | class X86MCInstLower { | |||
56 | MCContext &Ctx; | |||
57 | const MachineFunction &MF; | |||
58 | const TargetMachine &TM; | |||
59 | const MCAsmInfo &MAI; | |||
60 | X86AsmPrinter &AsmPrinter; | |||
61 | public: | |||
62 | X86MCInstLower(const MachineFunction &MF, X86AsmPrinter &asmprinter); | |||
63 | ||||
64 | Optional<MCOperand> LowerMachineOperand(const MachineInstr *MI, | |||
65 | const MachineOperand &MO) const; | |||
66 | void Lower(const MachineInstr *MI, MCInst &OutMI) const; | |||
67 | ||||
68 | MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const; | |||
69 | MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const; | |||
70 | ||||
71 | private: | |||
72 | MachineModuleInfoMachO &getMachOMMI() const; | |||
73 | }; | |||
74 | ||||
75 | } // end anonymous namespace | |||
76 | ||||
77 | // Emit a minimal sequence of nops spanning NumBytes bytes. | |||
78 | static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit, | |||
79 | const MCSubtargetInfo &STI); | |||
80 | ||||
81 | void X86AsmPrinter::StackMapShadowTracker::count(MCInst &Inst, | |||
82 | const MCSubtargetInfo &STI, | |||
83 | MCCodeEmitter *CodeEmitter) { | |||
84 | if (InShadow) { | |||
85 | SmallString<256> Code; | |||
86 | SmallVector<MCFixup, 4> Fixups; | |||
87 | raw_svector_ostream VecOS(Code); | |||
88 | CodeEmitter->encodeInstruction(Inst, VecOS, Fixups, STI); | |||
89 | CurrentShadowSize += Code.size(); | |||
90 | if (CurrentShadowSize >= RequiredShadowSize) | |||
91 | InShadow = false; // The shadow is big enough. Stop counting. | |||
92 | } | |||
93 | } | |||
94 | ||||
95 | void X86AsmPrinter::StackMapShadowTracker::emitShadowPadding( | |||
96 | MCStreamer &OutStreamer, const MCSubtargetInfo &STI) { | |||
97 | if (InShadow && CurrentShadowSize < RequiredShadowSize) { | |||
98 | InShadow = false; | |||
99 | EmitNops(OutStreamer, RequiredShadowSize - CurrentShadowSize, | |||
100 | MF->getSubtarget<X86Subtarget>().is64Bit(), STI); | |||
101 | } | |||
102 | } | |||
103 | ||||
104 | void X86AsmPrinter::EmitAndCountInstruction(MCInst &Inst) { | |||
105 | OutStreamer->EmitInstruction(Inst, getSubtargetInfo(), EnablePrintSchedInfo); | |||
106 | SMShadowTracker.count(Inst, getSubtargetInfo(), CodeEmitter.get()); | |||
107 | } | |||
108 | ||||
109 | X86MCInstLower::X86MCInstLower(const MachineFunction &mf, | |||
110 | X86AsmPrinter &asmprinter) | |||
111 | : Ctx(mf.getContext()), MF(mf), TM(mf.getTarget()), MAI(*TM.getMCAsmInfo()), | |||
112 | AsmPrinter(asmprinter) {} | |||
113 | ||||
114 | MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const { | |||
115 | return MF.getMMI().getObjFileInfo<MachineModuleInfoMachO>(); | |||
116 | } | |||
117 | ||||
118 | ||||
119 | /// GetSymbolFromOperand - Lower an MO_GlobalAddress or MO_ExternalSymbol | |||
120 | /// operand to an MCSymbol. | |||
121 | MCSymbol *X86MCInstLower:: | |||
122 | GetSymbolFromOperand(const MachineOperand &MO) const { | |||
123 | const DataLayout &DL = MF.getDataLayout(); | |||
124 | assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) && "Isn't a symbol reference")(((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) && "Isn't a symbol reference" ) ? static_cast<void> (0) : __assert_fail ("(MO.isGlobal() || MO.isSymbol() || MO.isMBB()) && \"Isn't a symbol reference\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 124, __PRETTY_FUNCTION__)); | |||
125 | ||||
126 | MCSymbol *Sym = nullptr; | |||
127 | SmallString<128> Name; | |||
128 | StringRef Suffix; | |||
129 | ||||
130 | switch (MO.getTargetFlags()) { | |||
131 | case X86II::MO_DLLIMPORT: | |||
132 | // Handle dllimport linkage. | |||
133 | Name += "__imp_"; | |||
134 | break; | |||
135 | case X86II::MO_DARWIN_NONLAZY: | |||
136 | case X86II::MO_DARWIN_NONLAZY_PIC_BASE: | |||
137 | Suffix = "$non_lazy_ptr"; | |||
138 | break; | |||
139 | } | |||
140 | ||||
141 | if (!Suffix.empty()) | |||
142 | Name += DL.getPrivateGlobalPrefix(); | |||
143 | ||||
144 | if (MO.isGlobal()) { | |||
145 | const GlobalValue *GV = MO.getGlobal(); | |||
146 | AsmPrinter.getNameWithPrefix(Name, GV); | |||
147 | } else if (MO.isSymbol()) { | |||
148 | Mangler::getNameWithPrefix(Name, MO.getSymbolName(), DL); | |||
149 | } else if (MO.isMBB()) { | |||
150 | assert(Suffix.empty())((Suffix.empty()) ? static_cast<void> (0) : __assert_fail ("Suffix.empty()", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 150, __PRETTY_FUNCTION__)); | |||
151 | Sym = MO.getMBB()->getSymbol(); | |||
152 | } | |||
153 | ||||
154 | Name += Suffix; | |||
155 | if (!Sym) | |||
156 | Sym = Ctx.getOrCreateSymbol(Name); | |||
157 | ||||
158 | // If the target flags on the operand changes the name of the symbol, do that | |||
159 | // before we return the symbol. | |||
160 | switch (MO.getTargetFlags()) { | |||
161 | default: break; | |||
162 | case X86II::MO_DARWIN_NONLAZY: | |||
163 | case X86II::MO_DARWIN_NONLAZY_PIC_BASE: { | |||
164 | MachineModuleInfoImpl::StubValueTy &StubSym = | |||
165 | getMachOMMI().getGVStubEntry(Sym); | |||
166 | if (!StubSym.getPointer()) { | |||
167 | assert(MO.isGlobal() && "Extern symbol not handled yet")((MO.isGlobal() && "Extern symbol not handled yet") ? static_cast<void> (0) : __assert_fail ("MO.isGlobal() && \"Extern symbol not handled yet\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 167, __PRETTY_FUNCTION__)); | |||
168 | StubSym = | |||
169 | MachineModuleInfoImpl:: | |||
170 | StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()), | |||
171 | !MO.getGlobal()->hasInternalLinkage()); | |||
172 | } | |||
173 | break; | |||
174 | } | |||
175 | } | |||
176 | ||||
177 | return Sym; | |||
178 | } | |||
179 | ||||
180 | MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO, | |||
181 | MCSymbol *Sym) const { | |||
182 | // FIXME: We would like an efficient form for this, so we don't have to do a | |||
183 | // lot of extra uniquing. | |||
184 | const MCExpr *Expr = nullptr; | |||
185 | MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None; | |||
186 | ||||
187 | switch (MO.getTargetFlags()) { | |||
188 | default: llvm_unreachable("Unknown target flag on GV operand")::llvm::llvm_unreachable_internal("Unknown target flag on GV operand" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 188); | |||
189 | case X86II::MO_NO_FLAG: // No flag. | |||
190 | // These affect the name of the symbol, not any suffix. | |||
191 | case X86II::MO_DARWIN_NONLAZY: | |||
192 | case X86II::MO_DLLIMPORT: | |||
193 | break; | |||
194 | ||||
195 | case X86II::MO_TLVP: RefKind = MCSymbolRefExpr::VK_TLVP; break; | |||
196 | case X86II::MO_TLVP_PIC_BASE: | |||
197 | Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx); | |||
198 | // Subtract the pic base. | |||
199 | Expr = MCBinaryExpr::createSub(Expr, | |||
200 | MCSymbolRefExpr::create(MF.getPICBaseSymbol(), | |||
201 | Ctx), | |||
202 | Ctx); | |||
203 | break; | |||
204 | case X86II::MO_SECREL: RefKind = MCSymbolRefExpr::VK_SECREL; break; | |||
205 | case X86II::MO_TLSGD: RefKind = MCSymbolRefExpr::VK_TLSGD; break; | |||
206 | case X86II::MO_TLSLD: RefKind = MCSymbolRefExpr::VK_TLSLD; break; | |||
207 | case X86II::MO_TLSLDM: RefKind = MCSymbolRefExpr::VK_TLSLDM; break; | |||
208 | case X86II::MO_GOTTPOFF: RefKind = MCSymbolRefExpr::VK_GOTTPOFF; break; | |||
209 | case X86II::MO_INDNTPOFF: RefKind = MCSymbolRefExpr::VK_INDNTPOFF; break; | |||
210 | case X86II::MO_TPOFF: RefKind = MCSymbolRefExpr::VK_TPOFF; break; | |||
211 | case X86II::MO_DTPOFF: RefKind = MCSymbolRefExpr::VK_DTPOFF; break; | |||
212 | case X86II::MO_NTPOFF: RefKind = MCSymbolRefExpr::VK_NTPOFF; break; | |||
213 | case X86II::MO_GOTNTPOFF: RefKind = MCSymbolRefExpr::VK_GOTNTPOFF; break; | |||
214 | case X86II::MO_GOTPCREL: RefKind = MCSymbolRefExpr::VK_GOTPCREL; break; | |||
215 | case X86II::MO_GOT: RefKind = MCSymbolRefExpr::VK_GOT; break; | |||
216 | case X86II::MO_GOTOFF: RefKind = MCSymbolRefExpr::VK_GOTOFF; break; | |||
217 | case X86II::MO_PLT: RefKind = MCSymbolRefExpr::VK_PLT; break; | |||
218 | case X86II::MO_ABS8: RefKind = MCSymbolRefExpr::VK_X86_ABS8; break; | |||
219 | case X86II::MO_PIC_BASE_OFFSET: | |||
220 | case X86II::MO_DARWIN_NONLAZY_PIC_BASE: | |||
221 | Expr = MCSymbolRefExpr::create(Sym, Ctx); | |||
222 | // Subtract the pic base. | |||
223 | Expr = MCBinaryExpr::createSub(Expr, | |||
224 | MCSymbolRefExpr::create(MF.getPICBaseSymbol(), Ctx), | |||
225 | Ctx); | |||
226 | if (MO.isJTI()) { | |||
227 | assert(MAI.doesSetDirectiveSuppressReloc())((MAI.doesSetDirectiveSuppressReloc()) ? static_cast<void> (0) : __assert_fail ("MAI.doesSetDirectiveSuppressReloc()", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 227, __PRETTY_FUNCTION__)); | |||
228 | // If .set directive is supported, use it to reduce the number of | |||
229 | // relocations the assembler will generate for differences between | |||
230 | // local labels. This is only safe when the symbols are in the same | |||
231 | // section so we are restricting it to jumptable references. | |||
232 | MCSymbol *Label = Ctx.createTempSymbol(); | |||
233 | AsmPrinter.OutStreamer->EmitAssignment(Label, Expr); | |||
234 | Expr = MCSymbolRefExpr::create(Label, Ctx); | |||
235 | } | |||
236 | break; | |||
237 | } | |||
238 | ||||
239 | if (!Expr) | |||
240 | Expr = MCSymbolRefExpr::create(Sym, RefKind, Ctx); | |||
241 | ||||
242 | if (!MO.isJTI() && !MO.isMBB() && MO.getOffset()) | |||
243 | Expr = MCBinaryExpr::createAdd(Expr, | |||
244 | MCConstantExpr::create(MO.getOffset(), Ctx), | |||
245 | Ctx); | |||
246 | return MCOperand::createExpr(Expr); | |||
247 | } | |||
248 | ||||
249 | ||||
250 | /// \brief Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instruction with | |||
251 | /// a short fixed-register form. | |||
252 | static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) { | |||
253 | unsigned ImmOp = Inst.getNumOperands() - 1; | |||
254 | assert(Inst.getOperand(0).isReg() &&((Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp ).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst .getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 258, __PRETTY_FUNCTION__)) | |||
255 | (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) &&((Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp ).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst .getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 258, __PRETTY_FUNCTION__)) | |||
256 | ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() &&((Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp ).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst .getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 258, __PRETTY_FUNCTION__)) | |||
257 | Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) ||((Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp ).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst .getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 258, __PRETTY_FUNCTION__)) | |||
258 | Inst.getNumOperands() == 2) && "Unexpected instruction!")((Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp ).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst .getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 258, __PRETTY_FUNCTION__)); | |||
259 | ||||
260 | // Check whether the destination register can be fixed. | |||
261 | unsigned Reg = Inst.getOperand(0).getReg(); | |||
262 | if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX) | |||
263 | return; | |||
264 | ||||
265 | // If so, rewrite the instruction. | |||
266 | MCOperand Saved = Inst.getOperand(ImmOp); | |||
267 | Inst = MCInst(); | |||
268 | Inst.setOpcode(Opcode); | |||
269 | Inst.addOperand(Saved); | |||
270 | } | |||
271 | ||||
272 | /// \brief If a movsx instruction has a shorter encoding for the used register | |||
273 | /// simplify the instruction to use it instead. | |||
274 | static void SimplifyMOVSX(MCInst &Inst) { | |||
275 | unsigned NewOpcode = 0; | |||
276 | unsigned Op0 = Inst.getOperand(0).getReg(), Op1 = Inst.getOperand(1).getReg(); | |||
277 | switch (Inst.getOpcode()) { | |||
278 | default: | |||
279 | llvm_unreachable("Unexpected instruction!")::llvm::llvm_unreachable_internal("Unexpected instruction!", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 279); | |||
280 | case X86::MOVSX16rr8: // movsbw %al, %ax --> cbtw | |||
281 | if (Op0 == X86::AX && Op1 == X86::AL) | |||
282 | NewOpcode = X86::CBW; | |||
283 | break; | |||
284 | case X86::MOVSX32rr16: // movswl %ax, %eax --> cwtl | |||
285 | if (Op0 == X86::EAX && Op1 == X86::AX) | |||
286 | NewOpcode = X86::CWDE; | |||
287 | break; | |||
288 | case X86::MOVSX64rr32: // movslq %eax, %rax --> cltq | |||
289 | if (Op0 == X86::RAX && Op1 == X86::EAX) | |||
290 | NewOpcode = X86::CDQE; | |||
291 | break; | |||
292 | } | |||
293 | ||||
294 | if (NewOpcode != 0) { | |||
295 | Inst = MCInst(); | |||
296 | Inst.setOpcode(NewOpcode); | |||
297 | } | |||
298 | } | |||
299 | ||||
300 | /// \brief Simplify things like MOV32rm to MOV32o32a. | |||
301 | static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst, | |||
302 | unsigned Opcode) { | |||
303 | // Don't make these simplifications in 64-bit mode; other assemblers don't | |||
304 | // perform them because they make the code larger. | |||
305 | if (Printer.getSubtarget().is64Bit()) | |||
306 | return; | |||
307 | ||||
308 | bool IsStore = Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg(); | |||
309 | unsigned AddrBase = IsStore; | |||
310 | unsigned RegOp = IsStore ? 0 : 5; | |||
311 | unsigned AddrOp = AddrBase + 3; | |||
312 | assert(Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() &&((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 319, __PRETTY_FUNCTION__)) | |||
313 | Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() &&((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 319, __PRETTY_FUNCTION__)) | |||
314 | Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() &&((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 319, __PRETTY_FUNCTION__)) | |||
315 | Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() &&((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 319, __PRETTY_FUNCTION__)) | |||
316 | Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() &&((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 319, __PRETTY_FUNCTION__)) | |||
317 | (Inst.getOperand(AddrOp).isExpr() ||((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 319, __PRETTY_FUNCTION__)) | |||
318 | Inst.getOperand(AddrOp).isImm()) &&((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 319, __PRETTY_FUNCTION__)) | |||
319 | "Unexpected instruction!")((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 319, __PRETTY_FUNCTION__)); | |||
320 | ||||
321 | // Check whether the destination register can be fixed. | |||
322 | unsigned Reg = Inst.getOperand(RegOp).getReg(); | |||
323 | if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX) | |||
324 | return; | |||
325 | ||||
326 | // Check whether this is an absolute address. | |||
327 | // FIXME: We know TLVP symbol refs aren't, but there should be a better way | |||
328 | // to do this here. | |||
329 | bool Absolute = true; | |||
330 | if (Inst.getOperand(AddrOp).isExpr()) { | |||
331 | const MCExpr *MCE = Inst.getOperand(AddrOp).getExpr(); | |||
332 | if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE)) | |||
333 | if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP) | |||
334 | Absolute = false; | |||
335 | } | |||
336 | ||||
337 | if (Absolute && | |||
338 | (Inst.getOperand(AddrBase + X86::AddrBaseReg).getReg() != 0 || | |||
339 | Inst.getOperand(AddrBase + X86::AddrScaleAmt).getImm() != 1 || | |||
340 | Inst.getOperand(AddrBase + X86::AddrIndexReg).getReg() != 0)) | |||
341 | return; | |||
342 | ||||
343 | // If so, rewrite the instruction. | |||
344 | MCOperand Saved = Inst.getOperand(AddrOp); | |||
345 | MCOperand Seg = Inst.getOperand(AddrBase + X86::AddrSegmentReg); | |||
346 | Inst = MCInst(); | |||
347 | Inst.setOpcode(Opcode); | |||
348 | Inst.addOperand(Saved); | |||
349 | Inst.addOperand(Seg); | |||
350 | } | |||
351 | ||||
352 | static unsigned getRetOpcode(const X86Subtarget &Subtarget) { | |||
353 | return Subtarget.is64Bit() ? X86::RETQ : X86::RETL; | |||
354 | } | |||
355 | ||||
356 | Optional<MCOperand> | |||
357 | X86MCInstLower::LowerMachineOperand(const MachineInstr *MI, | |||
358 | const MachineOperand &MO) const { | |||
359 | switch (MO.getType()) { | |||
360 | default: | |||
361 | MI->print(errs()); | |||
362 | llvm_unreachable("unknown operand type")::llvm::llvm_unreachable_internal("unknown operand type", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 362); | |||
363 | case MachineOperand::MO_Register: | |||
364 | // Ignore all implicit register operands. | |||
365 | if (MO.isImplicit()) | |||
366 | return None; | |||
367 | return MCOperand::createReg(MO.getReg()); | |||
368 | case MachineOperand::MO_Immediate: | |||
369 | return MCOperand::createImm(MO.getImm()); | |||
370 | case MachineOperand::MO_MachineBasicBlock: | |||
371 | case MachineOperand::MO_GlobalAddress: | |||
372 | case MachineOperand::MO_ExternalSymbol: | |||
373 | return LowerSymbolOperand(MO, GetSymbolFromOperand(MO)); | |||
374 | case MachineOperand::MO_MCSymbol: | |||
375 | return LowerSymbolOperand(MO, MO.getMCSymbol()); | |||
376 | case MachineOperand::MO_JumpTableIndex: | |||
377 | return LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex())); | |||
378 | case MachineOperand::MO_ConstantPoolIndex: | |||
379 | return LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex())); | |||
380 | case MachineOperand::MO_BlockAddress: | |||
381 | return LowerSymbolOperand( | |||
382 | MO, AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress())); | |||
383 | case MachineOperand::MO_RegisterMask: | |||
384 | // Ignore call clobbers. | |||
385 | return None; | |||
386 | } | |||
387 | } | |||
388 | ||||
/// Lower \p MI into \p OutMI and then canonicalize the result: rewrite pseudo
/// opcodes (ADD*_DB, ACQUIRE_*/RELEASE_*, TAILJMP*, EH_RETURN, ...) into real
/// instructions and shrink encodings where a smaller form exists.  Rewrites
/// whose replacement opcode may itself be simplifiable jump back to the
/// ReSimplify label for another pass over the switch.
void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
  OutMI.setOpcode(MI->getOpcode());

  // Lower each machine operand; operands with no MC equivalent (e.g. the
  // register mask of call clobbers) are returned as None and dropped.
  for (const MachineOperand &MO : MI->operands())
    if (auto MaybeMCOp = LowerMachineOperand(MI, MO))
      OutMI.addOperand(MaybeMCOp.getValue());

  // Handle a few special cases to eliminate operand modifiers.
ReSimplify:
  switch (OutMI.getOpcode()) {
  case X86::LEA64_32r:
  case X86::LEA64r:
  case X86::LEA16r:
  case X86::LEA32r:
    // LEA should have a segment register, but it must be empty.
    assert(OutMI.getNumOperands() == 1+X86::AddrNumOperands &&((OutMI.getNumOperands() == 1+X86::AddrNumOperands && "Unexpected # of LEA operands") ? static_cast<void> (0 ) : __assert_fail ("OutMI.getNumOperands() == 1+X86::AddrNumOperands && \"Unexpected # of LEA operands\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 405, __PRETTY_FUNCTION__))
           "Unexpected # of LEA operands")((OutMI.getNumOperands() == 1+X86::AddrNumOperands && "Unexpected # of LEA operands") ? static_cast<void> (0 ) : __assert_fail ("OutMI.getNumOperands() == 1+X86::AddrNumOperands && \"Unexpected # of LEA operands\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 405, __PRETTY_FUNCTION__));
    assert(OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 &&((OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 && "LEA has segment specified!") ? static_cast<void> (0) : __assert_fail ("OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 && \"LEA has segment specified!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 407, __PRETTY_FUNCTION__))
           "LEA has segment specified!")((OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 && "LEA has segment specified!") ? static_cast<void> (0) : __assert_fail ("OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 && \"LEA has segment specified!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 407, __PRETTY_FUNCTION__));
    break;

  // Commute operands to get a smaller encoding by using VEX.R instead of VEX.B
  // if one of the registers is extended, but other isn't.
  case X86::VMOVZPQILo2PQIrr:
  case X86::VMOVAPDrr:
  case X86::VMOVAPDYrr:
  case X86::VMOVAPSrr:
  case X86::VMOVAPSYrr:
  case X86::VMOVDQArr:
  case X86::VMOVDQAYrr:
  case X86::VMOVDQUrr:
  case X86::VMOVDQUYrr:
  case X86::VMOVUPDrr:
  case X86::VMOVUPDYrr:
  case X86::VMOVUPSrr:
  case X86::VMOVUPSYrr: {
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(1).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 429);
      case X86::VMOVZPQILo2PQIrr: NewOpc = X86::VMOVPQI2QIrr; break;
      case X86::VMOVAPDrr: NewOpc = X86::VMOVAPDrr_REV; break;
      case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV; break;
      case X86::VMOVAPSrr: NewOpc = X86::VMOVAPSrr_REV; break;
      case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV; break;
      case X86::VMOVDQArr: NewOpc = X86::VMOVDQArr_REV; break;
      case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV; break;
      case X86::VMOVDQUrr: NewOpc = X86::VMOVDQUrr_REV; break;
      case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV; break;
      case X86::VMOVUPDrr: NewOpc = X86::VMOVUPDrr_REV; break;
      case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV; break;
      case X86::VMOVUPSrr: NewOpc = X86::VMOVUPSrr_REV; break;
      case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }
  // Likewise commute the three-operand scalar moves to their _REV forms when
  // that lets the extended register be encoded via VEX.R.
  case X86::VMOVSDrr:
  case X86::VMOVSSrr: {
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(2).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 454);
      case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV; break;
      case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }

  // TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions have register
  // inputs modeled as normal uses instead of implicit uses. As such, truncate
  // off all but the first operand (the callee). FIXME: Change isel.
  case X86::TAILJMPr64:
  case X86::TAILJMPr64_REX:
  case X86::CALL64r:
  case X86::CALL64pcrel32: {
    unsigned Opcode = OutMI.getOpcode();
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    // At the MC level these become a bare RET; drop all pseudo operands.
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
    break;
  }

  case X86::CLEANUPRET: {
    // Replace CLEANUPRET with the appropriate RET.
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
    break;
  }

  case X86::CATCHRET: {
    // Replace CATCHRET with the appropriate RET.
    const X86Subtarget &Subtarget = AsmPrinter.getSubtarget();
    unsigned ReturnReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(Subtarget));
    OutMI.addOperand(MCOperand::createReg(ReturnReg));
    break;
  }

  // TAILJMPd, TAILJMPd64, TailJMPd_cc - Lower to the correct jump instruction.
  { unsigned Opcode;
  case X86::TAILJMPr: Opcode = X86::JMP32r; goto SetTailJmpOpcode;
  case X86::TAILJMPd:
  case X86::TAILJMPd64: Opcode = X86::JMP_1; goto SetTailJmpOpcode;
  case X86::TAILJMPd_CC:
  case X86::TAILJMPd64_CC:
    Opcode = X86::GetCondBranchFromCond(
        static_cast<X86::CondCode>(MI->getOperand(1).getImm()));
    goto SetTailJmpOpcode;

  SetTailJmpOpcode:
    // Rebuild the instruction keeping only the jump-target operand.
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::DEC16r:
  case X86::DEC32r:
  case X86::INC16r:
  case X86::INC32r:
    // If we aren't in 64-bit mode we can use the 1-byte inc/dec instructions.
    if (!AsmPrinter.getSubtarget().is64Bit()) {
      unsigned Opcode;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 529);
      case X86::DEC16r: Opcode = X86::DEC16r_alt; break;
      case X86::DEC32r: Opcode = X86::DEC32r_alt; break;
      case X86::INC16r: Opcode = X86::INC16r_alt; break;
      case X86::INC32r: Opcode = X86::INC32r_alt; break;
      }
      OutMI.setOpcode(Opcode);
    }
    break;

  // These are pseudo-ops for OR to help with the OR->ADD transformation. We do
  // this with an ugly goto in case the resultant OR uses EAX and needs the
  // short form.
  case X86::ADD16rr_DB: OutMI.setOpcode(X86::OR16rr); goto ReSimplify;
  case X86::ADD32rr_DB: OutMI.setOpcode(X86::OR32rr); goto ReSimplify;
  case X86::ADD64rr_DB: OutMI.setOpcode(X86::OR64rr); goto ReSimplify;
  case X86::ADD16ri_DB: OutMI.setOpcode(X86::OR16ri); goto ReSimplify;
  case X86::ADD32ri_DB: OutMI.setOpcode(X86::OR32ri); goto ReSimplify;
  case X86::ADD64ri32_DB: OutMI.setOpcode(X86::OR64ri32); goto ReSimplify;
  case X86::ADD16ri8_DB: OutMI.setOpcode(X86::OR16ri8); goto ReSimplify;
  case X86::ADD32ri8_DB: OutMI.setOpcode(X86::OR32ri8); goto ReSimplify;
  case X86::ADD64ri8_DB: OutMI.setOpcode(X86::OR64ri8); goto ReSimplify;

  // Atomic load and store require a separate pseudo-inst because Acquire
  // implies mayStore and Release implies mayLoad; fix these to regular MOV
  // instructions here
  case X86::ACQUIRE_MOV8rm: OutMI.setOpcode(X86::MOV8rm); goto ReSimplify;
  case X86::ACQUIRE_MOV16rm: OutMI.setOpcode(X86::MOV16rm); goto ReSimplify;
  case X86::ACQUIRE_MOV32rm: OutMI.setOpcode(X86::MOV32rm); goto ReSimplify;
  case X86::ACQUIRE_MOV64rm: OutMI.setOpcode(X86::MOV64rm); goto ReSimplify;
  case X86::RELEASE_MOV8mr: OutMI.setOpcode(X86::MOV8mr); goto ReSimplify;
  case X86::RELEASE_MOV16mr: OutMI.setOpcode(X86::MOV16mr); goto ReSimplify;
  case X86::RELEASE_MOV32mr: OutMI.setOpcode(X86::MOV32mr); goto ReSimplify;
  case X86::RELEASE_MOV64mr: OutMI.setOpcode(X86::MOV64mr); goto ReSimplify;
  case X86::RELEASE_MOV8mi: OutMI.setOpcode(X86::MOV8mi); goto ReSimplify;
  case X86::RELEASE_MOV16mi: OutMI.setOpcode(X86::MOV16mi); goto ReSimplify;
  case X86::RELEASE_MOV32mi: OutMI.setOpcode(X86::MOV32mi); goto ReSimplify;
  case X86::RELEASE_MOV64mi32: OutMI.setOpcode(X86::MOV64mi32); goto ReSimplify;
  case X86::RELEASE_ADD8mi: OutMI.setOpcode(X86::ADD8mi); goto ReSimplify;
  case X86::RELEASE_ADD8mr: OutMI.setOpcode(X86::ADD8mr); goto ReSimplify;
  case X86::RELEASE_ADD32mi: OutMI.setOpcode(X86::ADD32mi); goto ReSimplify;
  case X86::RELEASE_ADD32mr: OutMI.setOpcode(X86::ADD32mr); goto ReSimplify;
  case X86::RELEASE_ADD64mi32: OutMI.setOpcode(X86::ADD64mi32); goto ReSimplify;
  case X86::RELEASE_ADD64mr: OutMI.setOpcode(X86::ADD64mr); goto ReSimplify;
  case X86::RELEASE_AND8mi: OutMI.setOpcode(X86::AND8mi); goto ReSimplify;
  case X86::RELEASE_AND8mr: OutMI.setOpcode(X86::AND8mr); goto ReSimplify;
  case X86::RELEASE_AND32mi: OutMI.setOpcode(X86::AND32mi); goto ReSimplify;
  case X86::RELEASE_AND32mr: OutMI.setOpcode(X86::AND32mr); goto ReSimplify;
  case X86::RELEASE_AND64mi32: OutMI.setOpcode(X86::AND64mi32); goto ReSimplify;
  case X86::RELEASE_AND64mr: OutMI.setOpcode(X86::AND64mr); goto ReSimplify;
  case X86::RELEASE_OR8mi: OutMI.setOpcode(X86::OR8mi); goto ReSimplify;
  case X86::RELEASE_OR8mr: OutMI.setOpcode(X86::OR8mr); goto ReSimplify;
  case X86::RELEASE_OR32mi: OutMI.setOpcode(X86::OR32mi); goto ReSimplify;
  case X86::RELEASE_OR32mr: OutMI.setOpcode(X86::OR32mr); goto ReSimplify;
  case X86::RELEASE_OR64mi32: OutMI.setOpcode(X86::OR64mi32); goto ReSimplify;
  case X86::RELEASE_OR64mr: OutMI.setOpcode(X86::OR64mr); goto ReSimplify;
  case X86::RELEASE_XOR8mi: OutMI.setOpcode(X86::XOR8mi); goto ReSimplify;
  case X86::RELEASE_XOR8mr: OutMI.setOpcode(X86::XOR8mr); goto ReSimplify;
  case X86::RELEASE_XOR32mi: OutMI.setOpcode(X86::XOR32mi); goto ReSimplify;
  case X86::RELEASE_XOR32mr: OutMI.setOpcode(X86::XOR32mr); goto ReSimplify;
  case X86::RELEASE_XOR64mi32: OutMI.setOpcode(X86::XOR64mi32); goto ReSimplify;
  case X86::RELEASE_XOR64mr: OutMI.setOpcode(X86::XOR64mr); goto ReSimplify;
  case X86::RELEASE_INC8m: OutMI.setOpcode(X86::INC8m); goto ReSimplify;
  case X86::RELEASE_INC16m: OutMI.setOpcode(X86::INC16m); goto ReSimplify;
  case X86::RELEASE_INC32m: OutMI.setOpcode(X86::INC32m); goto ReSimplify;
  case X86::RELEASE_INC64m: OutMI.setOpcode(X86::INC64m); goto ReSimplify;
  case X86::RELEASE_DEC8m: OutMI.setOpcode(X86::DEC8m); goto ReSimplify;
  case X86::RELEASE_DEC16m: OutMI.setOpcode(X86::DEC16m); goto ReSimplify;
  case X86::RELEASE_DEC32m: OutMI.setOpcode(X86::DEC32m); goto ReSimplify;
  case X86::RELEASE_DEC64m: OutMI.setOpcode(X86::DEC64m); goto ReSimplify;

  // We don't currently select the correct instruction form for instructions
  // which have a short %eax, etc. form. Handle this by custom lowering, for
  // now.
  //
  // Note, we are currently not handling the following instructions:
  // MOV64ao8, MOV64o8a
  // XCHG16ar, XCHG32ar, XCHG64ar
  case X86::MOV8mr_NOREX:
  case X86::MOV8mr:
  case X86::MOV8rm_NOREX:
  case X86::MOV8rm:
  case X86::MOV16mr:
  case X86::MOV16rm:
  case X86::MOV32mr:
  case X86::MOV32rm: {
    unsigned NewOpc;
    switch (OutMI.getOpcode()) {
    default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 617);
    case X86::MOV8mr_NOREX:
    case X86::MOV8mr: NewOpc = X86::MOV8o32a; break;
    case X86::MOV8rm_NOREX:
    case X86::MOV8rm: NewOpc = X86::MOV8ao32; break;
    case X86::MOV16mr: NewOpc = X86::MOV16o32a; break;
    case X86::MOV16rm: NewOpc = X86::MOV16ao32; break;
    case X86::MOV32mr: NewOpc = X86::MOV32o32a; break;
    case X86::MOV32rm: NewOpc = X86::MOV32ao32; break;
    }
    SimplifyShortMoveForm(AsmPrinter, OutMI, NewOpc);
    break;
  }

  case X86::ADC8ri: case X86::ADC16ri: case X86::ADC32ri: case X86::ADC64ri32:
  case X86::ADD8ri: case X86::ADD16ri: case X86::ADD32ri: case X86::ADD64ri32:
  case X86::AND8ri: case X86::AND16ri: case X86::AND32ri: case X86::AND64ri32:
  case X86::CMP8ri: case X86::CMP16ri: case X86::CMP32ri: case X86::CMP64ri32:
  case X86::OR8ri: case X86::OR16ri: case X86::OR32ri: case X86::OR64ri32:
  case X86::SBB8ri: case X86::SBB16ri: case X86::SBB32ri: case X86::SBB64ri32:
  case X86::SUB8ri: case X86::SUB16ri: case X86::SUB32ri: case X86::SUB64ri32:
  case X86::TEST8ri:case X86::TEST16ri:case X86::TEST32ri:case X86::TEST64ri32:
  case X86::XOR8ri: case X86::XOR16ri: case X86::XOR32ri: case X86::XOR64ri32: {
    unsigned NewOpc;
    switch (OutMI.getOpcode()) {
    default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 642);
    case X86::ADC8ri: NewOpc = X86::ADC8i8; break;
    case X86::ADC16ri: NewOpc = X86::ADC16i16; break;
    case X86::ADC32ri: NewOpc = X86::ADC32i32; break;
    case X86::ADC64ri32: NewOpc = X86::ADC64i32; break;
    case X86::ADD8ri: NewOpc = X86::ADD8i8; break;
    case X86::ADD16ri: NewOpc = X86::ADD16i16; break;
    case X86::ADD32ri: NewOpc = X86::ADD32i32; break;
    case X86::ADD64ri32: NewOpc = X86::ADD64i32; break;
    case X86::AND8ri: NewOpc = X86::AND8i8; break;
    case X86::AND16ri: NewOpc = X86::AND16i16; break;
    case X86::AND32ri: NewOpc = X86::AND32i32; break;
    case X86::AND64ri32: NewOpc = X86::AND64i32; break;
    case X86::CMP8ri: NewOpc = X86::CMP8i8; break;
    case X86::CMP16ri: NewOpc = X86::CMP16i16; break;
    case X86::CMP32ri: NewOpc = X86::CMP32i32; break;
    case X86::CMP64ri32: NewOpc = X86::CMP64i32; break;
    case X86::OR8ri: NewOpc = X86::OR8i8; break;
    case X86::OR16ri: NewOpc = X86::OR16i16; break;
    case X86::OR32ri: NewOpc = X86::OR32i32; break;
    case X86::OR64ri32: NewOpc = X86::OR64i32; break;
    case X86::SBB8ri: NewOpc = X86::SBB8i8; break;
    case X86::SBB16ri: NewOpc = X86::SBB16i16; break;
    case X86::SBB32ri: NewOpc = X86::SBB32i32; break;
    case X86::SBB64ri32: NewOpc = X86::SBB64i32; break;
    case X86::SUB8ri: NewOpc = X86::SUB8i8; break;
    case X86::SUB16ri: NewOpc = X86::SUB16i16; break;
    case X86::SUB32ri: NewOpc = X86::SUB32i32; break;
    case X86::SUB64ri32: NewOpc = X86::SUB64i32; break;
    case X86::TEST8ri: NewOpc = X86::TEST8i8; break;
    case X86::TEST16ri: NewOpc = X86::TEST16i16; break;
    case X86::TEST32ri: NewOpc = X86::TEST32i32; break;
    case X86::TEST64ri32: NewOpc = X86::TEST64i32; break;
    case X86::XOR8ri: NewOpc = X86::XOR8i8; break;
    case X86::XOR16ri: NewOpc = X86::XOR16i16; break;
    case X86::XOR32ri: NewOpc = X86::XOR32i32; break;
    case X86::XOR64ri32: NewOpc = X86::XOR64i32; break;
    }
    SimplifyShortImmForm(OutMI, NewOpc);
    break;
  }

  // Try to shrink some forms of movsx.
  case X86::MOVSX16rr8:
  case X86::MOVSX32rr16:
  case X86::MOVSX64rr32:
    SimplifyMOVSX(OutMI);
    break;
  }
}
692 | ||||
/// Lower the TLS_addr32/64 and TLS_base_addr32/64 pseudos into the canonical
/// "lea <sym>, %reg; call __tls_get_addr" sequence, choosing the relocation
/// variant (TLSGD / TLSLDM / TLSLD) that matches the pseudo's TLS model.
/// NOTE(review): the data16/rex64 prefix padding emitted for TLS_addr64
/// appears to pad the sequence to the fixed length the linker expects when
/// relaxing the general-dynamic model -- confirm against the x86-64 psABI.
void X86AsmPrinter::LowerTlsAddr(X86MCInstLower &MCInstLowering,
                                 const MachineInstr &MI) {

  bool is64Bits = MI.getOpcode() == X86::TLS_addr64 ||
                  MI.getOpcode() == X86::TLS_base_addr64;

  // Only the 64-bit general-dynamic sequence (TLS_addr64) is padded.
  bool needsPadding = MI.getOpcode() == X86::TLS_addr64;

  MCContext &context = OutStreamer->getContext();

  if (needsPadding)
    EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));

  MCSymbolRefExpr::VariantKind SRVK;
  switch (MI.getOpcode()) {
  case X86::TLS_addr32:
  case X86::TLS_addr64:
    SRVK = MCSymbolRefExpr::VK_TLSGD;
    break;
  case X86::TLS_base_addr32:
    SRVK = MCSymbolRefExpr::VK_TLSLDM;
    break;
  case X86::TLS_base_addr64:
    SRVK = MCSymbolRefExpr::VK_TLSLD;
    break;
  default:
    llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 719);
  }

  // Operand 3 of the pseudo is the TLS symbol being addressed.
  MCSymbol *sym = MCInstLowering.GetSymbolFromOperand(MI.getOperand(3));
  const MCSymbolRefExpr *symRef = MCSymbolRefExpr::create(sym, SRVK, context);

  // Build the LEA that materializes the symbol's address: RIP-relative in
  // 64-bit mode; in 32-bit mode EBX serves as the GOT pointer, used as the
  // base register for local-dynamic and as the index register otherwise.
  MCInst LEA;
  if (is64Bits) {
    LEA.setOpcode(X86::LEA64r);
    LEA.addOperand(MCOperand::createReg(X86::RDI)); // dest
    LEA.addOperand(MCOperand::createReg(X86::RIP)); // base
    LEA.addOperand(MCOperand::createImm(1));        // scale
    LEA.addOperand(MCOperand::createReg(0));        // index
    LEA.addOperand(MCOperand::createExpr(symRef));  // disp
    LEA.addOperand(MCOperand::createReg(0));        // seg
  } else if (SRVK == MCSymbolRefExpr::VK_TLSLDM) {
    LEA.setOpcode(X86::LEA32r);
    LEA.addOperand(MCOperand::createReg(X86::EAX)); // dest
    LEA.addOperand(MCOperand::createReg(X86::EBX)); // base
    LEA.addOperand(MCOperand::createImm(1));        // scale
    LEA.addOperand(MCOperand::createReg(0));        // index
    LEA.addOperand(MCOperand::createExpr(symRef));  // disp
    LEA.addOperand(MCOperand::createReg(0));        // seg
  } else {
    LEA.setOpcode(X86::LEA32r);
    LEA.addOperand(MCOperand::createReg(X86::EAX)); // dest
    LEA.addOperand(MCOperand::createReg(0));        // base
    LEA.addOperand(MCOperand::createImm(1));        // scale
    LEA.addOperand(MCOperand::createReg(X86::EBX)); // index
    LEA.addOperand(MCOperand::createExpr(symRef));  // disp
    LEA.addOperand(MCOperand::createReg(0));        // seg
  }
  EmitAndCountInstruction(LEA);

  // Remaining padding prefixes go between the LEA and the call.
  if (needsPadding) {
    EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
    EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
    EmitAndCountInstruction(MCInstBuilder(X86::REX64_PREFIX));
  }

  // 32-bit targets use the triple-underscore helper name.
  StringRef name = is64Bits ? "__tls_get_addr" : "___tls_get_addr";
  MCSymbol *tlsGetAddr = context.getOrCreateSymbol(name);
  const MCSymbolRefExpr *tlsRef =
    MCSymbolRefExpr::create(tlsGetAddr,
                            MCSymbolRefExpr::VK_PLT,
                            context);

  EmitAndCountInstruction(MCInstBuilder(is64Bits ? X86::CALL64pcrel32
                                                 : X86::CALLpcrel32)
                            .addExpr(tlsRef));
}
770 | ||||
771 | /// \brief Emit the largest nop instruction smaller than or equal to \p NumBytes | |||
772 | /// bytes. Return the size of nop emitted. | |||
773 | static unsigned EmitNop(MCStreamer &OS, unsigned NumBytes, bool Is64Bit, | |||
774 | const MCSubtargetInfo &STI) { | |||
775 | // This works only for 64bit. For 32bit we have to do additional checking if | |||
776 | // the CPU supports multi-byte nops. | |||
777 | assert(Is64Bit && "EmitNops only supports X86-64")((Is64Bit && "EmitNops only supports X86-64") ? static_cast <void> (0) : __assert_fail ("Is64Bit && \"EmitNops only supports X86-64\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 777, __PRETTY_FUNCTION__)); | |||
778 | ||||
779 | unsigned NopSize; | |||
780 | unsigned Opc, BaseReg, ScaleVal, IndexReg, Displacement, SegmentReg; | |||
781 | Opc = IndexReg = Displacement = SegmentReg = 0; | |||
782 | BaseReg = X86::RAX; | |||
783 | ScaleVal = 1; | |||
784 | switch (NumBytes) { | |||
785 | case 0: llvm_unreachable("Zero nops?")::llvm::llvm_unreachable_internal("Zero nops?", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 785); break; | |||
786 | case 1: NopSize = 1; Opc = X86::NOOP; break; | |||
787 | case 2: NopSize = 2; Opc = X86::XCHG16ar; break; | |||
788 | case 3: NopSize = 3; Opc = X86::NOOPL; break; | |||
789 | case 4: NopSize = 4; Opc = X86::NOOPL; Displacement = 8; break; | |||
790 | case 5: NopSize = 5; Opc = X86::NOOPL; Displacement = 8; | |||
791 | IndexReg = X86::RAX; break; | |||
792 | case 6: NopSize = 6; Opc = X86::NOOPW; Displacement = 8; | |||
793 | IndexReg = X86::RAX; break; | |||
794 | case 7: NopSize = 7; Opc = X86::NOOPL; Displacement = 512; break; | |||
795 | case 8: NopSize = 8; Opc = X86::NOOPL; Displacement = 512; | |||
796 | IndexReg = X86::RAX; break; | |||
797 | case 9: NopSize = 9; Opc = X86::NOOPW; Displacement = 512; | |||
798 | IndexReg = X86::RAX; break; | |||
799 | default: NopSize = 10; Opc = X86::NOOPW; Displacement = 512; | |||
800 | IndexReg = X86::RAX; SegmentReg = X86::CS; break; | |||
801 | } | |||
802 | ||||
803 | unsigned NumPrefixes = std::min(NumBytes - NopSize, 5U); | |||
804 | NopSize += NumPrefixes; | |||
805 | for (unsigned i = 0; i != NumPrefixes; ++i) | |||
806 | OS.EmitBytes("\x66"); | |||
807 | ||||
808 | switch (Opc) { | |||
809 | default: | |||
810 | llvm_unreachable("Unexpected opcode")::llvm::llvm_unreachable_internal("Unexpected opcode", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 810); | |||
811 | break; | |||
812 | case X86::NOOP: | |||
813 | OS.EmitInstruction(MCInstBuilder(Opc), STI); | |||
814 | break; | |||
815 | case X86::XCHG16ar: | |||
816 | OS.EmitInstruction(MCInstBuilder(Opc).addReg(X86::AX), STI); | |||
817 | break; | |||
818 | case X86::NOOPL: | |||
819 | case X86::NOOPW: | |||
820 | OS.EmitInstruction(MCInstBuilder(Opc) | |||
821 | .addReg(BaseReg) | |||
822 | .addImm(ScaleVal) | |||
823 | .addReg(IndexReg) | |||
824 | .addImm(Displacement) | |||
825 | .addReg(SegmentReg), | |||
826 | STI); | |||
827 | break; | |||
828 | } | |||
829 | assert(NopSize <= NumBytes && "We overemitted?")((NopSize <= NumBytes && "We overemitted?") ? static_cast <void> (0) : __assert_fail ("NopSize <= NumBytes && \"We overemitted?\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 829, __PRETTY_FUNCTION__)); | |||
830 | return NopSize; | |||
831 | } | |||
832 | ||||
833 | /// \brief Emit the optimal amount of multi-byte nops on X86. | |||
834 | static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit, | |||
835 | const MCSubtargetInfo &STI) { | |||
836 | unsigned NopsToEmit = NumBytes; | |||
837 | (void)NopsToEmit; | |||
838 | while (NumBytes) { | |||
839 | NumBytes -= EmitNop(OS, NumBytes, Is64Bit, STI); | |||
840 | assert(NopsToEmit >= NumBytes && "Emitted more than I asked for!")((NopsToEmit >= NumBytes && "Emitted more than I asked for!" ) ? static_cast<void> (0) : __assert_fail ("NopsToEmit >= NumBytes && \"Emitted more than I asked for!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 840, __PRETTY_FUNCTION__)); | |||
841 | } | |||
842 | } | |||
843 | ||||
/// Lower a STATEPOINT pseudo: if the statepoint requested patchable nop
/// bytes, emit exactly that many nops instead of a call; otherwise emit the
/// call to the statepoint target.  In both cases the statepoint is recorded
/// in the stackmap section.
void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI,
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Statepoint currently only supports X86-64")((Subtarget->is64Bit() && "Statepoint currently only supports X86-64" ) ? static_cast<void> (0) : __assert_fail ("Subtarget->is64Bit() && \"Statepoint currently only supports X86-64\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 846, __PRETTY_FUNCTION__));

  StatepointOpers SOpers(&MI);
  if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
    // A non-zero patch-byte count replaces the call with that many bytes of
    // nop padding, to be patched at runtime.
    EmitNops(*OutStreamer, PatchBytes, Subtarget->is64Bit(),
             getSubtargetInfo());
  } else {
    // Lower call target and choose correct opcode
    const MachineOperand &CallTarget = SOpers.getCallTarget();
    MCOperand CallTargetMCOp;
    unsigned CallOpcode;
    switch (CallTarget.getType()) {
    case MachineOperand::MO_GlobalAddress:
    case MachineOperand::MO_ExternalSymbol:
      CallTargetMCOp = MCIL.LowerSymbolOperand(
          CallTarget, MCIL.GetSymbolFromOperand(CallTarget));
      CallOpcode = X86::CALL64pcrel32;
      // Currently, we only support relative addressing with statepoints.
      // Otherwise, we'll need a scratch register to hold the target
      // address.  You'll fail asserts during load & relocation if this
      // symbol is to far away. (TODO: support non-relative addressing)
      break;
    case MachineOperand::MO_Immediate:
      CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
      CallOpcode = X86::CALL64pcrel32;
      // Currently, we only support relative addressing with statepoints.
      // Otherwise, we'll need a scratch register to hold the target
      // immediate.  You'll fail asserts during load & relocation if this
      // address is to far away. (TODO: support non-relative addressing)
      break;
    case MachineOperand::MO_Register:
      CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
      CallOpcode = X86::CALL64r;
      break;
    default:
      llvm_unreachable("Unsupported operand type in statepoint call target")::llvm::llvm_unreachable_internal("Unsupported operand type in statepoint call target" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 881);
      break;
    }

    // Emit call
    MCInst CallInst;
    CallInst.setOpcode(CallOpcode);
    CallInst.addOperand(CallTargetMCOp);
    OutStreamer->EmitInstruction(CallInst, getSubtargetInfo());
  }

  // Record our statepoint node in the same section used by STACKMAP
  // and PATCHPOINT
  SM.recordStatepoint(MI);
}
896 | ||||
897 | void X86AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI, | |||
898 | X86MCInstLower &MCIL) { | |||
899 | // FAULTING_LOAD_OP <def>, <faltinf type>, <MBB handler>, | |||
900 | // <opcode>, <operands> | |||
901 | ||||
902 | unsigned DefRegister = FaultingMI.getOperand(0).getReg(); | |||
903 | FaultMaps::FaultKind FK = | |||
904 | static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm()); | |||
905 | MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol(); | |||
906 | unsigned Opcode = FaultingMI.getOperand(3).getImm(); | |||
907 | unsigned OperandsBeginIdx = 4; | |||
908 | ||||
909 | assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!")((FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!" ) ? static_cast<void> (0) : __assert_fail ("FK < FaultMaps::FaultKindMax && \"Invalid Faulting Kind!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 909, __PRETTY_FUNCTION__)); | |||
910 | FM.recordFaultingOp(FK, HandlerLabel); | |||
911 | ||||
912 | MCInst MI; | |||
913 | MI.setOpcode(Opcode); | |||
914 | ||||
915 | if (DefRegister != X86::NoRegister) | |||
916 | MI.addOperand(MCOperand::createReg(DefRegister)); | |||
917 | ||||
918 | for (auto I = FaultingMI.operands_begin() + OperandsBeginIdx, | |||
919 | E = FaultingMI.operands_end(); | |||
920 | I != E; ++I) | |||
921 | if (auto MaybeOperand = MCIL.LowerMachineOperand(&FaultingMI, *I)) | |||
922 | MI.addOperand(MaybeOperand.getValue()); | |||
923 | ||||
924 | OutStreamer->EmitInstruction(MI, getSubtargetInfo()); | |||
925 | } | |||
926 | ||||
927 | void X86AsmPrinter::LowerFENTRY_CALL(const MachineInstr &MI, | |||
928 | X86MCInstLower &MCIL) { | |||
929 | bool Is64Bits = Subtarget->is64Bit(); | |||
930 | MCContext &Ctx = OutStreamer->getContext(); | |||
931 | MCSymbol *fentry = Ctx.getOrCreateSymbol("__fentry__"); | |||
932 | const MCSymbolRefExpr *Op = | |||
933 | MCSymbolRefExpr::create(fentry, MCSymbolRefExpr::VK_None, Ctx); | |||
934 | ||||
935 | EmitAndCountInstruction( | |||
936 | MCInstBuilder(Is64Bits ? X86::CALL64pcrel32 : X86::CALLpcrel32) | |||
937 | .addExpr(Op)); | |||
938 | } | |||
939 | ||||
/// Lower a PATCHABLE_OP pseudo: emit the wrapped instruction, guaranteeing
/// its encoding (plus any preceding nop) occupies at least MinSize bytes so
/// the site can be patched at runtime.
void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
                                      X86MCInstLower &MCIL) {
  // PATCHABLE_OP minsize, opcode, operands

  unsigned MinSize = MI.getOperand(0).getImm();
  unsigned Opcode = MI.getOperand(1).getImm();

  MCInst MCI;
  MCI.setOpcode(Opcode);
  for (auto &MO : make_range(MI.operands_begin() + 2, MI.operands_end()))
    if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
      MCI.addOperand(MaybeOperand.getValue());

  // Encode the instruction into a scratch buffer just to learn how many
  // bytes it will occupy.
  SmallString<256> Code;
  SmallVector<MCFixup, 4> Fixups;
  raw_svector_ostream VecOS(Code);
  CodeEmitter->encodeInstruction(MCI, VecOS, Fixups, getSubtargetInfo());

  if (Code.size() < MinSize) {
    if (MinSize == 2 && Opcode == X86::PUSH64r) {
      // This is an optimization that lets us get away without emitting a nop in
      // many cases.
      //
      // NB! In some cases the encoding for PUSH64r (e.g. PUSH64r %R9) takes two
      // bytes too, so the check on MinSize is important.
      MCI.setOpcode(X86::PUSH64rmr);
    } else {
      // Otherwise emit exactly MinSize bytes of nop ahead of the instruction.
      unsigned NopSize = EmitNop(*OutStreamer, MinSize, Subtarget->is64Bit(),
                                 getSubtargetInfo());
      assert(NopSize == MinSize && "Could not implement MinSize!")((NopSize == MinSize && "Could not implement MinSize!" ) ? static_cast<void> (0) : __assert_fail ("NopSize == MinSize && \"Could not implement MinSize!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 969, __PRETTY_FUNCTION__));
      (void) NopSize;
    }
  }

  OutStreamer->EmitInstruction(MCI, getSubtargetInfo());
}
976 | ||||
977 | // Lower a stackmap of the form: | |||
978 | // <id>, <shadowBytes>, ... | |||
979 | void X86AsmPrinter::LowerSTACKMAP(const MachineInstr &MI) { | |||
980 | SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo()); | |||
981 | SM.recordStackMap(MI); | |||
982 | unsigned NumShadowBytes = MI.getOperand(1).getImm(); | |||
983 | SMShadowTracker.reset(NumShadowBytes); | |||
984 | } | |||
985 | ||||
// Lower a patchpoint of the form:
// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
//
// Emits an optional materialize-and-call sequence for the call target,
// then pads with nops up to the requested patchable size.
void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Patchpoint currently only supports X86-64")((Subtarget->is64Bit() && "Patchpoint currently only supports X86-64" ) ? static_cast<void> (0) : __assert_fail ("Subtarget->is64Bit() && \"Patchpoint currently only supports X86-64\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 990, __PRETTY_FUNCTION__));

  // Flush any pending call-shadow padding before recording the location.
  SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());

  SM.recordPatchPoint(MI);

  PatchPointOpers opers(&MI);
  unsigned ScratchIdx = opers.getNextScratchIdx();
  // Number of bytes actually emitted for the call sequence; stays 0 when the
  // target is a null immediate and no call is emitted.
  unsigned EncodedBytes = 0;
  const MachineOperand &CalleeMO = opers.getCallTarget();

  // Check for null target. If target is non-null (i.e. is non-zero or is
  // symbolic) then emit a call.
  if (!(CalleeMO.isImm() && !CalleeMO.getImm())) {
    // NOTE(review): on the MO_Immediate path the inner `if` can skip the
    // assignment; presumably the outer guard makes a zero immediate
    // unreachable here, leaving CalleeMCOp default-constructed (invalid)
    // only on paths that cannot occur — confirm against the analyzer report.
    MCOperand CalleeMCOp;
    switch (CalleeMO.getType()) {
    default:
      /// FIXME: Add a verifier check for bad callee types.
      llvm_unreachable("Unrecognized callee operand type.")::llvm::llvm_unreachable_internal("Unrecognized callee operand type." , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1008);
    case MachineOperand::MO_Immediate:
      if (CalleeMO.getImm())
        CalleeMCOp = MCOperand::createImm(CalleeMO.getImm());
      break;
    case MachineOperand::MO_ExternalSymbol:
    case MachineOperand::MO_GlobalAddress:
      CalleeMCOp =
        MCIL.LowerSymbolOperand(CalleeMO,
                                MCIL.GetSymbolFromOperand(CalleeMO));
      break;
    }

    // Emit MOV to materialize the target address and the CALL to target.
    // This is encoded with 12-13 bytes, depending on which register is used:
    // extended (R8-R15) registers need an extra REX prefix byte.
    unsigned ScratchReg = MI.getOperand(ScratchIdx).getReg();
    if (X86II::isX86_64ExtendedReg(ScratchReg))
      EncodedBytes = 13;
    else
      EncodedBytes = 12;

    EmitAndCountInstruction(
        MCInstBuilder(X86::MOV64ri).addReg(ScratchReg).addOperand(CalleeMCOp));
    EmitAndCountInstruction(MCInstBuilder(X86::CALL64r).addReg(ScratchReg));
  }

  // Emit padding: fill the remainder of the requested patchable region
  // with nops so the total size is exactly NumBytes.
  unsigned NumBytes = opers.getNumPatchBytes();
  assert(NumBytes >= EncodedBytes &&((NumBytes >= EncodedBytes && "Patchpoint can't request size less than the length of a call." ) ? static_cast<void> (0) : __assert_fail ("NumBytes >= EncodedBytes && \"Patchpoint can't request size less than the length of a call.\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1037, __PRETTY_FUNCTION__))
         "Patchpoint can't request size less than the length of a call.")((NumBytes >= EncodedBytes && "Patchpoint can't request size less than the length of a call." ) ? static_cast<void> (0) : __assert_fail ("NumBytes >= EncodedBytes && \"Patchpoint can't request size less than the length of a call.\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1037, __PRETTY_FUNCTION__));

  EmitNops(*OutStreamer, NumBytes - EncodedBytes, Subtarget->is64Bit(),
           getSubtargetInfo());
}
1042 | ||||
1043 | void X86AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, | |||
1044 | X86MCInstLower &MCIL) { | |||
1045 | assert(Subtarget->is64Bit() && "XRay custom events only suports X86-64")((Subtarget->is64Bit() && "XRay custom events only suports X86-64" ) ? static_cast<void> (0) : __assert_fail ("Subtarget->is64Bit() && \"XRay custom events only suports X86-64\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1045, __PRETTY_FUNCTION__)); | |||
1046 | ||||
1047 | // We want to emit the following pattern, which follows the x86 calling | |||
1048 | // convention to prepare for the trampoline call to be patched in. | |||
1049 | // | |||
1050 | // <args placement according SysV64 calling convention> | |||
1051 | // .p2align 1, ... | |||
1052 | // .Lxray_event_sled_N: | |||
1053 | // jmp +N // jump across the call instruction | |||
1054 | // callq __xray_CustomEvent // force relocation to symbol | |||
1055 | // <args cleanup, jump to here> | |||
1056 | // | |||
1057 | // The relative jump needs to jump forward 24 bytes: | |||
1058 | // 10 (args) + 5 (nops) + 9 (cleanup) | |||
1059 | // | |||
1060 | // After patching, it would look something like: | |||
1061 | // | |||
1062 | // nopw (2-byte nop) | |||
1063 | // callq __xrayCustomEvent // already lowered | |||
1064 | // | |||
1065 | // --- | |||
1066 | // First we emit the label and the jump. | |||
1067 | auto CurSled = OutContext.createTempSymbol("xray_event_sled_", true); | |||
1068 | OutStreamer->AddComment("# XRay Custom Event Log"); | |||
1069 | OutStreamer->EmitCodeAlignment(2); | |||
1070 | OutStreamer->EmitLabel(CurSled); | |||
1071 | ||||
1072 | // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as | |||
1073 | // an operand (computed as an offset from the jmp instruction). | |||
1074 | // FIXME: Find another less hacky way do force the relative jump. | |||
1075 | OutStreamer->EmitBytes("\xeb\x14"); | |||
1076 | ||||
1077 | // The default C calling convention will place two arguments into %rcx and | |||
1078 | // %rdx -- so we only work with those. | |||
1079 | unsigned UsedRegs[] = {X86::RDI, X86::RSI, X86::RAX}; | |||
1080 | ||||
1081 | // Because we will use %rax, we preserve that across the call. | |||
1082 | EmitAndCountInstruction(MCInstBuilder(X86::PUSH64r).addReg(X86::RAX)); | |||
1083 | ||||
1084 | // Then we put the operands in the %rdi and %rsi registers. | |||
1085 | for (unsigned I = 0; I < MI.getNumOperands(); ++I) | |||
| ||||
1086 | if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I))) { | |||
1087 | if (Op->isImm()) | |||
1088 | EmitAndCountInstruction(MCInstBuilder(X86::MOV64ri) | |||
| ||||
1089 | .addReg(UsedRegs[I]) | |||
1090 | .addImm(Op->getImm())); | |||
1091 | else if (Op->isReg()) { | |||
1092 | if (Op->getReg() != UsedRegs[I]) | |||
1093 | EmitAndCountInstruction(MCInstBuilder(X86::MOV64rr) | |||
1094 | .addReg(UsedRegs[I]) | |||
1095 | .addReg(Op->getReg())); | |||
1096 | else | |||
1097 | EmitNops(*OutStreamer, 3, Subtarget->is64Bit(), getSubtargetInfo()); | |||
1098 | } | |||
1099 | } | |||
1100 | ||||
1101 | // We emit a hard dependency on the __xray_CustomEvent symbol, which is the | |||
1102 | // name of the trampoline to be implemented by the XRay runtime. We put this | |||
1103 | // explicitly in the %rax register. | |||
1104 | auto TSym = OutContext.getOrCreateSymbol("__xray_CustomEvent"); | |||
1105 | MachineOperand TOp = MachineOperand::CreateMCSymbol(TSym); | |||
1106 | EmitAndCountInstruction(MCInstBuilder(X86::MOV64ri) | |||
1107 | .addReg(X86::RAX) | |||
1108 | .addOperand(MCIL.LowerSymbolOperand(TOp, TSym))); | |||
1109 | ||||
1110 | // Emit the call instruction. | |||
1111 | EmitAndCountInstruction(MCInstBuilder(X86::CALL64r).addReg(X86::RAX)); | |||
1112 | ||||
1113 | // Restore caller-saved and used registers. | |||
1114 | OutStreamer->AddComment("xray custom event end."); | |||
1115 | EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(X86::RAX)); | |||
1116 | ||||
1117 | recordSled(CurSled, MI, SledKind::CUSTOM_EVENT); | |||
1118 | } | |||
1119 | ||||
1120 | void X86AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI, | |||
1121 | X86MCInstLower &MCIL) { | |||
1122 | // We want to emit the following pattern: | |||
1123 | // | |||
1124 | // .p2align 1, ... | |||
1125 | // .Lxray_sled_N: | |||
1126 | // jmp .tmpN | |||
1127 | // # 9 bytes worth of noops | |||
1128 | // .tmpN | |||
1129 | // | |||
1130 | // We need the 9 bytes because at runtime, we'd be patching over the full 11 | |||
1131 | // bytes with the following pattern: | |||
1132 | // | |||
1133 | // mov %r10, <function id, 32-bit> // 6 bytes | |||
1134 | // call <relative offset, 32-bits> // 5 bytes | |||
1135 | // | |||
1136 | auto CurSled = OutContext.createTempSymbol("xray_sled_", true); | |||
1137 | OutStreamer->EmitCodeAlignment(2); | |||
1138 | OutStreamer->EmitLabel(CurSled); | |||
1139 | auto Target = OutContext.createTempSymbol(); | |||
1140 | ||||
1141 | // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as | |||
1142 | // an operand (computed as an offset from the jmp instruction). | |||
1143 | // FIXME: Find another less hacky way do force the relative jump. | |||
1144 | OutStreamer->EmitBytes("\xeb\x09"); | |||
1145 | EmitNops(*OutStreamer, 9, Subtarget->is64Bit(), getSubtargetInfo()); | |||
1146 | OutStreamer->EmitLabel(Target); | |||
1147 | recordSled(CurSled, MI, SledKind::FUNCTION_ENTER); | |||
1148 | } | |||
1149 | ||||
1150 | void X86AsmPrinter::LowerPATCHABLE_RET(const MachineInstr &MI, | |||
1151 | X86MCInstLower &MCIL) { | |||
1152 | // Since PATCHABLE_RET takes the opcode of the return statement as an | |||
1153 | // argument, we use that to emit the correct form of the RET that we want. | |||
1154 | // i.e. when we see this: | |||
1155 | // | |||
1156 | // PATCHABLE_RET X86::RET ... | |||
1157 | // | |||
1158 | // We should emit the RET followed by sleds. | |||
1159 | // | |||
1160 | // .p2align 1, ... | |||
1161 | // .Lxray_sled_N: | |||
1162 | // ret # or equivalent instruction | |||
1163 | // # 10 bytes worth of noops | |||
1164 | // | |||
1165 | // This just makes sure that the alignment for the next instruction is 2. | |||
1166 | auto CurSled = OutContext.createTempSymbol("xray_sled_", true); | |||
1167 | OutStreamer->EmitCodeAlignment(2); | |||
1168 | OutStreamer->EmitLabel(CurSled); | |||
1169 | unsigned OpCode = MI.getOperand(0).getImm(); | |||
1170 | MCInst Ret; | |||
1171 | Ret.setOpcode(OpCode); | |||
1172 | for (auto &MO : make_range(MI.operands_begin() + 1, MI.operands_end())) | |||
1173 | if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO)) | |||
1174 | Ret.addOperand(MaybeOperand.getValue()); | |||
1175 | OutStreamer->EmitInstruction(Ret, getSubtargetInfo()); | |||
1176 | EmitNops(*OutStreamer, 10, Subtarget->is64Bit(), getSubtargetInfo()); | |||
1177 | recordSled(CurSled, MI, SledKind::FUNCTION_EXIT); | |||
1178 | } | |||
1179 | ||||
1180 | void X86AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI, X86MCInstLower &MCIL) { | |||
1181 | // Like PATCHABLE_RET, we have the actual instruction in the operands to this | |||
1182 | // instruction so we lower that particular instruction and its operands. | |||
1183 | // Unlike PATCHABLE_RET though, we put the sled before the JMP, much like how | |||
1184 | // we do it for PATCHABLE_FUNCTION_ENTER. The sled should be very similar to | |||
1185 | // the PATCHABLE_FUNCTION_ENTER case, followed by the lowering of the actual | |||
1186 | // tail call much like how we have it in PATCHABLE_RET. | |||
1187 | auto CurSled = OutContext.createTempSymbol("xray_sled_", true); | |||
1188 | OutStreamer->EmitCodeAlignment(2); | |||
1189 | OutStreamer->EmitLabel(CurSled); | |||
1190 | auto Target = OutContext.createTempSymbol(); | |||
1191 | ||||
1192 | // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as | |||
1193 | // an operand (computed as an offset from the jmp instruction). | |||
1194 | // FIXME: Find another less hacky way do force the relative jump. | |||
1195 | OutStreamer->EmitBytes("\xeb\x09"); | |||
1196 | EmitNops(*OutStreamer, 9, Subtarget->is64Bit(), getSubtargetInfo()); | |||
1197 | OutStreamer->EmitLabel(Target); | |||
1198 | recordSled(CurSled, MI, SledKind::TAIL_CALL); | |||
1199 | ||||
1200 | unsigned OpCode = MI.getOperand(0).getImm(); | |||
1201 | MCInst TC; | |||
1202 | TC.setOpcode(OpCode); | |||
1203 | ||||
1204 | // Before emitting the instruction, add a comment to indicate that this is | |||
1205 | // indeed a tail call. | |||
1206 | OutStreamer->AddComment("TAILCALL"); | |||
1207 | for (auto &MO : make_range(MI.operands_begin() + 1, MI.operands_end())) | |||
1208 | if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO)) | |||
1209 | TC.addOperand(MaybeOperand.getValue()); | |||
1210 | OutStreamer->EmitInstruction(TC, getSubtargetInfo()); | |||
1211 | } | |||
1212 | ||||
1213 | // Returns instruction preceding MBBI in MachineFunction. | |||
1214 | // If MBBI is the first instruction of the first basic block, returns null. | |||
1215 | static MachineBasicBlock::const_iterator | |||
1216 | PrevCrossBBInst(MachineBasicBlock::const_iterator MBBI) { | |||
1217 | const MachineBasicBlock *MBB = MBBI->getParent(); | |||
1218 | while (MBBI == MBB->begin()) { | |||
1219 | if (MBB == &MBB->getParent()->front()) | |||
1220 | return MachineBasicBlock::const_iterator(); | |||
1221 | MBB = MBB->getPrevNode(); | |||
1222 | MBBI = MBB->end(); | |||
1223 | } | |||
1224 | return --MBBI; | |||
1225 | } | |||
1226 | ||||
1227 | static const Constant *getConstantFromPool(const MachineInstr &MI, | |||
1228 | const MachineOperand &Op) { | |||
1229 | if (!Op.isCPI()) | |||
1230 | return nullptr; | |||
1231 | ||||
1232 | ArrayRef<MachineConstantPoolEntry> Constants = | |||
1233 | MI.getParent()->getParent()->getConstantPool()->getConstants(); | |||
1234 | const MachineConstantPoolEntry &ConstantEntry = | |||
1235 | Constants[Op.getIndex()]; | |||
1236 | ||||
1237 | // Bail if this is a machine constant pool entry, we won't be able to dig out | |||
1238 | // anything useful. | |||
1239 | if (ConstantEntry.isMachineConstantPoolEntry()) | |||
1240 | return nullptr; | |||
1241 | ||||
1242 | auto *C = dyn_cast<Constant>(ConstantEntry.Val.ConstVal); | |||
1243 | assert((!C || ConstantEntry.getType() == C->getType()) &&(((!C || ConstantEntry.getType() == C->getType()) && "Expected a constant of the same type!") ? static_cast<void > (0) : __assert_fail ("(!C || ConstantEntry.getType() == C->getType()) && \"Expected a constant of the same type!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1244, __PRETTY_FUNCTION__)) | |||
1244 | "Expected a constant of the same type!")(((!C || ConstantEntry.getType() == C->getType()) && "Expected a constant of the same type!") ? static_cast<void > (0) : __assert_fail ("(!C || ConstantEntry.getType() == C->getType()) && \"Expected a constant of the same type!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1244, __PRETTY_FUNCTION__)); | |||
1245 | return C; | |||
1246 | } | |||
1247 | ||||
// Build a human-readable asm comment describing a shuffle, e.g.
//   "xmm0 = xmm1[0,1],xmm2[2,3]" — destination, optional AVX-512 write
// mask, then the span of source elements selected by Mask.
// SrcOp1Idx/SrcOp2Idx are MI operand indices of the two sources (equal for
// single-source shuffles); Mask uses SM_SentinelZero/SM_SentinelUndef for
// zeroed/undef lanes.
static std::string getShuffleComment(const MachineInstr *MI,
                                     unsigned SrcOp1Idx,
                                     unsigned SrcOp2Idx,
                                     ArrayRef<int> Mask) {
  std::string Comment;

  // Compute the name for a register. This is really goofy because we have
  // multiple instruction printers that could (in theory) use different
  // names. Fortunately most people use the ATT style (outside of Windows)
  // and they actually agree on register naming here. Ultimately, this is
  // a comment, and so its OK if it isn't perfect.
  auto GetRegisterName = [](unsigned RegNum) -> StringRef {
    return X86ATTInstPrinter::getRegisterName(RegNum);
  };

  const MachineOperand &DstOp = MI->getOperand(0);
  const MachineOperand &SrcOp1 = MI->getOperand(SrcOp1Idx);
  const MachineOperand &SrcOp2 = MI->getOperand(SrcOp2Idx);

  // Non-register operands (loads from the constant pool etc.) print as "mem".
  StringRef DstName = DstOp.isReg() ? GetRegisterName(DstOp.getReg()) : "mem";
  StringRef Src1Name =
      SrcOp1.isReg() ? GetRegisterName(SrcOp1.getReg()) : "mem";
  StringRef Src2Name =
      SrcOp2.isReg() ? GetRegisterName(SrcOp2.getReg()) : "mem";

  // One source operand, fix the mask to print all elements in one span:
  // fold indices that refer to "the second source" back into [0, e).
  SmallVector<int, 8> ShuffleMask(Mask.begin(), Mask.end());
  if (Src1Name == Src2Name)
    for (int i = 0, e = ShuffleMask.size(); i != e; ++i)
      if (ShuffleMask[i] >= e)
        ShuffleMask[i] -= e;

  raw_string_ostream CS(Comment);
  CS << DstName;

  // Handle AVX512 MASK/MASXZ write mask comments.
  // MASK: zmmX {%kY}
  // MASKZ: zmmX {%kY} {z}
  if (SrcOp1Idx > 1) {
    assert((SrcOp1Idx == 2 || SrcOp1Idx == 3) && "Unexpected writemask")(((SrcOp1Idx == 2 || SrcOp1Idx == 3) && "Unexpected writemask" ) ? static_cast<void> (0) : __assert_fail ("(SrcOp1Idx == 2 || SrcOp1Idx == 3) && \"Unexpected writemask\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1287, __PRETTY_FUNCTION__));

    // The write-mask register sits immediately before the first source.
    const MachineOperand &WriteMaskOp = MI->getOperand(SrcOp1Idx - 1);
    if (WriteMaskOp.isReg()) {
      CS << " {%" << GetRegisterName(WriteMaskOp.getReg()) << "}";

      // SrcOp1Idx == 2 means there is no tied pass-through source, i.e. the
      // zeroing (MASKZ) form.
      if (SrcOp1Idx == 2) {
        CS << " {z}";
      }
    }
  }

  CS << " = ";

  for (int i = 0, e = ShuffleMask.size(); i != e; ++i) {
    if (i != 0)
      CS << ",";
    if (ShuffleMask[i] == SM_SentinelZero) {
      CS << "zero";
      continue;
    }

    // Otherwise, it must come from src1 or src2. Print the span of elements
    // that comes from this src.
    bool isSrc1 = ShuffleMask[i] < (int)e;
    CS << (isSrc1 ? Src1Name : Src2Name) << '[';

    // Consume a run of consecutive mask entries that all come from the same
    // source, printing them as a comma-separated list inside one bracket.
    bool IsFirst = true;
    while (i != e && ShuffleMask[i] != SM_SentinelZero &&
           (ShuffleMask[i] < (int)e) == isSrc1) {
      if (!IsFirst)
        CS << ',';
      else
        IsFirst = false;
      if (ShuffleMask[i] == SM_SentinelUndef)
        CS << "u";
      else
        CS << ShuffleMask[i] % (int)e;
      ++i;
    }
    CS << ']';
    --i; // For loop increments element #.
  }
  CS.flush();

  return Comment;
}
1334 | ||||
1335 | void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) { | |||
1336 | X86MCInstLower MCInstLowering(*MF, *this); | |||
1337 | const X86RegisterInfo *RI = MF->getSubtarget<X86Subtarget>().getRegisterInfo(); | |||
1338 | ||||
1339 | // Add a comment about EVEX-2-VEX compression for AVX-512 instrs that | |||
1340 | // are compressed from EVEX encoding to VEX encoding. | |||
1341 | if (TM.Options.MCOptions.ShowMCEncoding) { | |||
1342 | if (MI->getAsmPrinterFlags() & AC_EVEX_2_VEX) | |||
1343 | OutStreamer->AddComment("EVEX TO VEX Compression ", false); | |||
1344 | } | |||
1345 | ||||
1346 | switch (MI->getOpcode()) { | |||
1347 | case TargetOpcode::DBG_VALUE: | |||
1348 | llvm_unreachable("Should be handled target independently")::llvm::llvm_unreachable_internal("Should be handled target independently" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1348); | |||
1349 | ||||
1350 | // Emit nothing here but a comment if we can. | |||
1351 | case X86::Int_MemBarrier: | |||
1352 | OutStreamer->emitRawComment("MEMBARRIER"); | |||
1353 | return; | |||
1354 | ||||
1355 | ||||
1356 | case X86::EH_RETURN: | |||
1357 | case X86::EH_RETURN64: { | |||
1358 | // Lower these as normal, but add some comments. | |||
1359 | unsigned Reg = MI->getOperand(0).getReg(); | |||
1360 | OutStreamer->AddComment(StringRef("eh_return, addr: %") + | |||
1361 | X86ATTInstPrinter::getRegisterName(Reg)); | |||
1362 | break; | |||
1363 | } | |||
1364 | case X86::CLEANUPRET: { | |||
1365 | // Lower these as normal, but add some comments. | |||
1366 | OutStreamer->AddComment("CLEANUPRET"); | |||
1367 | break; | |||
1368 | } | |||
1369 | ||||
1370 | case X86::CATCHRET: { | |||
1371 | // Lower these as normal, but add some comments. | |||
1372 | OutStreamer->AddComment("CATCHRET"); | |||
1373 | break; | |||
1374 | } | |||
1375 | ||||
1376 | case X86::TAILJMPr: | |||
1377 | case X86::TAILJMPm: | |||
1378 | case X86::TAILJMPd: | |||
1379 | case X86::TAILJMPd_CC: | |||
1380 | case X86::TAILJMPr64: | |||
1381 | case X86::TAILJMPm64: | |||
1382 | case X86::TAILJMPd64: | |||
1383 | case X86::TAILJMPd64_CC: | |||
1384 | case X86::TAILJMPr64_REX: | |||
1385 | case X86::TAILJMPm64_REX: | |||
1386 | // Lower these as normal, but add some comments. | |||
1387 | OutStreamer->AddComment("TAILCALL"); | |||
1388 | break; | |||
1389 | ||||
1390 | case X86::TLS_addr32: | |||
1391 | case X86::TLS_addr64: | |||
1392 | case X86::TLS_base_addr32: | |||
1393 | case X86::TLS_base_addr64: | |||
1394 | return LowerTlsAddr(MCInstLowering, *MI); | |||
1395 | ||||
1396 | case X86::MOVPC32r: { | |||
1397 | // This is a pseudo op for a two instruction sequence with a label, which | |||
1398 | // looks like: | |||
1399 | // call "L1$pb" | |||
1400 | // "L1$pb": | |||
1401 | // popl %esi | |||
1402 | ||||
1403 | // Emit the call. | |||
1404 | MCSymbol *PICBase = MF->getPICBaseSymbol(); | |||
1405 | // FIXME: We would like an efficient form for this, so we don't have to do a | |||
1406 | // lot of extra uniquing. | |||
1407 | EmitAndCountInstruction(MCInstBuilder(X86::CALLpcrel32) | |||
1408 | .addExpr(MCSymbolRefExpr::create(PICBase, OutContext))); | |||
1409 | ||||
1410 | const X86FrameLowering* FrameLowering = | |||
1411 | MF->getSubtarget<X86Subtarget>().getFrameLowering(); | |||
1412 | bool hasFP = FrameLowering->hasFP(*MF); | |||
1413 | ||||
1414 | // TODO: This is needed only if we require precise CFA. | |||
1415 | bool HasActiveDwarfFrame = OutStreamer->getNumFrameInfos() && | |||
1416 | !OutStreamer->getDwarfFrameInfos().back().End; | |||
1417 | ||||
1418 | int stackGrowth = -RI->getSlotSize(); | |||
1419 | ||||
1420 | if (HasActiveDwarfFrame && !hasFP) { | |||
1421 | OutStreamer->EmitCFIAdjustCfaOffset(-stackGrowth); | |||
1422 | } | |||
1423 | ||||
1424 | // Emit the label. | |||
1425 | OutStreamer->EmitLabel(PICBase); | |||
1426 | ||||
1427 | // popl $reg | |||
1428 | EmitAndCountInstruction(MCInstBuilder(X86::POP32r) | |||
1429 | .addReg(MI->getOperand(0).getReg())); | |||
1430 | ||||
1431 | if (HasActiveDwarfFrame && !hasFP) { | |||
1432 | OutStreamer->EmitCFIAdjustCfaOffset(stackGrowth); | |||
1433 | } | |||
1434 | return; | |||
1435 | } | |||
1436 | ||||
1437 | case X86::ADD32ri: { | |||
1438 | // Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri. | |||
1439 | if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS) | |||
1440 | break; | |||
1441 | ||||
1442 | // Okay, we have something like: | |||
1443 | // EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL) | |||
1444 | ||||
1445 | // For this, we want to print something like: | |||
1446 | // MYGLOBAL + (. - PICBASE) | |||
1447 | // However, we can't generate a ".", so just emit a new label here and refer | |||
1448 | // to it. | |||
1449 | MCSymbol *DotSym = OutContext.createTempSymbol(); | |||
1450 | OutStreamer->EmitLabel(DotSym); | |||
1451 | ||||
1452 | // Now that we have emitted the label, lower the complex operand expression. | |||
1453 | MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2)); | |||
1454 | ||||
1455 | const MCExpr *DotExpr = MCSymbolRefExpr::create(DotSym, OutContext); | |||
1456 | const MCExpr *PICBase = | |||
1457 | MCSymbolRefExpr::create(MF->getPICBaseSymbol(), OutContext); | |||
1458 | DotExpr = MCBinaryExpr::createSub(DotExpr, PICBase, OutContext); | |||
1459 | ||||
1460 | DotExpr = MCBinaryExpr::createAdd(MCSymbolRefExpr::create(OpSym,OutContext), | |||
1461 | DotExpr, OutContext); | |||
1462 | ||||
1463 | EmitAndCountInstruction(MCInstBuilder(X86::ADD32ri) | |||
1464 | .addReg(MI->getOperand(0).getReg()) | |||
1465 | .addReg(MI->getOperand(1).getReg()) | |||
1466 | .addExpr(DotExpr)); | |||
1467 | return; | |||
1468 | } | |||
1469 | case TargetOpcode::STATEPOINT: | |||
1470 | return LowerSTATEPOINT(*MI, MCInstLowering); | |||
1471 | ||||
1472 | case TargetOpcode::FAULTING_OP: | |||
1473 | return LowerFAULTING_OP(*MI, MCInstLowering); | |||
1474 | ||||
1475 | case TargetOpcode::FENTRY_CALL: | |||
1476 | return LowerFENTRY_CALL(*MI, MCInstLowering); | |||
1477 | ||||
1478 | case TargetOpcode::PATCHABLE_OP: | |||
1479 | return LowerPATCHABLE_OP(*MI, MCInstLowering); | |||
1480 | ||||
1481 | case TargetOpcode::STACKMAP: | |||
1482 | return LowerSTACKMAP(*MI); | |||
1483 | ||||
1484 | case TargetOpcode::PATCHPOINT: | |||
1485 | return LowerPATCHPOINT(*MI, MCInstLowering); | |||
1486 | ||||
1487 | case TargetOpcode::PATCHABLE_FUNCTION_ENTER: | |||
1488 | return LowerPATCHABLE_FUNCTION_ENTER(*MI, MCInstLowering); | |||
1489 | ||||
1490 | case TargetOpcode::PATCHABLE_RET: | |||
1491 | return LowerPATCHABLE_RET(*MI, MCInstLowering); | |||
1492 | ||||
1493 | case TargetOpcode::PATCHABLE_TAIL_CALL: | |||
1494 | return LowerPATCHABLE_TAIL_CALL(*MI, MCInstLowering); | |||
1495 | ||||
1496 | case TargetOpcode::PATCHABLE_EVENT_CALL: | |||
1497 | return LowerPATCHABLE_EVENT_CALL(*MI, MCInstLowering); | |||
1498 | ||||
1499 | case X86::MORESTACK_RET: | |||
1500 | EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget))); | |||
1501 | return; | |||
1502 | ||||
1503 | case X86::MORESTACK_RET_RESTORE_R10: | |||
1504 | // Return, then restore R10. | |||
1505 | EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget))); | |||
1506 | EmitAndCountInstruction(MCInstBuilder(X86::MOV64rr) | |||
1507 | .addReg(X86::R10) | |||
1508 | .addReg(X86::RAX)); | |||
1509 | return; | |||
1510 | ||||
1511 | case X86::SEH_PushReg: | |||
1512 | assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?")((MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?" ) ? static_cast<void> (0) : __assert_fail ("MF->hasWinCFI() && \"SEH_ instruction in function without WinCFI?\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1512, __PRETTY_FUNCTION__)); | |||
1513 | OutStreamer->EmitWinCFIPushReg(RI->getSEHRegNum(MI->getOperand(0).getImm())); | |||
1514 | return; | |||
1515 | ||||
1516 | case X86::SEH_SaveReg: | |||
1517 | assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?")((MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?" ) ? static_cast<void> (0) : __assert_fail ("MF->hasWinCFI() && \"SEH_ instruction in function without WinCFI?\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1517, __PRETTY_FUNCTION__)); | |||
1518 | OutStreamer->EmitWinCFISaveReg(RI->getSEHRegNum(MI->getOperand(0).getImm()), | |||
1519 | MI->getOperand(1).getImm()); | |||
1520 | return; | |||
1521 | ||||
1522 | case X86::SEH_SaveXMM: | |||
1523 | assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?")((MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?" ) ? static_cast<void> (0) : __assert_fail ("MF->hasWinCFI() && \"SEH_ instruction in function without WinCFI?\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1523, __PRETTY_FUNCTION__)); | |||
1524 | OutStreamer->EmitWinCFISaveXMM(RI->getSEHRegNum(MI->getOperand(0).getImm()), | |||
1525 | MI->getOperand(1).getImm()); | |||
1526 | return; | |||
1527 | ||||
1528 | case X86::SEH_StackAlloc: | |||
1529 | assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?")((MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?" ) ? static_cast<void> (0) : __assert_fail ("MF->hasWinCFI() && \"SEH_ instruction in function without WinCFI?\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1529, __PRETTY_FUNCTION__)); | |||
1530 | OutStreamer->EmitWinCFIAllocStack(MI->getOperand(0).getImm()); | |||
1531 | return; | |||
1532 | ||||
1533 | case X86::SEH_SetFrame: | |||
1534 | assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?")((MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?" ) ? static_cast<void> (0) : __assert_fail ("MF->hasWinCFI() && \"SEH_ instruction in function without WinCFI?\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1534, __PRETTY_FUNCTION__)); | |||
1535 | OutStreamer->EmitWinCFISetFrame(RI->getSEHRegNum(MI->getOperand(0).getImm()), | |||
1536 | MI->getOperand(1).getImm()); | |||
1537 | return; | |||
1538 | ||||
1539 | case X86::SEH_PushFrame: | |||
1540 | assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?")((MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?" ) ? static_cast<void> (0) : __assert_fail ("MF->hasWinCFI() && \"SEH_ instruction in function without WinCFI?\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1540, __PRETTY_FUNCTION__)); | |||
1541 | OutStreamer->EmitWinCFIPushFrame(MI->getOperand(0).getImm()); | |||
1542 | return; | |||
1543 | ||||
1544 | case X86::SEH_EndPrologue: | |||
1545 | assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?")((MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?" ) ? static_cast<void> (0) : __assert_fail ("MF->hasWinCFI() && \"SEH_ instruction in function without WinCFI?\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1545, __PRETTY_FUNCTION__)); | |||
1546 | OutStreamer->EmitWinCFIEndProlog(); | |||
1547 | return; | |||
1548 | ||||
1549 | case X86::SEH_Epilogue: { | |||
1550 | assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?")((MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?" ) ? static_cast<void> (0) : __assert_fail ("MF->hasWinCFI() && \"SEH_ instruction in function without WinCFI?\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1550, __PRETTY_FUNCTION__)); | |||
1551 | MachineBasicBlock::const_iterator MBBI(MI); | |||
1552 | // Check if preceded by a call and emit nop if so. | |||
1553 | for (MBBI = PrevCrossBBInst(MBBI); | |||
1554 | MBBI != MachineBasicBlock::const_iterator(); | |||
1555 | MBBI = PrevCrossBBInst(MBBI)) { | |||
1556 | // Conservatively assume that pseudo instructions don't emit code and keep | |||
1557 | // looking for a call. We may emit an unnecessary nop in some cases. | |||
1558 | if (!MBBI->isPseudo()) { | |||
1559 | if (MBBI->isCall()) | |||
1560 | EmitAndCountInstruction(MCInstBuilder(X86::NOOP)); | |||
1561 | break; | |||
1562 | } | |||
1563 | } | |||
1564 | return; | |||
1565 | } | |||
1566 | ||||
1567 | // Lower PSHUFB and VPERMILP normally but add a comment if we can find | |||
1568 | // a constant shuffle mask. We won't be able to do this at the MC layer | |||
1569 | // because the mask isn't an immediate. | |||
1570 | case X86::PSHUFBrm: | |||
1571 | case X86::VPSHUFBrm: | |||
1572 | case X86::VPSHUFBYrm: | |||
1573 | case X86::VPSHUFBZ128rm: | |||
1574 | case X86::VPSHUFBZ128rmk: | |||
1575 | case X86::VPSHUFBZ128rmkz: | |||
1576 | case X86::VPSHUFBZ256rm: | |||
1577 | case X86::VPSHUFBZ256rmk: | |||
1578 | case X86::VPSHUFBZ256rmkz: | |||
1579 | case X86::VPSHUFBZrm: | |||
1580 | case X86::VPSHUFBZrmk: | |||
1581 | case X86::VPSHUFBZrmkz: { | |||
1582 | if (!OutStreamer->isVerboseAsm()) | |||
1583 | break; | |||
1584 | unsigned SrcIdx, MaskIdx; | |||
1585 | switch (MI->getOpcode()) { | |||
1586 | default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1586); | |||
1587 | case X86::PSHUFBrm: | |||
1588 | case X86::VPSHUFBrm: | |||
1589 | case X86::VPSHUFBYrm: | |||
1590 | case X86::VPSHUFBZ128rm: | |||
1591 | case X86::VPSHUFBZ256rm: | |||
1592 | case X86::VPSHUFBZrm: | |||
1593 | SrcIdx = 1; MaskIdx = 5; break; | |||
1594 | case X86::VPSHUFBZ128rmkz: | |||
1595 | case X86::VPSHUFBZ256rmkz: | |||
1596 | case X86::VPSHUFBZrmkz: | |||
1597 | SrcIdx = 2; MaskIdx = 6; break; | |||
1598 | case X86::VPSHUFBZ128rmk: | |||
1599 | case X86::VPSHUFBZ256rmk: | |||
1600 | case X86::VPSHUFBZrmk: | |||
1601 | SrcIdx = 3; MaskIdx = 7; break; | |||
1602 | } | |||
1603 | ||||
1604 | assert(MI->getNumOperands() >= 6 &&((MI->getNumOperands() >= 6 && "We should always have at least 6 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() >= 6 && \"We should always have at least 6 operands!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1605, __PRETTY_FUNCTION__)) | |||
1605 | "We should always have at least 6 operands!")((MI->getNumOperands() >= 6 && "We should always have at least 6 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() >= 6 && \"We should always have at least 6 operands!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1605, __PRETTY_FUNCTION__)); | |||
1606 | ||||
1607 | const MachineOperand &MaskOp = MI->getOperand(MaskIdx); | |||
1608 | if (auto *C = getConstantFromPool(*MI, MaskOp)) { | |||
1609 | SmallVector<int, 64> Mask; | |||
1610 | DecodePSHUFBMask(C, Mask); | |||
1611 | if (!Mask.empty()) | |||
1612 | OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask), | |||
1613 | !EnablePrintSchedInfo); | |||
1614 | } | |||
1615 | break; | |||
1616 | } | |||
1617 | ||||
1618 | case X86::VPERMILPSrm: | |||
1619 | case X86::VPERMILPSYrm: | |||
1620 | case X86::VPERMILPSZ128rm: | |||
1621 | case X86::VPERMILPSZ128rmk: | |||
1622 | case X86::VPERMILPSZ128rmkz: | |||
1623 | case X86::VPERMILPSZ256rm: | |||
1624 | case X86::VPERMILPSZ256rmk: | |||
1625 | case X86::VPERMILPSZ256rmkz: | |||
1626 | case X86::VPERMILPSZrm: | |||
1627 | case X86::VPERMILPSZrmk: | |||
1628 | case X86::VPERMILPSZrmkz: | |||
1629 | case X86::VPERMILPDrm: | |||
1630 | case X86::VPERMILPDYrm: | |||
1631 | case X86::VPERMILPDZ128rm: | |||
1632 | case X86::VPERMILPDZ128rmk: | |||
1633 | case X86::VPERMILPDZ128rmkz: | |||
1634 | case X86::VPERMILPDZ256rm: | |||
1635 | case X86::VPERMILPDZ256rmk: | |||
1636 | case X86::VPERMILPDZ256rmkz: | |||
1637 | case X86::VPERMILPDZrm: | |||
1638 | case X86::VPERMILPDZrmk: | |||
1639 | case X86::VPERMILPDZrmkz: { | |||
1640 | if (!OutStreamer->isVerboseAsm()) | |||
1641 | break; | |||
1642 | unsigned SrcIdx, MaskIdx; | |||
1643 | unsigned ElSize; | |||
1644 | switch (MI->getOpcode()) { | |||
1645 | default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1645); | |||
1646 | case X86::VPERMILPSrm: | |||
1647 | case X86::VPERMILPSYrm: | |||
1648 | case X86::VPERMILPSZ128rm: | |||
1649 | case X86::VPERMILPSZ256rm: | |||
1650 | case X86::VPERMILPSZrm: | |||
1651 | SrcIdx = 1; MaskIdx = 5; ElSize = 32; break; | |||
1652 | case X86::VPERMILPSZ128rmkz: | |||
1653 | case X86::VPERMILPSZ256rmkz: | |||
1654 | case X86::VPERMILPSZrmkz: | |||
1655 | SrcIdx = 2; MaskIdx = 6; ElSize = 32; break; | |||
1656 | case X86::VPERMILPSZ128rmk: | |||
1657 | case X86::VPERMILPSZ256rmk: | |||
1658 | case X86::VPERMILPSZrmk: | |||
1659 | SrcIdx = 3; MaskIdx = 7; ElSize = 32; break; | |||
1660 | case X86::VPERMILPDrm: | |||
1661 | case X86::VPERMILPDYrm: | |||
1662 | case X86::VPERMILPDZ128rm: | |||
1663 | case X86::VPERMILPDZ256rm: | |||
1664 | case X86::VPERMILPDZrm: | |||
1665 | SrcIdx = 1; MaskIdx = 5; ElSize = 64; break; | |||
1666 | case X86::VPERMILPDZ128rmkz: | |||
1667 | case X86::VPERMILPDZ256rmkz: | |||
1668 | case X86::VPERMILPDZrmkz: | |||
1669 | SrcIdx = 2; MaskIdx = 6; ElSize = 64; break; | |||
1670 | case X86::VPERMILPDZ128rmk: | |||
1671 | case X86::VPERMILPDZ256rmk: | |||
1672 | case X86::VPERMILPDZrmk: | |||
1673 | SrcIdx = 3; MaskIdx = 7; ElSize = 64; break; | |||
1674 | } | |||
1675 | ||||
1676 | assert(MI->getNumOperands() >= 6 &&((MI->getNumOperands() >= 6 && "We should always have at least 6 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() >= 6 && \"We should always have at least 6 operands!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1677, __PRETTY_FUNCTION__)) | |||
1677 | "We should always have at least 6 operands!")((MI->getNumOperands() >= 6 && "We should always have at least 6 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() >= 6 && \"We should always have at least 6 operands!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1677, __PRETTY_FUNCTION__)); | |||
1678 | ||||
1679 | const MachineOperand &MaskOp = MI->getOperand(MaskIdx); | |||
1680 | if (auto *C = getConstantFromPool(*MI, MaskOp)) { | |||
1681 | SmallVector<int, 16> Mask; | |||
1682 | DecodeVPERMILPMask(C, ElSize, Mask); | |||
1683 | if (!Mask.empty()) | |||
1684 | OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask), | |||
1685 | !EnablePrintSchedInfo); | |||
1686 | } | |||
1687 | break; | |||
1688 | } | |||
1689 | ||||
1690 | case X86::VPERMIL2PDrm: | |||
1691 | case X86::VPERMIL2PSrm: | |||
1692 | case X86::VPERMIL2PDYrm: | |||
1693 | case X86::VPERMIL2PSYrm: { | |||
1694 | if (!OutStreamer->isVerboseAsm()) | |||
1695 | break; | |||
1696 | assert(MI->getNumOperands() >= 8 &&((MI->getNumOperands() >= 8 && "We should always have at least 8 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() >= 8 && \"We should always have at least 8 operands!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1697, __PRETTY_FUNCTION__)) | |||
1697 | "We should always have at least 8 operands!")((MI->getNumOperands() >= 8 && "We should always have at least 8 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() >= 8 && \"We should always have at least 8 operands!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1697, __PRETTY_FUNCTION__)); | |||
1698 | ||||
1699 | const MachineOperand &CtrlOp = MI->getOperand(MI->getNumOperands() - 1); | |||
1700 | if (!CtrlOp.isImm()) | |||
1701 | break; | |||
1702 | ||||
1703 | unsigned ElSize; | |||
1704 | switch (MI->getOpcode()) { | |||
1705 | default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1705); | |||
1706 | case X86::VPERMIL2PSrm: case X86::VPERMIL2PSYrm: ElSize = 32; break; | |||
1707 | case X86::VPERMIL2PDrm: case X86::VPERMIL2PDYrm: ElSize = 64; break; | |||
1708 | } | |||
1709 | ||||
1710 | const MachineOperand &MaskOp = MI->getOperand(6); | |||
1711 | if (auto *C = getConstantFromPool(*MI, MaskOp)) { | |||
1712 | SmallVector<int, 16> Mask; | |||
1713 | DecodeVPERMIL2PMask(C, (unsigned)CtrlOp.getImm(), ElSize, Mask); | |||
1714 | if (!Mask.empty()) | |||
1715 | OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask), | |||
1716 | !EnablePrintSchedInfo); | |||
1717 | } | |||
1718 | break; | |||
1719 | } | |||
1720 | ||||
1721 | case X86::VPPERMrrm: { | |||
1722 | if (!OutStreamer->isVerboseAsm()) | |||
1723 | break; | |||
1724 | assert(MI->getNumOperands() >= 7 &&((MI->getNumOperands() >= 7 && "We should always have at least 7 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() >= 7 && \"We should always have at least 7 operands!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1725, __PRETTY_FUNCTION__)) | |||
1725 | "We should always have at least 7 operands!")((MI->getNumOperands() >= 7 && "We should always have at least 7 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() >= 7 && \"We should always have at least 7 operands!\"" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn306458/lib/Target/X86/X86MCInstLower.cpp" , 1725, __PRETTY_FUNCTION__)); | |||
1726 | ||||
1727 | const MachineOperand &MaskOp = MI->getOperand(6); | |||
1728 | if (auto *C = getConstantFromPool(*MI, MaskOp)) { | |||
1729 | SmallVector<int, 16> Mask; | |||
1730 | DecodeVPPERMMask(C, Mask); | |||
1731 | if (!Mask.empty()) | |||
1732 | OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask), | |||
1733 | !EnablePrintSchedInfo); | |||
1734 | } | |||
1735 | break; | |||
1736 | } | |||
1737 | ||||
1738 | #define MOV_CASE(Prefix, Suffix)case X86::PrefixMOVAPDSuffixrm: case X86::PrefixMOVAPSSuffixrm : case X86::PrefixMOVUPDSuffixrm: case X86::PrefixMOVUPSSuffixrm : case X86::PrefixMOVDQASuffixrm: case X86::PrefixMOVDQUSuffixrm : \ | |||
1739 | case X86::Prefix##MOVAPD##Suffix##rm: \ | |||
1740 | case X86::Prefix##MOVAPS##Suffix##rm: \ | |||
1741 | case X86::Prefix##MOVUPD##Suffix##rm: \ | |||
1742 | case X86::Prefix##MOVUPS##Suffix##rm: \ | |||
1743 | case X86::Prefix##MOVDQA##Suffix##rm: \ | |||
1744 | case X86::Prefix##MOVDQU##Suffix##rm: | |||
1745 | ||||
1746 | #define MOV_AVX512_CASE(Suffix)case X86::VMOVDQA64Suffixrm: case X86::VMOVDQA32Suffixrm: case X86::VMOVDQU64Suffixrm: case X86::VMOVDQU32Suffixrm: case X86 ::VMOVDQU16Suffixrm: case X86::VMOVDQU8Suffixrm: case X86::VMOVAPSSuffixrm : case X86::VMOVAPDSuffixrm: case X86::VMOVUPSSuffixrm: case X86 ::VMOVUPDSuffixrm: \ | |||
1747 | case X86::VMOVDQA64##Suffix##rm: \ | |||
1748 | case X86::VMOVDQA32##Suffix##rm: \ | |||
1749 | case X86::VMOVDQU64##Suffix##rm: \ | |||
1750 | case X86::VMOVDQU32##Suffix##rm: \ | |||
1751 | case X86::VMOVDQU16##Suffix##rm: \ | |||
1752 | case X86::VMOVDQU8##Suffix##rm: \ | |||
1753 | case X86::VMOVAPS##Suffix##rm: \ | |||
1754 | case X86::VMOVAPD##Suffix##rm: \ | |||
1755 | case X86::VMOVUPS##Suffix##rm: \ | |||
1756 | case X86::VMOVUPD##Suffix##rm: | |||
1757 | ||||
1758 | #define CASE_ALL_MOV_RM()case X86::MOVAPDrm: case X86::MOVAPSrm: case X86::MOVUPDrm: case X86::MOVUPSrm: case X86::MOVDQArm: case X86::MOVDQUrm: case X86 ::VMOVAPDrm: case X86::VMOVAPSrm: case X86::VMOVUPDrm: case X86 ::VMOVUPSrm: case X86::VMOVDQArm: case X86::VMOVDQUrm: case X86 ::VMOVAPDYrm: case X86::VMOVAPSYrm: case X86::VMOVUPDYrm: case X86::VMOVUPSYrm: case X86::VMOVDQAYrm: case X86::VMOVDQUYrm: case X86::VMOVDQA64Zrm: case X86::VMOVDQA32Zrm: case X86::VMOVDQU64Zrm : case X86::VMOVDQU32Zrm: case X86::VMOVDQU16Zrm: case X86::VMOVDQU8Zrm : case X86::VMOVAPSZrm: case X86::VMOVAPDZrm: case X86::VMOVUPSZrm : case X86::VMOVUPDZrm: case X86::VMOVDQA64Z256rm: case X86:: VMOVDQA32Z256rm: case X86::VMOVDQU64Z256rm: case X86::VMOVDQU32Z256rm : case X86::VMOVDQU16Z256rm: case X86::VMOVDQU8Z256rm: case X86 ::VMOVAPSZ256rm: case X86::VMOVAPDZ256rm: case X86::VMOVUPSZ256rm : case X86::VMOVUPDZ256rm: case X86::VMOVDQA64Z128rm: case X86 ::VMOVDQA32Z128rm: case X86::VMOVDQU64Z128rm: case X86::VMOVDQU32Z128rm : case X86::VMOVDQU16Z128rm: case X86::VMOVDQU8Z128rm: case X86 ::VMOVAPSZ128rm: case X86::VMOVAPDZ128rm: case X86::VMOVUPSZ128rm : case X86::VMOVUPDZ128rm: \ | |||
1759 | MOV_CASE(, )case X86::MOVAPDrm: case X86::MOVAPSrm: case X86::MOVUPDrm: case X86::MOVUPSrm: case X86::MOVDQArm: case X86::MOVDQUrm: /* SSE */ \ | |||
1760 | MOV_CASE(V, )case X86::VMOVAPDrm: case X86::VMOVAPSrm: case X86::VMOVUPDrm : case X86::VMOVUPSrm: case X86::VMOVDQArm: case X86::VMOVDQUrm : /* AVX-128 */ \ | |||
1761 | MOV_CASE(V, Y)case X86::VMOVAPDYrm: case X86::VMOVAPSYrm: case X86::VMOVUPDYrm : case X86::VMOVUPSYrm: case X86::VMOVDQAYrm: case X86::VMOVDQUYrm : /* AVX-256 */ \ | |||
1762 | MOV_AVX512_CASE(Z)case X86::VMOVDQA64Zrm: case X86::VMOVDQA32Zrm: case X86::VMOVDQU64Zrm : case X86::VMOVDQU32Zrm: case X86::VMOVDQU16Zrm: case X86::VMOVDQU8Zrm : case X86::VMOVAPSZrm: case X86::VMOVAPDZrm: case X86::VMOVUPSZrm : case X86::VMOVUPDZrm: \ | |||
1763 | MOV_AVX512_CASE(Z256)case X86::VMOVDQA64Z256rm: case X86::VMOVDQA32Z256rm: case X86 ::VMOVDQU64Z256rm: case X86::VMOVDQU32Z256rm: case X86::VMOVDQU16Z256rm : case X86::VMOVDQU8Z256rm: case X86::VMOVAPSZ256rm: case X86 ::VMOVAPDZ256rm: case X86::VMOVUPSZ256rm: case X86::VMOVUPDZ256rm : \ | |||
1764 | MOV_AVX512_CASE(Z128)case X86::VMOVDQA64Z128rm: case X86::VMOVDQA32Z128rm: case X86 ::VMOVDQU64Z128rm: case X86::VMOVDQU32Z128rm: case X86::VMOVDQU16Z128rm : case X86::VMOVDQU8Z128rm: case X86::VMOVAPSZ128rm: case X86 ::VMOVAPDZ128rm: case X86::VMOVUPSZ128rm: case X86::VMOVUPDZ128rm : | |||
1765 | ||||
1766 | // For loads from a constant pool to a vector register, print the constant | |||
1767 | // loaded. | |||
1768 | CASE_ALL_MOV_RM()case X86::MOVAPDrm: case X86::MOVAPSrm: case X86::MOVUPDrm: case X86::MOVUPSrm: case X86::MOVDQArm: case X86::MOVDQUrm: case X86 ::VMOVAPDrm: case X86::VMOVAPSrm: case X86::VMOVUPDrm: case X86 ::VMOVUPSrm: case X86::VMOVDQArm: case X86::VMOVDQUrm: case X86 ::VMOVAPDYrm: case X86::VMOVAPSYrm: case X86::VMOVUPDYrm: case X86::VMOVUPSYrm: case X86::VMOVDQAYrm: case X86::VMOVDQUYrm: case X86::VMOVDQA64Zrm: case X86::VMOVDQA32Zrm: case X86::VMOVDQU64Zrm : case X86::VMOVDQU32Zrm: case X86::VMOVDQU16Zrm: case X86::VMOVDQU8Zrm : case X86::VMOVAPSZrm: case X86::VMOVAPDZrm: case X86::VMOVUPSZrm : case X86::VMOVUPDZrm: case X86::VMOVDQA64Z256rm: case X86:: VMOVDQA32Z256rm: case X86::VMOVDQU64Z256rm: case X86::VMOVDQU32Z256rm : case X86::VMOVDQU16Z256rm: case X86::VMOVDQU8Z256rm: case X86 ::VMOVAPSZ256rm: case X86::VMOVAPDZ256rm: case X86::VMOVUPSZ256rm : case X86::VMOVUPDZ256rm: case X86::VMOVDQA64Z128rm: case X86 ::VMOVDQA32Z128rm: case X86::VMOVDQU64Z128rm: case X86::VMOVDQU32Z128rm : case X86::VMOVDQU16Z128rm: case X86::VMOVDQU8Z128rm: case X86 ::VMOVAPSZ128rm: case X86::VMOVAPDZ128rm: case X86::VMOVUPSZ128rm : case X86::VMOVUPDZ128rm: | |||
1769 | if (!OutStreamer->isVerboseAsm()) | |||
1770 | break; | |||
1771 | if (MI->getNumOperands() <= 4) | |||
1772 | break; | |||
1773 | if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) { | |||
1774 | std::string Comment; | |||
1775 | raw_string_ostream CS(Comment); | |||
1776 | const MachineOperand &DstOp = MI->getOperand(0); | |||
1777 | CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = "; | |||
1778 | if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) { | |||
1779 | CS << "["; | |||
1780 | for (int i = 0, NumElements = CDS->getNumElements(); i < NumElements; ++i) { | |||
1781 | if (i != 0) | |||
1782 | CS << ","; | |||
1783 | if (CDS->getElementType()->isIntegerTy()) | |||
1784 | CS << CDS->getElementAsInteger(i); | |||
1785 | else if (CDS->getElementType()->isFloatTy()) | |||
1786 | CS << CDS->getElementAsFloat(i); | |||
1787 | else if (CDS->getElementType()->isDoubleTy()) | |||
1788 | CS << CDS->getElementAsDouble(i); | |||
1789 | else | |||
1790 | CS << "?"; | |||
1791 | } | |||
1792 | CS << "]"; | |||
1793 | OutStreamer->AddComment(CS.str(), !EnablePrintSchedInfo); | |||
1794 | } else if (auto *CV = dyn_cast<ConstantVector>(C)) { | |||
1795 | CS << "<"; | |||
1796 | for (int i = 0, NumOperands = CV->getNumOperands(); i < NumOperands; ++i) { | |||
1797 | if (i != 0) | |||
1798 | CS << ","; | |||
1799 | Constant *COp = CV->getOperand(i); | |||
1800 | if (isa<UndefValue>(COp)) { | |||
1801 | CS << "u"; | |||
1802 | } else if (auto *CI = dyn_cast<ConstantInt>(COp)) { | |||
1803 | if (CI->getBitWidth() <= 64) { | |||
1804 | CS << CI->getZExtValue(); | |||
1805 | } else { | |||
1806 | // print multi-word constant as (w0,w1) | |||
1807 | const auto &Val = CI->getValue(); | |||
1808 | CS << "("; | |||
1809 | for (int i = 0, N = Val.getNumWords(); i < N; ++i) { | |||
1810 | if (i > 0) | |||
1811 | CS << ","; | |||
1812 | CS << Val.getRawData()[i]; | |||
1813 | } | |||
1814 | CS << ")"; | |||
1815 | } | |||
1816 | } else if (auto *CF = dyn_cast<ConstantFP>(COp)) { | |||
1817 | SmallString<32> Str; | |||
1818 | CF->getValueAPF().toString(Str); | |||
1819 | CS << Str; | |||
1820 | } else { | |||
1821 | CS << "?"; | |||
1822 | } | |||
1823 | } | |||
1824 | CS << ">"; | |||
1825 | OutStreamer->AddComment(CS.str(), !EnablePrintSchedInfo); | |||
1826 | } | |||
1827 | } | |||
1828 | break; | |||
1829 | } | |||
1830 | ||||
1831 | MCInst TmpInst; | |||
1832 | MCInstLowering.Lower(MI, TmpInst); | |||
1833 | ||||
1834 | // Stackmap shadows cannot include branch targets, so we can count the bytes | |||
1835 | // in a call towards the shadow, but must ensure that the no thread returns | |||
1836 | // in to the stackmap shadow. The only way to achieve this is if the call | |||
1837 | // is at the end of the shadow. | |||
1838 | if (MI->isCall()) { | |||
1839 | // Count then size of the call towards the shadow | |||
1840 | SMShadowTracker.count(TmpInst, getSubtargetInfo(), CodeEmitter.get()); | |||
1841 | // Then flush the shadow so that we fill with nops before the call, not | |||
1842 | // after it. | |||
1843 | SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo()); | |||
1844 | // Then emit the call | |||
1845 | OutStreamer->EmitInstruction(TmpInst, getSubtargetInfo()); | |||
1846 | return; | |||
1847 | } | |||
1848 | ||||
1849 | EmitAndCountInstruction(TmpInst); | |||
1850 | } |