File: | lib/Target/X86/X86MCInstLower.cpp |
Location: | line 795, column 5 |
Description: | Value stored to 'Opc' is never read |
1 | //===-- X86MCInstLower.cpp - Convert X86 MachineInstr to an MCInst --------===// |
2 | // |
3 | // The LLVM Compiler Infrastructure |
4 | // |
5 | // This file is distributed under the University of Illinois Open Source |
6 | // License. See LICENSE.TXT for details. |
7 | // |
8 | //===----------------------------------------------------------------------===// |
9 | // |
10 | // This file contains code to lower X86 MachineInstrs to their corresponding |
11 | // MCInst records. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #include "X86AsmPrinter.h" |
16 | #include "X86RegisterInfo.h" |
17 | #include "X86ShuffleDecodeConstantPool.h" |
18 | #include "InstPrinter/X86ATTInstPrinter.h" |
19 | #include "MCTargetDesc/X86BaseInfo.h" |
20 | #include "Utils/X86ShuffleDecode.h" |
21 | #include "llvm/ADT/Optional.h" |
22 | #include "llvm/ADT/SmallString.h" |
23 | #include "llvm/CodeGen/MachineFunction.h" |
24 | #include "llvm/CodeGen/MachineConstantPool.h" |
25 | #include "llvm/CodeGen/MachineOperand.h" |
26 | #include "llvm/CodeGen/MachineModuleInfoImpls.h" |
27 | #include "llvm/CodeGen/StackMaps.h" |
28 | #include "llvm/IR/DataLayout.h" |
29 | #include "llvm/IR/GlobalValue.h" |
30 | #include "llvm/IR/Mangler.h" |
31 | #include "llvm/MC/MCAsmInfo.h" |
32 | #include "llvm/MC/MCCodeEmitter.h" |
33 | #include "llvm/MC/MCContext.h" |
34 | #include "llvm/MC/MCExpr.h" |
35 | #include "llvm/MC/MCFixup.h" |
36 | #include "llvm/MC/MCInst.h" |
37 | #include "llvm/MC/MCInstBuilder.h" |
38 | #include "llvm/MC/MCStreamer.h" |
39 | #include "llvm/MC/MCSymbol.h" |
40 | #include "llvm/Support/TargetRegistry.h" |
41 | using namespace llvm; |
42 | |
namespace {

/// X86MCInstLower - This class is used to lower an MachineInstr into an MCInst.
class X86MCInstLower {
  MCContext &Ctx;            // MC context used to create symbols and expressions.
  const MachineFunction &MF; // Function whose instructions are being lowered.
  const TargetMachine &TM;
  const MCAsmInfo &MAI;
  X86AsmPrinter &AsmPrinter; // Printer used for symbol lookup and streaming.
public:
  X86MCInstLower(const MachineFunction &MF, X86AsmPrinter &asmprinter);

  /// Lower a single operand of MI; returns None for operands that have no
  /// MCInst counterpart (implicit registers, register masks).
  Optional<MCOperand> LowerMachineOperand(const MachineInstr *MI,
                                          const MachineOperand &MO) const;
  /// Lower MI into OutMI, then apply X86-specific encoding simplifications.
  void Lower(const MachineInstr *MI, MCInst &OutMI) const;

  /// Map a symbolic operand (global, external symbol, or basic block) to its
  /// MCSymbol, applying target-flag-driven prefixes/suffixes and stub tables.
  MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;
  /// Build the MCOperand expression for Sym, honoring MO's target flags
  /// (TLS variants, PIC-base-relative references, constant offsets).
  MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;

private:
  MachineModuleInfoMachO &getMachOMMI() const;
  Mangler *getMang() const {
    return AsmPrinter.Mang;
  }
};

} // end anonymous namespace
70 | |
// Emit a minimal sequence of nops spanning NumBytes bytes.
// Defined later in this file; forward-declared for use by the stackmap
// shadow tracker below.
static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
                     const MCSubtargetInfo &STI);
74 | |
75 | namespace llvm { |
76 | X86AsmPrinter::StackMapShadowTracker::StackMapShadowTracker(TargetMachine &TM) |
77 | : TM(TM), InShadow(false), RequiredShadowSize(0), CurrentShadowSize(0) {} |
78 | |
79 | X86AsmPrinter::StackMapShadowTracker::~StackMapShadowTracker() {} |
80 | |
81 | void |
82 | X86AsmPrinter::StackMapShadowTracker::startFunction(MachineFunction &F) { |
83 | MF = &F; |
84 | CodeEmitter.reset(TM.getTarget().createMCCodeEmitter( |
85 | *MF->getSubtarget().getInstrInfo(), |
86 | *MF->getSubtarget().getRegisterInfo(), MF->getContext())); |
87 | } |
88 | |
89 | void X86AsmPrinter::StackMapShadowTracker::count(MCInst &Inst, |
90 | const MCSubtargetInfo &STI) { |
91 | if (InShadow) { |
92 | SmallString<256> Code; |
93 | SmallVector<MCFixup, 4> Fixups; |
94 | raw_svector_ostream VecOS(Code); |
95 | CodeEmitter->encodeInstruction(Inst, VecOS, Fixups, STI); |
96 | CurrentShadowSize += Code.size(); |
97 | if (CurrentShadowSize >= RequiredShadowSize) |
98 | InShadow = false; // The shadow is big enough. Stop counting. |
99 | } |
100 | } |
101 | |
102 | void X86AsmPrinter::StackMapShadowTracker::emitShadowPadding( |
103 | MCStreamer &OutStreamer, const MCSubtargetInfo &STI) { |
104 | if (InShadow && CurrentShadowSize < RequiredShadowSize) { |
105 | InShadow = false; |
106 | EmitNops(OutStreamer, RequiredShadowSize - CurrentShadowSize, |
107 | MF->getSubtarget<X86Subtarget>().is64Bit(), STI); |
108 | } |
109 | } |
110 | |
111 | void X86AsmPrinter::EmitAndCountInstruction(MCInst &Inst) { |
112 | OutStreamer->EmitInstruction(Inst, getSubtargetInfo()); |
113 | SMShadowTracker.count(Inst, getSubtargetInfo()); |
114 | } |
115 | } // end llvm namespace |
116 | |
// Capture the MC context and target info needed throughout lowering.
X86MCInstLower::X86MCInstLower(const MachineFunction &mf,
                               X86AsmPrinter &asmprinter)
    : Ctx(mf.getContext()), MF(mf), TM(mf.getTarget()), MAI(*TM.getMCAsmInfo()),
      AsmPrinter(asmprinter) {}
121 | |
// Return the MachO-flavored machine module info, which holds the Darwin
// stub/non-lazy-pointer tables used by GetSymbolFromOperand.
MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
  return MF.getMMI().getObjFileInfo<MachineModuleInfoMachO>();
}
125 | |
126 | |
127 | /// GetSymbolFromOperand - Lower an MO_GlobalAddress or MO_ExternalSymbol |
128 | /// operand to an MCSymbol. |
129 | MCSymbol *X86MCInstLower:: |
130 | GetSymbolFromOperand(const MachineOperand &MO) const { |
131 | const DataLayout &DL = MF.getDataLayout(); |
132 | assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) && "Isn't a symbol reference")(((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) && "Isn't a symbol reference" ) ? static_cast<void> (0) : __assert_fail ("(MO.isGlobal() || MO.isSymbol() || MO.isMBB()) && \"Isn't a symbol reference\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 132, __PRETTY_FUNCTION__)); |
133 | |
134 | MCSymbol *Sym = nullptr; |
135 | SmallString<128> Name; |
136 | StringRef Suffix; |
137 | |
138 | switch (MO.getTargetFlags()) { |
139 | case X86II::MO_DLLIMPORT: |
140 | // Handle dllimport linkage. |
141 | Name += "__imp_"; |
142 | break; |
143 | case X86II::MO_DARWIN_STUB: |
144 | Suffix = "$stub"; |
145 | break; |
146 | case X86II::MO_DARWIN_NONLAZY: |
147 | case X86II::MO_DARWIN_NONLAZY_PIC_BASE: |
148 | case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: |
149 | Suffix = "$non_lazy_ptr"; |
150 | break; |
151 | } |
152 | |
153 | if (!Suffix.empty()) |
154 | Name += DL.getPrivateGlobalPrefix(); |
155 | |
156 | unsigned PrefixLen = Name.size(); |
157 | |
158 | if (MO.isGlobal()) { |
159 | const GlobalValue *GV = MO.getGlobal(); |
160 | AsmPrinter.getNameWithPrefix(Name, GV); |
161 | } else if (MO.isSymbol()) { |
162 | Mangler::getNameWithPrefix(Name, MO.getSymbolName(), DL); |
163 | } else if (MO.isMBB()) { |
164 | assert(Suffix.empty())((Suffix.empty()) ? static_cast<void> (0) : __assert_fail ("Suffix.empty()", "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 164, __PRETTY_FUNCTION__)); |
165 | Sym = MO.getMBB()->getSymbol(); |
166 | } |
167 | unsigned OrigLen = Name.size() - PrefixLen; |
168 | |
169 | Name += Suffix; |
170 | if (!Sym) |
171 | Sym = Ctx.getOrCreateSymbol(Name); |
172 | |
173 | StringRef OrigName = StringRef(Name).substr(PrefixLen, OrigLen); |
174 | |
175 | // If the target flags on the operand changes the name of the symbol, do that |
176 | // before we return the symbol. |
177 | switch (MO.getTargetFlags()) { |
178 | default: break; |
179 | case X86II::MO_DARWIN_NONLAZY: |
180 | case X86II::MO_DARWIN_NONLAZY_PIC_BASE: { |
181 | MachineModuleInfoImpl::StubValueTy &StubSym = |
182 | getMachOMMI().getGVStubEntry(Sym); |
183 | if (!StubSym.getPointer()) { |
184 | assert(MO.isGlobal() && "Extern symbol not handled yet")((MO.isGlobal() && "Extern symbol not handled yet") ? static_cast<void> (0) : __assert_fail ("MO.isGlobal() && \"Extern symbol not handled yet\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 184, __PRETTY_FUNCTION__)); |
185 | StubSym = |
186 | MachineModuleInfoImpl:: |
187 | StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()), |
188 | !MO.getGlobal()->hasInternalLinkage()); |
189 | } |
190 | break; |
191 | } |
192 | case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: { |
193 | MachineModuleInfoImpl::StubValueTy &StubSym = |
194 | getMachOMMI().getHiddenGVStubEntry(Sym); |
195 | if (!StubSym.getPointer()) { |
196 | assert(MO.isGlobal() && "Extern symbol not handled yet")((MO.isGlobal() && "Extern symbol not handled yet") ? static_cast<void> (0) : __assert_fail ("MO.isGlobal() && \"Extern symbol not handled yet\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 196, __PRETTY_FUNCTION__)); |
197 | StubSym = |
198 | MachineModuleInfoImpl:: |
199 | StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()), |
200 | !MO.getGlobal()->hasInternalLinkage()); |
201 | } |
202 | break; |
203 | } |
204 | case X86II::MO_DARWIN_STUB: { |
205 | MachineModuleInfoImpl::StubValueTy &StubSym = |
206 | getMachOMMI().getFnStubEntry(Sym); |
207 | if (StubSym.getPointer()) |
208 | return Sym; |
209 | |
210 | if (MO.isGlobal()) { |
211 | StubSym = |
212 | MachineModuleInfoImpl:: |
213 | StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()), |
214 | !MO.getGlobal()->hasInternalLinkage()); |
215 | } else { |
216 | StubSym = |
217 | MachineModuleInfoImpl:: |
218 | StubValueTy(Ctx.getOrCreateSymbol(OrigName), false); |
219 | } |
220 | break; |
221 | } |
222 | } |
223 | |
224 | return Sym; |
225 | } |
226 | |
227 | MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO, |
228 | MCSymbol *Sym) const { |
229 | // FIXME: We would like an efficient form for this, so we don't have to do a |
230 | // lot of extra uniquing. |
231 | const MCExpr *Expr = nullptr; |
232 | MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None; |
233 | |
234 | switch (MO.getTargetFlags()) { |
235 | default: llvm_unreachable("Unknown target flag on GV operand")::llvm::llvm_unreachable_internal("Unknown target flag on GV operand" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 235); |
236 | case X86II::MO_NO_FLAG: // No flag. |
237 | // These affect the name of the symbol, not any suffix. |
238 | case X86II::MO_DARWIN_NONLAZY: |
239 | case X86II::MO_DLLIMPORT: |
240 | case X86II::MO_DARWIN_STUB: |
241 | break; |
242 | |
243 | case X86II::MO_TLVP: RefKind = MCSymbolRefExpr::VK_TLVP; break; |
244 | case X86II::MO_TLVP_PIC_BASE: |
245 | Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx); |
246 | // Subtract the pic base. |
247 | Expr = MCBinaryExpr::createSub(Expr, |
248 | MCSymbolRefExpr::create(MF.getPICBaseSymbol(), |
249 | Ctx), |
250 | Ctx); |
251 | break; |
252 | case X86II::MO_SECREL: RefKind = MCSymbolRefExpr::VK_SECREL; break; |
253 | case X86II::MO_TLSGD: RefKind = MCSymbolRefExpr::VK_TLSGD; break; |
254 | case X86II::MO_TLSLD: RefKind = MCSymbolRefExpr::VK_TLSLD; break; |
255 | case X86II::MO_TLSLDM: RefKind = MCSymbolRefExpr::VK_TLSLDM; break; |
256 | case X86II::MO_GOTTPOFF: RefKind = MCSymbolRefExpr::VK_GOTTPOFF; break; |
257 | case X86II::MO_INDNTPOFF: RefKind = MCSymbolRefExpr::VK_INDNTPOFF; break; |
258 | case X86II::MO_TPOFF: RefKind = MCSymbolRefExpr::VK_TPOFF; break; |
259 | case X86II::MO_DTPOFF: RefKind = MCSymbolRefExpr::VK_DTPOFF; break; |
260 | case X86II::MO_NTPOFF: RefKind = MCSymbolRefExpr::VK_NTPOFF; break; |
261 | case X86II::MO_GOTNTPOFF: RefKind = MCSymbolRefExpr::VK_GOTNTPOFF; break; |
262 | case X86II::MO_GOTPCREL: RefKind = MCSymbolRefExpr::VK_GOTPCREL; break; |
263 | case X86II::MO_GOT: RefKind = MCSymbolRefExpr::VK_GOT; break; |
264 | case X86II::MO_GOTOFF: RefKind = MCSymbolRefExpr::VK_GOTOFF; break; |
265 | case X86II::MO_PLT: RefKind = MCSymbolRefExpr::VK_PLT; break; |
266 | case X86II::MO_PIC_BASE_OFFSET: |
267 | case X86II::MO_DARWIN_NONLAZY_PIC_BASE: |
268 | case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: |
269 | Expr = MCSymbolRefExpr::create(Sym, Ctx); |
270 | // Subtract the pic base. |
271 | Expr = MCBinaryExpr::createSub(Expr, |
272 | MCSymbolRefExpr::create(MF.getPICBaseSymbol(), Ctx), |
273 | Ctx); |
274 | if (MO.isJTI()) { |
275 | assert(MAI.doesSetDirectiveSuppressesReloc())((MAI.doesSetDirectiveSuppressesReloc()) ? static_cast<void > (0) : __assert_fail ("MAI.doesSetDirectiveSuppressesReloc()" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 275, __PRETTY_FUNCTION__)); |
276 | // If .set directive is supported, use it to reduce the number of |
277 | // relocations the assembler will generate for differences between |
278 | // local labels. This is only safe when the symbols are in the same |
279 | // section so we are restricting it to jumptable references. |
280 | MCSymbol *Label = Ctx.createTempSymbol(); |
281 | AsmPrinter.OutStreamer->EmitAssignment(Label, Expr); |
282 | Expr = MCSymbolRefExpr::create(Label, Ctx); |
283 | } |
284 | break; |
285 | } |
286 | |
287 | if (!Expr) |
288 | Expr = MCSymbolRefExpr::create(Sym, RefKind, Ctx); |
289 | |
290 | if (!MO.isJTI() && !MO.isMBB() && MO.getOffset()) |
291 | Expr = MCBinaryExpr::createAdd(Expr, |
292 | MCConstantExpr::create(MO.getOffset(), Ctx), |
293 | Ctx); |
294 | return MCOperand::createExpr(Expr); |
295 | } |
296 | |
297 | |
298 | /// \brief Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instruction with |
299 | /// a short fixed-register form. |
300 | static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) { |
301 | unsigned ImmOp = Inst.getNumOperands() - 1; |
302 | assert(Inst.getOperand(0).isReg() &&((Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp ).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst .getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 306, __PRETTY_FUNCTION__)) |
303 | (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) &&((Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp ).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst .getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 306, __PRETTY_FUNCTION__)) |
304 | ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() &&((Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp ).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst .getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 306, __PRETTY_FUNCTION__)) |
305 | Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) ||((Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp ).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst .getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 306, __PRETTY_FUNCTION__)) |
306 | Inst.getNumOperands() == 2) && "Unexpected instruction!")((Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp ).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst .getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 306, __PRETTY_FUNCTION__)); |
307 | |
308 | // Check whether the destination register can be fixed. |
309 | unsigned Reg = Inst.getOperand(0).getReg(); |
310 | if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX) |
311 | return; |
312 | |
313 | // If so, rewrite the instruction. |
314 | MCOperand Saved = Inst.getOperand(ImmOp); |
315 | Inst = MCInst(); |
316 | Inst.setOpcode(Opcode); |
317 | Inst.addOperand(Saved); |
318 | } |
319 | |
320 | /// \brief If a movsx instruction has a shorter encoding for the used register |
321 | /// simplify the instruction to use it instead. |
322 | static void SimplifyMOVSX(MCInst &Inst) { |
323 | unsigned NewOpcode = 0; |
324 | unsigned Op0 = Inst.getOperand(0).getReg(), Op1 = Inst.getOperand(1).getReg(); |
325 | switch (Inst.getOpcode()) { |
326 | default: |
327 | llvm_unreachable("Unexpected instruction!")::llvm::llvm_unreachable_internal("Unexpected instruction!", "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 327); |
328 | case X86::MOVSX16rr8: // movsbw %al, %ax --> cbtw |
329 | if (Op0 == X86::AX && Op1 == X86::AL) |
330 | NewOpcode = X86::CBW; |
331 | break; |
332 | case X86::MOVSX32rr16: // movswl %ax, %eax --> cwtl |
333 | if (Op0 == X86::EAX && Op1 == X86::AX) |
334 | NewOpcode = X86::CWDE; |
335 | break; |
336 | case X86::MOVSX64rr32: // movslq %eax, %rax --> cltq |
337 | if (Op0 == X86::RAX && Op1 == X86::EAX) |
338 | NewOpcode = X86::CDQE; |
339 | break; |
340 | } |
341 | |
342 | if (NewOpcode != 0) { |
343 | Inst = MCInst(); |
344 | Inst.setOpcode(NewOpcode); |
345 | } |
346 | } |
347 | |
348 | /// \brief Simplify things like MOV32rm to MOV32o32a. |
349 | static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst, |
350 | unsigned Opcode) { |
351 | // Don't make these simplifications in 64-bit mode; other assemblers don't |
352 | // perform them because they make the code larger. |
353 | if (Printer.getSubtarget().is64Bit()) |
354 | return; |
355 | |
356 | bool IsStore = Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg(); |
357 | unsigned AddrBase = IsStore; |
358 | unsigned RegOp = IsStore ? 0 : 5; |
359 | unsigned AddrOp = AddrBase + 3; |
360 | assert(Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() &&((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 367, __PRETTY_FUNCTION__)) |
361 | Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() &&((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 367, __PRETTY_FUNCTION__)) |
362 | Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() &&((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 367, __PRETTY_FUNCTION__)) |
363 | Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() &&((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 367, __PRETTY_FUNCTION__)) |
364 | Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() &&((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 367, __PRETTY_FUNCTION__)) |
365 | (Inst.getOperand(AddrOp).isExpr() ||((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 367, __PRETTY_FUNCTION__)) |
366 | Inst.getOperand(AddrOp).isImm()) &&((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 367, __PRETTY_FUNCTION__)) |
367 | "Unexpected instruction!")((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 367, __PRETTY_FUNCTION__)); |
368 | |
369 | // Check whether the destination register can be fixed. |
370 | unsigned Reg = Inst.getOperand(RegOp).getReg(); |
371 | if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX) |
372 | return; |
373 | |
374 | // Check whether this is an absolute address. |
375 | // FIXME: We know TLVP symbol refs aren't, but there should be a better way |
376 | // to do this here. |
377 | bool Absolute = true; |
378 | if (Inst.getOperand(AddrOp).isExpr()) { |
379 | const MCExpr *MCE = Inst.getOperand(AddrOp).getExpr(); |
380 | if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE)) |
381 | if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP) |
382 | Absolute = false; |
383 | } |
384 | |
385 | if (Absolute && |
386 | (Inst.getOperand(AddrBase + X86::AddrBaseReg).getReg() != 0 || |
387 | Inst.getOperand(AddrBase + X86::AddrScaleAmt).getImm() != 1 || |
388 | Inst.getOperand(AddrBase + X86::AddrIndexReg).getReg() != 0)) |
389 | return; |
390 | |
391 | // If so, rewrite the instruction. |
392 | MCOperand Saved = Inst.getOperand(AddrOp); |
393 | MCOperand Seg = Inst.getOperand(AddrBase + X86::AddrSegmentReg); |
394 | Inst = MCInst(); |
395 | Inst.setOpcode(Opcode); |
396 | Inst.addOperand(Saved); |
397 | Inst.addOperand(Seg); |
398 | } |
399 | |
400 | static unsigned getRetOpcode(const X86Subtarget &Subtarget) { |
401 | return Subtarget.is64Bit() ? X86::RETQ : X86::RETL; |
402 | } |
403 | |
404 | Optional<MCOperand> |
405 | X86MCInstLower::LowerMachineOperand(const MachineInstr *MI, |
406 | const MachineOperand &MO) const { |
407 | switch (MO.getType()) { |
408 | default: |
409 | MI->dump(); |
410 | llvm_unreachable("unknown operand type")::llvm::llvm_unreachable_internal("unknown operand type", "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 410); |
411 | case MachineOperand::MO_Register: |
412 | // Ignore all implicit register operands. |
413 | if (MO.isImplicit()) |
414 | return None; |
415 | return MCOperand::createReg(MO.getReg()); |
416 | case MachineOperand::MO_Immediate: |
417 | return MCOperand::createImm(MO.getImm()); |
418 | case MachineOperand::MO_MachineBasicBlock: |
419 | case MachineOperand::MO_GlobalAddress: |
420 | case MachineOperand::MO_ExternalSymbol: |
421 | return LowerSymbolOperand(MO, GetSymbolFromOperand(MO)); |
422 | case MachineOperand::MO_MCSymbol: |
423 | return LowerSymbolOperand(MO, MO.getMCSymbol()); |
424 | case MachineOperand::MO_JumpTableIndex: |
425 | return LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex())); |
426 | case MachineOperand::MO_ConstantPoolIndex: |
427 | return LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex())); |
428 | case MachineOperand::MO_BlockAddress: |
429 | return LowerSymbolOperand( |
430 | MO, AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress())); |
431 | case MachineOperand::MO_RegisterMask: |
432 | // Ignore call clobbers. |
433 | return None; |
434 | } |
435 | } |
436 | |
/// Lower MI into OutMI: copy the opcode, lower every MachineOperand that maps
/// to an MCOperand, then rewrite pseudo and suboptimal opcodes into their
/// final MC forms (shorter encodings, REV forms, plain RET/JMP/MOV, etc.).
void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
  OutMI.setOpcode(MI->getOpcode());

  // Operands that lower to nothing (e.g. register masks) are simply dropped.
  for (const MachineOperand &MO : MI->operands())
    if (auto MaybeMCOp = LowerMachineOperand(MI, MO))
      OutMI.addOperand(MaybeMCOp.getValue());

  // Handle a few special cases to eliminate operand modifiers.
  // Several cases below rewrite the opcode and "goto ReSimplify" so the new
  // opcode is run through this same switch again.
ReSimplify:
  switch (OutMI.getOpcode()) {
  case X86::LEA64_32r:
  case X86::LEA64r:
  case X86::LEA16r:
  case X86::LEA32r:
    // LEA should have a segment register, but it must be empty.
    assert(OutMI.getNumOperands() == 1+X86::AddrNumOperands &&((OutMI.getNumOperands() == 1+X86::AddrNumOperands && "Unexpected # of LEA operands") ? static_cast<void> (0 ) : __assert_fail ("OutMI.getNumOperands() == 1+X86::AddrNumOperands && \"Unexpected # of LEA operands\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 453, __PRETTY_FUNCTION__))
           "Unexpected # of LEA operands")((OutMI.getNumOperands() == 1+X86::AddrNumOperands && "Unexpected # of LEA operands") ? static_cast<void> (0 ) : __assert_fail ("OutMI.getNumOperands() == 1+X86::AddrNumOperands && \"Unexpected # of LEA operands\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 453, __PRETTY_FUNCTION__));
    assert(OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 &&((OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 && "LEA has segment specified!") ? static_cast<void> (0) : __assert_fail ("OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 && \"LEA has segment specified!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 455, __PRETTY_FUNCTION__))
           "LEA has segment specified!")((OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 && "LEA has segment specified!") ? static_cast<void> (0) : __assert_fail ("OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 && \"LEA has segment specified!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 455, __PRETTY_FUNCTION__));
    break;

  // Commute operands to get a smaller encoding by using VEX.R instead of VEX.B
  // if one of the registers is extended, but other isn't.
  case X86::VMOVZPQILo2PQIrr:
  case X86::VMOVAPDrr:
  case X86::VMOVAPDYrr:
  case X86::VMOVAPSrr:
  case X86::VMOVAPSYrr:
  case X86::VMOVDQArr:
  case X86::VMOVDQAYrr:
  case X86::VMOVDQUrr:
  case X86::VMOVDQUYrr:
  case X86::VMOVUPDrr:
  case X86::VMOVUPDYrr:
  case X86::VMOVUPSrr:
  case X86::VMOVUPSYrr: {
    // Only flip to the _REV form when the destination is a low register and
    // the source is an extended one (operand order swap saves the VEX.B bit).
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(1).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 477);
      case X86::VMOVZPQILo2PQIrr: NewOpc = X86::VMOVPQI2QIrr; break;
      case X86::VMOVAPDrr: NewOpc = X86::VMOVAPDrr_REV; break;
      case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV; break;
      case X86::VMOVAPSrr: NewOpc = X86::VMOVAPSrr_REV; break;
      case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV; break;
      case X86::VMOVDQArr: NewOpc = X86::VMOVDQArr_REV; break;
      case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV; break;
      case X86::VMOVDQUrr: NewOpc = X86::VMOVDQUrr_REV; break;
      case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV; break;
      case X86::VMOVUPDrr: NewOpc = X86::VMOVUPDrr_REV; break;
      case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV; break;
      case X86::VMOVUPSrr: NewOpc = X86::VMOVUPSrr_REV; break;
      case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }
  case X86::VMOVSDrr:
  case X86::VMOVSSrr: {
    // Same VEX.R-for-VEX.B trick as above, but these check operand 2 (the
    // second source) rather than operand 1.
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(2).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 502);
      case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV; break;
      case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }

  // TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions have register
  // inputs modeled as normal uses instead of implicit uses.  As such, truncate
  // off all but the first operand (the callee).  FIXME: Change isel.
  case X86::TAILJMPr64:
  case X86::TAILJMPr64_REX:
  case X86::CALL64r:
  case X86::CALL64pcrel32: {
    unsigned Opcode = OutMI.getOpcode();
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    // EH_RETURN's operands were only meaningful pre-MC; emit a bare RET.
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
    break;
  }

  case X86::CLEANUPRET: {
    // Replace CATCHRET with the appropriate RET.
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
    break;
  }

  case X86::CATCHRET: {
    // Replace CATCHRET with the appropriate RET.
    const X86Subtarget &Subtarget = AsmPrinter.getSubtarget();
    unsigned ReturnReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(Subtarget));
    OutMI.addOperand(MCOperand::createReg(ReturnReg));
    break;
  }

  // TAILJMPd, TAILJMPd64 - Lower to the correct jump instructions.
  case X86::TAILJMPr:
  case X86::TAILJMPd:
  case X86::TAILJMPd64: {
    unsigned Opcode;
    switch (OutMI.getOpcode()) {
    default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 556);
    case X86::TAILJMPr: Opcode = X86::JMP32r; break;
    case X86::TAILJMPd:
    case X86::TAILJMPd64: Opcode = X86::JMP_1; break;
    }

    // Like the calls above, keep only the jump-target operand.
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::DEC16r:
  case X86::DEC32r:
  case X86::INC16r:
  case X86::INC32r:
    // If we aren't in 64-bit mode we can use the 1-byte inc/dec instructions.
    if (!AsmPrinter.getSubtarget().is64Bit()) {
      unsigned Opcode;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 577);
      case X86::DEC16r: Opcode = X86::DEC16r_alt; break;
      case X86::DEC32r: Opcode = X86::DEC32r_alt; break;
      case X86::INC16r: Opcode = X86::INC16r_alt; break;
      case X86::INC32r: Opcode = X86::INC32r_alt; break;
      }
      OutMI.setOpcode(Opcode);
    }
    break;

  // These are pseudo-ops for OR to help with the OR->ADD transformation.  We do
  // this with an ugly goto in case the resultant OR uses EAX and needs the
  // short form.
  case X86::ADD16rr_DB:   OutMI.setOpcode(X86::OR16rr); goto ReSimplify;
  case X86::ADD32rr_DB:   OutMI.setOpcode(X86::OR32rr); goto ReSimplify;
  case X86::ADD64rr_DB:   OutMI.setOpcode(X86::OR64rr); goto ReSimplify;
  case X86::ADD16ri_DB:   OutMI.setOpcode(X86::OR16ri); goto ReSimplify;
  case X86::ADD32ri_DB:   OutMI.setOpcode(X86::OR32ri); goto ReSimplify;
  case X86::ADD64ri32_DB: OutMI.setOpcode(X86::OR64ri32); goto ReSimplify;
  case X86::ADD16ri8_DB:  OutMI.setOpcode(X86::OR16ri8); goto ReSimplify;
  case X86::ADD32ri8_DB:  OutMI.setOpcode(X86::OR32ri8); goto ReSimplify;
  case X86::ADD64ri8_DB:  OutMI.setOpcode(X86::OR64ri8); goto ReSimplify;

  // Atomic load and store require a separate pseudo-inst because Acquire
  // implies mayStore and Release implies mayLoad; fix these to regular MOV
  // instructions here
  case X86::ACQUIRE_MOV8rm:    OutMI.setOpcode(X86::MOV8rm); goto ReSimplify;
  case X86::ACQUIRE_MOV16rm:   OutMI.setOpcode(X86::MOV16rm); goto ReSimplify;
  case X86::ACQUIRE_MOV32rm:   OutMI.setOpcode(X86::MOV32rm); goto ReSimplify;
  case X86::ACQUIRE_MOV64rm:   OutMI.setOpcode(X86::MOV64rm); goto ReSimplify;
  case X86::RELEASE_MOV8mr:    OutMI.setOpcode(X86::MOV8mr); goto ReSimplify;
  case X86::RELEASE_MOV16mr:   OutMI.setOpcode(X86::MOV16mr); goto ReSimplify;
  case X86::RELEASE_MOV32mr:   OutMI.setOpcode(X86::MOV32mr); goto ReSimplify;
  case X86::RELEASE_MOV64mr:   OutMI.setOpcode(X86::MOV64mr); goto ReSimplify;
  case X86::RELEASE_MOV8mi:    OutMI.setOpcode(X86::MOV8mi); goto ReSimplify;
  case X86::RELEASE_MOV16mi:   OutMI.setOpcode(X86::MOV16mi); goto ReSimplify;
  case X86::RELEASE_MOV32mi:   OutMI.setOpcode(X86::MOV32mi); goto ReSimplify;
  case X86::RELEASE_MOV64mi32: OutMI.setOpcode(X86::MOV64mi32); goto ReSimplify;
  case X86::RELEASE_ADD8mi:    OutMI.setOpcode(X86::ADD8mi); goto ReSimplify;
  case X86::RELEASE_ADD8mr:    OutMI.setOpcode(X86::ADD8mr); goto ReSimplify;
  case X86::RELEASE_ADD32mi:   OutMI.setOpcode(X86::ADD32mi); goto ReSimplify;
  case X86::RELEASE_ADD32mr:   OutMI.setOpcode(X86::ADD32mr); goto ReSimplify;
  case X86::RELEASE_ADD64mi32: OutMI.setOpcode(X86::ADD64mi32); goto ReSimplify;
  case X86::RELEASE_ADD64mr:   OutMI.setOpcode(X86::ADD64mr); goto ReSimplify;
  case X86::RELEASE_AND8mi:    OutMI.setOpcode(X86::AND8mi); goto ReSimplify;
  case X86::RELEASE_AND8mr:    OutMI.setOpcode(X86::AND8mr); goto ReSimplify;
  case X86::RELEASE_AND32mi:   OutMI.setOpcode(X86::AND32mi); goto ReSimplify;
  case X86::RELEASE_AND32mr:   OutMI.setOpcode(X86::AND32mr); goto ReSimplify;
  case X86::RELEASE_AND64mi32: OutMI.setOpcode(X86::AND64mi32); goto ReSimplify;
  case X86::RELEASE_AND64mr:   OutMI.setOpcode(X86::AND64mr); goto ReSimplify;
  case X86::RELEASE_OR8mi:     OutMI.setOpcode(X86::OR8mi); goto ReSimplify;
  case X86::RELEASE_OR8mr:     OutMI.setOpcode(X86::OR8mr); goto ReSimplify;
  case X86::RELEASE_OR32mi:    OutMI.setOpcode(X86::OR32mi); goto ReSimplify;
  case X86::RELEASE_OR32mr:    OutMI.setOpcode(X86::OR32mr); goto ReSimplify;
  case X86::RELEASE_OR64mi32:  OutMI.setOpcode(X86::OR64mi32); goto ReSimplify;
  case X86::RELEASE_OR64mr:    OutMI.setOpcode(X86::OR64mr); goto ReSimplify;
  case X86::RELEASE_XOR8mi:    OutMI.setOpcode(X86::XOR8mi); goto ReSimplify;
  case X86::RELEASE_XOR8mr:    OutMI.setOpcode(X86::XOR8mr); goto ReSimplify;
  case X86::RELEASE_XOR32mi:   OutMI.setOpcode(X86::XOR32mi); goto ReSimplify;
  case X86::RELEASE_XOR32mr:   OutMI.setOpcode(X86::XOR32mr); goto ReSimplify;
  case X86::RELEASE_XOR64mi32: OutMI.setOpcode(X86::XOR64mi32); goto ReSimplify;
  case X86::RELEASE_XOR64mr:   OutMI.setOpcode(X86::XOR64mr); goto ReSimplify;
  case X86::RELEASE_INC8m:     OutMI.setOpcode(X86::INC8m); goto ReSimplify;
  case X86::RELEASE_INC16m:    OutMI.setOpcode(X86::INC16m); goto ReSimplify;
  case X86::RELEASE_INC32m:    OutMI.setOpcode(X86::INC32m); goto ReSimplify;
  case X86::RELEASE_INC64m:    OutMI.setOpcode(X86::INC64m); goto ReSimplify;
  case X86::RELEASE_DEC8m:     OutMI.setOpcode(X86::DEC8m); goto ReSimplify;
  case X86::RELEASE_DEC16m:    OutMI.setOpcode(X86::DEC16m); goto ReSimplify;
  case X86::RELEASE_DEC32m:    OutMI.setOpcode(X86::DEC32m); goto ReSimplify;
  case X86::RELEASE_DEC64m:    OutMI.setOpcode(X86::DEC64m); goto ReSimplify;

  // We don't currently select the correct instruction form for instructions
  // which have a short %eax, etc. form. Handle this by custom lowering, for
  // now.
  //
  // Note, we are currently not handling the following instructions:
  // MOV64ao8, MOV64o8a
  // XCHG16ar, XCHG32ar, XCHG64ar
  case X86::MOV8mr_NOREX:
  case X86::MOV8mr:  SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8o32a); break;
  case X86::MOV8rm_NOREX:
  case X86::MOV8rm:  SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8ao32); break;
  case X86::MOV16mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16o32a); break;
  case X86::MOV16rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16ao32); break;
  case X86::MOV32mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32o32a); break;
  case X86::MOV32rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32ao32); break;

  case X86::ADC8ri: SimplifyShortImmForm(OutMI, X86::ADC8i8); break;
  case X86::ADC16ri: SimplifyShortImmForm(OutMI, X86::ADC16i16); break;
  case X86::ADC32ri: SimplifyShortImmForm(OutMI, X86::ADC32i32); break;
  case X86::ADC64ri32: SimplifyShortImmForm(OutMI, X86::ADC64i32); break;
  case X86::ADD8ri: SimplifyShortImmForm(OutMI, X86::ADD8i8); break;
  case X86::ADD16ri: SimplifyShortImmForm(OutMI, X86::ADD16i16); break;
  case X86::ADD32ri: SimplifyShortImmForm(OutMI, X86::ADD32i32); break;
  case X86::ADD64ri32: SimplifyShortImmForm(OutMI, X86::ADD64i32); break;
  case X86::AND8ri: SimplifyShortImmForm(OutMI, X86::AND8i8); break;
  case X86::AND16ri: SimplifyShortImmForm(OutMI, X86::AND16i16); break;
  case X86::AND32ri: SimplifyShortImmForm(OutMI, X86::AND32i32); break;
  case X86::AND64ri32: SimplifyShortImmForm(OutMI, X86::AND64i32); break;
  case X86::CMP8ri: SimplifyShortImmForm(OutMI, X86::CMP8i8); break;
  case X86::CMP16ri: SimplifyShortImmForm(OutMI, X86::CMP16i16); break;
  case X86::CMP32ri: SimplifyShortImmForm(OutMI, X86::CMP32i32); break;
  case X86::CMP64ri32: SimplifyShortImmForm(OutMI, X86::CMP64i32); break;
  case X86::OR8ri: SimplifyShortImmForm(OutMI, X86::OR8i8); break;
  case X86::OR16ri: SimplifyShortImmForm(OutMI, X86::OR16i16); break;
  case X86::OR32ri: SimplifyShortImmForm(OutMI, X86::OR32i32); break;
  case X86::OR64ri32: SimplifyShortImmForm(OutMI, X86::OR64i32); break;
  case X86::SBB8ri: SimplifyShortImmForm(OutMI, X86::SBB8i8); break;
  case X86::SBB16ri: SimplifyShortImmForm(OutMI, X86::SBB16i16); break;
  case X86::SBB32ri: SimplifyShortImmForm(OutMI, X86::SBB32i32); break;
  case X86::SBB64ri32: SimplifyShortImmForm(OutMI, X86::SBB64i32); break;
  case X86::SUB8ri: SimplifyShortImmForm(OutMI, X86::SUB8i8); break;
  case X86::SUB16ri: SimplifyShortImmForm(OutMI, X86::SUB16i16); break;
  case X86::SUB32ri: SimplifyShortImmForm(OutMI, X86::SUB32i32); break;
  case X86::SUB64ri32: SimplifyShortImmForm(OutMI, X86::SUB64i32); break;
  case X86::TEST8ri: SimplifyShortImmForm(OutMI, X86::TEST8i8); break;
  case X86::TEST16ri: SimplifyShortImmForm(OutMI, X86::TEST16i16); break;
  case X86::TEST32ri: SimplifyShortImmForm(OutMI, X86::TEST32i32); break;
  case X86::TEST64ri32: SimplifyShortImmForm(OutMI, X86::TEST64i32); break;
  case X86::XOR8ri: SimplifyShortImmForm(OutMI, X86::XOR8i8); break;
  case X86::XOR16ri: SimplifyShortImmForm(OutMI, X86::XOR16i16); break;
  case X86::XOR32ri: SimplifyShortImmForm(OutMI, X86::XOR32i32); break;
  case X86::XOR64ri32: SimplifyShortImmForm(OutMI, X86::XOR64i32); break;

  // Try to shrink some forms of movsx.
  case X86::MOVSX16rr8:
  case X86::MOVSX32rr16:
  case X86::MOVSX64rr32:
    SimplifyMOVSX(OutMI);
    break;
  }
}
709 | |
/// Lower the TLS_addr32/64 and TLS_base_addr32/64 pseudos into the LEA + call
/// to __tls_get_addr sequence.  NOTE(review): the DATA16/REX prefix padding
/// emitted around the 64-bit TLSGD form appears intended to keep the sequence
/// at the fixed size linkers expect for TLS relaxation — confirm against the
/// x86-64 psABI before altering it.
void X86AsmPrinter::LowerTlsAddr(X86MCInstLower &MCInstLowering,
                                 const MachineInstr &MI) {

  bool is64Bits = MI.getOpcode() == X86::TLS_addr64 ||
                  MI.getOpcode() == X86::TLS_base_addr64;

  // Only the 64-bit general-dynamic form gets the prefix padding.
  bool needsPadding = MI.getOpcode() == X86::TLS_addr64;

  MCContext &context = OutStreamer->getContext();

  if (needsPadding)
    EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));

  // Pick the relocation variant matching the TLS model of this pseudo.
  MCSymbolRefExpr::VariantKind SRVK;
  switch (MI.getOpcode()) {
  case X86::TLS_addr32:
  case X86::TLS_addr64:
    SRVK = MCSymbolRefExpr::VK_TLSGD;
    break;
  case X86::TLS_base_addr32:
    SRVK = MCSymbolRefExpr::VK_TLSLDM;
    break;
  case X86::TLS_base_addr64:
    SRVK = MCSymbolRefExpr::VK_TLSLD;
    break;
  default:
    llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 736);
  }

  MCSymbol *sym = MCInstLowering.GetSymbolFromOperand(MI.getOperand(3));
  const MCSymbolRefExpr *symRef = MCSymbolRefExpr::create(sym, SRVK, context);

  // Build the address-materializing LEA.  The operand order is
  // dest, base, scale, index, disp, segment; each branch differs only in
  // which registers carry the base/index.
  MCInst LEA;
  if (is64Bits) {
    LEA.setOpcode(X86::LEA64r);
    LEA.addOperand(MCOperand::createReg(X86::RDI)); // dest
    LEA.addOperand(MCOperand::createReg(X86::RIP)); // base
    LEA.addOperand(MCOperand::createImm(1));        // scale
    LEA.addOperand(MCOperand::createReg(0));        // index
    LEA.addOperand(MCOperand::createExpr(symRef));  // disp
    LEA.addOperand(MCOperand::createReg(0));        // seg
  } else if (SRVK == MCSymbolRefExpr::VK_TLSLDM) {
    LEA.setOpcode(X86::LEA32r);
    LEA.addOperand(MCOperand::createReg(X86::EAX)); // dest
    LEA.addOperand(MCOperand::createReg(X86::EBX)); // base
    LEA.addOperand(MCOperand::createImm(1));        // scale
    LEA.addOperand(MCOperand::createReg(0));        // index
    LEA.addOperand(MCOperand::createExpr(symRef));  // disp
    LEA.addOperand(MCOperand::createReg(0));        // seg
  } else {
    LEA.setOpcode(X86::LEA32r);
    LEA.addOperand(MCOperand::createReg(X86::EAX)); // dest
    LEA.addOperand(MCOperand::createReg(0));        // base
    LEA.addOperand(MCOperand::createImm(1));        // scale
    LEA.addOperand(MCOperand::createReg(X86::EBX)); // index
    LEA.addOperand(MCOperand::createExpr(symRef));  // disp
    LEA.addOperand(MCOperand::createReg(0));        // seg
  }
  EmitAndCountInstruction(LEA);

  if (needsPadding) {
    EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
    EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
    EmitAndCountInstruction(MCInstBuilder(X86::REX64_PREFIX));
  }

  // 32-bit targets use the triple-underscore Darwin-style name here;
  // 64-bit uses the standard __tls_get_addr.
  StringRef name = is64Bits ? "__tls_get_addr" : "___tls_get_addr";
  MCSymbol *tlsGetAddr = context.getOrCreateSymbol(name);
  const MCSymbolRefExpr *tlsRef =
    MCSymbolRefExpr::create(tlsGetAddr,
                            MCSymbolRefExpr::VK_PLT,
                            context);

  EmitAndCountInstruction(MCInstBuilder(is64Bits ? X86::CALL64pcrel32
                                                 : X86::CALLpcrel32)
                            .addExpr(tlsRef));
}
787 | |
788 | /// \brief Emit the optimal amount of multi-byte nops on X86. |
789 | static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit, const MCSubtargetInfo &STI) { |
790 | // This works only for 64bit. For 32bit we have to do additional checking if |
791 | // the CPU supports multi-byte nops. |
792 | assert(Is64Bit && "EmitNops only supports X86-64")((Is64Bit && "EmitNops only supports X86-64") ? static_cast <void> (0) : __assert_fail ("Is64Bit && \"EmitNops only supports X86-64\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 792, __PRETTY_FUNCTION__)); |
793 | while (NumBytes) { |
794 | unsigned Opc, BaseReg, ScaleVal, IndexReg, Displacement, SegmentReg; |
795 | Opc = IndexReg = Displacement = SegmentReg = 0; |
Value stored to 'Opc' is never read | |
796 | BaseReg = X86::RAX; ScaleVal = 1; |
797 | switch (NumBytes) { |
798 | case 0: llvm_unreachable("Zero nops?")::llvm::llvm_unreachable_internal("Zero nops?", "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 798); break; |
799 | case 1: NumBytes -= 1; Opc = X86::NOOP; break; |
800 | case 2: NumBytes -= 2; Opc = X86::XCHG16ar; break; |
801 | case 3: NumBytes -= 3; Opc = X86::NOOPL; break; |
802 | case 4: NumBytes -= 4; Opc = X86::NOOPL; Displacement = 8; break; |
803 | case 5: NumBytes -= 5; Opc = X86::NOOPL; Displacement = 8; |
804 | IndexReg = X86::RAX; break; |
805 | case 6: NumBytes -= 6; Opc = X86::NOOPW; Displacement = 8; |
806 | IndexReg = X86::RAX; break; |
807 | case 7: NumBytes -= 7; Opc = X86::NOOPL; Displacement = 512; break; |
808 | case 8: NumBytes -= 8; Opc = X86::NOOPL; Displacement = 512; |
809 | IndexReg = X86::RAX; break; |
810 | case 9: NumBytes -= 9; Opc = X86::NOOPW; Displacement = 512; |
811 | IndexReg = X86::RAX; break; |
812 | default: NumBytes -= 10; Opc = X86::NOOPW; Displacement = 512; |
813 | IndexReg = X86::RAX; SegmentReg = X86::CS; break; |
814 | } |
815 | |
816 | unsigned NumPrefixes = std::min(NumBytes, 5U); |
817 | NumBytes -= NumPrefixes; |
818 | for (unsigned i = 0; i != NumPrefixes; ++i) |
819 | OS.EmitBytes("\x66"); |
820 | |
821 | switch (Opc) { |
822 | default: llvm_unreachable("Unexpected opcode")::llvm::llvm_unreachable_internal("Unexpected opcode", "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 822); break; |
823 | case X86::NOOP: |
824 | OS.EmitInstruction(MCInstBuilder(Opc), STI); |
825 | break; |
826 | case X86::XCHG16ar: |
827 | OS.EmitInstruction(MCInstBuilder(Opc).addReg(X86::AX), STI); |
828 | break; |
829 | case X86::NOOPL: |
830 | case X86::NOOPW: |
831 | OS.EmitInstruction(MCInstBuilder(Opc).addReg(BaseReg) |
832 | .addImm(ScaleVal).addReg(IndexReg) |
833 | .addImm(Displacement).addReg(SegmentReg), STI); |
834 | break; |
835 | } |
836 | } // while (NumBytes) |
837 | } |
838 | |
839 | void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI, |
840 | X86MCInstLower &MCIL) { |
841 | assert(Subtarget->is64Bit() && "Statepoint currently only supports X86-64")((Subtarget->is64Bit() && "Statepoint currently only supports X86-64" ) ? static_cast<void> (0) : __assert_fail ("Subtarget->is64Bit() && \"Statepoint currently only supports X86-64\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 841, __PRETTY_FUNCTION__)); |
842 | |
843 | StatepointOpers SOpers(&MI); |
844 | if (unsigned PatchBytes = SOpers.getNumPatchBytes()) { |
845 | EmitNops(*OutStreamer, PatchBytes, Subtarget->is64Bit(), |
846 | getSubtargetInfo()); |
847 | } else { |
848 | // Lower call target and choose correct opcode |
849 | const MachineOperand &CallTarget = SOpers.getCallTarget(); |
850 | MCOperand CallTargetMCOp; |
851 | unsigned CallOpcode; |
852 | switch (CallTarget.getType()) { |
853 | case MachineOperand::MO_GlobalAddress: |
854 | case MachineOperand::MO_ExternalSymbol: |
855 | CallTargetMCOp = MCIL.LowerSymbolOperand( |
856 | CallTarget, MCIL.GetSymbolFromOperand(CallTarget)); |
857 | CallOpcode = X86::CALL64pcrel32; |
858 | // Currently, we only support relative addressing with statepoints. |
859 | // Otherwise, we'll need a scratch register to hold the target |
860 | // address. You'll fail asserts during load & relocation if this |
861 | // symbol is to far away. (TODO: support non-relative addressing) |
862 | break; |
863 | case MachineOperand::MO_Immediate: |
864 | CallTargetMCOp = MCOperand::createImm(CallTarget.getImm()); |
865 | CallOpcode = X86::CALL64pcrel32; |
866 | // Currently, we only support relative addressing with statepoints. |
867 | // Otherwise, we'll need a scratch register to hold the target |
868 | // immediate. You'll fail asserts during load & relocation if this |
869 | // address is to far away. (TODO: support non-relative addressing) |
870 | break; |
871 | case MachineOperand::MO_Register: |
872 | CallTargetMCOp = MCOperand::createReg(CallTarget.getReg()); |
873 | CallOpcode = X86::CALL64r; |
874 | break; |
875 | default: |
876 | llvm_unreachable("Unsupported operand type in statepoint call target")::llvm::llvm_unreachable_internal("Unsupported operand type in statepoint call target" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 876); |
877 | break; |
878 | } |
879 | |
880 | // Emit call |
881 | MCInst CallInst; |
882 | CallInst.setOpcode(CallOpcode); |
883 | CallInst.addOperand(CallTargetMCOp); |
884 | OutStreamer->EmitInstruction(CallInst, getSubtargetInfo()); |
885 | } |
886 | |
887 | // Record our statepoint node in the same section used by STACKMAP |
888 | // and PATCHPOINT |
889 | SM.recordStatepoint(MI); |
890 | } |
891 | |
892 | void X86AsmPrinter::LowerFAULTING_LOAD_OP(const MachineInstr &MI, |
893 | X86MCInstLower &MCIL) { |
894 | // FAULTING_LOAD_OP <def>, <handler label>, <load opcode>, <load operands> |
895 | |
896 | unsigned LoadDefRegister = MI.getOperand(0).getReg(); |
897 | MCSymbol *HandlerLabel = MI.getOperand(1).getMCSymbol(); |
898 | unsigned LoadOpcode = MI.getOperand(2).getImm(); |
899 | unsigned LoadOperandsBeginIdx = 3; |
900 | |
901 | FM.recordFaultingOp(FaultMaps::FaultingLoad, HandlerLabel); |
902 | |
903 | MCInst LoadMI; |
904 | LoadMI.setOpcode(LoadOpcode); |
905 | |
906 | if (LoadDefRegister != X86::NoRegister) |
907 | LoadMI.addOperand(MCOperand::createReg(LoadDefRegister)); |
908 | |
909 | for (auto I = MI.operands_begin() + LoadOperandsBeginIdx, |
910 | E = MI.operands_end(); |
911 | I != E; ++I) |
912 | if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, *I)) |
913 | LoadMI.addOperand(MaybeOperand.getValue()); |
914 | |
915 | OutStreamer->EmitInstruction(LoadMI, getSubtargetInfo()); |
916 | } |
917 | |
918 | // Lower a stackmap of the form: |
919 | // <id>, <shadowBytes>, ... |
920 | void X86AsmPrinter::LowerSTACKMAP(const MachineInstr &MI) { |
921 | SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo()); |
922 | SM.recordStackMap(MI); |
923 | unsigned NumShadowBytes = MI.getOperand(1).getImm(); |
924 | SMShadowTracker.reset(NumShadowBytes); |
925 | } |
926 | |
// Lower a patchpoint of the form:
// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
// Emits a MOV64ri+CALL64r to the target when the target is non-null, then
// pads the total footprint out to <numBytes> with nops.
void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Patchpoint currently only supports X86-64")((Subtarget->is64Bit() && "Patchpoint currently only supports X86-64" ) ? static_cast<void> (0) : __assert_fail ("Subtarget->is64Bit() && \"Patchpoint currently only supports X86-64\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 931, __PRETTY_FUNCTION__));

  SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());

  SM.recordPatchPoint(MI);

  PatchPointOpers opers(&MI);
  unsigned ScratchIdx = opers.getNextScratchIdx();
  // Bytes consumed by the emitted MOV+CALL pair; subtracted from the padding.
  unsigned EncodedBytes = 0;
  const MachineOperand &CalleeMO =
    opers.getMetaOper(PatchPointOpers::TargetPos);

  // Check for null target. If target is non-null (i.e. is non-zero or is
  // symbolic) then emit a call.
  if (!(CalleeMO.isImm() && !CalleeMO.getImm())) {
    MCOperand CalleeMCOp;
    switch (CalleeMO.getType()) {
    default:
      /// FIXME: Add a verifier check for bad callee types.
      llvm_unreachable("Unrecognized callee operand type.")::llvm::llvm_unreachable_internal("Unrecognized callee operand type." , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 950);
    case MachineOperand::MO_Immediate:
      if (CalleeMO.getImm())
        CalleeMCOp = MCOperand::createImm(CalleeMO.getImm());
      break;
    case MachineOperand::MO_ExternalSymbol:
    case MachineOperand::MO_GlobalAddress:
      CalleeMCOp =
        MCIL.LowerSymbolOperand(CalleeMO,
                                MCIL.GetSymbolFromOperand(CalleeMO));
      break;
    }

    // Emit MOV to materialize the target address and the CALL to target.
    // This is encoded with 12-13 bytes, depending on which register is used.
    unsigned ScratchReg = MI.getOperand(ScratchIdx).getReg();
    if (X86II::isX86_64ExtendedReg(ScratchReg))
      EncodedBytes = 13;  // extended registers need a REX byte on the MOV
    else
      EncodedBytes = 12;

    EmitAndCountInstruction(
        MCInstBuilder(X86::MOV64ri).addReg(ScratchReg).addOperand(CalleeMCOp));
    EmitAndCountInstruction(MCInstBuilder(X86::CALL64r).addReg(ScratchReg));
  }

  // Emit padding.
  unsigned NumBytes = opers.getMetaOper(PatchPointOpers::NBytesPos).getImm();
  assert(NumBytes >= EncodedBytes &&((NumBytes >= EncodedBytes && "Patchpoint can't request size less than the length of a call." ) ? static_cast<void> (0) : __assert_fail ("NumBytes >= EncodedBytes && \"Patchpoint can't request size less than the length of a call.\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 979, __PRETTY_FUNCTION__))
         "Patchpoint can't request size less than the length of a call.")((NumBytes >= EncodedBytes && "Patchpoint can't request size less than the length of a call." ) ? static_cast<void> (0) : __assert_fail ("NumBytes >= EncodedBytes && \"Patchpoint can't request size less than the length of a call.\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 979, __PRETTY_FUNCTION__));

  EmitNops(*OutStreamer, NumBytes - EncodedBytes, Subtarget->is64Bit(),
           getSubtargetInfo());
}
984 | |
985 | // Returns instruction preceding MBBI in MachineFunction. |
986 | // If MBBI is the first instruction of the first basic block, returns null. |
987 | static MachineBasicBlock::const_iterator |
988 | PrevCrossBBInst(MachineBasicBlock::const_iterator MBBI) { |
989 | const MachineBasicBlock *MBB = MBBI->getParent(); |
990 | while (MBBI == MBB->begin()) { |
991 | if (MBB == &MBB->getParent()->front()) |
992 | return nullptr; |
993 | MBB = MBB->getPrevNode(); |
994 | MBBI = MBB->end(); |
995 | } |
996 | return --MBBI; |
997 | } |
998 | |
999 | static const Constant *getConstantFromPool(const MachineInstr &MI, |
1000 | const MachineOperand &Op) { |
1001 | if (!Op.isCPI()) |
1002 | return nullptr; |
1003 | |
1004 | ArrayRef<MachineConstantPoolEntry> Constants = |
1005 | MI.getParent()->getParent()->getConstantPool()->getConstants(); |
1006 | const MachineConstantPoolEntry &ConstantEntry = |
1007 | Constants[Op.getIndex()]; |
1008 | |
1009 | // Bail if this is a machine constant pool entry, we won't be able to dig out |
1010 | // anything useful. |
1011 | if (ConstantEntry.isMachineConstantPoolEntry()) |
1012 | return nullptr; |
1013 | |
1014 | auto *C = dyn_cast<Constant>(ConstantEntry.Val.ConstVal); |
1015 | assert((!C || ConstantEntry.getType() == C->getType()) &&(((!C || ConstantEntry.getType() == C->getType()) && "Expected a constant of the same type!") ? static_cast<void > (0) : __assert_fail ("(!C || ConstantEntry.getType() == C->getType()) && \"Expected a constant of the same type!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 1016, __PRETTY_FUNCTION__)) |
1016 | "Expected a constant of the same type!")(((!C || ConstantEntry.getType() == C->getType()) && "Expected a constant of the same type!") ? static_cast<void > (0) : __assert_fail ("(!C || ConstantEntry.getType() == C->getType()) && \"Expected a constant of the same type!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 1016, __PRETTY_FUNCTION__)); |
1017 | return C; |
1018 | } |
1019 | |
/// Build a human-readable asm comment describing a vector shuffle, e.g.
/// "xmm0 = xmm1[0,1],zero,zero".  Mask entries use SM_SentinelZero /
/// SM_SentinelUndef for zeroed and undef lanes.
static std::string getShuffleComment(const MachineOperand &DstOp,
                                     const MachineOperand &SrcOp1,
                                     const MachineOperand &SrcOp2,
                                     ArrayRef<int> Mask) {
  std::string Comment;

  // Compute the name for a register. This is really goofy because we have
  // multiple instruction printers that could (in theory) use different
  // names. Fortunately most people use the ATT style (outside of Windows)
  // and they actually agree on register naming here. Ultimately, this is
  // a comment, and so its OK if it isn't perfect.
  auto GetRegisterName = [](unsigned RegNum) -> StringRef {
    return X86ATTInstPrinter::getRegisterName(RegNum);
  };

  StringRef DstName = DstOp.isReg() ? GetRegisterName(DstOp.getReg()) : "mem";
  StringRef Src1Name =
      SrcOp1.isReg() ? GetRegisterName(SrcOp1.getReg()) : "mem";
  StringRef Src2Name =
      SrcOp2.isReg() ? GetRegisterName(SrcOp2.getReg()) : "mem";

  // One source operand, fix the mask to print all elements in one span.
  SmallVector<int, 8> ShuffleMask(Mask.begin(), Mask.end());
  if (Src1Name == Src2Name)
    for (int i = 0, e = ShuffleMask.size(); i != e; ++i)
      if (ShuffleMask[i] >= e)
        ShuffleMask[i] -= e;

  raw_string_ostream CS(Comment);
  CS << DstName << " = ";
  for (int i = 0, e = ShuffleMask.size(); i != e; ++i) {
    if (i != 0)
      CS << ",";
    if (ShuffleMask[i] == SM_SentinelZero) {
      CS << "zero";
      continue;
    }

    // Otherwise, it must come from src1 or src2.  Print the span of elements
    // that comes from this src.
    bool isSrc1 = ShuffleMask[i] < (int)e;
    CS << (isSrc1 ? Src1Name : Src2Name) << '[';

    // Inner loop advances i through the run of consecutive mask entries drawn
    // from the same source (undef entries print as "u" but stay in the run).
    bool IsFirst = true;
    while (i != e && ShuffleMask[i] != SM_SentinelZero &&
           (ShuffleMask[i] < (int)e) == isSrc1) {
      if (!IsFirst)
        CS << ',';
      else
        IsFirst = false;
      if (ShuffleMask[i] == SM_SentinelUndef)
        CS << "u";
      else
        CS << ShuffleMask[i] % (int)e;
      ++i;
    }
    CS << ']';
    --i; // For loop increments element #.
  }
  CS.flush();

  return Comment;
}
1083 | |
     | /// Lower one X86 MachineInstr to MCInst form and emit it to the streamer.
     | ///
     | /// Pseudo instructions (PIC-base setup, SEH/Win64 CFI directives,
     | /// stackmaps/patchpoints, TLS address calls, segmented-stack returns, ...)
     | /// are expanded inline or forwarded to a dedicated Lower* helper and
     | /// return early. Cases that `break` instead of `return` only attach a
     | /// verbose-asm comment (tail calls, EH returns, decoded shuffle masks,
     | /// constant-pool vector loads) and then fall through to the generic
     | /// lowering at the bottom of the function.
1084 | void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
1085 | X86MCInstLower MCInstLowering(*MF, *this);
1086 | const X86RegisterInfo *RI = MF->getSubtarget<X86Subtarget>().getRegisterInfo();
1087 | 
1088 | switch (MI->getOpcode()) {
1089 | case TargetOpcode::DBG_VALUE:
1090 | llvm_unreachable("Should be handled target independently")::llvm::llvm_unreachable_internal("Should be handled target independently" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 1090);
1091 | 
1092 | // Emit nothing here but a comment if we can.
1093 | case X86::Int_MemBarrier:
1094 | OutStreamer->emitRawComment("MEMBARRIER");
1095 | return;
1096 | 
1097 | 
1098 | case X86::EH_RETURN:
1099 | case X86::EH_RETURN64: {
1100 | // Lower these as normal, but add some comments.
1101 | unsigned Reg = MI->getOperand(0).getReg();
1102 | OutStreamer->AddComment(StringRef("eh_return, addr: %") +
1103 | X86ATTInstPrinter::getRegisterName(Reg));
1104 | break;
1105 | }
1106 | case X86::CLEANUPRET: {
1107 | // Lower these as normal, but add some comments.
1108 | OutStreamer->AddComment("CLEANUPRET");
1109 | break;
1110 | }
1111 | 
1112 | case X86::CATCHRET: {
1113 | // Lower these as normal, but add some comments.
1114 | OutStreamer->AddComment("CATCHRET");
1115 | break;
1116 | }
1117 | 
1118 | case X86::TAILJMPr:
1119 | case X86::TAILJMPm:
1120 | case X86::TAILJMPd:
1121 | case X86::TAILJMPr64:
1122 | case X86::TAILJMPm64:
1123 | case X86::TAILJMPd64:
1124 | case X86::TAILJMPr64_REX:
1125 | case X86::TAILJMPm64_REX:
1126 | case X86::TAILJMPd64_REX:
1127 | // Lower these as normal, but add some comments.
1128 | OutStreamer->AddComment("TAILCALL");
1129 | break;
1130 | 
1131 | case X86::TLS_addr32:
1132 | case X86::TLS_addr64:
1133 | case X86::TLS_base_addr32:
1134 | case X86::TLS_base_addr64:
1135 | return LowerTlsAddr(MCInstLowering, *MI);
1136 | 
1137 | case X86::MOVPC32r: {
1138 | // This is a pseudo op for a two instruction sequence with a label, which
1139 | // looks like:
1140 | // call "L1$pb"
1141 | // "L1$pb":
1142 | // popl %esi
1143 | 
1144 | // Emit the call.
1145 | MCSymbol *PICBase = MF->getPICBaseSymbol();
1146 | // FIXME: We would like an efficient form for this, so we don't have to do a
1147 | // lot of extra uniquing.
1148 | EmitAndCountInstruction(MCInstBuilder(X86::CALLpcrel32)
1149 | .addExpr(MCSymbolRefExpr::create(PICBase, OutContext)));
1150 | 
1151 | const X86FrameLowering* FrameLowering =
1152 | MF->getSubtarget<X86Subtarget>().getFrameLowering();
1153 | bool hasFP = FrameLowering->hasFP(*MF);
1154 | 
1155 | // TODO: This is needed only if we require precise CFA.
1156 | bool HasActiveDwarfFrame = OutStreamer->getNumFrameInfos() &&
1157 | !OutStreamer->getDwarfFrameInfos().back().End;
1158 | 
1159 | int stackGrowth = -RI->getSlotSize();
1160 | 
     | // The call above pushed a return address; without a frame pointer the CFA
     | // must be adjusted around the call/pop pair so unwind info stays correct.
1161 | if (HasActiveDwarfFrame && !hasFP) {
1162 | OutStreamer->EmitCFIAdjustCfaOffset(-stackGrowth);
1163 | }
1164 | 
1165 | // Emit the label.
1166 | OutStreamer->EmitLabel(PICBase);
1167 | 
1168 | // popl $reg
1169 | EmitAndCountInstruction(MCInstBuilder(X86::POP32r)
1170 | .addReg(MI->getOperand(0).getReg()));
1171 | 
1172 | if (HasActiveDwarfFrame && !hasFP) {
1173 | OutStreamer->EmitCFIAdjustCfaOffset(stackGrowth);
1174 | }
1175 | return;
1176 | }
1177 | 
1178 | case X86::ADD32ri: {
1179 | // Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri.
1180 | if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS)
1181 | break;
1182 | 
1183 | // Okay, we have something like:
1184 | // EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL)
1185 | 
1186 | // For this, we want to print something like:
1187 | // MYGLOBAL + (. - PICBASE)
1188 | // However, we can't generate a ".", so just emit a new label here and refer
1189 | // to it.
1190 | MCSymbol *DotSym = OutContext.createTempSymbol();
1191 | OutStreamer->EmitLabel(DotSym);
1192 | 
1193 | // Now that we have emitted the label, lower the complex operand expression.
1194 | MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));
1195 | 
1196 | const MCExpr *DotExpr = MCSymbolRefExpr::create(DotSym, OutContext);
1197 | const MCExpr *PICBase =
1198 | MCSymbolRefExpr::create(MF->getPICBaseSymbol(), OutContext);
     | // DotExpr := OpSym + (DotSym - PICBase), i.e. MYGLOBAL + (. - PICBASE).
1199 | DotExpr = MCBinaryExpr::createSub(DotExpr, PICBase, OutContext);
1200 | 
1201 | DotExpr = MCBinaryExpr::createAdd(MCSymbolRefExpr::create(OpSym,OutContext),
1202 | DotExpr, OutContext);
1203 | 
1204 | EmitAndCountInstruction(MCInstBuilder(X86::ADD32ri)
1205 | .addReg(MI->getOperand(0).getReg())
1206 | .addReg(MI->getOperand(1).getReg())
1207 | .addExpr(DotExpr));
1208 | return;
1209 | }
1210 | case TargetOpcode::STATEPOINT:
1211 | return LowerSTATEPOINT(*MI, MCInstLowering);
1212 | 
1213 | case TargetOpcode::FAULTING_LOAD_OP:
1214 | return LowerFAULTING_LOAD_OP(*MI, MCInstLowering);
1215 | 
1216 | case TargetOpcode::STACKMAP:
1217 | return LowerSTACKMAP(*MI);
1218 | 
1219 | case TargetOpcode::PATCHPOINT:
1220 | return LowerPATCHPOINT(*MI, MCInstLowering);
1221 | 
1222 | case X86::MORESTACK_RET:
1223 | EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
1224 | return;
1225 | 
1226 | case X86::MORESTACK_RET_RESTORE_R10:
1227 | // Return, then restore R10.
1228 | EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
1229 | EmitAndCountInstruction(MCInstBuilder(X86::MOV64rr)
1230 | .addReg(X86::R10)
1231 | .addReg(X86::RAX));
1232 | return;
1233 | 
     | // The SEH_* pseudos below emit no machine code; they only drive the
     | // Win64 unwind-info (.seh_*) directives on the streamer.
1234 | case X86::SEH_PushReg:
1235 | OutStreamer->EmitWinCFIPushReg(RI->getSEHRegNum(MI->getOperand(0).getImm()));
1236 | return;
1237 | 
1238 | case X86::SEH_SaveReg:
1239 | OutStreamer->EmitWinCFISaveReg(RI->getSEHRegNum(MI->getOperand(0).getImm()),
1240 | MI->getOperand(1).getImm());
1241 | return;
1242 | 
1243 | case X86::SEH_SaveXMM:
1244 | OutStreamer->EmitWinCFISaveXMM(RI->getSEHRegNum(MI->getOperand(0).getImm()),
1245 | MI->getOperand(1).getImm());
1246 | return;
1247 | 
1248 | case X86::SEH_StackAlloc:
1249 | OutStreamer->EmitWinCFIAllocStack(MI->getOperand(0).getImm());
1250 | return;
1251 | 
1252 | case X86::SEH_SetFrame:
1253 | OutStreamer->EmitWinCFISetFrame(RI->getSEHRegNum(MI->getOperand(0).getImm()),
1254 | MI->getOperand(1).getImm());
1255 | return;
1256 | 
1257 | case X86::SEH_PushFrame:
1258 | OutStreamer->EmitWinCFIPushFrame(MI->getOperand(0).getImm());
1259 | return;
1260 | 
1261 | case X86::SEH_EndPrologue:
1262 | OutStreamer->EmitWinCFIEndProlog();
1263 | return;
1264 | 
1265 | case X86::SEH_Epilogue: {
1266 | MachineBasicBlock::const_iterator MBBI(MI);
1267 | // Check if preceded by a call and emit nop if so.
1268 | for (MBBI = PrevCrossBBInst(MBBI); MBBI; MBBI = PrevCrossBBInst(MBBI)) {
1269 | // Conservatively assume that pseudo instructions don't emit code and keep
1270 | // looking for a call. We may emit an unnecessary nop in some cases.
1271 | if (!MBBI->isPseudo()) {
1272 | if (MBBI->isCall())
1273 | EmitAndCountInstruction(MCInstBuilder(X86::NOOP));
1274 | break;
1275 | }
1276 | }
1277 | return;
1278 | }
1279 | 
1280 | // Lower PSHUFB and VPERMILP normally but add a comment if we can find
1281 | // a constant shuffle mask. We won't be able to do this at the MC layer
1282 | // because the mask isn't an immediate.
1283 | case X86::PSHUFBrm:
1284 | case X86::VPSHUFBrm:
1285 | case X86::VPSHUFBYrm:
1286 | case X86::VPSHUFBZ128rm:
1287 | case X86::VPSHUFBZ128rmk:
1288 | case X86::VPSHUFBZ128rmkz:
1289 | case X86::VPSHUFBZ256rm:
1290 | case X86::VPSHUFBZ256rmk:
1291 | case X86::VPSHUFBZ256rmkz:
1292 | case X86::VPSHUFBZrm:
1293 | case X86::VPSHUFBZrmk:
1294 | case X86::VPSHUFBZrmkz: {
1295 | if (!OutStreamer->isVerboseAsm())
1296 | break;
     | // Operand layout differs between unmasked, zero-masked (kz) and merge-
     | // masked (k) forms, so pick the source/mask operand indices per opcode.
1297 | unsigned SrcIdx, MaskIdx;
1298 | switch (MI->getOpcode()) {
1299 | default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 1299);
1300 | case X86::PSHUFBrm:
1301 | case X86::VPSHUFBrm:
1302 | case X86::VPSHUFBYrm:
1303 | case X86::VPSHUFBZ128rm:
1304 | case X86::VPSHUFBZ256rm:
1305 | case X86::VPSHUFBZrm:
1306 | SrcIdx = 1; MaskIdx = 5; break;
1307 | case X86::VPSHUFBZ128rmkz:
1308 | case X86::VPSHUFBZ256rmkz:
1309 | case X86::VPSHUFBZrmkz:
1310 | SrcIdx = 2; MaskIdx = 6; break;
1311 | case X86::VPSHUFBZ128rmk:
1312 | case X86::VPSHUFBZ256rmk:
1313 | case X86::VPSHUFBZrmk:
1314 | SrcIdx = 3; MaskIdx = 7; break;
1315 | }
1316 | 
1317 | assert(MI->getNumOperands() >= 6 &&((MI->getNumOperands() >= 6 && "We should always have at least 6 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() >= 6 && \"We should always have at least 6 operands!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 1318, __PRETTY_FUNCTION__))
1318 | "We should always have at least 6 operands!")((MI->getNumOperands() >= 6 && "We should always have at least 6 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() >= 6 && \"We should always have at least 6 operands!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 1318, __PRETTY_FUNCTION__));
1319 | const MachineOperand &DstOp = MI->getOperand(0);
1320 | const MachineOperand &SrcOp = MI->getOperand(SrcIdx);
1321 | const MachineOperand &MaskOp = MI->getOperand(MaskIdx);
1322 | 
1323 | if (auto *C = getConstantFromPool(*MI, MaskOp)) {
1324 | SmallVector<int, 16> Mask;
1325 | DecodePSHUFBMask(C, Mask);
1326 | if (!Mask.empty())
1327 | OutStreamer->AddComment(getShuffleComment(DstOp, SrcOp, SrcOp, Mask));
1328 | }
1329 | break;
1330 | }
1331 | case X86::VPERMILPSrm:
1332 | case X86::VPERMILPDrm:
1333 | case X86::VPERMILPSYrm:
1334 | case X86::VPERMILPDYrm: {
1335 | if (!OutStreamer->isVerboseAsm())
1336 | break;
1337 | assert(MI->getNumOperands() > 5 &&((MI->getNumOperands() > 5 && "We should always have at least 5 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() > 5 && \"We should always have at least 5 operands!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 1338, __PRETTY_FUNCTION__))
1338 | "We should always have at least 5 operands!")((MI->getNumOperands() > 5 && "We should always have at least 5 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() > 5 && \"We should always have at least 5 operands!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 1338, __PRETTY_FUNCTION__));
1339 | const MachineOperand &DstOp = MI->getOperand(0);
1340 | const MachineOperand &SrcOp = MI->getOperand(1);
1341 | const MachineOperand &MaskOp = MI->getOperand(5);
1342 | 
     | // Element size (in bits) selects how the VPERMILP mask is decoded.
1343 | unsigned ElSize;
1344 | switch (MI->getOpcode()) {
1345 | default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 1345);
1346 | case X86::VPERMILPSrm: case X86::VPERMILPSYrm: ElSize = 32; break;
1347 | case X86::VPERMILPDrm: case X86::VPERMILPDYrm: ElSize = 64; break;
1348 | }
1349 | 
1350 | if (auto *C = getConstantFromPool(*MI, MaskOp)) {
1351 | SmallVector<int, 16> Mask;
1352 | DecodeVPERMILPMask(C, ElSize, Mask);
1353 | if (!Mask.empty())
1354 | OutStreamer->AddComment(getShuffleComment(DstOp, SrcOp, SrcOp, Mask));
1355 | }
1356 | break;
1357 | }
1358 | case X86::VPPERMrrm: {
1359 | if (!OutStreamer->isVerboseAsm())
1360 | break;
1361 | assert(MI->getNumOperands() > 6 &&((MI->getNumOperands() > 6 && "We should always have at least 6 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() > 6 && \"We should always have at least 6 operands!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 1362, __PRETTY_FUNCTION__))
1362 | "We should always have at least 6 operands!")((MI->getNumOperands() > 6 && "We should always have at least 6 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() > 6 && \"We should always have at least 6 operands!\"" , "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn266593/lib/Target/X86/X86MCInstLower.cpp" , 1362, __PRETTY_FUNCTION__));
1363 | const MachineOperand &DstOp = MI->getOperand(0);
1364 | const MachineOperand &SrcOp1 = MI->getOperand(1);
1365 | const MachineOperand &SrcOp2 = MI->getOperand(2);
1366 | const MachineOperand &MaskOp = MI->getOperand(6);
1367 | 
1368 | if (auto *C = getConstantFromPool(*MI, MaskOp)) {
1369 | SmallVector<int, 16> Mask;
1370 | DecodeVPPERMMask(C, Mask);
1371 | if (!Mask.empty())
1372 | OutStreamer->AddComment(getShuffleComment(DstOp, SrcOp1, SrcOp2, Mask));
1373 | }
1374 | break;
1375 | }
1376 | 
     | // The macros below enumerate every vector move-from-memory opcode
     | // (SSE, AVX-128/256, AVX-512) so the constant-pool case handles them all.
1377 | #define MOV_CASE(Prefix, Suffix)case X86::PrefixMOVAPDSuffixrm: case X86::PrefixMOVAPSSuffixrm : case X86::PrefixMOVUPDSuffixrm: case X86::PrefixMOVUPSSuffixrm : case X86::PrefixMOVDQASuffixrm: case X86::PrefixMOVDQUSuffixrm : \
1378 |   case X86::Prefix##MOVAPD##Suffix##rm: \
1379 |   case X86::Prefix##MOVAPS##Suffix##rm: \
1380 |   case X86::Prefix##MOVUPD##Suffix##rm: \
1381 |   case X86::Prefix##MOVUPS##Suffix##rm: \
1382 |   case X86::Prefix##MOVDQA##Suffix##rm: \
1383 |   case X86::Prefix##MOVDQU##Suffix##rm:
1384 | 
1385 | #define MOV_AVX512_CASE(Suffix)case X86::VMOVDQA64Suffixrm: case X86::VMOVDQA32Suffixrm: case X86::VMOVDQU64Suffixrm: case X86::VMOVDQU32Suffixrm: case X86 ::VMOVDQU16Suffixrm: case X86::VMOVDQU8Suffixrm: case X86::VMOVAPSSuffixrm : case X86::VMOVAPDSuffixrm: case X86::VMOVUPSSuffixrm: case X86 ::VMOVUPDSuffixrm: \
1386 |   case X86::VMOVDQA64##Suffix##rm: \
1387 |   case X86::VMOVDQA32##Suffix##rm: \
1388 |   case X86::VMOVDQU64##Suffix##rm: \
1389 |   case X86::VMOVDQU32##Suffix##rm: \
1390 |   case X86::VMOVDQU16##Suffix##rm: \
1391 |   case X86::VMOVDQU8##Suffix##rm: \
1392 |   case X86::VMOVAPS##Suffix##rm: \
1393 |   case X86::VMOVAPD##Suffix##rm: \
1394 |   case X86::VMOVUPS##Suffix##rm: \
1395 |   case X86::VMOVUPD##Suffix##rm:
1396 | 
1397 | #define CASE_ALL_MOV_RM()case X86::MOVAPDrm: case X86::MOVAPSrm: case X86::MOVUPDrm: case X86::MOVUPSrm: case X86::MOVDQArm: case X86::MOVDQUrm: case X86 ::VMOVAPDrm: case X86::VMOVAPSrm: case X86::VMOVUPDrm: case X86 ::VMOVUPSrm: case X86::VMOVDQArm: case X86::VMOVDQUrm: case X86 ::VMOVAPDYrm: case X86::VMOVAPSYrm: case X86::VMOVUPDYrm: case X86::VMOVUPSYrm: case X86::VMOVDQAYrm: case X86::VMOVDQUYrm: case X86::VMOVDQA64Zrm: case X86::VMOVDQA32Zrm: case X86::VMOVDQU64Zrm : case X86::VMOVDQU32Zrm: case X86::VMOVDQU16Zrm: case X86::VMOVDQU8Zrm : case X86::VMOVAPSZrm: case X86::VMOVAPDZrm: case X86::VMOVUPSZrm : case X86::VMOVUPDZrm: case X86::VMOVDQA64Z256rm: case X86:: VMOVDQA32Z256rm: case X86::VMOVDQU64Z256rm: case X86::VMOVDQU32Z256rm : case X86::VMOVDQU16Z256rm: case X86::VMOVDQU8Z256rm: case X86 ::VMOVAPSZ256rm: case X86::VMOVAPDZ256rm: case X86::VMOVUPSZ256rm : case X86::VMOVUPDZ256rm: case X86::VMOVDQA64Z128rm: case X86 ::VMOVDQA32Z128rm: case X86::VMOVDQU64Z128rm: case X86::VMOVDQU32Z128rm : case X86::VMOVDQU16Z128rm: case X86::VMOVDQU8Z128rm: case X86 ::VMOVAPSZ128rm: case X86::VMOVAPDZ128rm: case X86::VMOVUPSZ128rm : case X86::VMOVUPDZ128rm: \
1398 |   MOV_CASE(, )case X86::MOVAPDrm: case X86::MOVAPSrm: case X86::MOVUPDrm: case X86::MOVUPSrm: case X86::MOVDQArm: case X86::MOVDQUrm: /* SSE */ \
1399 |   MOV_CASE(V, )case X86::VMOVAPDrm: case X86::VMOVAPSrm: case X86::VMOVUPDrm : case X86::VMOVUPSrm: case X86::VMOVDQArm: case X86::VMOVDQUrm : /* AVX-128 */ \
1400 |   MOV_CASE(V, Y)case X86::VMOVAPDYrm: case X86::VMOVAPSYrm: case X86::VMOVUPDYrm : case X86::VMOVUPSYrm: case X86::VMOVDQAYrm: case X86::VMOVDQUYrm : /* AVX-256 */ \
1401 |   MOV_AVX512_CASE(Z)case X86::VMOVDQA64Zrm: case X86::VMOVDQA32Zrm: case X86::VMOVDQU64Zrm : case X86::VMOVDQU32Zrm: case X86::VMOVDQU16Zrm: case X86::VMOVDQU8Zrm : case X86::VMOVAPSZrm: case X86::VMOVAPDZrm: case X86::VMOVUPSZrm : case X86::VMOVUPDZrm: \
1402 |   MOV_AVX512_CASE(Z256)case X86::VMOVDQA64Z256rm: case X86::VMOVDQA32Z256rm: case X86 ::VMOVDQU64Z256rm: case X86::VMOVDQU32Z256rm: case X86::VMOVDQU16Z256rm : case X86::VMOVDQU8Z256rm: case X86::VMOVAPSZ256rm: case X86 ::VMOVAPDZ256rm: case X86::VMOVUPSZ256rm: case X86::VMOVUPDZ256rm : \
1403 |   MOV_AVX512_CASE(Z128)case X86::VMOVDQA64Z128rm: case X86::VMOVDQA32Z128rm: case X86 ::VMOVDQU64Z128rm: case X86::VMOVDQU32Z128rm: case X86::VMOVDQU16Z128rm : case X86::VMOVDQU8Z128rm: case X86::VMOVAPSZ128rm: case X86 ::VMOVAPDZ128rm: case X86::VMOVUPSZ128rm: case X86::VMOVUPDZ128rm :
1404 | 
1405 |   // For loads from a constant pool to a vector register, print the constant
1406 |   // loaded.
1407 |   CASE_ALL_MOV_RM()case X86::MOVAPDrm: case X86::MOVAPSrm: case X86::MOVUPDrm: case X86::MOVUPSrm: case X86::MOVDQArm: case X86::MOVDQUrm: case X86 ::VMOVAPDrm: case X86::VMOVAPSrm: case X86::VMOVUPDrm: case X86 ::VMOVUPSrm: case X86::VMOVDQArm: case X86::VMOVDQUrm: case X86 ::VMOVAPDYrm: case X86::VMOVAPSYrm: case X86::VMOVUPDYrm: case X86::VMOVUPSYrm: case X86::VMOVDQAYrm: case X86::VMOVDQUYrm: case X86::VMOVDQA64Zrm: case X86::VMOVDQA32Zrm: case X86::VMOVDQU64Zrm : case X86::VMOVDQU32Zrm: case X86::VMOVDQU16Zrm: case X86::VMOVDQU8Zrm : case X86::VMOVAPSZrm: case X86::VMOVAPDZrm: case X86::VMOVUPSZrm : case X86::VMOVUPDZrm: case X86::VMOVDQA64Z256rm: case X86:: VMOVDQA32Z256rm: case X86::VMOVDQU64Z256rm: case X86::VMOVDQU32Z256rm : case X86::VMOVDQU16Z256rm: case X86::VMOVDQU8Z256rm: case X86 ::VMOVAPSZ256rm: case X86::VMOVAPDZ256rm: case X86::VMOVUPSZ256rm : case X86::VMOVUPDZ256rm: case X86::VMOVDQA64Z128rm: case X86 ::VMOVDQA32Z128rm: case X86::VMOVDQU64Z128rm: case X86::VMOVDQU32Z128rm : case X86::VMOVDQU16Z128rm: case X86::VMOVDQU8Z128rm: case X86 ::VMOVAPSZ128rm: case X86::VMOVAPDZ128rm: case X86::VMOVUPSZ128rm : case X86::VMOVUPDZ128rm:
1408 | if (!OutStreamer->isVerboseAsm())
1409 | break;
1410 | if (MI->getNumOperands() > 4)
1411 | if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) {
1412 | std::string Comment;
1413 | raw_string_ostream CS(Comment);
1414 | const MachineOperand &DstOp = MI->getOperand(0);
1415 | CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = ";
     | // ConstantDataSequential: homogeneous packed elements, printed as [...].
1416 | if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
1417 | CS << "[";
1418 | for (int i = 0, NumElements = CDS->getNumElements(); i < NumElements; ++i) {
1419 | if (i != 0)
1420 | CS << ",";
1421 | if (CDS->getElementType()->isIntegerTy())
1422 | CS << CDS->getElementAsInteger(i);
1423 | else if (CDS->getElementType()->isFloatTy())
1424 | CS << CDS->getElementAsFloat(i);
1425 | else if (CDS->getElementType()->isDoubleTy())
1426 | CS << CDS->getElementAsDouble(i);
1427 | else
1428 | CS << "?";
1429 | }
1430 | CS << "]";
1431 | OutStreamer->AddComment(CS.str());
     | // ConstantVector: per-element Constant operands, printed as <...>.
1432 | } else if (auto *CV = dyn_cast<ConstantVector>(C)) {
1433 | CS << "<";
1434 | for (int i = 0, NumOperands = CV->getNumOperands(); i < NumOperands; ++i) {
1435 | if (i != 0)
1436 | CS << ",";
1437 | Constant *COp = CV->getOperand(i);
1438 | if (isa<UndefValue>(COp)) {
1439 | CS << "u";
1440 | } else if (auto *CI = dyn_cast<ConstantInt>(COp)) {
1441 | if (CI->getBitWidth() <= 64) {
1442 | CS << CI->getZExtValue();
1443 | } else {
1444 | // print multi-word constant as (w0,w1)
1445 | auto Val = CI->getValue();
1446 | CS << "(";
1447 | for (int i = 0, N = Val.getNumWords(); i < N; ++i) {
1448 | if (i > 0)
1449 | CS << ",";
1450 | CS << Val.getRawData()[i];
1451 | }
1452 | CS << ")";
1453 | }
1454 | } else if (auto *CF = dyn_cast<ConstantFP>(COp)) {
1455 | SmallString<32> Str;
1456 | CF->getValueAPF().toString(Str);
1457 | CS << Str;
1458 | } else {
1459 | CS << "?";
1460 | }
1461 | }
1462 | CS << ">";
1463 | OutStreamer->AddComment(CS.str());
1464 | }
1465 | }
1466 | break;
1467 | }
1468 | 
     | // Default path: generic lowering for every opcode that fell through.
1469 | MCInst TmpInst;
1470 | MCInstLowering.Lower(MI, TmpInst);
1471 | 
1472 | // Stackmap shadows cannot include branch targets, so we can count the bytes
1473 | // in a call towards the shadow, but must ensure that the no thread returns
1474 | // in to the stackmap shadow. The only way to achieve this is if the call
1475 | // is at the end of the shadow.
1476 | if (MI->isCall()) {
1477 | // Count then size of the call towards the shadow
1478 | SMShadowTracker.count(TmpInst, getSubtargetInfo());
1479 | // Then flush the shadow so that we fill with nops before the call, not
1480 | // after it.
1481 | SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
1482 | // Then emit the call
1483 | OutStreamer->EmitInstruction(TmpInst, getSubtargetInfo());
1484 | return;
1485 | }
1486 | 
1487 | EmitAndCountInstruction(TmpInst);
1488 | }