1//===-- X86MCInstLower.cpp - Convert X86 MachineInstr to an MCInst --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains code to lower X86 MachineInstrs to their corresponding
10// MCInst records.
11//
12//===----------------------------------------------------------------------===//
13
21#include "X86AsmPrinter.h"
23#include "X86RegisterInfo.h"
25#include "X86Subtarget.h"
26#include "llvm/ADT/STLExtras.h"
36#include "llvm/IR/DataLayout.h"
37#include "llvm/IR/GlobalValue.h"
38#include "llvm/IR/Mangler.h"
39#include "llvm/MC/MCAsmInfo.h"
41#include "llvm/MC/MCContext.h"
42#include "llvm/MC/MCExpr.h"
43#include "llvm/MC/MCFixup.h"
44#include "llvm/MC/MCInst.h"
46#include "llvm/MC/MCSection.h"
47#include "llvm/MC/MCStreamer.h"
48#include "llvm/MC/MCSymbol.h"
55#include <string>
56
57using namespace llvm;
58
59static cl::opt<bool> EnableBranchHint("enable-branch-hint",
60 cl::desc("Enable branch hint."),
61 cl::init(false), cl::Hidden);
63 "branch-hint-probability-threshold",
64 cl::desc("The probability threshold of enabling branch hint."),
65 cl::init(50), cl::Hidden);
66
67namespace {
68
69/// X86MCInstLower - This class is used to lower an MachineInstr into an MCInst.
70class X86MCInstLower {
71 MCContext &Ctx;
72 const MachineFunction &MF;
73 const TargetMachine &TM;
74 const MCAsmInfo &MAI;
75 X86AsmPrinter &AsmPrinter;
76
77public:
78 X86MCInstLower(const MachineFunction &MF, X86AsmPrinter &asmprinter);
79
80 MCOperand LowerMachineOperand(const MachineInstr *MI,
81 const MachineOperand &MO) const;
82 void Lower(const MachineInstr *MI, MCInst &OutMI) const;
83
84 MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;
85 MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
86
87private:
88 MachineModuleInfoMachO &getMachOMMI() const;
89};
90
91} // end anonymous namespace
92
93/// A RAII helper which defines a region of instructions which can't have
94/// padding added between them for correctness.
95struct NoAutoPaddingScope {
96 MCStreamer &OS;
97 const bool OldAllowAutoPadding;
98 NoAutoPaddingScope(MCStreamer &OS)
99 : OS(OS), OldAllowAutoPadding(OS.getAllowAutoPadding()) {
100 changeAndComment(false);
101 }
102 ~NoAutoPaddingScope() { changeAndComment(OldAllowAutoPadding); }
103 void changeAndComment(bool b) {
104 if (b == OS.getAllowAutoPadding())
105 return;
106 OS.setAllowAutoPadding(b);
107 if (b)
108 OS.emitRawComment("autopadding");
109 else
110 OS.emitRawComment("noautopadding");
111 }
112};
113
114// Emit a minimal sequence of nops spanning NumBytes bytes.
115static void emitX86Nops(MCStreamer &OS, unsigned NumBytes,
116 const X86Subtarget *Subtarget);
117
118void X86AsmPrinter::StackMapShadowTracker::count(const MCInst &Inst,
119 const MCSubtargetInfo &STI,
120 MCCodeEmitter *CodeEmitter) {
121 if (InShadow) {
122 SmallString<256> Code;
123 SmallVector<MCFixup, 4> Fixups;
124 CodeEmitter->encodeInstruction(Inst, Code, Fixups, STI);
125 CurrentShadowSize += Code.size();
126 if (CurrentShadowSize >= RequiredShadowSize)
127 InShadow = false; // The shadow is big enough. Stop counting.
128 }
129}
130
131void X86AsmPrinter::StackMapShadowTracker::emitShadowPadding(
132 MCStreamer &OutStreamer, const MCSubtargetInfo &STI) {
133 if (InShadow && CurrentShadowSize < RequiredShadowSize) {
134 InShadow = false;
135 emitX86Nops(OutStreamer, RequiredShadowSize - CurrentShadowSize,
136 &MF->getSubtarget<X86Subtarget>());
137 }
138}
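// Note (illustrative, grounded in the code above): these two helpers work as a
// pair. For example, if LowerSTACKMAP calls SMShadowTracker.reset(8) and only
// a single 5-byte call has been encoded before the next stackmap point,
// emitShadowPadding() tops the shadow up with the remaining 3 bytes of NOPs
// via emitX86Nops().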
139
140void X86AsmPrinter::EmitAndCountInstruction(MCInst &Inst) {
141 OutStreamer->emitInstruction(Inst, getSubtargetInfo());
142 SMShadowTracker.count(Inst, getSubtargetInfo(), CodeEmitter.get());
143}
144
145X86MCInstLower::X86MCInstLower(const MachineFunction &mf,
146 X86AsmPrinter &asmprinter)
147 : Ctx(asmprinter.OutContext), MF(mf), TM(mf.getTarget()),
148 MAI(*TM.getMCAsmInfo()), AsmPrinter(asmprinter) {}
149
150MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
151 return AsmPrinter.MMI->getObjFileInfo<MachineModuleInfoMachO>();
152}
153
154/// GetSymbolFromOperand - Lower an MO_GlobalAddress or MO_ExternalSymbol
155/// operand to an MCSymbol.
156MCSymbol *X86MCInstLower::GetSymbolFromOperand(const MachineOperand &MO) const {
157 const Triple &TT = TM.getTargetTriple();
158 if (MO.isGlobal() && TT.isOSBinFormatELF())
159 return AsmPrinter.getSymbolPreferLocal(*MO.getGlobal());
160
161 const DataLayout &DL = MF.getDataLayout();
162 assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) &&
163 "Isn't a symbol reference");
164
165 MCSymbol *Sym = nullptr;
166 SmallString<128> Name;
167 StringRef Suffix;
168
169 switch (MO.getTargetFlags()) {
170 case X86II::MO_DLLIMPORT:
171 // Handle dllimport linkage.
172 Name += "__imp_";
173 break;
174 case X86II::MO_COFFSTUB:
175 Name += ".refptr.";
176 break;
177 case X86II::MO_DARWIN_NONLAZY:
178 case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
179 Suffix = "$non_lazy_ptr";
180 break;
181 }
182
183 if (!Suffix.empty())
184 Name += DL.getInternalSymbolPrefix();
185
186 if (MO.isGlobal()) {
187 const GlobalValue *GV = MO.getGlobal();
188 AsmPrinter.getNameWithPrefix(Name, GV);
189 } else if (MO.isSymbol()) {
190 Mangler::getNameWithPrefix(Name, MO.getSymbolName(), DL);
191 } else if (MO.isMBB()) {
192 assert(Suffix.empty());
193 Sym = MO.getMBB()->getSymbol();
194 }
195
196 Name += Suffix;
197 if (!Sym)
198 Sym = Ctx.getOrCreateSymbol(Name);
199
200 // If the target flags on the operand change the name of the symbol, do that
201 // before we return the symbol.
202 switch (MO.getTargetFlags()) {
203 default:
204 break;
205 case X86II::MO_COFFSTUB: {
206 MachineModuleInfoCOFF &MMICOFF =
207 AsmPrinter.MMI->getObjFileInfo<MachineModuleInfoCOFF>();
208 MachineModuleInfoImpl::StubValueTy &StubSym = MMICOFF.getGVStubEntry(Sym);
209 if (!StubSym.getPointer()) {
210 assert(MO.isGlobal() && "Extern symbol not handled yet");
211 StubSym = MachineModuleInfoImpl::StubValueTy(
212 AsmPrinter.getSymbol(MO.getGlobal()), true);
213 }
214 break;
215 }
216 case X86II::MO_DARWIN_NONLAZY:
217 case X86II::MO_DARWIN_NONLAZY_PIC_BASE: {
218 MachineModuleInfoImpl::StubValueTy &StubSym =
219 getMachOMMI().getGVStubEntry(Sym);
220 if (!StubSym.getPointer()) {
221 assert(MO.isGlobal() && "Extern symbol not handled yet");
222 StubSym = MachineModuleInfoImpl::StubValueTy(
223 AsmPrinter.getSymbol(MO.getGlobal()),
224 !MO.getGlobal()->hasInternalLinkage());
225 }
226 break;
227 }
228 }
229
230 return Sym;
231}
232
233MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
234 MCSymbol *Sym) const {
235 // FIXME: We would like an efficient form for this, so we don't have to do a
236 // lot of extra uniquing.
237 const MCExpr *Expr = nullptr;
238 uint16_t Specifier = X86::S_None;
239
240 switch (MO.getTargetFlags()) {
241 default:
242 llvm_unreachable("Unknown target flag on GV operand");
243 case X86II::MO_NO_FLAG: // No flag.
244 // These affect the name of the symbol, not any suffix.
245 case X86II::MO_DARWIN_NONLAZY:
246 case X86II::MO_DLLIMPORT:
247 case X86II::MO_COFFSTUB:
248 break;
249
250 case X86II::MO_TLVP:
251 Specifier = X86::S_TLVP;
252 break;
253 case X86II::MO_TLVP_PIC_BASE:
254 Expr = MCSymbolRefExpr::create(Sym, X86::S_TLVP, Ctx);
255 // Subtract the pic base.
256 Expr = MCBinaryExpr::createSub(
257 Expr, MCSymbolRefExpr::create(MF.getPICBaseSymbol(), Ctx), Ctx);
258 break;
259 case X86II::MO_SECREL:
260 Specifier = uint16_t(X86::S_COFF_SECREL);
261 break;
262 case X86II::MO_TLSGD:
263 Specifier = X86::S_TLSGD;
264 break;
265 case X86II::MO_TLSLD:
266 Specifier = X86::S_TLSLD;
267 break;
268 case X86II::MO_TLSLDM:
269 Specifier = X86::S_TLSLDM;
270 break;
271 case X86II::MO_GOTTPOFF:
272 Specifier = X86::S_GOTTPOFF;
273 break;
274 case X86II::MO_INDNTPOFF:
275 Specifier = X86::S_INDNTPOFF;
276 break;
277 case X86II::MO_TPOFF:
278 Specifier = X86::S_TPOFF;
279 break;
280 case X86II::MO_DTPOFF:
281 Specifier = X86::S_DTPOFF;
282 break;
283 case X86II::MO_NTPOFF:
284 Specifier = X86::S_NTPOFF;
285 break;
286 case X86II::MO_GOTNTPOFF:
287 Specifier = X86::S_GOTNTPOFF;
288 break;
289 case X86II::MO_GOTPCREL:
290 Specifier = X86::S_GOTPCREL;
291 break;
292 case X86II::MO_GOTPCREL_NORELAX:
293 Specifier = X86::S_GOTPCREL_NORELAX;
294 break;
295 case X86II::MO_GOT:
296 Specifier = X86::S_GOT;
297 break;
298 case X86II::MO_GOTOFF:
299 Specifier = X86::S_GOTOFF;
300 break;
301 case X86II::MO_PLT:
302 Specifier = X86::S_PLT;
303 break;
304 case X86II::MO_ABS8:
305 Specifier = X86::S_ABS8;
306 break;
307 case X86II::MO_PIC_BASE_OFFSET:
308 case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
309 Expr = MCSymbolRefExpr::create(Sym, Ctx);
310 // Subtract the pic base.
311 Expr = MCBinaryExpr::createSub(
312 Expr, MCSymbolRefExpr::create(MF.getPICBaseSymbol(), Ctx), Ctx);
313 if (MO.isJTI()) {
314 assert(MAI.doesSetDirectiveSuppressReloc());
315 // If .set directive is supported, use it to reduce the number of
316 // relocations the assembler will generate for differences between
317 // local labels. This is only safe when the symbols are in the same
318 // section so we are restricting it to jumptable references.
319 MCSymbol *Label = Ctx.createTempSymbol();
320 AsmPrinter.OutStreamer->emitAssignment(Label, Expr);
321 Expr = MCSymbolRefExpr::create(Label, Ctx);
322 }
323 break;
324 }
325
326 if (!Expr)
327 Expr = MCSymbolRefExpr::create(Sym, Specifier, Ctx);
328
329 if (!MO.isJTI() && !MO.isMBB() && MO.getOffset())
330 Expr = MCBinaryExpr::createAdd(
331 Expr, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);
332 return MCOperand::createExpr(Expr);
333}
334
335static unsigned getRetOpcode(const X86Subtarget &Subtarget) {
336 return Subtarget.is64Bit() ? X86::RET64 : X86::RET32;
337}
338
339MCOperand X86MCInstLower::LowerMachineOperand(const MachineInstr *MI,
340 const MachineOperand &MO) const {
341 switch (MO.getType()) {
342 default:
343 MI->print(errs());
344 llvm_unreachable("unknown operand type");
345 case MachineOperand::MO_Register:
346 // Ignore all implicit register operands.
347 if (MO.isImplicit())
348 return MCOperand();
349 return MCOperand::createReg(MO.getReg());
350 case MachineOperand::MO_Immediate:
351 return MCOperand::createImm(MO.getImm());
352 case MachineOperand::MO_MachineBasicBlock:
353 case MachineOperand::MO_GlobalAddress:
354 case MachineOperand::MO_ExternalSymbol:
355 return LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
356 case MachineOperand::MO_MCSymbol:
357 return LowerSymbolOperand(MO, MO.getMCSymbol());
358 case MachineOperand::MO_JumpTableIndex:
359 return LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex()));
360 case MachineOperand::MO_ConstantPoolIndex:
361 return LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex()));
362 case MachineOperand::MO_BlockAddress:
363 return LowerSymbolOperand(
364 MO, AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()));
365 case MachineOperand::MO_RegisterMask:
366 // Ignore call clobbers.
367 return MCOperand();
368 }
369}
370
371// Replace TAILJMP opcodes with their equivalent opcodes that have encoding
372// information.
373static unsigned convertTailJumpOpcode(unsigned Opcode, bool IsLarge = false) {
374 switch (Opcode) {
375 case X86::TAILJMPr:
376 Opcode = X86::JMP32r;
377 break;
378 case X86::TAILJMPm:
379 Opcode = X86::JMP32m;
380 break;
381 case X86::TAILJMPr64:
382 Opcode = X86::JMP64r;
383 break;
384 case X86::TAILJMPm64:
385 Opcode = X86::JMP64m;
386 break;
387 case X86::TAILJMPr64_REX:
388 Opcode = X86::JMP64r_REX;
389 break;
390 case X86::TAILJMPm64_REX:
391 Opcode = X86::JMP64m_REX;
392 break;
393 case X86::TAILJMPd:
394 case X86::TAILJMPd64:
395 Opcode = IsLarge ? X86::JMPABS64i : X86::JMP_1;
396 break;
397 case X86::TAILJMPd_CC:
398 case X86::TAILJMPd64_CC:
399 Opcode = X86::JCC_1;
400 break;
401 }
402
403 return Opcode;
404}
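// For example, convertTailJumpOpcode(X86::TAILJMPr64) returns X86::JMP64r,
// and convertTailJumpOpcode(X86::TAILJMPd64, /*IsLarge=*/true) returns
// X86::JMPABS64i so the large code model can encode a 64-bit absolute target.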
405
406void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
407 OutMI.setOpcode(MI->getOpcode());
408
409 for (const MachineOperand &MO : MI->operands())
410 if (auto Op = LowerMachineOperand(MI, MO); Op.isValid())
411 OutMI.addOperand(Op);
412
413 bool In64BitMode = AsmPrinter.getSubtarget().is64Bit();
414 if (X86::optimizeInstFromVEX3ToVEX2(OutMI, MI->getDesc()) ||
415 X86::optimizeShiftRotateWithImmediateOne(OutMI) ||
416 X86::optimizeVPCMPWithImmediateOneOrSix(OutMI) ||
417 X86::optimizeMOVSX(OutMI) || X86::optimizeINCDEC(OutMI, In64BitMode) ||
418 X86::optimizeMOV(OutMI, In64BitMode) ||
419 X86::optimizeToFixedRegisterOrShortImmediateForm(OutMI))
420 return;
421
422 // Handle a few special cases to eliminate operand modifiers.
423 switch (OutMI.getOpcode()) {
424 case X86::LEA64_32r:
425 case X86::LEA64r:
426 case X86::LEA16r:
427 case X86::LEA32r:
428 // LEA should have a segment register, but it must be empty.
430 "Unexpected # of LEA operands");
431 assert(OutMI.getOperand(1 + X86::AddrSegmentReg).getReg() == 0 &&
432 "LEA has segment specified!");
433 break;
434 case X86::MULX32Hrr:
435 case X86::MULX32Hrm:
436 case X86::MULX64Hrr:
437 case X86::MULX64Hrm: {
438 // Turn into regular MULX by duplicating the destination.
439 unsigned NewOpc;
440 switch (OutMI.getOpcode()) {
441 default: llvm_unreachable("Invalid opcode");
442 case X86::MULX32Hrr: NewOpc = X86::MULX32rr; break;
443 case X86::MULX32Hrm: NewOpc = X86::MULX32rm; break;
444 case X86::MULX64Hrr: NewOpc = X86::MULX64rr; break;
445 case X86::MULX64Hrm: NewOpc = X86::MULX64rm; break;
446 }
447 OutMI.setOpcode(NewOpc);
448 // Duplicate the destination.
449 MCRegister DestReg = OutMI.getOperand(0).getReg();
450 OutMI.insert(OutMI.begin(), MCOperand::createReg(DestReg));
451 break;
452 }
453 // CALL64r, CALL64pcrel32 - These instructions used to have
454 // register inputs modeled as normal uses instead of implicit uses. As such,
455 // we used to truncate off all but the first operand (the callee). This
456 // issue seems to have been fixed at some point. This assert verifies that.
457 case X86::CALL64r:
458 case X86::CALL64pcrel32:
459 assert(OutMI.getNumOperands() == 1 && "Unexpected number of operands!");
460 break;
461 case X86::EH_RETURN:
462 case X86::EH_RETURN64: {
463 OutMI = MCInst();
464 OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
465 break;
466 }
467 case X86::CLEANUPRET: {
468 // Replace CLEANUPRET with the appropriate RET.
469 OutMI = MCInst();
470 OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
471 break;
472 }
473 case X86::CATCHRET: {
474 // Replace CATCHRET with the appropriate RET.
475 const X86Subtarget &Subtarget = AsmPrinter.getSubtarget();
476 unsigned ReturnReg = In64BitMode ? X86::RAX : X86::EAX;
477 OutMI = MCInst();
478 OutMI.setOpcode(getRetOpcode(Subtarget));
479 OutMI.addOperand(MCOperand::createReg(ReturnReg));
480 break;
481 }
482 // TAILJMPd, TAILJMPd64, TailJMPd_cc - Lower to the correct jump
483 // instruction.
484 case X86::TAILJMPr:
485 case X86::TAILJMPr64:
486 case X86::TAILJMPr64_REX:
487 case X86::TAILJMPd:
488 assert(OutMI.getNumOperands() == 1 && "Unexpected number of operands!");
489 OutMI.setOpcode(convertTailJumpOpcode(OutMI.getOpcode()));
490 break;
491 case X86::TAILJMPd64: {
492 assert(OutMI.getNumOperands() == 1 && "Unexpected number of operands!");
493 bool IsLarge = TM.getCodeModel() == CodeModel::Large;
494 assert((!IsLarge || AsmPrinter.getSubtarget().hasJMPABS()) &&
495 "Unexpected TAILJMPd64 in large code model without JMPABS");
496 OutMI.setOpcode(convertTailJumpOpcode(OutMI.getOpcode(), IsLarge));
497 break;
498 }
499 case X86::TAILJMPd_CC:
500 case X86::TAILJMPd64_CC:
501 assert(OutMI.getNumOperands() == 2 && "Unexpected number of operands!");
502 OutMI.setOpcode(convertTailJumpOpcode(OutMI.getOpcode()));
503 break;
504 case X86::TAILJMPm:
505 case X86::TAILJMPm64:
506 case X86::TAILJMPm64_REX:
508 "Unexpected number of operands!");
510 break;
511 case X86::MASKMOVDQU:
512 case X86::VMASKMOVDQU:
513 if (In64BitMode)
514 OutMI.setFlags(X86::IP_HAS_AD_SIZE);
515 break;
516 case X86::BSF16rm:
517 case X86::BSF16rr:
518 case X86::BSF32rm:
519 case X86::BSF32rr:
520 case X86::BSF64rm:
521 case X86::BSF64rr: {
522 // Add a REP prefix to BSF instructions so that new processors can
523 // recognize them as TZCNT, which has better performance than BSF.
524 // BSF and TZCNT have different interpretations on ZF bit. So make sure
525 // it won't be used later.
526 const MachineOperand *FlagDef =
527 MI->findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
528 if (!MF.getFunction().hasOptSize() && FlagDef && FlagDef->isDead())
529 OutMI.setFlags(X86::IP_HAS_REPEAT);
530 break;
531 }
532 default:
533 break;
534 }
535}
536
537void X86AsmPrinter::LowerTlsAddr(X86MCInstLower &MCInstLowering,
538 const MachineInstr &MI) {
539 NoAutoPaddingScope NoPadScope(*OutStreamer);
540 bool Is64Bits = getSubtarget().is64Bit();
541 bool Is64BitsLP64 = getSubtarget().isTarget64BitLP64();
542 MCContext &Ctx = OutStreamer->getContext();
543
544 unsigned Specifier;
545 switch (MI.getOpcode()) {
546 case X86::TLS_addr32:
547 case X86::TLS_addr64:
548 case X86::TLS_addrX32:
549 Specifier = X86::S_TLSGD;
550 break;
551 case X86::TLS_base_addr32:
552 Specifier = X86::S_TLSLDM;
553 break;
554 case X86::TLS_base_addr64:
555 case X86::TLS_base_addrX32:
556 Specifier = X86::S_TLSLD;
557 break;
558 case X86::TLS_desc32:
559 case X86::TLS_desc64:
560 Specifier = X86::S_TLSDESC;
561 break;
562 default:
563 llvm_unreachable("unexpected opcode");
564 }
565
566 const MCSymbolRefExpr *Sym = MCSymbolRefExpr::create(
567 MCInstLowering.GetSymbolFromOperand(MI.getOperand(3)), Specifier, Ctx);
568
569 // Before binutils 2.41, ld has a bogus TLS relaxation error when the GD/LD
570 // code sequence using R_X86_64_GOTPCREL (instead of R_X86_64_GOTPCRELX) is
571 // attempted to be relaxed to IE/LE (binutils PR24784). Work around the bug by
572 // only using GOT when GOTPCRELX is enabled.
573 // TODO Delete the workaround when rustc no longer relies on the hack
574 bool UseGot = MMI->getModule()->getRtLibUseGOT() &&
575 Ctx.getTargetOptions()->X86RelaxRelocations;
576
577 if (Specifier == X86::S_TLSDESC) {
578 const MCSymbolRefExpr *Expr = MCSymbolRefExpr::create(
579 MCInstLowering.GetSymbolFromOperand(MI.getOperand(3)), X86::S_TLSCALL,
580 Ctx);
581 EmitAndCountInstruction(
582 MCInstBuilder(Is64BitsLP64 ? X86::LEA64r : X86::LEA32r)
583 .addReg(Is64BitsLP64 ? X86::RAX : X86::EAX)
584 .addReg(Is64Bits ? X86::RIP : X86::EBX)
585 .addImm(1)
586 .addReg(0)
587 .addExpr(Sym)
588 .addReg(0));
589 EmitAndCountInstruction(
590 MCInstBuilder(Is64Bits ? X86::CALL64m : X86::CALL32m)
591 .addReg(Is64BitsLP64 ? X86::RAX : X86::EAX)
592 .addImm(1)
593 .addReg(0)
594 .addExpr(Expr)
595 .addReg(0));
596 } else if (Is64Bits) {
597 bool NeedsPadding = Specifier == X86::S_TLSGD;
598 if (NeedsPadding && Is64BitsLP64)
599 EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
600 EmitAndCountInstruction(MCInstBuilder(X86::LEA64r)
601 .addReg(X86::RDI)
602 .addReg(X86::RIP)
603 .addImm(1)
604 .addReg(0)
605 .addExpr(Sym)
606 .addReg(0));
607 const MCSymbol *TlsGetAddr = Ctx.getOrCreateSymbol("__tls_get_addr");
608 if (NeedsPadding) {
609 if (!UseGot)
610 EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
611 EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
612 EmitAndCountInstruction(MCInstBuilder(X86::REX64_PREFIX));
613 }
614 if (UseGot) {
615 const MCExpr *Expr =
616 MCSymbolRefExpr::create(TlsGetAddr, X86::S_GOTPCREL, Ctx);
617 EmitAndCountInstruction(MCInstBuilder(X86::CALL64m)
618 .addReg(X86::RIP)
619 .addImm(1)
620 .addReg(0)
621 .addExpr(Expr)
622 .addReg(0));
623 } else {
624 EmitAndCountInstruction(
625 MCInstBuilder(X86::CALL64pcrel32)
626 .addExpr(MCSymbolRefExpr::create(TlsGetAddr, X86::S_PLT, Ctx)));
627 }
628 } else {
629 if (Specifier == X86::S_TLSGD && !UseGot) {
630 EmitAndCountInstruction(MCInstBuilder(X86::LEA32r)
631 .addReg(X86::EAX)
632 .addReg(0)
633 .addImm(1)
634 .addReg(X86::EBX)
635 .addExpr(Sym)
636 .addReg(0));
637 } else {
638 EmitAndCountInstruction(MCInstBuilder(X86::LEA32r)
639 .addReg(X86::EAX)
640 .addReg(X86::EBX)
641 .addImm(1)
642 .addReg(0)
643 .addExpr(Sym)
644 .addReg(0));
645 }
646
647 const MCSymbol *TlsGetAddr = Ctx.getOrCreateSymbol("___tls_get_addr");
648 if (UseGot) {
649 const MCExpr *Expr = MCSymbolRefExpr::create(TlsGetAddr, X86::S_GOT, Ctx);
650 EmitAndCountInstruction(MCInstBuilder(X86::CALL32m)
651 .addReg(X86::EBX)
652 .addImm(1)
653 .addReg(0)
654 .addExpr(Expr)
655 .addReg(0));
656 } else {
657 EmitAndCountInstruction(
658 MCInstBuilder(X86::CALLpcrel32)
659 .addExpr(MCSymbolRefExpr::create(TlsGetAddr, X86::S_PLT, Ctx)));
660 }
661 }
662}
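// For reference, the 64-bit LP64 general-dynamic path above (S_TLSGD, with a
// direct call rather than a GOT-indirect one) produces the canonical padded
// sequence that linkers expect when relaxing GD to IE/LE:
//
//   data16 leaq x@tlsgd(%rip), %rdi
//   data16 data16 rex64 callq __tls_get_addr@PLT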
663
664/// Emit the largest nop instruction smaller than or equal to \p NumBytes
665/// bytes. Return the size of nop emitted.
666static unsigned emitNop(MCStreamer &OS, unsigned NumBytes,
667 const X86Subtarget *Subtarget) {
668 // Determine the longest nop which can be efficiently decoded for the given
669 // target cpu. 15-bytes is the longest single NOP instruction, but some
670 // platforms can't decode the longest forms efficiently.
671 unsigned MaxNopLength = 1;
672 if (Subtarget->is64Bit()) {
673 // FIXME: We can use NOOPL on 32-bit targets with FeatureNOPL, but the
674 // IndexReg/BaseReg below need to be updated.
675 if (Subtarget->hasFeature(X86::TuningFast7ByteNOP))
676 MaxNopLength = 7;
677 else if (Subtarget->hasFeature(X86::TuningFast15ByteNOP))
678 MaxNopLength = 15;
679 else if (Subtarget->hasFeature(X86::TuningFast11ByteNOP))
680 MaxNopLength = 11;
681 else
682 MaxNopLength = 10;
683 } else if (Subtarget->is32Bit())
684 MaxNopLength = 2;
685
686 // Cap a single nop emission at the profitable value for the target
687 NumBytes = std::min(NumBytes, MaxNopLength);
688
689 unsigned NopSize;
690 unsigned Opc, BaseReg, ScaleVal, IndexReg, Displacement, SegmentReg;
691 IndexReg = Displacement = SegmentReg = 0;
692 BaseReg = X86::RAX;
693 ScaleVal = 1;
694 switch (NumBytes) {
695 case 0:
696 llvm_unreachable("Zero nops?");
697 break;
698 case 1:
699 NopSize = 1;
700 Opc = X86::NOOP;
701 break;
702 case 2:
703 NopSize = 2;
704 Opc = X86::XCHG16ar;
705 break;
706 case 3:
707 NopSize = 3;
708 Opc = X86::NOOPL;
709 break;
710 case 4:
711 NopSize = 4;
712 Opc = X86::NOOPL;
713 Displacement = 8;
714 break;
715 case 5:
716 NopSize = 5;
717 Opc = X86::NOOPL;
718 Displacement = 8;
719 IndexReg = X86::RAX;
720 break;
721 case 6:
722 NopSize = 6;
723 Opc = X86::NOOPW;
724 Displacement = 8;
725 IndexReg = X86::RAX;
726 break;
727 case 7:
728 NopSize = 7;
729 Opc = X86::NOOPL;
730 Displacement = 512;
731 break;
732 case 8:
733 NopSize = 8;
734 Opc = X86::NOOPL;
735 Displacement = 512;
736 IndexReg = X86::RAX;
737 break;
738 case 9:
739 NopSize = 9;
740 Opc = X86::NOOPW;
741 Displacement = 512;
742 IndexReg = X86::RAX;
743 break;
744 default:
745 NopSize = 10;
746 Opc = X86::NOOPW;
747 Displacement = 512;
748 IndexReg = X86::RAX;
749 SegmentReg = X86::CS;
750 break;
751 }
752
753 unsigned NumPrefixes = std::min(NumBytes - NopSize, 5U);
754 NopSize += NumPrefixes;
755 for (unsigned i = 0; i != NumPrefixes; ++i)
756 OS.emitBytes("\x66");
757
758 switch (Opc) {
759 default: llvm_unreachable("Unexpected opcode");
760 case X86::NOOP:
761 OS.emitInstruction(MCInstBuilder(Opc), *Subtarget);
762 break;
763 case X86::XCHG16ar:
764 OS.emitInstruction(MCInstBuilder(Opc).addReg(X86::AX).addReg(X86::AX),
765 *Subtarget);
766 break;
767 case X86::NOOPL:
768 case X86::NOOPW:
769 OS.emitInstruction(MCInstBuilder(Opc)
770 .addReg(BaseReg)
771 .addImm(ScaleVal)
772 .addReg(IndexReg)
773 .addImm(Displacement)
774 .addReg(SegmentReg),
775 *Subtarget);
776 break;
777 }
778 assert(NopSize <= NumBytes && "We overemitted?");
779 return NopSize;
780}
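// As a concrete example of the selection above: a request for 4 bytes picks
// NOOPL with an 8-byte displacement, i.e. "nopl 8(%rax)" (0F 1F 40 08), while
// a request for 7 bytes picks NOOPL with a 512-byte displacement, i.e.
// "nopl 512(%rax)" (0F 1F 80 00 02 00 00). Requests of 11-15 bytes reuse the
// 10-byte form and prepend up to five 0x66 prefixes.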
781
782/// Emit the optimal amount of multi-byte nops on X86.
783static void emitX86Nops(MCStreamer &OS, unsigned NumBytes,
784 const X86Subtarget *Subtarget) {
785 unsigned NopsToEmit = NumBytes;
786 (void)NopsToEmit;
787 while (NumBytes) {
788 NumBytes -= emitNop(OS, NumBytes, Subtarget);
789 assert(NopsToEmit >= NumBytes && "Emitted more than I asked for!");
790 }
791}
792
793void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI,
794 X86MCInstLower &MCIL) {
795 assert(Subtarget->is64Bit() && "Statepoint currently only supports X86-64");
796
797 NoAutoPaddingScope NoPadScope(*OutStreamer);
798
799 StatepointOpers SOpers(&MI);
800 if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
801 emitX86Nops(*OutStreamer, PatchBytes, Subtarget);
802 } else {
803 // Lower call target and choose correct opcode
804 const MachineOperand &CallTarget = SOpers.getCallTarget();
805 MCOperand CallTargetMCOp;
806 unsigned CallOpcode;
807 switch (CallTarget.getType()) {
808 case MachineOperand::MO_GlobalAddress:
809 case MachineOperand::MO_ExternalSymbol:
810 CallTargetMCOp = MCIL.LowerSymbolOperand(
811 CallTarget, MCIL.GetSymbolFromOperand(CallTarget));
812 CallOpcode = X86::CALL64pcrel32;
813 // Currently, we only support relative addressing with statepoints.
814 // Otherwise, we'll need a scratch register to hold the target
815 // address. You'll fail asserts during load & relocation if this
816 // symbol is too far away. (TODO: support non-relative addressing)
817 break;
818 case MachineOperand::MO_Immediate:
819 CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
820 CallOpcode = X86::CALL64pcrel32;
821 // Currently, we only support relative addressing with statepoints.
822 // Otherwise, we'll need a scratch register to hold the target
823 // immediate. You'll fail asserts during load & relocation if this
824 // address is too far away. (TODO: support non-relative addressing)
825 break;
826 case MachineOperand::MO_Register:
827 // FIXME: Add retpoline support and remove this.
828 if (Subtarget->useIndirectThunkCalls())
829 report_fatal_error("Lowering register statepoints with thunks not "
830 "yet implemented.");
831 CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
832 CallOpcode = X86::CALL64r;
833 break;
834 default:
835 llvm_unreachable("Unsupported operand type in statepoint call target");
836 break;
837 }
838
839 // Emit call
840 MCInst CallInst;
841 CallInst.setOpcode(CallOpcode);
842 CallInst.addOperand(CallTargetMCOp);
843 OutStreamer->emitInstruction(CallInst, getSubtargetInfo());
844 maybeEmitNopAfterCallForWindowsEH(&MI);
845 }
846
847 // Record our statepoint node in the same section used by STACKMAP
848 // and PATCHPOINT
849 auto &Ctx = OutStreamer->getContext();
850 MCSymbol *MILabel = Ctx.createTempSymbol();
851 OutStreamer->emitLabel(MILabel);
852 SM.recordStatepoint(*MILabel, MI);
853}
854
855void X86AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI,
856 X86MCInstLower &MCIL) {
857 // FAULTING_LOAD_OP <def>, <faulting type>, <MBB handler>,
858 // <opcode>, <operands>
859
860 NoAutoPaddingScope NoPadScope(*OutStreamer);
861
862 Register DefRegister = FaultingMI.getOperand(0).getReg();
863 FaultMaps::FaultKind FK =
864 static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
865 MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
866 unsigned Opcode = FaultingMI.getOperand(3).getImm();
867 unsigned OperandsBeginIdx = 4;
868
869 auto &Ctx = OutStreamer->getContext();
870 MCSymbol *FaultingLabel = Ctx.createTempSymbol();
871 OutStreamer->emitLabel(FaultingLabel);
872
873 assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
874 FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);
875
876 MCInst MI;
877 MI.setOpcode(Opcode);
878
879 if (DefRegister != X86::NoRegister)
880 MI.addOperand(MCOperand::createReg(DefRegister));
881
882 for (const MachineOperand &MO :
883 llvm::drop_begin(FaultingMI.operands(), OperandsBeginIdx))
884 if (auto Op = MCIL.LowerMachineOperand(&FaultingMI, MO); Op.isValid())
885 MI.addOperand(Op);
886
887 OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
888 OutStreamer->emitInstruction(MI, getSubtargetInfo());
889}
890
891void X86AsmPrinter::LowerFENTRY_CALL(const MachineInstr &MI,
892 X86MCInstLower &MCIL) {
893 bool Is64Bits = Subtarget->is64Bit();
894 MCContext &Ctx = OutStreamer->getContext();
895 MCSymbol *fentry = Ctx.getOrCreateSymbol("__fentry__");
896 const MCSymbolRefExpr *Op = MCSymbolRefExpr::create(fentry, Ctx);
897
898 EmitAndCountInstruction(
899 MCInstBuilder(Is64Bits ? X86::CALL64pcrel32 : X86::CALLpcrel32)
900 .addExpr(Op));
901}
902
903void X86AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
904 assert(std::next(MI.getIterator())->isCall() &&
905 "KCFI_CHECK not followed by a call instruction");
906
907 // Adjust the offset for patchable-function-prefix. X86InstrInfo::getNop()
908 // returns a 1-byte X86::NOOP, which means the offset is the same in
909 // bytes. This assumes that patchable-function-prefix is the same for all
910 // functions.
911 const MachineFunction &MF = *MI.getMF();
912 int64_t PrefixNops = MF.getFunction().getFnAttributeAsParsedInteger(
913 "patchable-function-prefix");
914
915 // KCFI allows indirect calls to any location that's preceded by a valid
916 // type identifier. To avoid encoding the full constant into an instruction,
917 // and thus emitting potential call target gadgets at each indirect call
918 // site, load a negated constant to a register and compare that to the
919 // expected value at the call target.
920 const Register AddrReg = MI.getOperand(0).getReg();
921 const uint32_t Type = MI.getOperand(1).getImm();
922 // The check is immediately before the call. If the call target is in R10,
923 // we can clobber R11 for the check instead.
924 unsigned TempReg = AddrReg == X86::R10 ? X86::R11D : X86::R10D;
925 EmitAndCountInstruction(
926 MCInstBuilder(X86::MOV32ri).addReg(TempReg).addImm(-MaskKCFIType(Type)));
927 EmitAndCountInstruction(MCInstBuilder(X86::ADD32rm)
928 .addReg(X86::NoRegister)
929 .addReg(TempReg)
930 .addReg(AddrReg)
931 .addImm(1)
932 .addReg(X86::NoRegister)
933 .addImm(-(PrefixNops + 4))
934 .addReg(X86::NoRegister));
935
936 MCSymbol *Pass = OutContext.createTempSymbol();
937 EmitAndCountInstruction(
938 MCInstBuilder(X86::JCC_1)
939 .addExpr(MCSymbolRefExpr::create(Pass, OutContext))
940 .addImm(X86::COND_E));
941
942 MCSymbol *Trap = OutContext.createTempSymbol();
943 OutStreamer->emitLabel(Trap);
944 EmitAndCountInstruction(MCInstBuilder(X86::TRAP));
945 emitKCFITrapEntry(MF, Trap);
946 OutStreamer->emitLabel(Pass);
947}
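// Illustratively, for a call through %rax in a function with no
// patchable-function-prefix NOPs, the code above expands to:
//
//   movl $<negated expected type hash>, %r10d
//   addl -4(%rax), %r10d     # type hash is stored just before the target
//   je   .Lpass
// .Ltrap:
//   ud2                      # recorded via emitKCFITrapEntry()
// .Lpass:                    # the guarded indirect call follows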
948
949void X86AsmPrinter::LowerASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
950 // FIXME: Make this work on non-ELF.
951 if (!TM.getTargetTriple().isOSBinFormatELF()) {
952 report_fatal_error("llvm.asan.check.memaccess only supported on ELF");
953 return;
954 }
955
956 const auto &Reg = MI.getOperand(0).getReg();
957 ASanAccessInfo AccessInfo(MI.getOperand(1).getImm());
958
959 uint64_t ShadowBase;
960 int MappingScale;
961 bool OrShadowOffset;
962 getAddressSanitizerParams(TM.getTargetTriple(), 64, AccessInfo.CompileKernel,
963 &ShadowBase, &MappingScale, &OrShadowOffset);
964
965 StringRef Name = AccessInfo.IsWrite ? "store" : "load";
966 StringRef Op = OrShadowOffset ? "or" : "add";
967 std::string SymName = ("__asan_check_" + Name + "_" + Op + "_" +
968 Twine(1ULL << AccessInfo.AccessSizeIndex) + "_" +
969 TM.getMCRegisterInfo()->getName(Reg.asMCReg()))
970 .str();
971 if (OrShadowOffset)
973 "OrShadowOffset is not supported with optimized callbacks");
974
975 EmitAndCountInstruction(
976 MCInstBuilder(X86::CALL64pcrel32)
977 .addExpr(MCSymbolRefExpr::create(
978 OutContext.getOrCreateSymbol(SymName), OutContext)));
979}
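// For example, a 4-byte load checked through %rdi with an "add"-style shadow
// mapping calls a callback named __asan_check_load_add_4_RDI (assuming the
// MCRegisterInfo name for the register is "RDI").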
980
981void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
982 X86MCInstLower &MCIL) {
983 // PATCHABLE_OP minsize
984
985 NoAutoPaddingScope NoPadScope(*OutStreamer);
986
987 auto NextMI = std::find_if(std::next(MI.getIterator()),
988 MI.getParent()->end().getInstrIterator(),
989 [](auto &II) { return !II.isMetaInstruction(); });
990
991 SmallString<256> Code;
992 unsigned MinSize = MI.getOperand(0).getImm();
993
994 if (NextMI != MI.getParent()->end() && !NextMI->isInlineAsm()) {
995 // Lower the next MachineInstr to find its byte size.
996 // If the next instruction is inline assembly, we skip lowering it for now,
997 // and assume we should always generate NOPs.
998 MCInst MCI;
999 MCIL.Lower(&*NextMI, MCI);
1000
1001 SmallVector<MCFixup, 4> Fixups;
1002 CodeEmitter->encodeInstruction(MCI, Code, Fixups, getSubtargetInfo());
1003 }
1004
1005 if (Code.size() < MinSize) {
1006 if (MinSize == 2 && Subtarget->is32Bit() &&
1007 Subtarget->isTargetWindowsMSVC() &&
1008 (Subtarget->getCPU().empty() || Subtarget->getCPU() == "pentium3")) {
1009 // For compatibility reasons, when targeting MSVC, it is important to
1010 // generate a 'legacy' NOP in the form of a 8B FF MOV EDI, EDI. Some tools
1011 // rely specifically on this pattern to be able to patch a function.
1012 // This is only for 32-bit targets, when using /arch:IA32 or /arch:SSE.
1013 OutStreamer->emitInstruction(
1014 MCInstBuilder(X86::MOV32rr_REV).addReg(X86::EDI).addReg(X86::EDI),
1015 *Subtarget);
1016 } else {
1017 unsigned NopSize = emitNop(*OutStreamer, MinSize, Subtarget);
1018 assert(NopSize == MinSize && "Could not implement MinSize!");
1019 (void)NopSize;
1020 }
1021 }
1022}
1023
1024// Lower a stackmap of the form:
1025// <id>, <shadowBytes>, ...
1026void X86AsmPrinter::LowerSTACKMAP(const MachineInstr &MI) {
1027 SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
1028
1029 auto &Ctx = OutStreamer->getContext();
1030 MCSymbol *MILabel = Ctx.createTempSymbol();
1031 OutStreamer->emitLabel(MILabel);
1032
1033 SM.recordStackMap(*MILabel, MI);
1034 unsigned NumShadowBytes = MI.getOperand(1).getImm();
1035 SMShadowTracker.reset(NumShadowBytes);
1036}
1037
1038// Lower a patchpoint of the form:
1039// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
1040void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
1041 X86MCInstLower &MCIL) {
1042 assert(Subtarget->is64Bit() && "Patchpoint currently only supports X86-64");
1043
1044 SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
1045
1046 NoAutoPaddingScope NoPadScope(*OutStreamer);
1047
1048 auto &Ctx = OutStreamer->getContext();
1049 MCSymbol *MILabel = Ctx.createTempSymbol();
1050 OutStreamer->emitLabel(MILabel);
1051 SM.recordPatchPoint(*MILabel, MI);
1052
1053 PatchPointOpers opers(&MI);
1054 unsigned ScratchIdx = opers.getNextScratchIdx();
1055 unsigned EncodedBytes = 0;
1056 const MachineOperand &CalleeMO = opers.getCallTarget();
1057
1058 // Check for null target. If target is non-null (i.e. is non-zero or is
1059 // symbolic) then emit a call.
1060 if (!(CalleeMO.isImm() && !CalleeMO.getImm())) {
1061 MCOperand CalleeMCOp;
1062 switch (CalleeMO.getType()) {
1063 default:
1064 /// FIXME: Add a verifier check for bad callee types.
1065 llvm_unreachable("Unrecognized callee operand type.");
1066 case MachineOperand::MO_Immediate:
1067 if (CalleeMO.getImm())
1068 CalleeMCOp = MCOperand::createImm(CalleeMO.getImm());
1069 break;
1070 case MachineOperand::MO_ExternalSymbol:
1071 case MachineOperand::MO_GlobalAddress:
1072 CalleeMCOp = MCIL.LowerSymbolOperand(CalleeMO,
1073 MCIL.GetSymbolFromOperand(CalleeMO));
1074 break;
1075 }
1076
1077 // Emit MOV to materialize the target address and the CALL to target.
1078 // This is encoded with 12-13 bytes, depending on which register is used.
1079 Register ScratchReg = MI.getOperand(ScratchIdx).getReg();
1080 if (X86II::isX86_64ExtendedReg(ScratchReg))
1081 EncodedBytes = 13;
1082 else
1083 EncodedBytes = 12;
1084
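 // For example, with %rax as the scratch register the pair below is
 //   movabsq $target, %rax (10 bytes) + callq *%rax (2 bytes) = 12 bytes,
 // while an extended register such as %r11 needs an extra REX prefix on the
 // indirect call, giving the 13-byte total.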
1085 EmitAndCountInstruction(
1086 MCInstBuilder(X86::MOV64ri).addReg(ScratchReg).addOperand(CalleeMCOp));
1087 // FIXME: Add retpoline support and remove this.
1088 if (Subtarget->useIndirectThunkCalls())
1090 "Lowering patchpoint with thunks not yet implemented.");
1091 EmitAndCountInstruction(MCInstBuilder(X86::CALL64r).addReg(ScratchReg));
1092 }
1093
1094 // Emit padding.
1095 unsigned NumBytes = opers.getNumPatchBytes();
1096 assert(NumBytes >= EncodedBytes &&
1097 "Patchpoint can't request size less than the length of a call.");
1098
1099 emitX86Nops(*OutStreamer, NumBytes - EncodedBytes, Subtarget);
1100}
1101
1102void X86AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
1103 X86MCInstLower &MCIL) {
1104 assert(Subtarget->is64Bit() && "XRay custom events only supports X86-64");
1105
1106 NoAutoPaddingScope NoPadScope(*OutStreamer);
1107
1108 // We want to emit the following pattern, which follows the x86 calling
1109 // convention to prepare for the trampoline call to be patched in.
1110 //
1111 // .p2align 1, ...
1112 // .Lxray_event_sled_N:
1113 // jmp +N // jump across the instrumentation sled
1114 // ... // set up arguments in register
1115 // callq __xray_CustomEvent@plt // force dependency to symbol
1116 // ...
1117 // <jump here>
1118 //
1119 // After patching, it would look something like:
1120 //
1121 // nopw (2-byte nop)
1122 // ...
1123 // callq __xrayCustomEvent // already lowered
1124 // ...
1125 //
1126 // ---
1127 // First we emit the label and the jump.
1128 auto CurSled = OutContext.createTempSymbol("xray_event_sled_", true);
1129 OutStreamer->AddComment("# XRay Custom Event Log");
1130 OutStreamer->emitCodeAlignment(Align(2), &getSubtargetInfo());
1131 OutStreamer->emitLabel(CurSled);
1132
1133 // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as
1134 // an operand (computed as an offset from the jmp instruction).
1135 // FIXME: Find another less hacky way do force the relative jump.
1136 OutStreamer->emitBinaryData("\xeb\x0f");
1137
1138 // The default C calling convention will place two arguments into %rcx and
1139 // %rdx -- so we only work with those.
1140 const Register DestRegs[] = {X86::RDI, X86::RSI};
1141 bool UsedMask[] = {false, false};
1142 // Filled out in loop.
1143 Register SrcRegs[] = {0, 0};
1144
1145 // Then we put the operands in the %rdi and %rsi registers. We spill the
1146 // values in the register before we clobber them, and mark them as used in
1147 // UsedMask. In case the arguments are already in the correct register, we
1148 // emit nops appropriately sized to keep the sled the same size in every
1149 // situation.
1150 for (unsigned I = 0; I < MI.getNumOperands(); ++I)
1151 if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I));
1152 Op.isValid()) {
1153 assert(Op.isReg() && "Only support arguments in registers");
1154 SrcRegs[I] = getX86SubSuperRegister(Op.getReg(), 64);
1155 assert(SrcRegs[I].isValid() && "Invalid operand");
1156 if (SrcRegs[I] != DestRegs[I]) {
1157 UsedMask[I] = true;
1158 EmitAndCountInstruction(
1159 MCInstBuilder(X86::PUSH64r).addReg(DestRegs[I]));
1160 } else {
1161 emitX86Nops(*OutStreamer, 4, Subtarget);
1162 }
1163 }
1164
1165 // Now that the register values are stashed, mov arguments into place.
1166 // FIXME: This doesn't work if one of the later SrcRegs is equal to an
1167 // earlier DestReg. We will have already overwritten over the register before
1168 // we can copy from it.
1169 for (unsigned I = 0; I < MI.getNumOperands(); ++I)
1170 if (SrcRegs[I] != DestRegs[I])
1171 EmitAndCountInstruction(
1172 MCInstBuilder(X86::MOV64rr).addReg(DestRegs[I]).addReg(SrcRegs[I]));
1173
1174 // We emit a hard dependency on the __xray_CustomEvent symbol, which is the
1175 // name of the trampoline to be implemented by the XRay runtime.
1176 auto TSym = OutContext.getOrCreateSymbol("__xray_CustomEvent");
1177 MachineOperand TOp = MachineOperand::CreateMCSymbol(TSym);
1178 if (isPositionIndependent())
1179 TOp.setTargetFlags(X86II::MO_PLT);
1180
1181 // Emit the call instruction.
1182 EmitAndCountInstruction(MCInstBuilder(X86::CALL64pcrel32)
1183 .addOperand(MCIL.LowerSymbolOperand(TOp, TSym)));
1184
1185 // Restore caller-saved and used registers.
1186 for (unsigned I = sizeof UsedMask; I-- > 0;)
1187 if (UsedMask[I])
1188 EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(DestRegs[I]));
1189 else
1190 emitX86Nops(*OutStreamer, 1, Subtarget);
1191
1192 OutStreamer->AddComment("xray custom event end.");
1193
1194 // Record the sled version. Version 0 of this sled was spelled differently, so
1195 // we let the runtime handle the different offsets we're using. Version 2
1196 // changed the absolute address to a PC-relative address.
1197 recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 2);
1198}
1199
1200void X86AsmPrinter::LowerPATCHABLE_TYPED_EVENT_CALL(const MachineInstr &MI,
1201 X86MCInstLower &MCIL) {
1202 assert(Subtarget->is64Bit() && "XRay typed events only supports X86-64");
1203
1204 NoAutoPaddingScope NoPadScope(*OutStreamer);
1205
1206 // We want to emit the following pattern, which follows the x86 calling
1207 // convention to prepare for the trampoline call to be patched in.
1208 //
1209 // .p2align 1, ...
1210 // .Lxray_event_sled_N:
1211 // jmp +N // jump across the instrumentation sled
1212 // ... // set up arguments in register
1213 // callq __xray_TypedEvent@plt // force dependency to symbol
1214 // ...
1215 // <jump here>
1216 //
1217 // After patching, it would look something like:
1218 //
1219 // nopw (2-byte nop)
1220 // ...
1221 // callq __xrayTypedEvent // already lowered
1222 // ...
1223 //
1224 // ---
1225 // First we emit the label and the jump.
1226 auto CurSled = OutContext.createTempSymbol("xray_typed_event_sled_", true);
1227 OutStreamer->AddComment("# XRay Typed Event Log");
1228 OutStreamer->emitCodeAlignment(Align(2), &getSubtargetInfo());
1229 OutStreamer->emitLabel(CurSled);
1230
1231 // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as
1232 // an operand (computed as an offset from the jmp instruction).
1233 // FIXME: Find another less hacky way do force the relative jump.
1234 OutStreamer->emitBinaryData("\xeb\x14");
1235
1236 // An x86-64 convention may place three arguments into %rcx, %rdx, and R8,
1237 // so we'll work with those. Or we may be called via SystemV, in which case
1238 // we don't have to do any translation.
1239 const Register DestRegs[] = {X86::RDI, X86::RSI, X86::RDX};
1240 bool UsedMask[] = {false, false, false};
1241
1242 // Will fill out src regs in the loop.
1243 Register SrcRegs[] = {0, 0, 0};
1244
1245 // Then we put the operands in the SystemV registers. We spill the values in
1246 // the registers before we clobber them, and mark them as used in UsedMask.
1247 // In case the arguments are already in the correct register, we emit nops
1248 // appropriately sized to keep the sled the same size in every situation.
1249 for (unsigned I = 0; I < MI.getNumOperands(); ++I)
1250 if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I));
1251 Op.isValid()) {
1252 // TODO: Is register only support adequate?
1253 assert(Op.isReg() && "Only supports arguments in registers");
1254 SrcRegs[I] = getX86SubSuperRegister(Op.getReg(), 64);
1255 assert(SrcRegs[I].isValid() && "Invalid operand");
1256 if (SrcRegs[I] != DestRegs[I]) {
1257 UsedMask[I] = true;
1258 EmitAndCountInstruction(
1259 MCInstBuilder(X86::PUSH64r).addReg(DestRegs[I]));
1260 } else {
1261 emitX86Nops(*OutStreamer, 4, Subtarget);
1262 }
1263 }
1264
1265 // In the above loop we only stash all of the destination registers or emit
1266 // nops if the arguments are already in the right place. The actual moving
1267 // is postponed until after all the registers are stashed so nothing is
1268 // clobbered. We've already added nops to account for the size of mov and
1269 // push if the register is in the right place, so we only have to worry about
1270 // emitting movs.
1271 // FIXME: This doesn't work if one of the later SrcRegs is equal to an
1272 // earlier DestReg. We will have already overwritten over the register before
1273 // we can copy from it.
1274 for (unsigned I = 0; I < MI.getNumOperands(); ++I)
1275 if (UsedMask[I])
1276 EmitAndCountInstruction(
1277 MCInstBuilder(X86::MOV64rr).addReg(DestRegs[I]).addReg(SrcRegs[I]));
1278
1279 // We emit a hard dependency on the __xray_TypedEvent symbol, which is the
1280 // name of the trampoline to be implemented by the XRay runtime.
1281 auto TSym = OutContext.getOrCreateSymbol("__xray_TypedEvent");
1282 MachineOperand TOp = MachineOperand::CreateMCSymbol(TSym);
1283 if (isPositionIndependent())
1284 TOp.setTargetFlags(X86II::MO_PLT);
1285
1286 // Emit the call instruction.
1287 EmitAndCountInstruction(MCInstBuilder(X86::CALL64pcrel32)
1288 .addOperand(MCIL.LowerSymbolOperand(TOp, TSym)));
1289
1290 // Restore caller-saved and used registers.
1291 for (unsigned I = sizeof UsedMask; I-- > 0;)
1292 if (UsedMask[I])
1293 EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(DestRegs[I]));
1294 else
1295 emitX86Nops(*OutStreamer, 1, Subtarget);
1296
1297 OutStreamer->AddComment("xray typed event end.");
1298
1299 // Record the sled version.
1300 recordSled(CurSled, MI, SledKind::TYPED_EVENT, 2);
1301}
1302
1303void X86AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI,
1304 X86MCInstLower &MCIL) {
1305
1306 NoAutoPaddingScope NoPadScope(*OutStreamer);
1307
1308 const Function &F = MF->getFunction();
1309 if (F.hasFnAttribute("patchable-function-entry")) {
1310 unsigned Num = F.getFnAttributeAsParsedInteger("patchable-function-entry");
1311 emitX86Nops(*OutStreamer, Num, Subtarget);
1312 return;
1313 }
1314 // We want to emit the following pattern:
1315 //
1316 // .p2align 1, ...
1317 // .Lxray_sled_N:
1318 // jmp .tmpN
1319 // # 9 bytes worth of noops
1320 //
1321 // We need the 9 bytes because at runtime, we'd be patching over the full 11
1322 // bytes with the following pattern:
1323 //
1324 // mov %r10, <function id, 32-bit> // 6 bytes
1325 // call <relative offset, 32-bits> // 5 bytes
1326 //
1327 auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
1328 OutStreamer->emitCodeAlignment(Align(2), &getSubtargetInfo());
1329 OutStreamer->emitLabel(CurSled);
1330
1331 // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as
1332 // an operand (computed as an offset from the jmp instruction).
1333 // FIXME: Find another less hacky way do force the relative jump.
1334 OutStreamer->emitBytes("\xeb\x09");
1335 emitX86Nops(*OutStreamer, 9, Subtarget);
1336 recordSled(CurSled, MI, SledKind::FUNCTION_ENTER, 2);
1337}
1338
1339void X86AsmPrinter::LowerPATCHABLE_RET(const MachineInstr &MI,
1340 X86MCInstLower &MCIL) {
1341 NoAutoPaddingScope NoPadScope(*OutStreamer);
1342
1343 // Since PATCHABLE_RET takes the opcode of the return statement as an
1344 // argument, we use that to emit the correct form of the RET that we want.
1345 // i.e. when we see this:
1346 //
1347 // PATCHABLE_RET X86::RET ...
1348 //
1349 // We should emit the RET followed by sleds.
1350 //
1351 // .p2align 1, ...
1352 // .Lxray_sled_N:
1353 // ret # or equivalent instruction
1354 // # 10 bytes worth of noops
1355 //
1356 // This just makes sure that the alignment for the next instruction is 2.
1357 auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
1358 OutStreamer->emitCodeAlignment(Align(2), &getSubtargetInfo());
1359 OutStreamer->emitLabel(CurSled);
1360 unsigned OpCode = MI.getOperand(0).getImm();
1361 MCInst Ret;
1362 Ret.setOpcode(OpCode);
1363 for (auto &MO : drop_begin(MI.operands()))
1364 if (auto Op = MCIL.LowerMachineOperand(&MI, MO); Op.isValid())
1365 Ret.addOperand(Op);
1366 OutStreamer->emitInstruction(Ret, getSubtargetInfo());
1367 emitX86Nops(*OutStreamer, 10, Subtarget);
1368 recordSled(CurSled, MI, SledKind::FUNCTION_EXIT, 2);
1369}
1370
1371void X86AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI,
1372 X86MCInstLower &MCIL) {
1373 MCInst TC;
1374 TC.setOpcode(convertTailJumpOpcode(MI.getOperand(0).getImm()));
1375 // Drop the tail jump opcode.
1376 auto TCOperands = drop_begin(MI.operands());
1377 bool IsConditional = TC.getOpcode() == X86::JCC_1;
1378 MCSymbol *FallthroughLabel;
1379 if (IsConditional) {
1380 // Rewrite:
1381 // je target
1382 //
1383 // To:
1384 // jne .fallthrough
1385 // .p2align 1, ...
1386 // .Lxray_sled_N:
1387 // SLED_CODE
1388 // jmp target
1389 // .fallthrough:
1390 FallthroughLabel = OutContext.createTempSymbol();
1391 EmitToStreamer(
1392 *OutStreamer,
1393 MCInstBuilder(X86::JCC_1)
1394 .addExpr(MCSymbolRefExpr::create(FallthroughLabel, OutContext))
1395 .addImm(X86::GetOppositeBranchCondition(
1396 static_cast<X86::CondCode>(MI.getOperand(2).getImm()))));
1397 TC.setOpcode(X86::JMP_1);
1398 // Drop the condition code.
1399 TCOperands = drop_end(TCOperands);
1400 }
1401
1402 NoAutoPaddingScope NoPadScope(*OutStreamer);
1403
1404 // Like PATCHABLE_RET, we have the actual instruction in the operands to this
1405 // instruction so we lower that particular instruction and its operands.
1406 // Unlike PATCHABLE_RET though, we put the sled before the JMP, much like how
1407 // we do it for PATCHABLE_FUNCTION_ENTER. The sled should be very similar to
1408 // the PATCHABLE_FUNCTION_ENTER case, followed by the lowering of the actual
1409 // tail call much like how we have it in PATCHABLE_RET.
1410 auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
1411 OutStreamer->emitCodeAlignment(Align(2), &getSubtargetInfo());
1412 OutStreamer->emitLabel(CurSled);
1413 auto Target = OutContext.createTempSymbol();
1414
1415 // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as
1416 // an operand (computed as an offset from the jmp instruction).
1417 // FIXME: Find another less hacky way do force the relative jump.
1418 OutStreamer->emitBytes("\xeb\x09");
1419 emitX86Nops(*OutStreamer, 9, Subtarget);
1420 OutStreamer->emitLabel(Target);
1421 recordSled(CurSled, MI, SledKind::TAIL_CALL, 2);
1422
1423 // Before emitting the instruction, add a comment to indicate that this is
1424 // indeed a tail call.
1425 OutStreamer->AddComment("TAILCALL");
1426 for (auto &MO : TCOperands)
1427 if (auto Op = MCIL.LowerMachineOperand(&MI, MO); Op.isValid())
1428 TC.addOperand(Op);
1429 OutStreamer->emitInstruction(TC, getSubtargetInfo());
1430
1431 if (IsConditional)
1432 OutStreamer->emitLabel(FallthroughLabel);
1433}
1434
1435static unsigned getSrcIdx(const MachineInstr* MI, unsigned SrcIdx) {
1436 if (X86II::isKMasked(MI->getDesc().TSFlags)) {
1437 // Skip mask operand.
1438 ++SrcIdx;
1439 if (X86II::isKMergeMasked(MI->getDesc().TSFlags)) {
1440 // Skip passthru operand.
1441 ++SrcIdx;
1442 }
1443 }
1444 return SrcIdx;
1445}
1446
1447static void printDstRegisterName(raw_ostream &CS, const MachineInstr *MI,
1448 unsigned SrcOpIdx) {
1449 const MachineOperand &DstOp = MI->getOperand(0);
1450 CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg());
1451
1452 // Handle AVX512 MASK/MASKZ write mask comments.
1453 // MASK: zmmX {%kY}
1454 // MASKZ: zmmX {%kY} {z}
1455 if (X86II::isKMasked(MI->getDesc().TSFlags)) {
1456 const MachineOperand &WriteMaskOp = MI->getOperand(SrcOpIdx - 1);
1458 CS << " {%" << Mask << "}";
1459 if (!X86II::isKMergeMasked(MI->getDesc().TSFlags)) {
1460 CS << " {z}";
1461 }
1462 }
1463}
1464
1465static void printShuffleMask(raw_ostream &CS, StringRef Src1Name,
1466 StringRef Src2Name, ArrayRef<int> Mask) {
1467 // One source operand, fix the mask to print all elements in one span.
1468 SmallVector<int, 8> ShuffleMask(Mask);
1469 if (Src1Name == Src2Name)
1470 for (int i = 0, e = ShuffleMask.size(); i != e; ++i)
1471 if (ShuffleMask[i] >= e)
1472 ShuffleMask[i] -= e;
1473
1474 for (int i = 0, e = ShuffleMask.size(); i != e; ++i) {
1475 if (i != 0)
1476 CS << ",";
1477 if (ShuffleMask[i] == SM_SentinelZero) {
1478 CS << "zero";
1479 continue;
1480 }
1481
1482 // Otherwise, it must come from src1 or src2. Print the span of elements
1483 // that comes from this src.
1484 bool isSrc1 = ShuffleMask[i] < (int)e;
1485 CS << (isSrc1 ? Src1Name : Src2Name) << '[';
1486
1487 bool IsFirst = true;
1488 while (i != e && ShuffleMask[i] != SM_SentinelZero &&
1489 (ShuffleMask[i] < (int)e) == isSrc1) {
1490 if (!IsFirst)
1491 CS << ',';
1492 else
1493 IsFirst = false;
1494 if (ShuffleMask[i] == SM_SentinelUndef)
1495 CS << "u";
1496 else
1497 CS << ShuffleMask[i] % (int)e;
1498 ++i;
1499 }
1500 CS << ']';
1501 --i; // For loop increments element #.
1502 }
1503}
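// For example, with Src1Name = "xmm1", Src2Name = "xmm2" and the mask
// {0, 1, SM_SentinelZero, 5} over 4-element sources, the routine above
// prints: xmm1[0,1],zero,xmm2[1]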
1504
1505static std::string getShuffleComment(const MachineInstr *MI, unsigned SrcOp1Idx,
1506 unsigned SrcOp2Idx, ArrayRef<int> Mask) {
1507 std::string Comment;
1508
1509 const MachineOperand &SrcOp1 = MI->getOperand(SrcOp1Idx);
1510 const MachineOperand &SrcOp2 = MI->getOperand(SrcOp2Idx);
1511 StringRef Src1Name = SrcOp1.isReg()
1513 : "mem";
1514 StringRef Src2Name = SrcOp2.isReg()
1516 : "mem";
1517
1518 raw_string_ostream CS(Comment);
1519 printDstRegisterName(CS, MI, SrcOp1Idx);
1520 CS << " = ";
1521 printShuffleMask(CS, Src1Name, Src2Name, Mask);
1522
1523 return Comment;
1524}
1525
1526static void printConstant(const APInt &Val, raw_ostream &CS,
1527 bool PrintZero = false) {
1528 if (Val.getBitWidth() <= 64) {
1529 CS << (PrintZero ? 0ULL : Val.getZExtValue());
1530 } else {
1531 // print multi-word constant as (w0,w1)
1532 CS << "(";
1533 for (int i = 0, N = Val.getNumWords(); i < N; ++i) {
1534 if (i > 0)
1535 CS << ",";
1536 CS << (PrintZero ? 0ULL : Val.getRawData()[i]);
1537 }
1538 CS << ")";
1539 }
1540}
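// For example, a 128-bit APInt whose low 64-bit word is 255 and whose high
// word is 1 is printed by the routine above as "(255,1)".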
1541
1542static void printConstant(const APFloat &Flt, raw_ostream &CS,
1543 bool PrintZero = false) {
1544 SmallString<32> Str;
1545 // Force scientific notation to distinguish from integers.
1546 if (PrintZero)
1547 APFloat::getZero(Flt.getSemantics()).toString(Str, 0, 0);
1548 else
1549 Flt.toString(Str, 0, 0);
1550 CS << Str;
1551}
1552
1553static void printConstant(const Constant *COp, unsigned BitWidth,
1554 raw_ostream &CS, bool PrintZero = false) {
1555 if (isa<UndefValue>(COp)) {
1556 CS << "u";
1557 } else if (auto *CI = dyn_cast<ConstantInt>(COp)) {
1558 if (auto VTy = dyn_cast<FixedVectorType>(CI->getType())) {
1559 for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
1560 if (I != 0)
1561 CS << ',';
1562 printConstant(CI->getValue(), CS, PrintZero);
1563 }
1564 } else
1565 printConstant(CI->getValue(), CS, PrintZero);
1566 } else if (auto *CF = dyn_cast<ConstantFP>(COp)) {
1567 if (auto VTy = dyn_cast<FixedVectorType>(CF->getType())) {
1568 unsigned EltBits = VTy->getScalarSizeInBits();
1569 unsigned E = std::min(BitWidth / EltBits, VTy->getNumElements());
1570 if ((BitWidth % EltBits) == 0) {
1571 for (unsigned I = 0; I != E; ++I) {
1572 if (I != 0)
1573 CS << ",";
1574 printConstant(CF->getValueAPF(), CS, PrintZero);
1575 }
1576 } else {
1577 CS << "?";
1578 }
1579 } else
1580 printConstant(CF->getValueAPF(), CS, PrintZero);
1581 } else if (auto *CDS = dyn_cast<ConstantDataSequential>(COp)) {
1582 Type *EltTy = CDS->getElementType();
1583 bool IsInteger = EltTy->isIntegerTy();
1584 bool IsFP = EltTy->isHalfTy() || EltTy->isFloatTy() || EltTy->isDoubleTy();
1585 unsigned EltBits = EltTy->getPrimitiveSizeInBits();
1586 unsigned E = std::min(BitWidth / EltBits, (unsigned)CDS->getNumElements());
1587 if ((BitWidth % EltBits) == 0) {
1588 for (unsigned I = 0; I != E; ++I) {
1589 if (I != 0)
1590 CS << ",";
1591 if (IsInteger)
1592 printConstant(CDS->getElementAsAPInt(I), CS, PrintZero);
1593 else if (IsFP)
1594 printConstant(CDS->getElementAsAPFloat(I), CS, PrintZero);
1595 else
1596 CS << "?";
1597 }
1598 } else {
1599 CS << "?";
1600 }
1601 } else if (auto *CV = dyn_cast<ConstantVector>(COp)) {
1602 unsigned EltBits = CV->getType()->getScalarSizeInBits();
1603 unsigned E = std::min(BitWidth / EltBits, CV->getNumOperands());
1604 if ((BitWidth % EltBits) == 0) {
1605 for (unsigned I = 0; I != E; ++I) {
1606 if (I != 0)
1607 CS << ",";
1608 printConstant(CV->getOperand(I), EltBits, CS, PrintZero);
1609 }
1610 } else {
1611 CS << "?";
1612 }
1613 } else {
1614 CS << "?";
1615 }
1616}
1617
1618static void printZeroUpperMove(const MachineInstr *MI, MCStreamer &OutStreamer,
1619 int SclWidth, int VecWidth,
1620 const char *ShuffleComment) {
1621 unsigned SrcIdx = getSrcIdx(MI, 1);
1622
1623 std::string Comment;
1624 raw_string_ostream CS(Comment);
1625 printDstRegisterName(CS, MI, SrcIdx);
1626 CS << " = ";
1627
1628 if (auto *C = X86::getConstantFromPool(*MI, SrcIdx)) {
1629 CS << "[";
1630 printConstant(C, SclWidth, CS);
1631 for (int I = 1, E = VecWidth / SclWidth; I < E; ++I) {
1632 CS << ",";
1633 printConstant(C, SclWidth, CS, true);
1634 }
1635 CS << "]";
1636 OutStreamer.AddComment(CS.str());
1637 return; // early-out
1638 }
1639
1640 // We didn't find a constant load, fallback to a shuffle mask decode.
1641 CS << ShuffleComment;
1642 OutStreamer.AddComment(CS.str());
1643}
1644
1645static void printBroadcast(const MachineInstr *MI, MCStreamer &OutStreamer,
1646 int Repeats, int BitWidth) {
1647 unsigned SrcIdx = getSrcIdx(MI, 1);
1648 if (auto *C = X86::getConstantFromPool(*MI, SrcIdx)) {
1649 std::string Comment;
1650 raw_string_ostream CS(Comment);
1651 printDstRegisterName(CS, MI, SrcIdx);
1652 CS << " = [";
1653 for (int l = 0; l != Repeats; ++l) {
1654 if (l != 0)
1655 CS << ",";
1656 printConstant(C, BitWidth, CS);
1657 }
1658 CS << "]";
1659 OutStreamer.AddComment(CS.str());
1660 }
1661}
1662
1663static void addConstantComment(const MachineInstr *MI, MCStreamer &OutStreamer,
1664 unsigned OpNo, int BitWidth, int Repeats = 1) {
1665 if (auto *C = X86::getConstantFromPool(*MI, OpNo)) {
1666 std::string Comment;
1667 raw_string_ostream CS(Comment);
1668 CS << "[";
1669 for (int I = 0; I != Repeats; ++I) {
1670 if (I != 0)
1671 CS << ",";
1672 printConstant(C, BitWidth, CS);
1673 }
1674 CS << "]";
1675 OutStreamer.AddComment(CS.str());
1676 }
1677}
1678
1679static bool printExtend(const MachineInstr *MI, MCStreamer &OutStreamer,
1680 int SrcEltBits, int DstEltBits, bool IsSext) {
1681 unsigned SrcIdx = getSrcIdx(MI, 1);
1682 auto *C = X86::getConstantFromPool(*MI, SrcIdx);
1683 if (C && C->getType()->getScalarSizeInBits() == unsigned(SrcEltBits)) {
1684 if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
1685 int NumElts = CDS->getNumElements();
1686 std::string Comment;
1687 raw_string_ostream CS(Comment);
1688 printDstRegisterName(CS, MI, SrcIdx);
1689 CS << " = [";
1690 for (int i = 0; i != NumElts; ++i) {
1691 if (i != 0)
1692 CS << ",";
1693 if (CDS->getElementType()->isIntegerTy()) {
1694 APInt Elt = CDS->getElementAsAPInt(i);
1695 Elt = IsSext ? Elt.sext(DstEltBits) : Elt.zext(DstEltBits);
1696 printConstant(Elt, CS);
1697 } else
1698 CS << "?";
1699 }
1700 CS << "]";
1701 OutStreamer.AddComment(CS.str());
1702 return true;
1703 }
1704 }
1705
1706 return false;
1707}
1708static void printSignExtend(const MachineInstr *MI, MCStreamer &OutStreamer,
1709 int SrcEltBits, int DstEltBits) {
1710 printExtend(MI, OutStreamer, SrcEltBits, DstEltBits, true);
1711}
1712static void printZeroExtend(const MachineInstr *MI, MCStreamer &OutStreamer,
1713 int SrcEltBits, int DstEltBits) {
1714 if (printExtend(MI, OutStreamer, SrcEltBits, DstEltBits, false))
1715 return;
1716
1717 // We didn't find a constant load, fallback to a shuffle mask decode.
1718 std::string Comment;
1719 raw_string_ostream CS(Comment);
1721 CS << " = ";
1722
1723 SmallVector<int> Mask;
1724 unsigned Width = X86::getVectorRegisterWidth(MI->getDesc().operands()[0]);
1725 assert((Width % DstEltBits) == 0 && (DstEltBits % SrcEltBits) == 0 &&
1726 "Illegal extension ratio");
1727 DecodeZeroExtendMask(SrcEltBits, DstEltBits, Width / DstEltBits, false, Mask);
1728 printShuffleMask(CS, "mem", "", Mask);
1729
1730 OutStreamer.AddComment(CS.str());
1731}
1732
1733void X86AsmPrinter::EmitSEHInstruction(const MachineInstr *MI) {
1734 assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
1735 assert((getSubtarget().isOSWindows() || getSubtarget().isUEFI()) &&
1736 "SEH_ instruction Windows and UEFI only");
1737
1738 // Use the .cv_fpo directives if we're emitting CodeView on 32-bit x86.
1739 if (EmitFPOData) {
1740 X86TargetStreamer *XTS =
1741 static_cast<X86TargetStreamer *>(OutStreamer->getTargetStreamer());
1742 switch (MI->getOpcode()) {
1743 case X86::SEH_PushReg:
1744 XTS->emitFPOPushReg(MI->getOperand(0).getImm());
1745 break;
1746 case X86::SEH_StackAlloc:
1747 XTS->emitFPOStackAlloc(MI->getOperand(0).getImm());
1748 break;
1749 case X86::SEH_StackAlign:
1750 XTS->emitFPOStackAlign(MI->getOperand(0).getImm());
1751 break;
1752 case X86::SEH_SetFrame:
1753 assert(MI->getOperand(1).getImm() == 0 &&
1754 ".cv_fpo_setframe takes no offset");
1755 XTS->emitFPOSetFrame(MI->getOperand(0).getImm());
1756 break;
1757 case X86::SEH_EndPrologue:
1758 XTS->emitFPOEndPrologue();
1759 break;
1760 case X86::SEH_SaveReg:
1761 case X86::SEH_SaveXMM:
1762 case X86::SEH_PushFrame:
1763 llvm_unreachable("SEH_ directive incompatible with FPO");
1764 break;
1765 default:
1766 llvm_unreachable("expected SEH_ instruction");
1767 }
1768 return;
1769 }
1770
1771 // Otherwise, use the .seh_ directives for all other Windows platforms.
1772 switch (MI->getOpcode()) {
1773 case X86::SEH_PushReg:
1774 OutStreamer->emitWinCFIPushReg(MI->getOperand(0).getImm());
1775 break;
1776
1777 case X86::SEH_SaveReg:
1778 OutStreamer->emitWinCFISaveReg(MI->getOperand(0).getImm(),
1779 MI->getOperand(1).getImm());
1780 break;
1781
1782 case X86::SEH_SaveXMM:
1783 OutStreamer->emitWinCFISaveXMM(MI->getOperand(0).getImm(),
1784 MI->getOperand(1).getImm());
1785 break;
1786
1787 case X86::SEH_StackAlloc:
1788 OutStreamer->emitWinCFIAllocStack(MI->getOperand(0).getImm());
1789 break;
1790
1791 case X86::SEH_SetFrame:
1792 OutStreamer->emitWinCFISetFrame(MI->getOperand(0).getImm(),
1793 MI->getOperand(1).getImm());
1794 break;
1795
1796 case X86::SEH_PushFrame:
1797 OutStreamer->emitWinCFIPushFrame(MI->getOperand(0).getImm());
1798 break;
1799
1800 case X86::SEH_EndPrologue:
1801 OutStreamer->emitWinCFIEndProlog();
1802 break;
1803
1804 case X86::SEH_BeginEpilogue:
1805 OutStreamer->emitWinCFIBeginEpilogue();
1806 break;
1807
1808 case X86::SEH_EndEpilogue:
1809 OutStreamer->emitWinCFIEndEpilogue();
1810 break;
1811
1812 case X86::SEH_UnwindV2Start:
1813 OutStreamer->emitWinCFIUnwindV2Start();
1814 break;
1815
1816 case X86::SEH_UnwindVersion:
1817 OutStreamer->emitWinCFIUnwindVersion(MI->getOperand(0).getImm());
1818 break;
1819
1820 default:
1821 llvm_unreachable("expected SEH_ instruction");
1822 }
1823}
1824
1825 static void addConstantComments(const MachineInstr *MI,
1826 MCStreamer &OutStreamer) {
1827 switch (MI->getOpcode()) {
1828 // Lower PSHUFB and VPERMILP normally but add a comment if we can find
1829 // a constant shuffle mask. We won't be able to do this at the MC layer
1830 // because the mask isn't an immediate.
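// For example (illustrative only), a vpshufb whose mask operand comes from
// the constant pool typically ends up annotated along the lines of
// "xmm0 = xmm1[0,0,0,0,4,4,4,4,8,8,8,8,12,12,12,12]"; this is purely a
// readability aid for verbose asm output.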
1831 case X86::PSHUFBrm:
1832 case X86::VPSHUFBrm:
1833 case X86::VPSHUFBYrm:
1834 case X86::VPSHUFBZ128rm:
1835 case X86::VPSHUFBZ128rmk:
1836 case X86::VPSHUFBZ128rmkz:
1837 case X86::VPSHUFBZ256rm:
1838 case X86::VPSHUFBZ256rmk:
1839 case X86::VPSHUFBZ256rmkz:
1840 case X86::VPSHUFBZrm:
1841 case X86::VPSHUFBZrmk:
1842 case X86::VPSHUFBZrmkz: {
1843 unsigned SrcIdx = getSrcIdx(MI, 1);
1844 if (auto *C = X86::getConstantFromPool(*MI, SrcIdx + 1)) {
1845 unsigned Width = X86::getVectorRegisterWidth(MI->getDesc().operands()[0]);
1846 SmallVector<int, 64> Mask;
1847 DecodePSHUFBMask(C, Width, Mask);
1848 if (!Mask.empty())
1849 OutStreamer.AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask));
1850 }
1851 break;
1852 }
1853
1854 case X86::VPERMILPSrm:
1855 case X86::VPERMILPSYrm:
1856 case X86::VPERMILPSZ128rm:
1857 case X86::VPERMILPSZ128rmk:
1858 case X86::VPERMILPSZ128rmkz:
1859 case X86::VPERMILPSZ256rm:
1860 case X86::VPERMILPSZ256rmk:
1861 case X86::VPERMILPSZ256rmkz:
1862 case X86::VPERMILPSZrm:
1863 case X86::VPERMILPSZrmk:
1864 case X86::VPERMILPSZrmkz: {
1865 unsigned SrcIdx = getSrcIdx(MI, 1);
1866 if (auto *C = X86::getConstantFromPool(*MI, SrcIdx + 1)) {
1867 unsigned Width = X86::getVectorRegisterWidth(MI->getDesc().operands()[0]);
1868 SmallVector<int, 64> Mask;
1869 DecodeVPERMILPMask(C, 32, Width, Mask);
1870 if (!Mask.empty())
1871 OutStreamer.AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask));
1872 }
1873 break;
1874 }
1875 case X86::VPERMILPDrm:
1876 case X86::VPERMILPDYrm:
1877 case X86::VPERMILPDZ128rm:
1878 case X86::VPERMILPDZ128rmk:
1879 case X86::VPERMILPDZ128rmkz:
1880 case X86::VPERMILPDZ256rm:
1881 case X86::VPERMILPDZ256rmk:
1882 case X86::VPERMILPDZ256rmkz:
1883 case X86::VPERMILPDZrm:
1884 case X86::VPERMILPDZrmk:
1885 case X86::VPERMILPDZrmkz: {
1886 unsigned SrcIdx = getSrcIdx(MI, 1);
1887 if (auto *C = X86::getConstantFromPool(*MI, SrcIdx + 1)) {
1888 unsigned Width = X86::getVectorRegisterWidth(MI->getDesc().operands()[0]);
1889 SmallVector<int, 64> Mask;
1890 DecodeVPERMILPMask(C, 64, Width, Mask);
1891 if (!Mask.empty())
1892 OutStreamer.AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask));
1893 }
1894 break;
1895 }
1896
1897 case X86::VPERMIL2PDrm:
1898 case X86::VPERMIL2PSrm:
1899 case X86::VPERMIL2PDYrm:
1900 case X86::VPERMIL2PSYrm: {
1901 assert(MI->getNumOperands() >= (3 + X86::AddrNumOperands + 1) &&
1902 "Unexpected number of operands!");
1903
1904 const MachineOperand &CtrlOp = MI->getOperand(MI->getNumOperands() - 1);
1905 if (!CtrlOp.isImm())
1906 break;
1907
1908 unsigned ElSize;
1909 switch (MI->getOpcode()) {
1910 default: llvm_unreachable("Invalid opcode");
1911 case X86::VPERMIL2PSrm: case X86::VPERMIL2PSYrm: ElSize = 32; break;
1912 case X86::VPERMIL2PDrm: case X86::VPERMIL2PDYrm: ElSize = 64; break;
1913 }
1914
1915 if (auto *C = X86::getConstantFromPool(*MI, 3)) {
1916 unsigned Width = X86::getVectorRegisterWidth(MI->getDesc().operands()[0]);
1917 SmallVector<int, 64> Mask;
1918 DecodeVPERMIL2PMask(C, (unsigned)CtrlOp.getImm(), ElSize, Width, Mask);
1919 if (!Mask.empty())
1920 OutStreamer.AddComment(getShuffleComment(MI, 1, 2, Mask));
1921 }
1922 break;
1923 }
1924
1925 case X86::VPPERMrrm: {
1926 if (auto *C = X86::getConstantFromPool(*MI, 3)) {
1927 unsigned Width = X86::getVectorRegisterWidth(MI->getDesc().operands()[0]);
1928 SmallVector<int, 64> Mask;
1929 DecodeVPPERMMask(C, Width, Mask);
1930 if (!Mask.empty())
1931 OutStreamer.AddComment(getShuffleComment(MI, 1, 2, Mask));
1932 }
1933 break;
1934 }
1935
1936 case X86::MMX_MOVQ64rm: {
1937 if (auto *C = X86::getConstantFromPool(*MI, 1)) {
1938 std::string Comment;
1939 raw_string_ostream CS(Comment);
1940 const MachineOperand &DstOp = MI->getOperand(0);
1941 CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = ";
1942 if (auto *CF = dyn_cast<ConstantFP>(C)) {
1943 CS << "0x" << toString(CF->getValueAPF().bitcastToAPInt(), 16, false);
1944 OutStreamer.AddComment(CS.str());
1945 }
1946 }
1947 break;
1948 }
1949
1950 case X86::GF2P8AFFINEQBrmi:
1951 case X86::VGF2P8AFFINEQBrmi:
1952 case X86::VGF2P8AFFINEQBYrmi:
1953 case X86::VGF2P8AFFINEQBZrmi:
1954 case X86::VGF2P8AFFINEQBZ128rmi:
1955 case X86::VGF2P8AFFINEQBZ256rmi: {
1956 // TODO: Add predicate handling with test coverage.
1957 unsigned SrcIdx = getSrcIdx(MI, 1);
1958 unsigned Width = X86::getVectorRegisterWidth(MI->getDesc().operands()[0]);
1959 addConstantComment(MI, OutStreamer, SrcIdx + 1, Width);
1960 break;
1961 }
1962
1963 case X86::VGF2P8AFFINEQBZ128rmbi:
1964 case X86::VGF2P8AFFINEQBZ256rmbi:
1965 case X86::VGF2P8AFFINEQBZrmbi: {
1966 unsigned SrcIdx = getSrcIdx(MI, 1);
1967 unsigned Width = X86::getVectorRegisterWidth(MI->getDesc().operands()[0]);
1968 addConstantComment(MI, OutStreamer, SrcIdx + 1, 64, Width / 64);
1969 break;
1970 }
1971
1972#define INSTR_CASE(Prefix, Instr, Suffix, Postfix) \
1973 case X86::Prefix##Instr##Suffix##rm##Postfix:
1974
1975#define CASE_AVX512_ARITH_RM(Instr) \
1976 INSTR_CASE(V, Instr, Z128, ) \
1977 INSTR_CASE(V, Instr, Z128, k) \
1978 INSTR_CASE(V, Instr, Z128, kz) \
1979 INSTR_CASE(V, Instr, Z256, ) \
1980 INSTR_CASE(V, Instr, Z256, k) \
1981 INSTR_CASE(V, Instr, Z256, kz) \
1982 INSTR_CASE(V, Instr, Z, ) \
1983 INSTR_CASE(V, Instr, Z, k) \
1984 INSTR_CASE(V, Instr, Z, kz)
1985
1986#define CASE_ARITH_RM(Instr) \
1987 INSTR_CASE(, Instr, , ) /* SSE */ \
1988 INSTR_CASE(V, Instr, , ) /* AVX-128 */ \
1989 INSTR_CASE(V, Instr, Y, ) /* AVX-256 */ \
1990 INSTR_CASE(V, Instr, Z128, ) \
1991 INSTR_CASE(V, Instr, Z128, k) \
1992 INSTR_CASE(V, Instr, Z128, kz) \
1993 INSTR_CASE(V, Instr, Z256, ) \
1994 INSTR_CASE(V, Instr, Z256, k) \
1995 INSTR_CASE(V, Instr, Z256, kz) \
1996 INSTR_CASE(V, Instr, Z, ) \
1997 INSTR_CASE(V, Instr, Z, k) \
1998 INSTR_CASE(V, Instr, Z, kz)
1999
2000 // TODO: Add additional instructions when useful.
2001 CASE_ARITH_RM(PADDB)
2002 CASE_ARITH_RM(PADDW)
2003 CASE_ARITH_RM(PADDD)
2004 CASE_ARITH_RM(PADDQ)
2005 CASE_ARITH_RM(PMADDUBSW)
2006 CASE_ARITH_RM(PMADDWD)
2007 CASE_ARITH_RM(PMULDQ)
2008 CASE_ARITH_RM(PMULUDQ)
2009 CASE_ARITH_RM(PMULLD)
2010 CASE_AVX512_ARITH_RM(PMULLQ)
2011 CASE_ARITH_RM(PMULLW)
2012 CASE_ARITH_RM(PMULHW)
2013 CASE_ARITH_RM(PMULHUW)
2014 CASE_ARITH_RM(PMULHRSW) {
2015 unsigned SrcIdx = getSrcIdx(MI, 1);
2016 unsigned VectorWidth =
2017 X86::getVectorRegisterWidth(MI->getDesc().operands()[0]);
2018 addConstantComment(MI, OutStreamer, SrcIdx + 1, VectorWidth);
2019 break;
2020 }
2021
2022#define MASK_AVX512_CASE(Instr) \
2023 case Instr: \
2024 case Instr##k: \
2025 case Instr##kz:
2026
2027 case X86::MOVSDrm:
2028 case X86::VMOVSDrm:
2029 MASK_AVX512_CASE(X86::VMOVSDZrm)
2030 case X86::MOVSDrm_alt:
2031 case X86::VMOVSDrm_alt:
2032 case X86::VMOVSDZrm_alt:
2033 case X86::MOVQI2PQIrm:
2034 case X86::VMOVQI2PQIrm:
2035 case X86::VMOVQI2PQIZrm:
2036 printZeroUpperMove(MI, OutStreamer, 64, 128, "mem[0],zero");
2037 break;
2038
2039 MASK_AVX512_CASE(X86::VMOVSHZrm)
2040 case X86::VMOVSHZrm_alt:
2041 printZeroUpperMove(MI, OutStreamer, 16, 128,
2042 "mem[0],zero,zero,zero,zero,zero,zero,zero");
2043 break;
2044
2045 case X86::MOVSSrm:
2046 case X86::VMOVSSrm:
2047 MASK_AVX512_CASE(X86::VMOVSSZrm)
2048 case X86::MOVSSrm_alt:
2049 case X86::VMOVSSrm_alt:
2050 case X86::VMOVSSZrm_alt:
2051 case X86::MOVDI2PDIrm:
2052 case X86::VMOVDI2PDIrm:
2053 case X86::VMOVDI2PDIZrm:
2054 printZeroUpperMove(MI, OutStreamer, 32, 128, "mem[0],zero,zero,zero");
2055 break;
2056
2057#define MOV_CASE(Prefix, Suffix) \
2058 case X86::Prefix##MOVAPD##Suffix##rm: \
2059 case X86::Prefix##MOVAPS##Suffix##rm: \
2060 case X86::Prefix##MOVUPD##Suffix##rm: \
2061 case X86::Prefix##MOVUPS##Suffix##rm: \
2062 case X86::Prefix##MOVDQA##Suffix##rm: \
2063 case X86::Prefix##MOVDQU##Suffix##rm:
2064
2065#define MOV_AVX512_CASE(Suffix, Postfix) \
2066 case X86::VMOVDQA64##Suffix##rm##Postfix: \
2067 case X86::VMOVDQA32##Suffix##rm##Postfix: \
2068 case X86::VMOVDQU64##Suffix##rm##Postfix: \
2069 case X86::VMOVDQU32##Suffix##rm##Postfix: \
2070 case X86::VMOVDQU16##Suffix##rm##Postfix: \
2071 case X86::VMOVDQU8##Suffix##rm##Postfix: \
2072 case X86::VMOVAPS##Suffix##rm##Postfix: \
2073 case X86::VMOVAPD##Suffix##rm##Postfix: \
2074 case X86::VMOVUPS##Suffix##rm##Postfix: \
2075 case X86::VMOVUPD##Suffix##rm##Postfix:
2076
2077#define CASE_128_MOV_RM() \
2078 MOV_CASE(, ) /* SSE */ \
2079 MOV_CASE(V, ) /* AVX-128 */ \
2080 MOV_AVX512_CASE(Z128, ) \
2081 MOV_AVX512_CASE(Z128, k) \
2082 MOV_AVX512_CASE(Z128, kz)
2083
2084#define CASE_256_MOV_RM() \
2085 MOV_CASE(V, Y) /* AVX-256 */ \
2086 MOV_AVX512_CASE(Z256, ) \
2087 MOV_AVX512_CASE(Z256, k) \
2088 MOV_AVX512_CASE(Z256, kz) \
2089
2090#define CASE_512_MOV_RM() \
2091 MOV_AVX512_CASE(Z, ) \
2092 MOV_AVX512_CASE(Z, k) \
2093 MOV_AVX512_CASE(Z, kz) \
2094
2095 // For loads from a constant pool to a vector register, print the constant
2096 // loaded.
2097 CASE_128_MOV_RM()
2098 printBroadcast(MI, OutStreamer, 1, 128);
2099 break;
2100 CASE_256_MOV_RM()
2101 printBroadcast(MI, OutStreamer, 1, 256);
2102 break;
2103 CASE_512_MOV_RM()
2104 printBroadcast(MI, OutStreamer, 1, 512);
2105 break;
2106 case X86::VBROADCASTF128rm:
2107 case X86::VBROADCASTI128rm:
2108 MASK_AVX512_CASE(X86::VBROADCASTF32X4Z256rm)
2109 MASK_AVX512_CASE(X86::VBROADCASTF64X2Z256rm)
2110 MASK_AVX512_CASE(X86::VBROADCASTI32X4Z256rm)
2111 MASK_AVX512_CASE(X86::VBROADCASTI64X2Z256rm)
2112 printBroadcast(MI, OutStreamer, 2, 128);
2113 break;
2114 MASK_AVX512_CASE(X86::VBROADCASTF32X4Zrm)
2115 MASK_AVX512_CASE(X86::VBROADCASTF64X2Zrm)
2116 MASK_AVX512_CASE(X86::VBROADCASTI32X4Zrm)
2117 MASK_AVX512_CASE(X86::VBROADCASTI64X2Zrm)
2118 printBroadcast(MI, OutStreamer, 4, 128);
2119 break;
2120 MASK_AVX512_CASE(X86::VBROADCASTF32X8Zrm)
2121 MASK_AVX512_CASE(X86::VBROADCASTF64X4Zrm)
2122 MASK_AVX512_CASE(X86::VBROADCASTI32X8Zrm)
2123 MASK_AVX512_CASE(X86::VBROADCASTI64X4Zrm)
2124 printBroadcast(MI, OutStreamer, 2, 256);
2125 break;
2126
2127 // For broadcast loads from a constant pool to a vector register, repeatedly
2128 // print the constant loaded.
2129 case X86::MOVDDUPrm:
2130 case X86::VMOVDDUPrm:
2131 MASK_AVX512_CASE(X86::VMOVDDUPZ128rm)
2132 case X86::VPBROADCASTQrm:
2133 MASK_AVX512_CASE(X86::VPBROADCASTQZ128rm)
2134 printBroadcast(MI, OutStreamer, 2, 64);
2135 break;
2136 case X86::VBROADCASTSDYrm:
2137 MASK_AVX512_CASE(X86::VBROADCASTSDZ256rm)
2138 case X86::VPBROADCASTQYrm:
2139 MASK_AVX512_CASE(X86::VPBROADCASTQZ256rm)
2140 printBroadcast(MI, OutStreamer, 4, 64);
2141 break;
2142 MASK_AVX512_CASE(X86::VBROADCASTSDZrm)
2143 MASK_AVX512_CASE(X86::VPBROADCASTQZrm)
2144 printBroadcast(MI, OutStreamer, 8, 64);
2145 break;
2146 case X86::VBROADCASTSSrm:
2147 MASK_AVX512_CASE(X86::VBROADCASTSSZ128rm)
2148 case X86::VPBROADCASTDrm:
2149 MASK_AVX512_CASE(X86::VPBROADCASTDZ128rm)
2150 printBroadcast(MI, OutStreamer, 4, 32);
2151 break;
2152 case X86::VBROADCASTSSYrm:
2153 MASK_AVX512_CASE(X86::VBROADCASTSSZ256rm)
2154 case X86::VPBROADCASTDYrm:
2155 MASK_AVX512_CASE(X86::VPBROADCASTDZ256rm)
2156 printBroadcast(MI, OutStreamer, 8, 32);
2157 break;
2158 MASK_AVX512_CASE(X86::VBROADCASTSSZrm)
2159 MASK_AVX512_CASE(X86::VPBROADCASTDZrm)
2160 printBroadcast(MI, OutStreamer, 16, 32);
2161 break;
2162 case X86::VPBROADCASTWrm:
2163 MASK_AVX512_CASE(X86::VPBROADCASTWZ128rm)
2164 printBroadcast(MI, OutStreamer, 8, 16);
2165 break;
2166 case X86::VPBROADCASTWYrm:
2167 MASK_AVX512_CASE(X86::VPBROADCASTWZ256rm)
2168 printBroadcast(MI, OutStreamer, 16, 16);
2169 break;
2170 MASK_AVX512_CASE(X86::VPBROADCASTWZrm)
2171 printBroadcast(MI, OutStreamer, 32, 16);
2172 break;
2173 case X86::VPBROADCASTBrm:
2174 MASK_AVX512_CASE(X86::VPBROADCASTBZ128rm)
2175 printBroadcast(MI, OutStreamer, 16, 8);
2176 break;
2177 case X86::VPBROADCASTBYrm:
2178 MASK_AVX512_CASE(X86::VPBROADCASTBZ256rm)
2179 printBroadcast(MI, OutStreamer, 32, 8);
2180 break;
2181 MASK_AVX512_CASE(X86::VPBROADCASTBZrm)
2182 printBroadcast(MI, OutStreamer, 64, 8);
2183 break;
2184
2185#define MOVX_CASE(Prefix, Ext, Type, Suffix, Postfix) \
2186 case X86::Prefix##PMOV##Ext##Type##Suffix##rm##Postfix:
2187
2188#define CASE_MOVX_RM(Ext, Type) \
2189 MOVX_CASE(, Ext, Type, , ) \
2190 MOVX_CASE(V, Ext, Type, , ) \
2191 MOVX_CASE(V, Ext, Type, Y, ) \
2192 MOVX_CASE(V, Ext, Type, Z128, ) \
2193 MOVX_CASE(V, Ext, Type, Z128, k ) \
2194 MOVX_CASE(V, Ext, Type, Z128, kz ) \
2195 MOVX_CASE(V, Ext, Type, Z256, ) \
2196 MOVX_CASE(V, Ext, Type, Z256, k ) \
2197 MOVX_CASE(V, Ext, Type, Z256, kz ) \
2198 MOVX_CASE(V, Ext, Type, Z, ) \
2199 MOVX_CASE(V, Ext, Type, Z, k ) \
2200 MOVX_CASE(V, Ext, Type, Z, kz )
2201
2202 CASE_MOVX_RM(SX, BD)
2203 printSignExtend(MI, OutStreamer, 8, 32);
2204 break;
2205 CASE_MOVX_RM(SX, BQ)
2206 printSignExtend(MI, OutStreamer, 8, 64);
2207 break;
2208 CASE_MOVX_RM(SX, BW)
2209 printSignExtend(MI, OutStreamer, 8, 16);
2210 break;
2211 CASE_MOVX_RM(SX, DQ)
2212 printSignExtend(MI, OutStreamer, 32, 64);
2213 break;
2214 CASE_MOVX_RM(SX, WD)
2215 printSignExtend(MI, OutStreamer, 16, 32);
2216 break;
2217 CASE_MOVX_RM(SX, WQ)
2218 printSignExtend(MI, OutStreamer, 16, 64);
2219 break;
2220
2221 CASE_MOVX_RM(ZX, BD)
2222 printZeroExtend(MI, OutStreamer, 8, 32);
2223 break;
2224 CASE_MOVX_RM(ZX, BQ)
2225 printZeroExtend(MI, OutStreamer, 8, 64);
2226 break;
2227 CASE_MOVX_RM(ZX, BW)
2228 printZeroExtend(MI, OutStreamer, 8, 16);
2229 break;
2230 CASE_MOVX_RM(ZX, DQ)
2231 printZeroExtend(MI, OutStreamer, 32, 64);
2232 break;
2233 CASE_MOVX_RM(ZX, WD)
2234 printZeroExtend(MI, OutStreamer, 16, 32);
2235 break;
2236 CASE_MOVX_RM(ZX, WQ)
2237 printZeroExtend(MI, OutStreamer, 16, 64);
2238 break;
2239 }
2240}
2241
2242// Does the given operand refer to a DLLIMPORT function?
2243 bool isImportedFunction(const MachineOperand &MO) {
2244 return MO.isGlobal() && (MO.getTargetFlags() == X86II::MO_DLLIMPORT);
2245}
2246
2247// Is the given instruction a call to a CFGuard function?
2248 bool isCallToCFGuardFunction(const MachineInstr *MI) {
2249 assert(MI->getOpcode() == X86::TAILJMPm64_REX ||
2250 MI->getOpcode() == X86::CALL64m);
2251 const MachineOperand &MO = MI->getOperand(3);
2252 return MO.isGlobal() && (MO.getTargetFlags() == X86II::MO_NO_FLAG) &&
2254}
2255
2256// Does the containing block for the given instruction contain any jump table
2257// info (indicating that the block is a dispatch for a jump table)?
2258 bool hasJumpTableInfoInBlock(const llvm::MachineInstr *MI) {
2259 const MachineBasicBlock &MBB = *MI->getParent();
2260 for (auto I = MBB.instr_rbegin(), E = MBB.instr_rend(); I != E; ++I)
2261 if (I->isJumpTableDebugInfo())
2262 return true;
2263
2264 return false;
2265}
2266
2267 void X86AsmPrinter::emitInstruction(const MachineInstr *MI) {
2268 // FIXME: Enable feature predicate checks once all the tests pass.
2269 // X86_MC::verifyInstructionPredicates(MI->getOpcode(),
2270 // Subtarget->getFeatureBits());
2271
2272 X86MCInstLower MCInstLowering(*MF, *this);
2273 const X86RegisterInfo *RI =
2274 MF->getSubtarget<X86Subtarget>().getRegisterInfo();
2275
2276 if (MI->getOpcode() == X86::OR64rm) {
2277 for (auto &Opd : MI->operands()) {
2278 if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
2279 "swift_async_extendedFramePointerFlags") {
2280 ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
2281 }
2282 }
2283 }
2284
2285 // Add comments for values loaded from constant pool.
2286 if (OutStreamer->isVerboseAsm())
2287 addConstantComments(MI, *OutStreamer);
2288
2289 // Add a comment about EVEX compression
2290 if (TM.Options.MCOptions.ShowMCEncoding) {
2291 if (MI->getAsmPrinterFlags() & X86::AC_EVEX_2_LEGACY)
2292 OutStreamer->AddComment("EVEX TO LEGACY Compression ", false);
2293 else if (MI->getAsmPrinterFlags() & X86::AC_EVEX_2_VEX)
2294 OutStreamer->AddComment("EVEX TO VEX Compression ", false);
2295 else if (MI->getAsmPrinterFlags() & X86::AC_EVEX_2_EVEX)
2296 OutStreamer->AddComment("EVEX TO EVEX Compression ", false);
2297 }
2298
2299 // We use this to suppress NOP padding for Windows EH.
2300 bool IsTailJump = false;
2301
2302 switch (MI->getOpcode()) {
2303 case TargetOpcode::DBG_VALUE:
2304 llvm_unreachable("Should be handled target independently");
2305
2306 case X86::EH_RETURN:
2307 case X86::EH_RETURN64: {
2308 // Lower these as normal, but add some comments.
2309 Register Reg = MI->getOperand(0).getReg();
2310 OutStreamer->AddComment(StringRef("eh_return, addr: %") +
2311 X86ATTInstPrinter::getRegisterName(Reg));
2312 break;
2313 }
2314 case X86::CLEANUPRET: {
2315 // Lower these as normal, but add some comments.
2316 OutStreamer->AddComment("CLEANUPRET");
2317 break;
2318 }
2319
2320 case X86::CATCHRET: {
2321 // Lower these as normal, but add some comments.
2322 OutStreamer->AddComment("CATCHRET");
2323 break;
2324 }
2325
2326 case X86::ENDBR32:
2327 case X86::ENDBR64: {
2328 // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
2329 // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
2330 // non-empty. If MI is the initial ENDBR, place the
2331 // __patchable_function_entries label after ENDBR.
2332 if (CurrentPatchableFunctionEntrySym &&
2333 CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
2334 MI == &MF->front().front()) {
2335 MCInst Inst;
2336 MCInstLowering.Lower(MI, Inst);
2337 EmitAndCountInstruction(Inst);
2338 CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
2339 OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
2340 return;
2341 }
2342 break;
2343 }
2344
2345 case X86::TAILJMPd64:
2346 if (IndCSPrefix && MI->hasRegisterImplicitUseOperand(X86::R11))
2347 EmitAndCountInstruction(MCInstBuilder(X86::CS_PREFIX));
2348
2349 if (EnableImportCallOptimization && isImportedFunction(MI->getOperand(0))) {
2350 emitLabelAndRecordForImportCallOptimization(
2351 IMAGE_RETPOLINE_AMD64_IMPORT_BR);
2352 }
2353
2354 // Lower this as normal, but add a comment.
2355 OutStreamer->AddComment("TAILCALL");
2356 IsTailJump = true;
2357 break;
2358
2359 case X86::TAILJMPr:
2360 case X86::TAILJMPm:
2361 case X86::TAILJMPd:
2362 case X86::TAILJMPd_CC:
2363 case X86::TAILJMPr64:
2364 case X86::TAILJMPm64:
2365 case X86::TAILJMPd64_CC:
2366 if (EnableImportCallOptimization)
2367 report_fatal_error("Unexpected TAILJMP instruction was emitted when "
2368 "import call optimization was enabled");
2369
2370 // Lower these as normal, but add some comments.
2371 OutStreamer->AddComment("TAILCALL");
2372 IsTailJump = true;
2373 break;
2374
2375 case X86::TAILJMPm64_REX:
2376 if (EnableImportCallOptimization && isCallToCFGuardFunction(MI)) {
2377 emitLabelAndRecordForImportCallOptimization(
2378 IMAGE_RETPOLINE_AMD64_CFG_BR_REX);
2379 }
2380
2381 OutStreamer->AddComment("TAILCALL");
2382 IsTailJump = true;
2383 break;
2384
2385 case X86::TAILJMPr64_REX: {
2386 if (EnableImportCallOptimization) {
2387 assert(MI->getOperand(0).getReg() == X86::RAX &&
2388 "Indirect tail calls with impcall enabled must go through RAX (as "
2389 "enforced by TCRETURNImpCallri64)");
2390 emitLabelAndRecordForImportCallOptimization(
2391 IMAGE_RETPOLINE_AMD64_INDIR_BR);
2392 }
2393
2394 OutStreamer->AddComment("TAILCALL");
2395 IsTailJump = true;
2396 break;
2397 }
2398
2399 case X86::JMP64r:
2400 if (EnableImportCallOptimization && hasJumpTableInfoInBlock(MI)) {
2401 uint16_t EncodedReg =
2402 this->getSubtarget().getRegisterInfo()->getEncodingValue(
2403 MI->getOperand(0).getReg().asMCReg());
2404 emitLabelAndRecordForImportCallOptimization(
2405 (ImportCallKind)(IMAGE_RETPOLINE_AMD64_SWITCHTABLE_FIRST +
2406 EncodedReg));
2407 }
2408 break;
2409
2410 case X86::JMP16r:
2411 case X86::JMP16m:
2412 case X86::JMP32r:
2413 case X86::JMP32m:
2414 case X86::JMP64m:
2415 if (EnableImportCallOptimization && hasJumpTableInfoInBlock(MI))
2417 "Unexpected JMP instruction was emitted for a jump-table when import "
2418 "call optimization was enabled");
2419 break;
2420
2421 case X86::TLS_addr32:
2422 case X86::TLS_addr64:
2423 case X86::TLS_addrX32:
2424 case X86::TLS_base_addr32:
2425 case X86::TLS_base_addr64:
2426 case X86::TLS_base_addrX32:
2427 case X86::TLS_desc32:
2428 case X86::TLS_desc64:
2429 return LowerTlsAddr(MCInstLowering, *MI);
2430
2431 case X86::MOVPC32r: {
2432 // This is a pseudo op for a two instruction sequence with a label, which
2433 // looks like:
2434 // call "L1$pb"
2435 // "L1$pb":
2436 // popl %esi
2437
2438 // Emit the call.
2439 MCSymbol *PICBase = MF->getPICBaseSymbol();
2440 // FIXME: We would like an efficient form for this, so we don't have to do a
2441 // lot of extra uniquing.
2442 EmitAndCountInstruction(
2443 MCInstBuilder(X86::CALLpcrel32)
2444 .addExpr(MCSymbolRefExpr::create(PICBase, OutContext)));
2445
2446 const X86FrameLowering *FrameLowering =
2447 MF->getSubtarget<X86Subtarget>().getFrameLowering();
2448 bool hasFP = FrameLowering->hasFP(*MF);
2449
2450 // TODO: This is needed only if we require precise CFA.
2451 bool HasActiveDwarfFrame = OutStreamer->getNumFrameInfos() &&
2452 !OutStreamer->getDwarfFrameInfos().back().End;
2453
2454 int stackGrowth = -RI->getSlotSize();
2455
2456 if (HasActiveDwarfFrame && !hasFP) {
2457 OutStreamer->emitCFIAdjustCfaOffset(-stackGrowth);
2458 MF->getInfo<X86MachineFunctionInfo>()->setHasCFIAdjustCfa(true);
2459 }
2460
2461 // Emit the label.
2462 OutStreamer->emitLabel(PICBase);
2463
2464 // popl $reg
2465 EmitAndCountInstruction(
2466 MCInstBuilder(X86::POP32r).addReg(MI->getOperand(0).getReg()));
2467
2468 if (HasActiveDwarfFrame && !hasFP) {
2469 OutStreamer->emitCFIAdjustCfaOffset(stackGrowth);
2470 }
2471 return;
2472 }
2473
2474 case X86::ADD32ri: {
2475 // Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri.
2476 if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS)
2477 break;
2478
2479 // Okay, we have something like:
2480 // EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL)
2481
2482 // For this, we want to print something like:
2483 // MYGLOBAL + (. - PICBASE)
2484 // However, we can't generate a ".", so just emit a new label here and refer
2485 // to it.
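// As an illustrative sketch (label names are examples only), the emitted
// assembly ends up looking like:
// Ltmp0:
// addl $_MYGLOBAL+(Ltmp0-L0$pb), %eax
// where Ltmp0 is the temporary label created just below and L0$pb is this
// function's PIC base label.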
2486 MCSymbol *DotSym = OutContext.createTempSymbol();
2487 OutStreamer->emitLabel(DotSym);
2488
2489 // Now that we have emitted the label, lower the complex operand expression.
2490 MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));
2491
2492 const MCExpr *DotExpr = MCSymbolRefExpr::create(DotSym, OutContext);
2493 const MCExpr *PICBase =
2494 MCSymbolRefExpr::create(MF->getPICBaseSymbol(), OutContext);
2495 DotExpr = MCBinaryExpr::createSub(DotExpr, PICBase, OutContext);
2496
2497 DotExpr = MCBinaryExpr::createAdd(
2498 MCSymbolRefExpr::create(OpSym, OutContext), DotExpr, OutContext);
2499
2500 EmitAndCountInstruction(MCInstBuilder(X86::ADD32ri)
2501 .addReg(MI->getOperand(0).getReg())
2502 .addReg(MI->getOperand(1).getReg())
2503 .addExpr(DotExpr));
2504 return;
2505 }
2506 case TargetOpcode::STATEPOINT:
2507 return LowerSTATEPOINT(*MI, MCInstLowering);
2508
2509 case TargetOpcode::FAULTING_OP:
2510 return LowerFAULTING_OP(*MI, MCInstLowering);
2511
2512 case TargetOpcode::FENTRY_CALL:
2513 return LowerFENTRY_CALL(*MI, MCInstLowering);
2514
2515 case TargetOpcode::PATCHABLE_OP:
2516 return LowerPATCHABLE_OP(*MI, MCInstLowering);
2517
2518 case TargetOpcode::STACKMAP:
2519 return LowerSTACKMAP(*MI);
2520
2521 case TargetOpcode::PATCHPOINT:
2522 return LowerPATCHPOINT(*MI, MCInstLowering);
2523
2524 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
2525 return LowerPATCHABLE_FUNCTION_ENTER(*MI, MCInstLowering);
2526
2527 case TargetOpcode::PATCHABLE_RET:
2528 return LowerPATCHABLE_RET(*MI, MCInstLowering);
2529
2530 case TargetOpcode::PATCHABLE_TAIL_CALL:
2531 return LowerPATCHABLE_TAIL_CALL(*MI, MCInstLowering);
2532
2533 case TargetOpcode::PATCHABLE_EVENT_CALL:
2534 return LowerPATCHABLE_EVENT_CALL(*MI, MCInstLowering);
2535
2536 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
2537 return LowerPATCHABLE_TYPED_EVENT_CALL(*MI, MCInstLowering);
2538
2539 case X86::MORESTACK_RET:
2540 EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
2541 return;
2542
2543 case X86::KCFI_CHECK:
2544 return LowerKCFI_CHECK(*MI);
2545
2546 case X86::ASAN_CHECK_MEMACCESS:
2547 return LowerASAN_CHECK_MEMACCESS(*MI);
2548
2549 case X86::MORESTACK_RET_RESTORE_R10:
2550 // Return, then restore R10.
2551 EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
2552 EmitAndCountInstruction(
2553 MCInstBuilder(X86::MOV64rr).addReg(X86::R10).addReg(X86::RAX));
2554 return;
2555
2556 case X86::SEH_PushReg:
2557 case X86::SEH_SaveReg:
2558 case X86::SEH_SaveXMM:
2559 case X86::SEH_StackAlloc:
2560 case X86::SEH_StackAlign:
2561 case X86::SEH_SetFrame:
2562 case X86::SEH_PushFrame:
2563 case X86::SEH_EndPrologue:
2564 case X86::SEH_EndEpilogue:
2565 case X86::SEH_UnwindV2Start:
2566 case X86::SEH_UnwindVersion:
2567 EmitSEHInstruction(MI);
2568 return;
2569
2570 case X86::SEH_SplitChainedAtEndOfBlock:
2571 assert(!SplitChainedAtEndOfBlock &&
2572 "Duplicate SEH_SplitChainedAtEndOfBlock in a current block");
2573 SplitChainedAtEndOfBlock = true;
2574 return;
2575
2576 case X86::SEH_BeginEpilogue: {
2577 assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
2578 EmitSEHInstruction(MI);
2579 return;
2580 }
2581 case X86::UBSAN_UD1:
2582 EmitAndCountInstruction(MCInstBuilder(X86::UD1Lm)
2583 .addReg(X86::EAX)
2584 .addReg(X86::EAX)
2585 .addImm(1)
2586 .addReg(X86::NoRegister)
2587 .addImm(MI->getOperand(0).getImm())
2588 .addReg(X86::NoRegister));
2589 return;
2590 case X86::CALL64pcrel32:
2591 if (IndCSPrefix && MI->hasRegisterImplicitUseOperand(X86::R11))
2592 EmitAndCountInstruction(MCInstBuilder(X86::CS_PREFIX));
2593
2594 if (EnableImportCallOptimization && isImportedFunction(MI->getOperand(0))) {
2595 emitLabelAndRecordForImportCallOptimization(
2596 IMAGE_RETPOLINE_AMD64_IMPORT_CALL);
2597
2598 MCInst TmpInst;
2599 MCInstLowering.Lower(MI, TmpInst);
2600
2601 // For Import Call Optimization to work, we need the call instruction
2602 // with a rex prefix, and a 5-byte nop after the call instruction.
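// Illustrative shape of the resulting sequence (symbol names are examples):
// .Limpcall0: # label recorded for import call optimization
// rex64
// callq callee
// nopl (%rax,%rax) # the 5-byte nop emitted below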
2603 EmitAndCountInstruction(MCInstBuilder(X86::REX64_PREFIX));
2604 emitCallInstruction(TmpInst);
2605 emitNop(*OutStreamer, 5, Subtarget);
2606 maybeEmitNopAfterCallForWindowsEH(MI);
2607 return;
2608 }
2609
2610 break;
2611
2612 case X86::CALL64r:
2613 if (EnableImportCallOptimization) {
2614 assert(MI->getOperand(0).getReg() == X86::RAX &&
2615 "Indirect calls with impcall enabled must go through RAX (as "
2616 "enforced by CALL64r_ImpCall)");
2617
2618 emitLabelAndRecordForImportCallOptimization(
2619 IMAGE_RETPOLINE_AMD64_INDIR_CALL);
2620 MCInst TmpInst;
2621 MCInstLowering.Lower(MI, TmpInst);
2622 emitCallInstruction(TmpInst);
2623
2624 // For Import Call Optimization to work, we need a 3-byte nop after the
2625 // call instruction.
2626 emitNop(*OutStreamer, 3, Subtarget);
2627 maybeEmitNopAfterCallForWindowsEH(MI);
2628 return;
2629 }
2630 break;
2631
2632 case X86::CALL64m:
2633 if (EnableImportCallOptimization && isCallToCFGuardFunction(MI)) {
2634 emitLabelAndRecordForImportCallOptimization(
2635 IMAGE_RETPOLINE_AMD64_CFG_CALL);
2636 }
2637 break;
2638
2639 case X86::JCC_1:
2640 // Two instruction prefixes (2EH for branch not-taken and 3EH for branch
2641 // taken) are used as branch hints. Here we add branch taken prefix for
2642 // jump instruction with higher probability than threshold.
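// For example (illustrative), when the edge probability to the target block
// exceeds the threshold, the branch below is emitted as
// ds # 0x3E prefix, interpreted as a taken-branch hint
// jne .LBB0_2
// Branches at or below the threshold are left unprefixed.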
2643 if (getSubtarget().hasBranchHint() && EnableBranchHint) {
2644 const MachineBranchProbabilityInfo *MBPI =
2645 &getAnalysis<MachineBranchProbabilityInfoWrapperPass>().getMBPI();
2646 MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
2647 BranchProbability EdgeProb =
2648 MBPI->getEdgeProbability(MI->getParent(), DestBB);
2649 BranchProbability Threshold(BranchHintProbabilityThreshold, 100);
2650 if (EdgeProb > Threshold)
2651 EmitAndCountInstruction(MCInstBuilder(X86::DS_PREFIX));
2652 }
2653 break;
2654
2655 case X86::JCC_SELF:
2656 MCSymbol *Sym = OutContext.createTempSymbol();
2657 OutStreamer->emitLabel(Sym);
2658 EmitAndCountInstruction(
2659 MCInstBuilder(X86::JCC_1)
2660 .addExpr(MCSymbolRefExpr::create(Sym, OutContext))
2661 .addImm(MI->getOperand(0).getImm()));
2662 return;
2663 }
2664
2665 MCInst TmpInst;
2666 MCInstLowering.Lower(MI, TmpInst);
2667
2668 if (MI->isCall()) {
2669 emitCallInstruction(TmpInst);
2670 // Since tail calls transfer control without leaving a stack frame, there is
2671 // never a need for NOP padding tail calls.
2672 if (!IsTailJump)
2673 maybeEmitNopAfterCallForWindowsEH(MI);
2674 return;
2675 }
2676
2677 EmitAndCountInstruction(TmpInst);
2678}
2679
2680 void X86AsmPrinter::emitInlineAsmEnd(const MCSubtargetInfo &StartInfo,
2681 const MCSubtargetInfo *EndInfo,
2682 const MachineInstr *MI) {
2683 if (MI) {
2684 // If unwinding inline asm ends on a call, wineh may require insertion of
2685 // a nop.
2686 unsigned ExtraInfo = MI->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
2687 if (ExtraInfo & InlineAsm::Extra_MayUnwind)
2688 maybeEmitNopAfterCallForWindowsEH(MI);
2689 }
2690}
2691
2692void X86AsmPrinter::emitCallInstruction(const llvm::MCInst &MCI) {
2693 // Stackmap shadows cannot include branch targets, so we can count the bytes
2694 // in a call towards the shadow, but must ensure that no thread returns
2695 // into the stackmap shadow. The only way to achieve this is if the call
2696 // is at the end of the shadow.
2697
2698 // Count the size of the call towards the shadow.
2699 SMShadowTracker.count(MCI, getSubtargetInfo(), CodeEmitter.get());
2700 // Then flush the shadow so that we fill with nops before the call, not
2701 // after it.
2702 SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
2703 // Then emit the call
2704 OutStreamer->emitInstruction(MCI, getSubtargetInfo());
2705}
2706
2707// Determines whether a NOP is required after a CALL, so that Windows EH
2708// IP2State tables have the correct information.
2709//
2710// On most Windows platforms (AMD64, ARM64, ARM32, IA64, but *not* x86-32),
2711// exception handling works by looking up instruction pointers in lookup
2712// tables. These lookup tables are stored in .xdata sections in executables.
2713// One element of the lookup tables are the "IP2State" tables (Instruction
2714// Pointer to State).
2715//
2716// If a function has any instructions that require cleanup during exception
2717// unwinding, then it will have an IP2State table. Each entry in the IP2State
2718// table describes a range of bytes in the function's instruction stream, and
2719// associates an "EH state number" with that range of instructions. A value of
2720// -1 means "the null state", which does not require any code to execute.
2721// A value other than -1 is an index into the State table.
2722//
2723// The entries in the IP2State table contain byte offsets within the instruction
2724// stream of the function. The Windows ABI requires that these offsets are
2725// aligned to instruction boundaries; they are not permitted to point to a byte
2726// that is not the first byte of an instruction.
2727//
2728// Unfortunately, CALL instructions present a problem during unwinding. CALL
2729// instructions push the address of the instruction after the CALL instruction,
2730// so that execution can resume after the CALL. If the CALL is the last
2731// instruction within an IP2State region, then the return address (on the stack)
2732// points to the *next* IP2State region. This means that the unwinder will
2733// use the wrong cleanup funclet during unwinding.
2734//
2735// To fix this problem, the Windows AMD64 ABI requires that CALL instructions
2736// are never placed at the end of an IP2State region. Stated equivalently, the
2737// end of a CALL instruction cannot be aligned to an IP2State boundary. If a
2738// CALL instruction would occur at the end of an IP2State region, then the
2739// compiler must insert a NOP instruction after the CALL. The NOP instruction
2740// is placed in the same EH region as the CALL instruction, so that the return
2741// address points to the NOP and the unwinder will locate the correct region.
2742//
2743// NOP padding is only necessary on Windows AMD64 targets. On ARM64 and ARM32,
2744// instructions have a fixed size so the unwinder knows how to "back up" by
2745// one instruction.
2746//
2747// Interaction with Import Call Optimization (ICO):
2748//
2749// Import Call Optimization (ICO) is a compiler + OS feature on Windows which
2750// improves the performance and security of DLL imports. ICO relies on using a
2751// specific CALL idiom that can be replaced by the OS DLL loader. This removes
2752// a load and indirect CALL and replaces it with a single direct CALL.
2753//
2754// To achieve this, ICO also inserts NOPs after the CALL instruction. If the
2755// end of the CALL is aligned with an EH state transition, we *also* insert
2756// a single-byte NOP. **Both forms of NOPs must be preserved.** They cannot
2757// be combined into a single larger NOP; nor can the second NOP be removed.
2758//
2759// This is necessary because, if ICO is active and the call site is modified
2760// by the loader, the loader will end up overwriting the NOPs that were inserted
2761// for ICO. That means that those NOPs cannot be used for the correct
2762// termination of the exception handling region (the IP2State transition),
2763// so we still need an additional NOP instruction. The NOPs cannot be combined
2764// into a longer NOP (which is ordinarily desirable) because then ICO would
2765// split one instruction, producing a malformed instruction after the ICO call.
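// As a concrete (illustrative) example of the first problem above: if an EH
// state region ends like
// callq mayThrow
// .Ltmp_state_end: # IP2State transition recorded here
// then the return address pushed by the call equals .Ltmp_state_end and is
// already inside the *next* region. Emitting
// callq mayThrow
// nop # return address now stays inside the current region
// .Ltmp_state_end:
// keeps the unwinder looking up the correct state for this call site.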
2766void X86AsmPrinter::maybeEmitNopAfterCallForWindowsEH(const MachineInstr *MI) {
2767 // We only need to insert NOPs after CALLs when targeting Windows on AMD64.
2768 // (Don't let the name fool you: Itanium refers to table-based exception
2769 // handling, not the Itanium architecture.)
2770 if (MAI.getExceptionHandlingType() != ExceptionHandling::WinEH ||
2771 MAI.getWinEHEncodingType() != WinEH::EncodingType::Itanium) {
2772 return;
2773 }
2774
2775 bool HasEHPersonality = MF->getWinEHFuncInfo() != nullptr;
2776
2777 // Set up MBB iterator, initially positioned on the same MBB as MI.
2778 MachineFunction::const_iterator MFI(MI->getParent());
2779 MachineFunction::const_iterator MFE = MF->end();
2780
2781 // Set up instruction iterator, positioned immediately *after* MI.
2782 MachineBasicBlock::const_iterator MBBI(MI);
2783 MachineBasicBlock::const_iterator MBBE = MI->getParent()->end();
2784 ++MBBI; // Step over MI
2785
2786 // This loop iterates MBBs
2787 for (;;) {
2788 // This loop iterates instructions
2789 for (; MBBI != MBBE; ++MBBI) {
2790 // Check the instruction that follows this CALL.
2791 const MachineInstr &NextMI = *MBBI;
2792
2793 // If there is an EH_LABEL after this CALL, then there is an EH state
2794 // transition after this CALL. This is exactly the situation which
2795 // requires NOP padding.
2796 if (NextMI.isEHLabel()) {
2797 if (HasEHPersonality) {
2798 EmitAndCountInstruction(MCInstBuilder(X86::NOOP));
2799 return;
2800 }
2801 // We actually want to continue, in case there is an SEH_BeginEpilogue
2802 // instruction after the EH_LABEL. In some situations, IR is produced
2803 // that contains EH_LABEL pseudo-instructions, even when we are not
2804 // generating IP2State tables. We still need to insert a NOP before
2805 // SEH_BeginEpilogue in that case.
2806 continue;
2807 }
2808
2809 // Somewhat similarly, if the CALL is the last instruction before the
2810 // SEH prologue, then we also need a NOP. This is necessary because the
2811 // Windows stack unwinder will not invoke a function's exception handler
2812 // if the instruction pointer is in the function prologue or epilogue.
2813 //
2814 // We always emit a NOP before SEH_BeginEpilogue, even if there is no
2815 // personality function (unwind info) for this frame. This is the same
2816 // behavior as MSVC.
2817 if (NextMI.getOpcode() == X86::SEH_BeginEpilogue) {
2818 EmitAndCountInstruction(MCInstBuilder(X86::NOOP));
2819 return;
2820 }
2821
2822 if (!NextMI.isPseudo() && !NextMI.isMetaInstruction()) {
2823 // We found a real instruction. During the CALL, the return IP will
2824 // point to this instruction. Since this instruction has the same EH
2825 // state as the call itself (because there is no intervening EH_LABEL),
2826 // the IP2State table will be accurate; there is no need to insert a
2827 // NOP.
2828 return;
2829 }
2830
2831 // The next instruction is a pseudo-op. Ignore it and keep searching.
2832 // Because these instructions do not generate any machine code, they
2833 // cannot prevent the IP2State table from pointing at the wrong
2834 // instruction during a CALL.
2835 }
2836
2837 // We've reached the end of this MBB. Find the next MBB in program order.
2838 // MBB order should be finalized by this point, so falling across MBBs is
2839 // expected.
2840 ++MFI;
2841 if (MFI == MFE) {
2842 // No more blocks; we've reached the end of the function. This should
2843 // only happen with no-return functions, but double-check to be sure.
2844 if (HasEHPersonality) {
2845 // If the CALL has no successors, then it is a noreturn function.
2846 // Insert an INT3 instead of a NOP. This accomplishes the same purpose,
2847 // but is more clear to read. Also, analysis tools will understand
2848 // that they should not continue disassembling after the CALL (unless
2849 // there are other branches to that label).
2850 if (MI->getParent()->succ_empty())
2851 EmitAndCountInstruction(MCInstBuilder(X86::INT3));
2852 else
2853 EmitAndCountInstruction(MCInstBuilder(X86::NOOP));
2854 }
2855 return;
2856 }
2857
2858 // Set up iterator to scan the next basic block.
2859 const MachineBasicBlock *NextMBB = &*MFI;
2860 MBBI = NextMBB->instr_begin();
2861 MBBE = NextMBB->instr_end();
2862 }
2863}
2864
2865void X86AsmPrinter::emitLabelAndRecordForImportCallOptimization(
2866 ImportCallKind Kind) {
2867 assert(EnableImportCallOptimization);
2868
2869 MCSymbol *CallSiteSymbol = MMI->getContext().createNamedTempSymbol("impcall");
2870 OutStreamer->emitLabel(CallSiteSymbol);
2871
2872 SectionToImportedFunctionCalls[OutStreamer->getCurrentSectionOnly()]
2873 .push_back({CallSiteSymbol, Kind});
2874}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static void printShuffleMask(raw_ostream &Out, Type *Ty, ArrayRef< int > Mask)
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
print mir2vec MIR2Vec Vocabulary Printer Pass
Definition MIR2Vec.cpp:598
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register Reg
Promote Memory to Register
Definition Mem2Reg.cpp:110
static constexpr unsigned SM(unsigned Version)
uint64_t IntrinsicInst * II
static cl::opt< bool > EnableBranchHint("ppc-use-branch-hint", cl::init(true), cl::desc("Enable static hinting of branches on ppc"), cl::Hidden)
static MCSymbol * GetSymbolFromOperand(const MachineOperand &MO, AsmPrinter &AP)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallString class.
This file contains some functions that are useful when dealing with strings.
static MCOperand LowerSymbolOperand(const MachineInstr *MI, const MachineOperand &MO, const MCSymbol *Symbol, AsmPrinter &AP)
static void emitX86Nops(MCStreamer &OS, unsigned NumBytes, const X86Subtarget *Subtarget)
Emit the optimal amount of multi-byte nops on X86.
static unsigned getRetOpcode(const X86Subtarget &Subtarget)
static void printSignExtend(const MachineInstr *MI, MCStreamer &OutStreamer, int SrcEltBits, int DstEltBits)
static unsigned getSrcIdx(const MachineInstr *MI, unsigned SrcIdx)
static void printBroadcast(const MachineInstr *MI, MCStreamer &OutStreamer, int Repeats, int BitWidth)
static bool printExtend(const MachineInstr *MI, MCStreamer &OutStreamer, int SrcEltBits, int DstEltBits, bool IsSext)
static void printZeroUpperMove(const MachineInstr *MI, MCStreamer &OutStreamer, int SclWidth, int VecWidth, const char *ShuffleComment)
static void addConstantComment(const MachineInstr *MI, MCStreamer &OutStreamer, unsigned OpNo, int BitWidth, int Repeats=1)
static unsigned convertTailJumpOpcode(unsigned Opcode, bool IsLarge=false)
#define MASK_AVX512_CASE(Instr)
#define CASE_ARITH_RM(Instr)
static void addConstantComments(const MachineInstr *MI, MCStreamer &OutStreamer)
#define CASE_256_MOV_RM()
#define CASE_AVX512_ARITH_RM(Instr)
bool hasJumpTableInfoInBlock(const llvm::MachineInstr *MI)
static unsigned emitNop(MCStreamer &OS, unsigned NumBytes, const X86Subtarget *Subtarget)
Emit the largest nop instruction smaller than or equal to NumBytes bytes.
static void printDstRegisterName(raw_ostream &CS, const MachineInstr *MI, unsigned SrcOpIdx)
#define CASE_MOVX_RM(Ext, Type)
bool isImportedFunction(const MachineOperand &MO)
static cl::opt< bool > EnableBranchHint("enable-branch-hint", cl::desc("Enable branch hint."), cl::init(false), cl::Hidden)
static void printConstant(const APInt &Val, raw_ostream &CS, bool PrintZero=false)
static void printZeroExtend(const MachineInstr *MI, MCStreamer &OutStreamer, int SrcEltBits, int DstEltBits)
static std::string getShuffleComment(const MachineInstr *MI, unsigned SrcOp1Idx, unsigned SrcOp2Idx, ArrayRef< int > Mask)
bool isCallToCFGuardFunction(const MachineInstr *MI)
#define CASE_512_MOV_RM()
static cl::opt< unsigned > BranchHintProbabilityThreshold("branch-hint-probability-threshold", cl::desc("The probability threshold of enabling branch hint."), cl::init(50), cl::Hidden)
#define CASE_128_MOV_RM()
void toString(SmallVectorImpl< char > &Str, unsigned FormatPrecision=0, unsigned FormatMaxPadding=3, bool TruncateZero=true) const
Definition APFloat.h:1563
const fltSemantics & getSemantics() const
Definition APFloat.h:1542
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Definition APFloat.h:1134
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
Definition APInt.cpp:1054
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1563
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1511
unsigned getNumWords() const
Get the number of words.
Definition APInt.h:1518
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
Definition APInt.cpp:1027
const uint64_t * getRawData() const
This function returns a pointer to the internal storage of the APInt.
Definition APInt.h:576
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
This class is intended to be used as a driving class for all asm writers.
Definition AsmPrinter.h:91
MCSymbol * getSymbol(const GlobalValue *GV) const
MCSymbol * CurrentFnBegin
Definition AsmPrinter.h:233
TargetMachine & TM
Target machine description.
Definition AsmPrinter.h:94
virtual MCSymbol * GetCPISymbol(unsigned CPID) const
Return the symbol for the specified constant pool entry.
MachineFunction * MF
The current machine function.
Definition AsmPrinter.h:109
MCSymbol * GetJTISymbol(unsigned JTID, bool isLinkerPrivate=false) const
Return the symbol for the specified jump table entry.
AsmPrinter(TargetMachine &TM, std::unique_ptr< MCStreamer > Streamer, char &ID=AsmPrinter::ID)
MCSymbol * getSymbolPreferLocal(const GlobalValue &GV) const
Similar to getSymbol() but preferred for references.
MachineModuleInfo * MMI
This is a pointer to the current MachineModuleInfo.
Definition AsmPrinter.h:112
MCContext & OutContext
This is the context for the output file that we are streaming.
Definition AsmPrinter.h:101
MCSymbol * createTempSymbol(const Twine &Name) const
MCSymbol * CurrentPatchableFunctionEntrySym
The symbol for the entry in __patchable_function_entires.
Definition AsmPrinter.h:124
std::unique_ptr< MCStreamer > OutStreamer
This is the MCStreamer object for the file we are generating.
Definition AsmPrinter.h:106
const MCAsmInfo & MAI
Target Asm Printer information.
Definition AsmPrinter.h:97
void getNameWithPrefix(SmallVectorImpl< char > &Name, const GlobalValue *GV) const
MCSymbol * GetBlockAddressSymbol(const BlockAddress *BA) const
Return the MCSymbol used to satisfy BlockAddress uses of the specified basic block.
const MCSubtargetInfo & getSubtargetInfo() const
Return information about subtarget.
This is an important base class in LLVM.
Definition Constant.h:43
Register getReg() const
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Definition Function.h:714
uint64_t getFnAttributeAsParsedInteger(StringRef Kind, uint64_t Default=0) const
For a string attribute Kind, parse attribute as an integer.
Definition Function.cpp:775
bool hasInternalLinkage() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition MCAsmInfo.h:64
bool doesSetDirectiveSuppressReloc() const
Definition MCAsmInfo.h:604
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:343
static const MCBinaryExpr * createSub(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:428
MCCodeEmitter - Generic instruction encoding interface.
virtual void encodeInstruction(const MCInst &Inst, SmallVectorImpl< char > &CB, SmallVectorImpl< MCFixup > &Fixups, const MCSubtargetInfo &STI) const =0
Encode the given Inst to bytes and append to CB.
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition MCExpr.cpp:212
Context object for machine code objects.
Definition MCContext.h:83
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
LLVM_ABI MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
LLVM_ABI const MCTargetOptions & getTargetOptions() const
Base class for the full range of assembler expressions which are needed for parsing.
Definition MCExpr.h:34
MCInstBuilder & addReg(MCRegister Reg)
Add a new register operand.
MCInstBuilder & addExpr(const MCExpr *Val)
Add a new MCExpr operand.
Instances of this class represent a single low-level machine instruction.
Definition MCInst.h:188
unsigned getNumOperands() const
Definition MCInst.h:212
unsigned getOpcode() const
Definition MCInst.h:202
iterator insert(iterator I, const MCOperand &Op)
Definition MCInst.h:232
void setFlags(unsigned F)
Definition MCInst.h:204
void addOperand(const MCOperand Op)
Definition MCInst.h:215
iterator begin()
Definition MCInst.h:227
void setOpcode(unsigned Op)
Definition MCInst.h:201
const MCOperand & getOperand(unsigned i) const
Definition MCInst.h:210
Instances of this class represent operands of the MCInst class.
Definition MCInst.h:40
static MCOperand createExpr(const MCExpr *Val)
Definition MCInst.h:166
static MCOperand createReg(MCRegister Reg)
Definition MCInst.h:138
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
MCRegister getReg() const
Returns the register number.
Definition MCInst.h:73
Streaming machine code generation interface.
Definition MCStreamer.h:222
virtual void emitWinCFIUnwindVersion(uint8_t Version, SMLoc Loc=SMLoc())
virtual void emitWinCFIPushReg(MCRegister Register, SMLoc Loc=SMLoc())
virtual void emitBinaryData(StringRef Data)
Functionally identical to EmitBytes.
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
virtual void emitWinCFIUnwindV2Start(SMLoc Loc=SMLoc())
virtual void emitWinCFIEndEpilogue(SMLoc Loc=SMLoc())
virtual void emitWinCFIPushFrame(bool Code, SMLoc Loc=SMLoc())
virtual void emitWinCFISaveXMM(MCRegister Register, unsigned Offset, SMLoc Loc=SMLoc())
MCContext & getContext() const
Definition MCStreamer.h:323
virtual void AddComment(const Twine &T, bool EOL=true)
Add a textual comment.
Definition MCStreamer.h:394
virtual void emitWinCFIBeginEpilogue(SMLoc Loc=SMLoc())
virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc=SMLoc())
Emit a label for Symbol into the current section.
MCTargetStreamer * getTargetStreamer()
Definition MCStreamer.h:333
virtual void emitWinCFISaveReg(MCRegister Register, unsigned Offset, SMLoc Loc=SMLoc())
virtual void emitWinCFIEndProlog(SMLoc Loc=SMLoc())
virtual void emitCodeAlignment(Align Alignment, const MCSubtargetInfo *STI, unsigned MaxBytesToEmit=0)
Emit nops until the byte alignment ByteAlignment is reached.
virtual void emitWinCFISetFrame(MCRegister Register, unsigned Offset, SMLoc Loc=SMLoc())
virtual void emitWinCFIAllocStack(unsigned Size, SMLoc Loc=SMLoc())
MCSection * getCurrentSectionOnly() const
Definition MCStreamer.h:428
virtual void emitBytes(StringRef Data)
Emit the bytes in Data into the output.
Generic base class for all target subtargets.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:214
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
StringRef getName() const
getName - Get the symbol name.
Definition MCSymbol.h:188
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
BranchProbability getEdgeProbability(const MachineBasicBlock *Src, const MachineBasicBlock *Dst) const
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
MCSymbol * getPICBaseSymbol() const
getPICBaseSymbol - Return a function-local symbol to represent the PIC base.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::const_iterator const_iterator
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
mop_range operands()
bool isPseudo(QueryType Type=IgnoreBundle) const
Return true if this is a pseudo instruction that doesn't correspond to a real machine instruction.
const MachineOperand & getOperand(unsigned i) const
bool isEHLabel() const
bool isMetaInstruction(QueryType Type=IgnoreBundle) const
Return true if this instruction doesn't produce any output in the form of executable instructions.
StubValueTy & getGVStubEntry(MCSymbol *Sym)
PointerIntPair< MCSymbol *, 1, bool > StubValueTy
MachineModuleInfoMachO - This is a MachineModuleInfoImpl implementation for MachO targets.
Ty & getObjFileInfo()
Keep track of various per-module pieces of information for backends that would like to do so.
MachineOperand class - Representation of each machine instruction operand.
static MachineOperand CreateMCSymbol(MCSymbol *Sym, unsigned TargetFlags=0)
const GlobalValue * getGlobal() const
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool isSymbol() const
isSymbol - Tests if this is a MO_ExternalSymbol operand.
bool isJTI() const
isJTI - Tests if this is a MO_JumpTableIndex operand.
const BlockAddress * getBlockAddress() const
unsigned getTargetFlags() const
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
const char * getSymbolName() const
Register getReg() const
getReg - Returns the register number.
void setTargetFlags(unsigned F)
MCSymbol * getMCSymbol() const
@ MO_Immediate
Immediate operand.
@ MO_ConstantPoolIndex
Address of indexed Constant in Constant Pool.
@ MO_MCSymbol
MCSymbol reference (for debug/eh info)
@ MO_GlobalAddress
Address of a global value.
@ MO_RegisterMask
Mask of preserved registers.
@ MO_BlockAddress
Address of a basic block.
@ MO_MachineBasicBlock
MachineBasicBlock reference.
@ MO_Register
Register operand.
@ MO_ExternalSymbol
Name of external global symbol.
@ MO_JumpTableIndex
Address of indexed Jump Table for switch.
int64_t getOffset() const
Return the offset from the symbol in this operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
LLVM_ABI void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV, bool CannotUsePrivateLabel) const
Print the appropriate prefix and the specified global variable's name.
Definition Mangler.cpp:121
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
Definition Pass.cpp:140
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
PointerTy getPointer() const
Wrapper class representing virtual and physical registers.
Definition Register.h:20
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
Definition Register.h:107
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition SmallString.h:26
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:140
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
Primary interface to the complete machine description for the target machine.
const Triple & getTargetTriple() const
CodeModel::Model getCodeModel() const
Returns the code model.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Definition Type.h:155
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:201
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
Definition Type.h:144
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
Definition Type.h:158
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
static const char * getRegisterName(MCRegister Reg)
void emitInstruction(const MachineInstr *MI) override
Targets should implement this to emit instructions.
const X86Subtarget & getSubtarget() const
X86AsmPrinter(TargetMachine &TM, std::unique_ptr< MCStreamer > Streamer)
void emitInlineAsmEnd(const MCSubtargetInfo &StartInfo, const MCSubtargetInfo *EndInfo, const MachineInstr *MI) override
Let the target do anything it needs to do after emitting inlineasm.
X86MachineFunctionInfo - This class is derived from MachineFunction and contains private X86 target-s...
unsigned getSlotSize() const
bool isTargetWindowsMSVC() const
bool useIndirectThunkCalls() const
virtual bool emitFPOPushReg(MCRegister Reg, SMLoc L={})
virtual bool emitFPOEndPrologue(SMLoc L={})
virtual bool emitFPOStackAlign(unsigned Align, SMLoc L={})
virtual bool emitFPOSetFrame(MCRegister Reg, SMLoc L={})
virtual bool emitFPOStackAlloc(unsigned StackAlloc, SMLoc L={})
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
std::string & str()
Returns the string's reference.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ Itanium
Windows x64, Windows Itanium (IA-64).
Definition MCAsmInfo.h:49
bool isKMergeMasked(uint64_t TSFlags)
@ MO_TLSLD
MO_TLSLD - On a symbol operand this indicates that the immediate is the offset of the GOT entry with ...
@ MO_GOTPCREL_NORELAX
MO_GOTPCREL_NORELAX - Same as MO_GOTPCREL except that R_X86_64_GOTPCREL relocations are guaranteed to...
@ MO_GOTOFF
MO_GOTOFF - On a symbol operand this indicates that the immediate is the offset to the location of th...
@ MO_DARWIN_NONLAZY_PIC_BASE
MO_DARWIN_NONLAZY_PIC_BASE - On a symbol operand "FOO", this indicates that the reference is actually...
@ MO_GOT_ABSOLUTE_ADDRESS
MO_GOT_ABSOLUTE_ADDRESS - On a symbol operand, this represents a relocation of: SYMBOL_LABEL + [.
@ MO_COFFSTUB
MO_COFFSTUB - On a symbol operand "FOO", this indicates that the reference is actually to the "....
@ MO_NTPOFF
MO_NTPOFF - On a symbol operand this indicates that the immediate is the negative thread-pointer offs...
@ MO_DARWIN_NONLAZY
MO_DARWIN_NONLAZY - On a symbol operand "FOO", this indicates that the reference is actually to the "...
@ MO_INDNTPOFF
MO_INDNTPOFF - On a symbol operand this indicates that the immediate is the absolute address of the G...
@ MO_GOTNTPOFF
MO_GOTNTPOFF - On a symbol operand this indicates that the immediate is the offset of the GOT entry w...
@ MO_TPOFF
MO_TPOFF - On a symbol operand this indicates that the immediate is the thread-pointer offset for the...
@ MO_TLVP_PIC_BASE
MO_TLVP_PIC_BASE - On a symbol operand this indicates that the immediate is some TLS offset from the ...
@ MO_GOT
MO_GOT - On a symbol operand this indicates that the immediate is the offset to the GOT entry for the...
@ MO_ABS8
MO_ABS8 - On a symbol operand this indicates that the symbol is known to be an absolute symbol in ran...
@ MO_PLT
MO_PLT - On a symbol operand this indicates that the immediate is offset to the PLT entry of symbol n...
@ MO_TLSGD
MO_TLSGD - On a symbol operand this indicates that the immediate is the offset of the GOT entry with ...
@ MO_NO_FLAG
MO_NO_FLAG - No flag for the operand.
@ MO_TLVP
MO_TLVP - On a symbol operand this indicates that the immediate is some TLS offset.
@ MO_DLLIMPORT
MO_DLLIMPORT - On a symbol operand "FOO", this indicates that the reference is actually to the "__imp...
@ MO_GOTTPOFF
MO_GOTTPOFF - On a symbol operand this indicates that the immediate is the offset of the GOT entry wi...
@ MO_SECREL
MO_SECREL - On a symbol operand this indicates that the immediate is the offset from beginning of sec...
@ MO_DTPOFF
MO_DTPOFF - On a symbol operand this indicates that the immediate is the offset of the GOT entry with...
@ MO_PIC_BASE_OFFSET
MO_PIC_BASE_OFFSET - On a symbol operand this indicates that the immediate should get the value of th...
@ MO_TLSLDM
MO_TLSLDM - On a symbol operand this indicates that the immediate is the offset of the GOT entry with...
@ MO_GOTPCREL
MO_GOTPCREL - On a symbol operand this indicates that the immediate is offset to the GOT entry for th...
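To illustrate how these X86II::MO_* target flags are consumed, a minimal sketch (an assumed helper, not this file's lowering logic):
#include "MCTargetDesc/X86BaseInfo.h"   // assumed X86-local include path
#include "llvm/CodeGen/MachineOperand.h"
// Hypothetical query: does this symbol operand go through a GOT entry?
static bool referencesGOTEntry(const llvm::MachineOperand &MO) {
  switch (MO.getTargetFlags()) {
  case llvm::X86II::MO_GOT:
  case llvm::X86II::MO_GOTPCREL:
  case llvm::X86II::MO_GOTPCREL_NORELAX:
    return true;   // the address is loaded from the GOT
  default:
    return false;  // direct reference or some other relocation kind
  }
}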
bool isKMasked(uint64_t TSFlags)
bool isX86_64ExtendedReg(MCRegister Reg)
bool optimizeToFixedRegisterOrShortImmediateForm(MCInst &MI)
@ AddrNumOperands
Definition X86BaseInfo.h:36
bool optimizeMOV(MCInst &MI, bool In64BitMode)
Simplify things like MOV32rm to MOV32o32a.
CondCode GetOppositeBranchCondition(CondCode CC)
GetOppositeBranchCondition - Return the inverse of the specified cond, e.g.
bool optimizeMOVSX(MCInst &MI)
bool optimizeVPCMPWithImmediateOneOrSix(MCInst &MI)
bool optimizeShiftRotateWithImmediateOne(MCInst &MI)
bool optimizeInstFromVEX3ToVEX2(MCInst &MI, const MCInstrDesc &Desc)
uint16_t Specifier
const Constant * getConstantFromPool(const MachineInstr &MI, unsigned OpNo)
Find any constant pool entry associated with a specific instruction operand.
bool optimizeINCDEC(MCInst &MI, bool In64BitMode)
unsigned getVectorRegisterWidth(const MCOperandInfo &Info)
Get the width of the vector register operand.
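A hedged sketch of chaining the X86::optimize* encoding helpers above on a freshly lowered MCInst; the order of attempts is illustrative only, not the backend's exact policy:
#include "MCTargetDesc/X86EncodingOptimization.h" // assumed X86-local include path
#include "llvm/MC/MCInst.h"
// Each helper returns true if it rewrote Inst into a shorter form.
static void shrinkEncoding(llvm::MCInst &Inst, bool In64BitMode) {
  if (llvm::X86::optimizeShiftRotateWithImmediateOne(Inst))
    return;
  if (llvm::X86::optimizeVPCMPWithImmediateOneOrSix(Inst))
    return;
  if (llvm::X86::optimizeMOVSX(Inst))
    return;
  if (llvm::X86::optimizeINCDEC(Inst, In64BitMode))
    return;
  if (llvm::X86::optimizeMOV(Inst, In64BitMode))
    return;
  llvm::X86::optimizeToFixedRegisterOrShortImmediateForm(Inst);
}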
@ S_GOTPCREL_NORELAX
initializer< Ty > init(const Ty &Val)
NodeAddr< CodeNode * > Code
Definition RDFGraph.h:388
This is an optimization pass for GlobalISel generic memory operations.
void DecodeZeroExtendMask(unsigned SrcScalarBits, unsigned DstScalarBits, unsigned NumDstElts, bool IsAnyExtend, SmallVectorImpl< int > &ShuffleMask)
Decode a zero extension instruction as a shuffle mask.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:315
void DecodeVPERMILPMask(unsigned NumElts, unsigned ScalarBits, ArrayRef< uint64_t > RawMask, const APInt &UndefElts, SmallVectorImpl< int > &ShuffleMask)
Decode a VPERMILPD/VPERMILPS variable mask from a raw array of constants.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
MCRegister getX86SubSuperRegister(MCRegister Reg, unsigned Size, bool High=false)
LLVM_ABI bool isCFGuardFunction(const GlobalValue *GV)
Definition CFGuard.cpp:323
@ WinEH
Windows Exception Handling.
Definition CodeGen.h:58
void DecodeVPERMIL2PMask(unsigned NumElts, unsigned ScalarBits, unsigned M2Z, ArrayRef< uint64_t > RawMask, const APInt &UndefElts, SmallVectorImpl< int > &ShuffleMask)
Decode a VPERMIL2PD/VPERMIL2PS variable mask from a raw array of constants.
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
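A minimal sketch of the isa<>/dyn_cast<> idiom documented above, using IR constants as the example hierarchy (an assumed helper, not this file's code):
#include "llvm/IR/Constants.h"
#include "llvm/Support/Casting.h"
// Hypothetical predicate: is C a literal zero (integer or null pointer)?
static bool isZeroConstant(const llvm::Constant *C) {
  if (const auto *CI = llvm::dyn_cast<llvm::ConstantInt>(C))
    return CI->isZero();                          // checked down-cast succeeded
  return llvm::isa<llvm::ConstantPointerNull>(C); // type test without a cast
}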
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition STLExtras.h:322
void DecodeVPPERMMask(ArrayRef< uint64_t > RawMask, const APInt &UndefElts, SmallVectorImpl< int > &ShuffleMask)
Decode a VPPERM mask from a raw array of constants such as from BUILD_VECTOR.
DWARFExpression::Operation Op
std::string toString(const APInt &I, unsigned Radix, bool Signed, bool formatAsCLiteral=false, bool UpperCase=true, bool InsertSeparators=false)
constexpr unsigned BitWidth
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
@ SM_SentinelUndef
@ SM_SentinelZero
void DecodePSHUFBMask(ArrayRef< uint64_t > RawMask, const APInt &UndefElts, SmallVectorImpl< int > &ShuffleMask)
Decode a PSHUFB mask from a raw array of constants such as from BUILD_VECTOR.
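A hedged sketch of decoding a PSHUFB control mask with DecodePSHUFBMask and interpreting the SM_Sentinel* values above; the include path and the all-defined UndefElts mask are assumptions:
#include "MCTargetDesc/X86ShuffleDecode.h" // assumed X86-local include path
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"
static void printPshufbMask(llvm::ArrayRef<uint64_t> RawMask) {
  llvm::APInt UndefElts(RawMask.size(), 0); // assume no undefined lanes
  llvm::SmallVector<int, 16> Mask;
  llvm::DecodePSHUFBMask(RawMask, UndefElts, Mask);
  for (int M : Mask) {
    if (M == llvm::SM_SentinelUndef)
      llvm::errs() << "u,";     // lane value is undefined
    else if (M == llvm::SM_SentinelZero)
      llvm::errs() << "0,";     // lane is forced to zero
    else
      llvm::errs() << M << ","; // lane copies source element M
  }
  llvm::errs() << "\n";
}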
#define N
void changeAndComment(bool b)
NoAutoPaddingScope(MCStreamer &OS)
const bool OldAllowAutoPadding
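Finally, a hedged usage sketch of the NoAutoPaddingScope RAII helper whose members are listed above; the surrounding streamer, instructions, and subtarget info are assumed context:
{
  // Suppress automatic padding between the instructions emitted in this
  // scope; the destructor restores the previous setting.
  NoAutoPaddingScope NoPadScope(*OutStreamer);   // 'OutStreamer' is assumed
  OutStreamer->emitInstruction(FirstInst, STI);  // 'FirstInst'/'STI' are placeholders
  OutStreamer->emitInstruction(SecondInst, STI); // must stay adjacent to FirstInst
} // auto-padding setting restored here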