63 "branch-hint-probability-threshold",
64 cl::desc(
"The probability threshold of enabling branch hint."),
104 if (b ==
OS.getAllowAutoPadding())
106 OS.setAllowAutoPadding(b);
108 OS.emitRawComment(
"autopadding");
110 OS.emitRawComment(
"noautopadding");
118void X86AsmPrinter::StackMapShadowTracker::count(
const MCInst &Inst,
122 SmallString<256>
Code;
125 CurrentShadowSize +=
Code.size();
126 if (CurrentShadowSize >= RequiredShadowSize)
131 void X86AsmPrinter::StackMapShadowTracker::emitShadowPadding(
132     MCStreamer &OutStreamer, const MCSubtargetInfo &STI) {
133   if (InShadow && CurrentShadowSize < RequiredShadowSize) {
136                 &MF->getSubtarget<X86Subtarget>());
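The two fragments above do the stackmap shadow bookkeeping: count() accumulates the encoded size of instructions emitted after a stackmap, and emitShadowPadding() tops the region up with NOPs. A minimal standalone sketch of that bookkeeping, under assumed simplifications and with hypothetical names (not the class above):

// Simplified model (assumption): a shadow of RequiredShadowSize bytes must
// follow a stackmap; instructions emitted afterwards count toward it, and any
// remainder is padded with NOPs at the next flush point.
struct ShadowSketch {
  bool InShadow = false;
  unsigned RequiredShadowSize = 0;
  unsigned CurrentShadowSize = 0;

  void reset(unsigned Required) {      // at a STACKMAP: start a new shadow
    InShadow = Required > 0;
    RequiredShadowSize = Required;
    CurrentShadowSize = 0;
  }
  void count(unsigned EncodedBytes) {  // per instruction emitted afterwards
    if (!InShadow)
      return;
    CurrentShadowSize += EncodedBytes;
    if (CurrentShadowSize >= RequiredShadowSize)
      InShadow = false;                // shadow already covered, stop tracking
  }
  unsigned paddingNeeded() const {     // NOP bytes still owed at a flush point
    return InShadow ? RequiredShadowSize - CurrentShadowSize : 0;
  }
};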
140 void X86AsmPrinter::EmitAndCountInstruction(MCInst &Inst) {
145 X86MCInstLower::X86MCInstLower(const MachineFunction &mf,
156 MCSymbol *X86MCInstLower::GetSymbolFromOperand(const MachineOperand &MO) const {
163 "Isn't a symbol reference");
166   SmallString<128> Name;
179     Suffix = "$non_lazy_ptr";
184     Name += DL.getPrivateGlobalPrefix();
191   } else if (MO.isMBB()) {
206 MachineModuleInfoCOFF &MMICOFF =
209 if (!StubSym.getPointer()) {
219 getMachOMMI().getGVStubEntry(Sym);
233 MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
234                                              MCSymbol *Sym) const {
237   const MCExpr *Expr = nullptr;
320       AsmPrinter.OutStreamer->emitAssignment(Label, Expr);
336 return Subtarget.is64Bit() ? X86::RET64 : X86::RET32;
339 MCOperand X86MCInstLower::LowerMachineOperand(const MachineInstr *MI,
340                                               const MachineOperand &MO) const {
376 Opcode = X86::JMP32r;
379 Opcode = X86::JMP32m;
381 case X86::TAILJMPr64:
382 Opcode = X86::JMP64r;
384 case X86::TAILJMPm64:
385 Opcode = X86::JMP64m;
387 case X86::TAILJMPr64_REX:
388 Opcode = X86::JMP64r_REX;
390 case X86::TAILJMPm64_REX:
391 Opcode = X86::JMP64m_REX;
394 case X86::TAILJMPd64:
397 case X86::TAILJMPd_CC:
398 case X86::TAILJMPd64_CC:
406 void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
409   for (const MachineOperand &MO : MI->operands())
410     if (auto Op = LowerMachineOperand(MI, MO); Op.isValid())
430 "Unexpected # of LEA operands");
432 "LEA has segment specified!");
437 case X86::MULX64Hrm: {
442     case X86::MULX32Hrr: NewOpc = X86::MULX32rr; break;
443     case X86::MULX32Hrm: NewOpc = X86::MULX32rm; break;
444     case X86::MULX64Hrr: NewOpc = X86::MULX64rr; break;
445     case X86::MULX64Hrm: NewOpc = X86::MULX64rm; break;
458 case X86::CALL64pcrel32:
462 case X86::EH_RETURN64: {
467 case X86::CLEANUPRET: {
473 case X86::CATCHRET: {
475     const X86Subtarget &Subtarget = AsmPrinter.getSubtarget();
476 unsigned ReturnReg = In64BitMode ? X86::RAX : X86::EAX;
485 case X86::TAILJMPr64:
486 case X86::TAILJMPr64_REX:
488 case X86::TAILJMPd64:
492 case X86::TAILJMPd_CC:
493 case X86::TAILJMPd64_CC:
498 case X86::TAILJMPm64:
499 case X86::TAILJMPm64_REX:
501 "Unexpected number of operands!");
504 case X86::MASKMOVDQU:
505 case X86::VMASKMOVDQU:
519 const MachineOperand *FlagDef =
520         MI->findRegisterDefOperand(X86::EFLAGS, nullptr);
530 void X86AsmPrinter::LowerTlsAddr(X86MCInstLower &MCInstLowering,
531                                  const MachineInstr &MI) {
532 NoAutoPaddingScope NoPadScope(*OutStreamer);
533 bool Is64Bits = getSubtarget().is64Bit();
534 bool Is64BitsLP64 = getSubtarget().isTarget64BitLP64();
538   switch (MI.getOpcode()) {
539 case X86::TLS_addr32:
540 case X86::TLS_addr64:
541 case X86::TLS_addrX32:
544 case X86::TLS_base_addr32:
547 case X86::TLS_base_addr64:
548 case X86::TLS_base_addrX32:
551 case X86::TLS_desc32:
552 case X86::TLS_desc64:
560       MCInstLowering.GetSymbolFromOperand(MI.getOperand(3)), Specifier, Ctx);
567 bool UseGot = MMI->getModule()->getRtLibUseGOT() &&
574 EmitAndCountInstruction(
575 MCInstBuilder(Is64BitsLP64 ? X86::LEA64r : X86::LEA32r)
576 .addReg(Is64BitsLP64 ? X86::RAX : X86::EAX)
577 .addReg(Is64Bits ? X86::RIP : X86::EBX)
582 EmitAndCountInstruction(
583 MCInstBuilder(Is64Bits ? X86::CALL64m : X86::CALL32m)
584 .addReg(Is64BitsLP64 ? X86::RAX : X86::EAX)
589   } else if (Is64Bits) {
591 if (NeedsPadding && Is64BitsLP64)
592 EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
593 EmitAndCountInstruction(MCInstBuilder(X86::LEA64r)
603 EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
604 EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
605 EmitAndCountInstruction(MCInstBuilder(X86::REX64_PREFIX));
610 EmitAndCountInstruction(MCInstBuilder(X86::CALL64m)
617 EmitAndCountInstruction(
618 MCInstBuilder(X86::CALL64pcrel32)
623 EmitAndCountInstruction(MCInstBuilder(X86::LEA32r)
631 EmitAndCountInstruction(MCInstBuilder(X86::LEA32r)
643 EmitAndCountInstruction(MCInstBuilder(X86::CALL32m)
650 EmitAndCountInstruction(
651 MCInstBuilder(X86::CALLpcrel32)
664 unsigned MaxNopLength = 1;
665 if (Subtarget->is64Bit()) {
668 if (Subtarget->hasFeature(X86::TuningFast7ByteNOP))
670 else if (Subtarget->hasFeature(X86::TuningFast15ByteNOP))
672 else if (Subtarget->hasFeature(X86::TuningFast11ByteNOP))
676   } else if (Subtarget->is32Bit())
680 NumBytes = std::min(NumBytes, MaxNopLength);
683 unsigned Opc, BaseReg, ScaleVal, IndexReg, Displacement, SegmentReg;
684 IndexReg = Displacement = SegmentReg = 0;
742 SegmentReg = X86::CS;
746 unsigned NumPrefixes = std::min(NumBytes - NopSize, 5U);
747 NopSize += NumPrefixes;
748   for (unsigned i = 0; i != NumPrefixes; ++i)
766 .addImm(Displacement)
771   assert(NopSize <= NumBytes && "We overemitted?");
778   unsigned NopsToEmit = NumBytes;
781     NumBytes -= emitNop(OS, NumBytes, Subtarget);
782     assert(NopsToEmit >= NumBytes && "Emitted more than I asked for!");
786 void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI,
787                                     X86MCInstLower &MCIL) {
788   assert(Subtarget->is64Bit() && "Statepoint currently only supports X86-64");
790   NoAutoPaddingScope NoPadScope(*OutStreamer);
792   StatepointOpers SOpers(&MI);
793   if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
797 const MachineOperand &CallTarget = SOpers.getCallTarget();
798 MCOperand CallTargetMCOp;
800     switch (CallTarget.getType()) {
803 CallTargetMCOp = MCIL.LowerSymbolOperand(
804 CallTarget, MCIL.GetSymbolFromOperand(CallTarget));
805 CallOpcode = X86::CALL64pcrel32;
813 CallOpcode = X86::CALL64pcrel32;
825 CallOpcode = X86::CALL64r;
837     maybeEmitNopAfterCallForWindowsEH(&MI);
845   SM.recordStatepoint(*MILabel, MI);
848 void X86AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI,
849                                      X86MCInstLower &MCIL) {
853 NoAutoPaddingScope NoPadScope(*OutStreamer);
860 unsigned OperandsBeginIdx = 4;
867 FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);
870 MI.setOpcode(Opcode);
872 if (DefRegister != X86::NoRegister)
875   for (const MachineOperand &MO :
877     if (auto Op = MCIL.LowerMachineOperand(&FaultingMI, MO); Op.isValid())
884 void X86AsmPrinter::LowerFENTRY_CALL(const MachineInstr &MI,
885                                      X86MCInstLower &MCIL) {
886 bool Is64Bits = Subtarget->is64Bit();
891 EmitAndCountInstruction(
892 MCInstBuilder(Is64Bits ? X86::CALL64pcrel32 : X86::CALLpcrel32)
896 void X86AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
897   assert(std::next(MI.getIterator())->isCall() &&
898          "KCFI_CHECK not followed by a call instruction");
904   const MachineFunction &MF = *MI.getMF();
905   int64_t PrefixNops = 0;
916   const Register AddrReg = MI.getOperand(0).getReg();
917   const uint32_t Type = MI.getOperand(1).getImm();
920 unsigned TempReg = AddrReg == X86::R10 ? X86::R11D : X86::R10D;
921 EmitAndCountInstruction(
922       MCInstBuilder(X86::MOV32ri).addReg(TempReg).addImm(-MaskKCFIType(Type)));
923 EmitAndCountInstruction(MCInstBuilder(X86::ADD32rm)
924 .addReg(X86::NoRegister)
928 .addReg(X86::NoRegister)
929 .addImm(-(PrefixNops + 4))
930 .addReg(X86::NoRegister));
933 EmitAndCountInstruction(
934 MCInstBuilder(X86::JCC_1)
940 EmitAndCountInstruction(MCInstBuilder(X86::TRAP));
941   emitKCFITrapEntry(MF, Trap);
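Described in C terms, the MOV32ri/ADD32rm/JCC_1/TRAP sequence emitted above checks the type hash stored just before the callee's prefix NOPs. The following is a descriptive model under that assumption, not code from the file:

#include <cstdint>
#include <cstring>

// Model (assumption): the expected KCFI type hash sits 4 bytes before the
// callee's prefix NOPs. The emitted ADD32rm adds the negated (masked) expected
// hash to the stored one, so the result is zero exactly when they match, and
// JCC_1 then skips the TRAP.
bool kcfiCheckPasses(const uint8_t *Callee, int64_t PrefixNops,
                     uint32_t ExpectedType) {
  uint32_t Stored;
  std::memcpy(&Stored, Callee - PrefixNops - 4, sizeof(Stored));
  return static_cast<uint32_t>(Stored + (0u - ExpectedType)) == 0;
}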
945 void X86AsmPrinter::LowerASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
947   if (!TM.getTargetTriple().isOSBinFormatELF()) {
952   const auto &Reg = MI.getOperand(0).getReg();
953   ASanAccessInfo AccessInfo(MI.getOperand(1).getImm());
959                                 &ShadowBase, &MappingScale, &OrShadowOffset);
961   StringRef Name = AccessInfo.IsWrite ? "store" : "load";
962   StringRef Op = OrShadowOffset ? "or" : "add";
963   std::string SymName = ("__asan_check_" + Name + "_" + Op + "_" +
964                          Twine(1ULL << AccessInfo.AccessSizeIndex) + "_" +
965                          TM.getMCRegisterInfo()->getName(Reg.asMCReg()))
969 "OrShadowOffset is not supported with optimized callbacks");
971 EmitAndCountInstruction(
972 MCInstBuilder(X86::CALL64pcrel32)
974 OutContext.getOrCreateSymbol(SymName), OutContext)));
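The callback name assembled above is plain string concatenation over the access kind, the shadow-combining operator, the access size, and the register name. A hedged standalone equivalent using std::string instead of LLVM's Twine (hypothetical helper name; the example value is illustrative):

#include <string>

// Sketch: reproduce the "__asan_check_<kind>_<op>_<size>_<reg>" naming used
// above, e.g. a 4-byte store through RDI with an 'add'-style mapping gives
// "__asan_check_store_add_4_RDI" (illustrative example, not from the file).
std::string makeAsanCheckName(bool IsWrite, bool OrShadowOffset,
                              unsigned AccessSizeIndex,
                              const std::string &RegName) {
  return "__asan_check_" + std::string(IsWrite ? "store" : "load") + "_" +
         (OrShadowOffset ? "or" : "add") + "_" +
         std::to_string(1ULL << AccessSizeIndex) + "_" + RegName;
}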
977 void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
978                                       X86MCInstLower &MCIL) {
981   NoAutoPaddingScope NoPadScope(*OutStreamer);
983   auto NextMI = std::find_if(std::next(MI.getIterator()),
984                              MI.getParent()->end().getInstrIterator(),
985                              [](auto &II) { return !II.isMetaInstruction(); });
987   SmallString<256> Code;
988   unsigned MinSize = MI.getOperand(0).getImm();
990   if (NextMI != MI.getParent()->end() && !NextMI->isInlineAsm()) {
995 MCIL.Lower(&*NextMI, MCI);
1001   if (Code.size() < MinSize) {
1002     if (MinSize == 2 && Subtarget->is32Bit() &&
1004         (Subtarget->getCPU().empty() || Subtarget->getCPU() == "pentium3")) {
1010           MCInstBuilder(X86::MOV32rr_REV).addReg(X86::EDI).addReg(X86::EDI),
1013     unsigned NopSize = emitNop(*OutStreamer, MinSize, Subtarget);
1014     assert(NopSize == MinSize && "Could not implement MinSize!");
1022 void X86AsmPrinter::LowerSTACKMAP(const MachineInstr &MI) {
1023   SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
1029   SM.recordStackMap(*MILabel, MI);
1030   unsigned NumShadowBytes = MI.getOperand(1).getImm();
1031 SMShadowTracker.reset(NumShadowBytes);
1036 void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
1037                                     X86MCInstLower &MCIL) {
1038   assert(Subtarget->is64Bit() && "Patchpoint currently only supports X86-64");
1040   SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
1042   NoAutoPaddingScope NoPadScope(*OutStreamer);
1047   SM.recordPatchPoint(*MILabel, MI);
1049   PatchPointOpers opers(&MI);
1050 unsigned ScratchIdx = opers.getNextScratchIdx();
1051 unsigned EncodedBytes = 0;
1052 const MachineOperand &CalleeMO = opers.getCallTarget();
1057 MCOperand CalleeMCOp;
1068 CalleeMCOp = MCIL.LowerSymbolOperand(CalleeMO,
1069 MCIL.GetSymbolFromOperand(CalleeMO));
1075     Register ScratchReg = MI.getOperand(ScratchIdx).getReg();
1081     EmitAndCountInstruction(
1082         MCInstBuilder(X86::MOV64ri).addReg(ScratchReg).addOperand(CalleeMCOp));
1086 "Lowering patchpoint with thunks not yet implemented.");
1087 EmitAndCountInstruction(MCInstBuilder(X86::CALL64r).addReg(ScratchReg));
1091 unsigned NumBytes = opers.getNumPatchBytes();
1092 assert(NumBytes >= EncodedBytes &&
1093 "Patchpoint can't request size less than the length of a call.");
1095 emitX86Nops(*OutStreamer, NumBytes - EncodedBytes, Subtarget);
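The closing emitX86Nops call pads the patchpoint out to its declared size; a small sketch of that arithmetic, with assumed encoding sizes (for example, a 16-byte patchpoint whose MOV64ri + CALL64r lowering used 13 bytes gets 3 NOP bytes):

#include <cassert>

// Sketch of the padding arithmetic above: remaining NOP bytes after the
// encoded call sequence (the 13-byte figure is an assumption for illustration).
unsigned patchpointPadding(unsigned NumPatchBytes, unsigned EncodedBytes) {
  assert(NumPatchBytes >= EncodedBytes &&
         "Patchpoint can't request size less than the length of a call.");
  return NumPatchBytes - EncodedBytes;   // e.g. 16 - 13 = 3
}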
1098 void X86AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
1099                                               X86MCInstLower &MCIL) {
1100   assert(Subtarget->is64Bit() && "XRay custom events only supports X86-64");
1102   NoAutoPaddingScope NoPadScope(*OutStreamer);
1124   auto CurSled = OutContext.createTempSymbol("xray_event_sled_", true);
1125   OutStreamer->AddComment("# XRay Custom Event Log");
1136   const Register DestRegs[] = {X86::RDI, X86::RSI};
1137   bool UsedMask[] = {false, false};
1146   for (unsigned I = 0; I < MI.getNumOperands(); ++I)
1147     if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I));
1149       assert(Op.isReg() && "Only support arguments in registers");
1152       if (SrcRegs[I] != DestRegs[I]) {
1154         EmitAndCountInstruction(
1155             MCInstBuilder(X86::PUSH64r).addReg(DestRegs[I]));
1165   for (unsigned I = 0; I < MI.getNumOperands(); ++I)
1166     if (SrcRegs[I] != DestRegs[I])
1167       EmitAndCountInstruction(
1168           MCInstBuilder(X86::MOV64rr).addReg(DestRegs[I]).addReg(SrcRegs[I]));
1172   auto TSym = OutContext.getOrCreateSymbol("__xray_CustomEvent");
1174   if (isPositionIndependent())
1178   EmitAndCountInstruction(MCInstBuilder(X86::CALL64pcrel32)
1179                               .addOperand(MCIL.LowerSymbolOperand(TOp, TSym)));
1182   for (unsigned I = sizeof UsedMask; I-- > 0;)
1184       EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(DestRegs[I]));
1188   OutStreamer->AddComment("xray custom event end.");
1193   recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 2);
1196 void X86AsmPrinter::LowerPATCHABLE_TYPED_EVENT_CALL(const MachineInstr &MI,
1197                                                     X86MCInstLower &MCIL) {
1198   assert(Subtarget->is64Bit() && "XRay typed events only supports X86-64");
1200   NoAutoPaddingScope NoPadScope(*OutStreamer);
1222   auto CurSled = OutContext.createTempSymbol("xray_typed_event_sled_", true);
1223   OutStreamer->AddComment("# XRay Typed Event Log");
1235   const Register DestRegs[] = {X86::RDI, X86::RSI, X86::RDX};
1236   bool UsedMask[] = {false, false, false};
1245   for (unsigned I = 0; I < MI.getNumOperands(); ++I)
1246     if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I));
1249       assert(Op.isReg() && "Only supports arguments in registers");
1252       if (SrcRegs[I] != DestRegs[I]) {
1254         EmitAndCountInstruction(
1255             MCInstBuilder(X86::PUSH64r).addReg(DestRegs[I]));
1270   for (unsigned I = 0; I < MI.getNumOperands(); ++I)
1272       EmitAndCountInstruction(
1273           MCInstBuilder(X86::MOV64rr).addReg(DestRegs[I]).addReg(SrcRegs[I]));
1277   auto TSym = OutContext.getOrCreateSymbol("__xray_TypedEvent");
1279   if (isPositionIndependent())
1283   EmitAndCountInstruction(MCInstBuilder(X86::CALL64pcrel32)
1284                               .addOperand(MCIL.LowerSymbolOperand(TOp, TSym)));
1287   for (unsigned I = sizeof UsedMask; I-- > 0;)
1289       EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(DestRegs[I]));
1293   OutStreamer->AddComment("xray typed event end.");
1296   recordSled(CurSled, MI, SledKind::TYPED_EVENT, 2);
1299 void X86AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI,
1300                                                   X86MCInstLower &MCIL) {
1302   NoAutoPaddingScope NoPadScope(*OutStreamer);
1305   if (F.hasFnAttribute("patchable-function-entry")) {
1307     if (F.getFnAttribute("patchable-function-entry")
1309             .getAsInteger(10, Num))
1327   auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
1336   recordSled(CurSled, MI, SledKind::FUNCTION_ENTER, 2);
1339 void X86AsmPrinter::LowerPATCHABLE_RET(const MachineInstr &MI,
1340                                        X86MCInstLower &MCIL) {
1341   NoAutoPaddingScope NoPadScope(*OutStreamer);
1357   auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
1360   unsigned OpCode = MI.getOperand(0).getImm();
1364     if (auto Op = MCIL.LowerMachineOperand(&MI, MO); Op.isValid())
1368   recordSled(CurSled, MI, SledKind::FUNCTION_EXIT, 2);
1371 void X86AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI,
1372                                              X86MCInstLower &MCIL) {
1377   bool IsConditional = TC.getOpcode() == X86::JCC_1;
1379 if (IsConditional) {
1390 FallthroughLabel = OutContext.createTempSymbol();
1393 MCInstBuilder(X86::JCC_1)
1402 NoAutoPaddingScope NoPadScope(*OutStreamer);
1410   auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
1413 auto Target = OutContext.createTempSymbol();
1421   recordSled(CurSled, MI, SledKind::TAIL_CALL, 2);
1426   for (auto &MO : TCOperands)
1427     if (auto Op = MCIL.LowerMachineOperand(&MI, MO); Op.isValid())
1432     OutStreamer->emitLabel(FallthroughLabel);
1448 unsigned SrcOpIdx) {
1458     CS << " {%" << Mask << "}";
1469 if (Src1Name == Src2Name)
1470     for (int i = 0, e = ShuffleMask.size(); i != e; ++i)
1471 if (ShuffleMask[i] >= e)
1472 ShuffleMask[i] -= e;
1474   for (int i = 0, e = ShuffleMask.size(); i != e; ++i) {
1484 bool isSrc1 = ShuffleMask[i] < (int)e;
1485     CS << (isSrc1 ? Src1Name : Src2Name) << '[';
1487     bool IsFirst = true;
1489            (ShuffleMask[i] < (int)e) == isSrc1) {
1497 CS << ShuffleMask[i] % (int)e;
1507 std::string Comment;
1527                           bool PrintZero = false) {
1536       CS << (PrintZero ? 0ULL : Val.getRawData()[i]);
1543                           bool PrintZero = false) {
1559     for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
1568     unsigned EltBits = VTy->getScalarSizeInBits();
1569     unsigned E = std::min(BitWidth / EltBits, VTy->getNumElements());
1571     for (unsigned I = 0; I != E; ++I) {
1582 Type *EltTy = CDS->getElementType();
1586     unsigned E = std::min(BitWidth / EltBits, (unsigned)CDS->getNumElements());
1588     for (unsigned I = 0; I != E; ++I) {
1602 unsigned EltBits = CV->getType()->getScalarSizeInBits();
1603     unsigned E = std::min(BitWidth / EltBits, CV->getNumOperands());
1605     for (unsigned I = 0; I != E; ++I) {
1619                                int SclWidth, int VecWidth,
1620                                const char *ShuffleComment) {
1623 std::string Comment;
1631   for (int I = 1, E = VecWidth / SclWidth; I < E; ++I) {
1641 CS << ShuffleComment;
1649 std::string Comment;
1653   for (int l = 0; l != Repeats; ++l) {
1664                         int SrcEltBits, int DstEltBits, bool IsSext) {
1667   if (C && C->getType()->getScalarSizeInBits() == unsigned(SrcEltBits)) {
1669 int NumElts = CDS->getNumElements();
1670 std::string Comment;
1674       for (int i = 0; i != NumElts; ++i) {
1677 if (CDS->getElementType()->isIntegerTy()) {
1678 APInt Elt = CDS->getElementAsAPInt(i);
1679           Elt = IsSext ? Elt.sext(DstEltBits) : Elt.zext(DstEltBits);
1693                             int SrcEltBits, int DstEltBits) {
1697                             int SrcEltBits, int DstEltBits) {
1698   if (printExtend(MI, OutStreamer, SrcEltBits, DstEltBits, false))
1702 std::string Comment;
1709 assert((Width % DstEltBits) == 0 && (DstEltBits % SrcEltBits) == 0 &&
1710 "Illegal extension ratio");
1717 void X86AsmPrinter::EmitSEHInstruction(const MachineInstr *MI) {
1718   assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
1719 assert((getSubtarget().isOSWindows() || getSubtarget().isUEFI()) &&
1720 "SEH_ instruction Windows and UEFI only");
1724 X86TargetStreamer *XTS =
1726     switch (MI->getOpcode()) {
1727 case X86::SEH_PushReg:
1730 case X86::SEH_StackAlloc:
1733 case X86::SEH_StackAlign:
1736 case X86::SEH_SetFrame:
1737       assert(MI->getOperand(1).getImm() == 0 &&
1738              ".cv_fpo_setframe takes no offset");
1741 case X86::SEH_EndPrologue:
1744 case X86::SEH_SaveReg:
1745 case X86::SEH_SaveXMM:
1746 case X86::SEH_PushFrame:
1756   switch (MI->getOpcode()) {
1757 case X86::SEH_PushReg:
1761 case X86::SEH_SaveReg:
1763 MI->getOperand(1).getImm());
1766 case X86::SEH_SaveXMM:
1768 MI->getOperand(1).getImm());
1771 case X86::SEH_StackAlloc:
1775 case X86::SEH_SetFrame:
1777 MI->getOperand(1).getImm());
1780 case X86::SEH_PushFrame:
1784 case X86::SEH_EndPrologue:
1788 case X86::SEH_BeginEpilogue:
1792 case X86::SEH_EndEpilogue:
1796 case X86::SEH_UnwindV2Start:
1800 case X86::SEH_UnwindVersion:
1811   switch (MI->getOpcode()) {
1816 case X86::VPSHUFBrm:
1817 case X86::VPSHUFBYrm:
1818 case X86::VPSHUFBZ128rm:
1819 case X86::VPSHUFBZ128rmk:
1820 case X86::VPSHUFBZ128rmkz:
1821 case X86::VPSHUFBZ256rm:
1822 case X86::VPSHUFBZ256rmk:
1823 case X86::VPSHUFBZ256rmkz:
1824 case X86::VPSHUFBZrm:
1825 case X86::VPSHUFBZrmk:
1826 case X86::VPSHUFBZrmkz: {
1838 case X86::VPERMILPSrm:
1839 case X86::VPERMILPSYrm:
1840 case X86::VPERMILPSZ128rm:
1841 case X86::VPERMILPSZ128rmk:
1842 case X86::VPERMILPSZ128rmkz:
1843 case X86::VPERMILPSZ256rm:
1844 case X86::VPERMILPSZ256rmk:
1845 case X86::VPERMILPSZ256rmkz:
1846 case X86::VPERMILPSZrm:
1847 case X86::VPERMILPSZrmk:
1848 case X86::VPERMILPSZrmkz: {
1859 case X86::VPERMILPDrm:
1860 case X86::VPERMILPDYrm:
1861 case X86::VPERMILPDZ128rm:
1862 case X86::VPERMILPDZ128rmk:
1863 case X86::VPERMILPDZ128rmkz:
1864 case X86::VPERMILPDZ256rm:
1865 case X86::VPERMILPDZ256rmk:
1866 case X86::VPERMILPDZ256rmkz:
1867 case X86::VPERMILPDZrm:
1868 case X86::VPERMILPDZrmk:
1869 case X86::VPERMILPDZrmkz: {
1881 case X86::VPERMIL2PDrm:
1882 case X86::VPERMIL2PSrm:
1883 case X86::VPERMIL2PDYrm:
1884 case X86::VPERMIL2PSYrm: {
1886 "Unexpected number of operands!");
1889     if (!CtrlOp.isImm())
1893     switch (MI->getOpcode()) {
1895     case X86::VPERMIL2PSrm: case X86::VPERMIL2PSYrm: ElSize = 32; break;
1896     case X86::VPERMIL2PDrm: case X86::VPERMIL2PDYrm: ElSize = 64; break;
1909 case X86::VPPERMrrm: {
1920 case X86::MMX_MOVQ64rm: {
1922 std::string Comment;
1927       CS << "0x" << toString(CF->getValueAPF().bitcastToAPInt(), 16, false);
1934 #define INSTR_CASE(Prefix, Instr, Suffix, Postfix) \
1935   case X86::Prefix##Instr##Suffix##rm##Postfix:
1937 #define CASE_AVX512_ARITH_RM(Instr) \
1938 INSTR_CASE(V, Instr, Z128, ) \
1939 INSTR_CASE(V, Instr, Z128, k) \
1940 INSTR_CASE(V, Instr, Z128, kz) \
1941 INSTR_CASE(V, Instr, Z256, ) \
1942 INSTR_CASE(V, Instr, Z256, k) \
1943 INSTR_CASE(V, Instr, Z256, kz) \
1944 INSTR_CASE(V, Instr, Z, ) \
1945 INSTR_CASE(V, Instr, Z, k) \
1946 INSTR_CASE(V, Instr, Z, kz)
1948 #define CASE_ARITH_RM(Instr) \
1949 INSTR_CASE(, Instr, , ) \
1950 INSTR_CASE(V, Instr, , ) \
1951 INSTR_CASE(V, Instr, Y, ) \
1952 INSTR_CASE(V, Instr, Z128, ) \
1953 INSTR_CASE(V, Instr, Z128, k) \
1954 INSTR_CASE(V, Instr, Z128, kz) \
1955 INSTR_CASE(V, Instr, Z256, ) \
1956 INSTR_CASE(V, Instr, Z256, k) \
1957 INSTR_CASE(V, Instr, Z256, kz) \
1958 INSTR_CASE(V, Instr, Z, ) \
1959 INSTR_CASE(V, Instr, Z, k) \
1960 INSTR_CASE(V, Instr, Z, kz)
1975 std::string Comment;
1977 unsigned VectorWidth =
1987 #define MASK_AVX512_CASE(Instr)
1995 case X86::MOVSDrm_alt:
1996 case X86::VMOVSDrm_alt:
1997 case X86::VMOVSDZrm_alt:
1998 case X86::MOVQI2PQIrm:
1999 case X86::VMOVQI2PQIrm:
2000 case X86::VMOVQI2PQIZrm:
2005 case X86::VMOVSHZrm_alt:
2007 "mem[0],zero,zero,zero,zero,zero,zero,zero");
2013 case X86::MOVSSrm_alt:
2014 case X86::VMOVSSrm_alt:
2015 case X86::VMOVSSZrm_alt:
2016 case X86::MOVDI2PDIrm:
2017 case X86::VMOVDI2PDIrm:
2018 case X86::VMOVDI2PDIZrm:
2022 #define MOV_CASE(Prefix, Suffix) \
2023 case X86::Prefix##MOVAPD##Suffix##rm: \
2024 case X86::Prefix##MOVAPS##Suffix##rm: \
2025 case X86::Prefix##MOVUPD##Suffix##rm: \
2026 case X86::Prefix##MOVUPS##Suffix##rm: \
2027 case X86::Prefix##MOVDQA##Suffix##rm: \
2028 case X86::Prefix##MOVDQU##Suffix##rm:
2030 #define MOV_AVX512_CASE(Suffix, Postfix) \
2031 case X86::VMOVDQA64##Suffix##rm##Postfix: \
2032 case X86::VMOVDQA32##Suffix##rm##Postfix: \
2033 case X86::VMOVDQU64##Suffix##rm##Postfix: \
2034 case X86::VMOVDQU32##Suffix##rm##Postfix: \
2035 case X86::VMOVDQU16##Suffix##rm##Postfix: \
2036 case X86::VMOVDQU8##Suffix##rm##Postfix: \
2037 case X86::VMOVAPS##Suffix##rm##Postfix: \
2038 case X86::VMOVAPD##Suffix##rm##Postfix: \
2039 case X86::VMOVUPS##Suffix##rm##Postfix: \
2040 case X86::VMOVUPD##Suffix##rm##Postfix:
2042 #define CASE_128_MOV_RM() \
2045 MOV_AVX512_CASE(Z128, ) \
2046 MOV_AVX512_CASE(Z128, k) \
2047 MOV_AVX512_CASE(Z128, kz)
2049 #define CASE_256_MOV_RM() \
2051 MOV_AVX512_CASE(Z256, ) \
2052 MOV_AVX512_CASE(Z256, k) \
2053 MOV_AVX512_CASE(Z256, kz) \
2055 #define CASE_512_MOV_RM() \
2056 MOV_AVX512_CASE(Z, ) \
2057 MOV_AVX512_CASE(Z, k) \
2058 MOV_AVX512_CASE(Z, kz) \
2071 case X86::VBROADCASTF128rm:
2072 case X86::VBROADCASTI128rm:
2094 case X86::MOVDDUPrm:
2095 case X86::VMOVDDUPrm:
2097 case X86::VPBROADCASTQrm:
2101 case X86::VBROADCASTSDYrm:
2103 case X86::VPBROADCASTQYrm:
2111 case X86::VBROADCASTSSrm:
2113 case X86::VPBROADCASTDrm:
2117 case X86::VBROADCASTSSYrm:
2119 case X86::VPBROADCASTDYrm:
2127 case X86::VPBROADCASTWrm:
2131 case X86::VPBROADCASTWYrm:
2138 case X86::VPBROADCASTBrm:
2142 case X86::VPBROADCASTBYrm:
2150 #define MOVX_CASE(Prefix, Ext, Type, Suffix, Postfix) \
2151 case X86::Prefix##PMOV##Ext##Type##Suffix##rm##Postfix:
2153 #define CASE_MOVX_RM(Ext, Type) \
2154 MOVX_CASE(, Ext, Type, , ) \
2155 MOVX_CASE(V, Ext, Type, , ) \
2156 MOVX_CASE(V, Ext, Type, Y, ) \
2157 MOVX_CASE(V, Ext, Type, Z128, ) \
2158 MOVX_CASE(V, Ext, Type, Z128, k ) \
2159 MOVX_CASE(V, Ext, Type, Z128, kz ) \
2160 MOVX_CASE(V, Ext, Type, Z256, ) \
2161 MOVX_CASE(V, Ext, Type, Z256, k ) \
2162 MOVX_CASE(V, Ext, Type, Z256, kz ) \
2163 MOVX_CASE(V, Ext, Type, Z, ) \
2164 MOVX_CASE(V, Ext, Type, Z, k ) \
2165 MOVX_CASE(V, Ext, Type, Z, kz )
2214   assert(MI->getOpcode() == X86::TAILJMPm64_REX ||
2215          MI->getOpcode() == X86::CALL64m);
2225   for (auto I = MBB.instr_rbegin(), E = MBB.instr_rend(); I != E; ++I)
2226     if (I->isJumpTableDebugInfo())
2237   X86MCInstLower MCInstLowering(*MF, *this);
2241   if (MI->getOpcode() == X86::OR64rm) {
2242     for (auto &Opd : MI->operands()) {
2243       if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
2244                                 "swift_async_extendedFramePointerFlags") {
2245         ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
2255   if (TM.Options.MCOptions.ShowMCEncoding) {
2257       OutStreamer->AddComment("EVEX TO LEGACY Compression ", false);
2259       OutStreamer->AddComment("EVEX TO VEX Compression ", false);
2261       OutStreamer->AddComment("EVEX TO EVEX Compression ", false);
2265   bool IsTailJump = false;
2267   switch (MI->getOpcode()) {
2268 case TargetOpcode::DBG_VALUE:
2271 case X86::EH_RETURN:
2272 case X86::EH_RETURN64: {
2279 case X86::CLEANUPRET: {
2285 case X86::CATCHRET: {
2292 case X86::ENDBR64: {
2299         MI == &MF->front().front()) {
2301       MCInstLowering.Lower(MI, Inst);
2302 EmitAndCountInstruction(Inst);
2310 case X86::TAILJMPd64:
2311     if (IndCSPrefix && MI->hasRegisterImplicitUseOperand(X86::R11))
2315 emitLabelAndRecordForImportCallOptimization(
2316 IMAGE_RETPOLINE_AMD64_IMPORT_BR);
2327 case X86::TAILJMPd_CC:
2328 case X86::TAILJMPr64:
2329 case X86::TAILJMPm64:
2330 case X86::TAILJMPd64_CC:
2331 if (EnableImportCallOptimization)
2333 "import call optimization was enabled");
2340 case X86::TAILJMPm64_REX:
2342 emitLabelAndRecordForImportCallOptimization(
2343 IMAGE_RETPOLINE_AMD64_CFG_BR_REX);
2350 case X86::TAILJMPr64_REX: {
2351 if (EnableImportCallOptimization) {
2352       assert(MI->getOperand(0).getReg() == X86::RAX &&
2353              "Indirect tail calls with impcall enabled must go through RAX (as "
2354              "enforced by TCRETURNImpCallri64)");
2355 emitLabelAndRecordForImportCallOptimization(
2356 IMAGE_RETPOLINE_AMD64_INDIR_BR);
2367           this->getSubtarget().getRegisterInfo()->getEncodingValue(
2368               MI->getOperand(0).getReg().asMCReg());
2369 emitLabelAndRecordForImportCallOptimization(
2370 (ImportCallKind)(IMAGE_RETPOLINE_AMD64_SWITCHTABLE_FIRST +
2382 "Unexpected JMP instruction was emitted for a jump-table when import "
2383 "call optimization was enabled");
2386 case X86::TLS_addr32:
2387 case X86::TLS_addr64:
2388 case X86::TLS_addrX32:
2389 case X86::TLS_base_addr32:
2390 case X86::TLS_base_addr64:
2391 case X86::TLS_base_addrX32:
2392 case X86::TLS_desc32:
2393 case X86::TLS_desc64:
2394     return LowerTlsAddr(MCInstLowering, *MI);
2396 case X86::MOVPC32r: {
2407 EmitAndCountInstruction(
2413     bool hasFP = FrameLowering->hasFP(*MF);
2416     bool HasActiveDwarfFrame = OutStreamer->getNumFrameInfos() &&
2421 if (HasActiveDwarfFrame && !hasFP) {
2422 OutStreamer->emitCFIAdjustCfaOffset(-stackGrowth);
2430 EmitAndCountInstruction(
2433 if (HasActiveDwarfFrame && !hasFP) {
2439 case X86::ADD32ri: {
2455     MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));
2466                                 .addReg(MI->getOperand(0).getReg())
2467                                 .addReg(MI->getOperand(1).getReg())
2471 case TargetOpcode::STATEPOINT:
2472     return LowerSTATEPOINT(*MI, MCInstLowering);
2474   case TargetOpcode::FAULTING_OP:
2475     return LowerFAULTING_OP(*MI, MCInstLowering);
2477   case TargetOpcode::FENTRY_CALL:
2478     return LowerFENTRY_CALL(*MI, MCInstLowering);
2480   case TargetOpcode::PATCHABLE_OP:
2481     return LowerPATCHABLE_OP(*MI, MCInstLowering);
2483   case TargetOpcode::STACKMAP:
2484     return LowerSTACKMAP(*MI);
2486   case TargetOpcode::PATCHPOINT:
2487     return LowerPATCHPOINT(*MI, MCInstLowering);
2489   case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
2490     return LowerPATCHABLE_FUNCTION_ENTER(*MI, MCInstLowering);
2492   case TargetOpcode::PATCHABLE_RET:
2493     return LowerPATCHABLE_RET(*MI, MCInstLowering);
2495   case TargetOpcode::PATCHABLE_TAIL_CALL:
2496     return LowerPATCHABLE_TAIL_CALL(*MI, MCInstLowering);
2498   case TargetOpcode::PATCHABLE_EVENT_CALL:
2499     return LowerPATCHABLE_EVENT_CALL(*MI, MCInstLowering);
2501   case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
2502     return LowerPATCHABLE_TYPED_EVENT_CALL(*MI, MCInstLowering);
2504   case X86::MORESTACK_RET:
2508   case X86::KCFI_CHECK:
2509     return LowerKCFI_CHECK(*MI);
2511   case X86::ASAN_CHECK_MEMACCESS:
2512     return LowerASAN_CHECK_MEMACCESS(*MI);
2514 case X86::MORESTACK_RET_RESTORE_R10:
2517 EmitAndCountInstruction(
2518 MCInstBuilder(X86::MOV64rr).addReg(X86::R10).addReg(X86::RAX));
2521 case X86::SEH_PushReg:
2522 case X86::SEH_SaveReg:
2523 case X86::SEH_SaveXMM:
2524 case X86::SEH_StackAlloc:
2525 case X86::SEH_StackAlign:
2526 case X86::SEH_SetFrame:
2527 case X86::SEH_PushFrame:
2528 case X86::SEH_EndPrologue:
2529 case X86::SEH_EndEpilogue:
2530 case X86::SEH_UnwindV2Start:
2531 case X86::SEH_UnwindVersion:
2532     EmitSEHInstruction(MI);
2535   case X86::SEH_BeginEpilogue: {
2536     assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
2537     EmitSEHInstruction(MI);
2540 case X86::UBSAN_UD1:
2545 .addReg(X86::NoRegister)
2546                                 .addImm(MI->getOperand(0).getImm())
2547                                 .addReg(X86::NoRegister));
2549 case X86::CALL64pcrel32:
2550     if (IndCSPrefix && MI->hasRegisterImplicitUseOperand(X86::R11))
2554 emitLabelAndRecordForImportCallOptimization(
2555 IMAGE_RETPOLINE_AMD64_IMPORT_CALL);
2558     MCInstLowering.Lower(MI, TmpInst);
2563     emitCallInstruction(TmpInst);
2565     maybeEmitNopAfterCallForWindowsEH(MI);
2572 if (EnableImportCallOptimization) {
2573       assert(MI->getOperand(0).getReg() == X86::RAX &&
2574              "Indirect calls with impcall enabled must go through RAX (as "
2575              "enforced by CALL64r_ImpCall)");
2577       emitLabelAndRecordForImportCallOptimization(
2578           IMAGE_RETPOLINE_AMD64_INDIR_CALL);
2580     MCInstLowering.Lower(MI, TmpInst);
2581     emitCallInstruction(TmpInst);
2586     maybeEmitNopAfterCallForWindowsEH(MI);
2593 emitLabelAndRecordForImportCallOptimization(
2594 IMAGE_RETPOLINE_AMD64_CFG_CALL);
2609 if (EdgeProb > Threshold)
2616   MCInstLowering.Lower(MI, TmpInst);
2619     emitCallInstruction(TmpInst);
2623     maybeEmitNopAfterCallForWindowsEH(MI);
2627 EmitAndCountInstruction(TmpInst);
2630 void X86AsmPrinter::emitCallInstruction(const llvm::MCInst &MCI) {
2637 SMShadowTracker.count(MCI, getSubtargetInfo(), CodeEmitter.get());
2640 SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
2704 void X86AsmPrinter::maybeEmitNopAfterCallForWindowsEH(const MachineInstr *MI) {
2729       const MachineInstr &NextMI = *MBBI;
2735 if (HasEHPersonality) {
2736 EmitAndCountInstruction(MCInstBuilder(X86::NOOP));
2755       if (NextMI.getOpcode() == X86::SEH_BeginEpilogue) {
2756 EmitAndCountInstruction(MCInstBuilder(X86::NOOP));
2782 if (HasEHPersonality) {
2788     if (MI->getParent()->succ_empty())
2789 EmitAndCountInstruction(MCInstBuilder(X86::INT3));
2791 EmitAndCountInstruction(MCInstBuilder(X86::NOOP));
2797 const MachineBasicBlock *NextMBB = &*MFI;
2803 void X86AsmPrinter::emitLabelAndRecordForImportCallOptimization(
2804     ImportCallKind Kind) {
2805 assert(EnableImportCallOptimization);
2807   MCSymbol *CallSiteSymbol = MMI->getContext().createNamedTempSymbol("impcall");
2811       .push_back({CallSiteSymbol, Kind});