LLVM 20.0.0git
X86AsmBackend.cpp
Go to the documentation of this file.
1//===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86EncodingOptimization.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
35
36using namespace llvm;
37
38namespace {
39/// A wrapper for holding a mask of the values from X86::AlignBranchBoundaryKind
40class X86AlignBranchKind {
41private:
42 uint8_t AlignBranchKind = 0;
43
44public:
45 void operator=(const std::string &Val) {
46 if (Val.empty())
47 return;
48 SmallVector<StringRef, 6> BranchTypes;
49 StringRef(Val).split(BranchTypes, '+', -1, false);
50 for (auto BranchType : BranchTypes) {
51 if (BranchType == "fused")
52 addKind(X86::AlignBranchFused);
53 else if (BranchType == "jcc")
54 addKind(X86::AlignBranchJcc);
55 else if (BranchType == "jmp")
56 addKind(X86::AlignBranchJmp);
57 else if (BranchType == "call")
58 addKind(X86::AlignBranchCall);
59 else if (BranchType == "ret")
60 addKind(X86::AlignBranchRet);
61 else if (BranchType == "indirect")
63 else {
64 errs() << "invalid argument " << BranchType.str()
65 << " to -x86-align-branch=; each element must be one of: fused, "
66 "jcc, jmp, call, ret, indirect.(plus separated)\n";
67 }
68 }
69 }
70
71 operator uint8_t() const { return AlignBranchKind; }
72 void addKind(X86::AlignBranchBoundaryKind Value) { AlignBranchKind |= Value; }
73};
74
// Storage backing the -x86-align-branch option (bound via cl::location below).
X86AlignBranchKind X86AlignBranchKindLoc;
76
77cl::opt<unsigned> X86AlignBranchBoundary(
78 "x86-align-branch-boundary", cl::init(0),
80 "Control how the assembler should align branches with NOP. If the "
81 "boundary's size is not 0, it should be a power of 2 and no less "
82 "than 32. Branches will be aligned to prevent from being across or "
83 "against the boundary of specified size. The default value 0 does not "
84 "align branches."));
85
87 "x86-align-branch",
89 "Specify types of branches to align (plus separated list of types):"
90 "\njcc indicates conditional jumps"
91 "\nfused indicates fused conditional jumps"
92 "\njmp indicates direct unconditional jumps"
93 "\ncall indicates direct and indirect calls"
94 "\nret indicates rets"
95 "\nindirect indicates indirect unconditional jumps"),
96 cl::location(X86AlignBranchKindLoc));
97
// Master switch for the skx102 erratum mitigation: when set, the backend
// constructor selects a 32-byte boundary and fused/jcc/jmp alignment (the
// individual -x86-align-branch* flags below can still override it).
cl::opt<bool> X86AlignBranchWithin32BBoundaries(
    "x86-branches-within-32B-boundaries", cl::init(false),
    cl::desc(
        "Align selected instructions to mitigate negative performance impact "
        "of Intel's micro code update for errata skx102. May break "
        "assumptions about labels corresponding to particular instructions, "
        "and should be used with caution."));

// Upper bound on segment-override prefixes added for padding; 0 disables
// prefix padding entirely (see padInstructionViaPrefix).
cl::opt<unsigned> X86PadMaxPrefixSize(
    "x86-pad-max-prefix-size", cl::init(0),
    cl::desc("Maximum number of prefixes to use for padding"));

// Grow earlier instructions instead of emitting NOPs for .align directives
// (consumed by finishLayout; off by default — see comment there about -g).
cl::opt<bool> X86PadForAlign(
    "x86-pad-for-align", cl::init(false), cl::Hidden,
    cl::desc("Pad previous instructions to implement align directives"));

// Same, but for the branch-boundary alignment fragments; on by default.
cl::opt<bool> X86PadForBranchAlign(
    "x86-pad-for-branch-align", cl::init(true), cl::Hidden,
    cl::desc("Pad previous instructions to implement branch alignment"));
117
118class X86AsmBackend : public MCAsmBackend {
119 const MCSubtargetInfo &STI;
120 std::unique_ptr<const MCInstrInfo> MCII;
121 X86AlignBranchKind AlignBranchType;
122 Align AlignBoundary;
123 unsigned TargetPrefixMax = 0;
124
125 MCInst PrevInst;
126 unsigned PrevInstOpcode = 0;
127 MCBoundaryAlignFragment *PendingBA = nullptr;
128 std::pair<MCFragment *, size_t> PrevInstPosition;
129 bool IsRightAfterData = false;
130
131 uint8_t determinePaddingPrefix(const MCInst &Inst) const;
132 bool isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const;
133 bool needAlign(const MCInst &Inst) const;
134 bool canPadBranches(MCObjectStreamer &OS) const;
135 bool canPadInst(const MCInst &Inst, MCObjectStreamer &OS) const;
136
137public:
138 X86AsmBackend(const Target &T, const MCSubtargetInfo &STI)
139 : MCAsmBackend(llvm::endianness::little), STI(STI),
140 MCII(T.createMCInstrInfo()) {
141 if (X86AlignBranchWithin32BBoundaries) {
142 // At the moment, this defaults to aligning fused branches, unconditional
143 // jumps, and (unfused) conditional jumps with nops. Both the
144 // instructions aligned and the alignment method (nop vs prefix) may
145 // change in the future.
146 AlignBoundary = assumeAligned(32);
147 AlignBranchType.addKind(X86::AlignBranchFused);
148 AlignBranchType.addKind(X86::AlignBranchJcc);
149 AlignBranchType.addKind(X86::AlignBranchJmp);
150 }
151 // Allow overriding defaults set by main flag
152 if (X86AlignBranchBoundary.getNumOccurrences())
153 AlignBoundary = assumeAligned(X86AlignBranchBoundary);
154 if (X86AlignBranch.getNumOccurrences())
155 AlignBranchType = X86AlignBranchKindLoc;
156 if (X86PadMaxPrefixSize.getNumOccurrences())
157 TargetPrefixMax = X86PadMaxPrefixSize;
158 }
159
160 bool allowAutoPadding() const override;
161 bool allowEnhancedRelaxation() const override;
162 void emitInstructionBegin(MCObjectStreamer &OS, const MCInst &Inst,
163 const MCSubtargetInfo &STI);
164 void emitInstructionEnd(MCObjectStreamer &OS, const MCInst &Inst);
165
166 unsigned getNumFixupKinds() const override {
168 }
169
170 std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;
171
172 const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
173
174 bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
175 const MCValue &Target,
176 const MCSubtargetInfo *STI) override;
177
178 void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
180 uint64_t Value, bool IsResolved,
181 const MCSubtargetInfo *STI) const override;
182
183 bool mayNeedRelaxation(const MCInst &Inst,
184 const MCSubtargetInfo &STI) const override;
185
187 uint64_t Value) const override;
188
189 void relaxInstruction(MCInst &Inst,
190 const MCSubtargetInfo &STI) const override;
191
192 bool padInstructionViaRelaxation(MCRelaxableFragment &RF,
194 unsigned &RemainingSize) const;
195
196 bool padInstructionViaPrefix(MCRelaxableFragment &RF, MCCodeEmitter &Emitter,
197 unsigned &RemainingSize) const;
198
199 bool padInstructionEncoding(MCRelaxableFragment &RF, MCCodeEmitter &Emitter,
200 unsigned &RemainingSize) const;
201
202 void finishLayout(const MCAssembler &Asm) const override;
203
204 unsigned getMaximumNopSize(const MCSubtargetInfo &STI) const override;
205
206 bool writeNopData(raw_ostream &OS, uint64_t Count,
207 const MCSubtargetInfo *STI) const override;
208};
209} // end anonymous namespace
210
211static bool isRelaxableBranch(unsigned Opcode) {
212 return Opcode == X86::JCC_1 || Opcode == X86::JMP_1;
213}
214
215static unsigned getRelaxedOpcodeBranch(unsigned Opcode,
216 bool Is16BitMode = false) {
217 switch (Opcode) {
218 default:
219 llvm_unreachable("invalid opcode for branch");
220 case X86::JCC_1:
221 return (Is16BitMode) ? X86::JCC_2 : X86::JCC_4;
222 case X86::JMP_1:
223 return (Is16BitMode) ? X86::JMP_2 : X86::JMP_4;
224 }
225}
226
227static unsigned getRelaxedOpcode(const MCInst &MI, bool Is16BitMode) {
228 unsigned Opcode = MI.getOpcode();
229 return isRelaxableBranch(Opcode) ? getRelaxedOpcodeBranch(Opcode, Is16BitMode)
231}
232
234 const MCInstrInfo &MCII) {
235 unsigned Opcode = MI.getOpcode();
236 switch (Opcode) {
237 default:
238 return X86::COND_INVALID;
239 case X86::JCC_1: {
240 const MCInstrDesc &Desc = MCII.get(Opcode);
241 return static_cast<X86::CondCode>(
242 MI.getOperand(Desc.getNumOperands() - 1).getImm());
243 }
244 }
245}
246
250 return classifySecondCondCodeInMacroFusion(CC);
251}
252
253/// Check if the instruction uses RIP relative addressing.
254static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII) {
255 unsigned Opcode = MI.getOpcode();
256 const MCInstrDesc &Desc = MCII.get(Opcode);
257 uint64_t TSFlags = Desc.TSFlags;
258 unsigned CurOp = X86II::getOperandBias(Desc);
259 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
260 if (MemoryOperand < 0)
261 return false;
262 unsigned BaseRegNum = MemoryOperand + CurOp + X86::AddrBaseReg;
263 MCRegister BaseReg = MI.getOperand(BaseRegNum).getReg();
264 return (BaseReg == X86::RIP);
265}
266
/// Check if the instruction is a prefix.
static bool isPrefix(unsigned Opcode, const MCInstrInfo &MCII) {
  // Classification is entirely driven by the opcode's TSFlags.
  return X86II::isPrefix(MCII.get(Opcode).TSFlags);
}
271
272/// Check if the instruction is valid as the first instruction in macro fusion.
273static bool isFirstMacroFusibleInst(const MCInst &Inst,
274 const MCInstrInfo &MCII) {
275 // An Intel instruction with RIP relative addressing is not macro fusible.
276 if (isRIPRelative(Inst, MCII))
277 return false;
280 return FIK != X86::FirstMacroFusionInstKind::Invalid;
281}
282
283/// X86 can reduce the bytes of NOP by padding instructions with prefixes to
284/// get a better peformance in some cases. Here, we determine which prefix is
285/// the most suitable.
286///
287/// If the instruction has a segment override prefix, use the existing one.
288/// If the target is 64-bit, use the CS.
289/// If the target is 32-bit,
290/// - If the instruction has a ESP/EBP base register, use SS.
291/// - Otherwise use DS.
292uint8_t X86AsmBackend::determinePaddingPrefix(const MCInst &Inst) const {
293 assert((STI.hasFeature(X86::Is32Bit) || STI.hasFeature(X86::Is64Bit)) &&
294 "Prefixes can be added only in 32-bit or 64-bit mode.");
295 const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
296 uint64_t TSFlags = Desc.TSFlags;
297
298 // Determine where the memory operand starts, if present.
299 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
300 if (MemoryOperand != -1)
301 MemoryOperand += X86II::getOperandBias(Desc);
302
303 MCRegister SegmentReg;
304 if (MemoryOperand >= 0) {
305 // Check for explicit segment override on memory operand.
306 SegmentReg = Inst.getOperand(MemoryOperand + X86::AddrSegmentReg).getReg();
307 }
308
309 switch (TSFlags & X86II::FormMask) {
310 default:
311 break;
312 case X86II::RawFrmDstSrc: {
313 // Check segment override opcode prefix as needed (not for %ds).
314 if (Inst.getOperand(2).getReg() != X86::DS)
315 SegmentReg = Inst.getOperand(2).getReg();
316 break;
317 }
318 case X86II::RawFrmSrc: {
319 // Check segment override opcode prefix as needed (not for %ds).
320 if (Inst.getOperand(1).getReg() != X86::DS)
321 SegmentReg = Inst.getOperand(1).getReg();
322 break;
323 }
325 // Check segment override opcode prefix as needed.
326 SegmentReg = Inst.getOperand(1).getReg();
327 break;
328 }
329 }
330
331 if (SegmentReg)
332 return X86::getSegmentOverridePrefixForReg(SegmentReg);
333
334 if (STI.hasFeature(X86::Is64Bit))
335 return X86::CS_Encoding;
336
337 if (MemoryOperand >= 0) {
338 unsigned BaseRegNum = MemoryOperand + X86::AddrBaseReg;
339 MCRegister BaseReg = Inst.getOperand(BaseRegNum).getReg();
340 if (BaseReg == X86::ESP || BaseReg == X86::EBP)
341 return X86::SS_Encoding;
342 }
343 return X86::DS_Encoding;
344}
345
346/// Check if the two instructions will be macro-fused on the target cpu.
347bool X86AsmBackend::isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const {
348 const MCInstrDesc &InstDesc = MCII->get(Jcc.getOpcode());
349 if (!InstDesc.isConditionalBranch())
350 return false;
351 if (!isFirstMacroFusibleInst(Cmp, *MCII))
352 return false;
353 const X86::FirstMacroFusionInstKind CmpKind =
355 const X86::SecondMacroFusionInstKind BranchKind =
357 return X86::isMacroFused(CmpKind, BranchKind);
358}
359
360/// Check if the instruction has a variant symbol operand.
361static bool hasVariantSymbol(const MCInst &MI) {
362 for (auto &Operand : MI) {
363 if (!Operand.isExpr())
364 continue;
365 const MCExpr &Expr = *Operand.getExpr();
366 if (Expr.getKind() == MCExpr::SymbolRef &&
367 cast<MCSymbolRefExpr>(Expr).getKind() != MCSymbolRefExpr::VK_None)
368 return true;
369 }
370 return false;
371}
372
373bool X86AsmBackend::allowAutoPadding() const {
374 return (AlignBoundary != Align(1) && AlignBranchType != X86::AlignBranchNone);
375}
376
377bool X86AsmBackend::allowEnhancedRelaxation() const {
378 return allowAutoPadding() && TargetPrefixMax != 0 && X86PadForBranchAlign;
379}
380
381/// X86 has certain instructions which enable interrupts exactly one
382/// instruction *after* the instruction which stores to SS. Return true if the
383/// given instruction may have such an interrupt delay slot.
384static bool mayHaveInterruptDelaySlot(unsigned InstOpcode) {
385 switch (InstOpcode) {
386 case X86::POPSS16:
387 case X86::POPSS32:
388 case X86::STI:
389 return true;
390
391 case X86::MOV16sr:
392 case X86::MOV32sr:
393 case X86::MOV64sr:
394 case X86::MOV16sm:
395 // In fact, this is only the case if the first operand is SS. However, as
396 // segment moves occur extremely rarely, this is just a minor pessimization.
397 return true;
398 }
399 return false;
400}
401
402/// Check if the instruction to be emitted is right after any data.
403static bool
405 const std::pair<MCFragment *, size_t> &PrevInstPosition) {
406 MCFragment *F = CurrentFragment;
407 // Since data is always emitted into a DataFragment, our check strategy is
408 // simple here.
409 // - If the fragment is a DataFragment
410 // - If it's empty (section start or data after align), return false.
411 // - If it's not the fragment where the previous instruction is,
412 // returns true.
413 // - If it's the fragment holding the previous instruction but its
414 // size changed since the previous instruction was emitted into
415 // it, returns true.
416 // - Otherwise returns false.
417 // - If the fragment is not a DataFragment, returns false.
418 if (auto *DF = dyn_cast_or_null<MCDataFragment>(F))
419 return DF->getContents().size() &&
420 (DF != PrevInstPosition.first ||
421 DF->getContents().size() != PrevInstPosition.second);
422
423 return false;
424}
425
426/// \returns the fragment size if it has instructions, otherwise returns 0.
427static size_t getSizeForInstFragment(const MCFragment *F) {
428 if (!F || !F->hasInstructions())
429 return 0;
430 // MCEncodedFragmentWithContents being templated makes this tricky.
431 switch (F->getKind()) {
432 default:
433 llvm_unreachable("Unknown fragment with instructions!");
435 return cast<MCDataFragment>(*F).getContents().size();
437 return cast<MCRelaxableFragment>(*F).getContents().size();
438 }
439}
440
441/// Return true if we can insert NOP or prefixes automatically before the
442/// the instruction to be emitted.
443bool X86AsmBackend::canPadInst(const MCInst &Inst, MCObjectStreamer &OS) const {
444 if (hasVariantSymbol(Inst))
445 // Linker may rewrite the instruction with variant symbol operand(e.g.
446 // TLSCALL).
447 return false;
448
449 if (mayHaveInterruptDelaySlot(PrevInstOpcode))
450 // If this instruction follows an interrupt enabling instruction with a one
451 // instruction delay, inserting a nop would change behavior.
452 return false;
453
454 if (isPrefix(PrevInstOpcode, *MCII))
455 // If this instruction follows a prefix, inserting a nop/prefix would change
456 // semantic.
457 return false;
458
459 if (isPrefix(Inst.getOpcode(), *MCII))
460 // If this instruction is a prefix, inserting a prefix would change
461 // semantic.
462 return false;
463
464 if (IsRightAfterData)
465 // If this instruction follows any data, there is no clear
466 // instruction boundary, inserting a nop/prefix would change semantic.
467 return false;
468
469 return true;
470}
471
472bool X86AsmBackend::canPadBranches(MCObjectStreamer &OS) const {
473 if (!OS.getAllowAutoPadding())
474 return false;
475 assert(allowAutoPadding() && "incorrect initialization!");
476
477 // We only pad in text section.
478 if (!OS.getCurrentSectionOnly()->isText())
479 return false;
480
481 // To be Done: Currently don't deal with Bundle cases.
482 if (OS.getAssembler().isBundlingEnabled())
483 return false;
484
485 // Branches only need to be aligned in 32-bit or 64-bit mode.
486 if (!(STI.hasFeature(X86::Is64Bit) || STI.hasFeature(X86::Is32Bit)))
487 return false;
488
489 return true;
490}
491
492/// Check if the instruction operand needs to be aligned.
493bool X86AsmBackend::needAlign(const MCInst &Inst) const {
494 const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
495 return (Desc.isConditionalBranch() &&
496 (AlignBranchType & X86::AlignBranchJcc)) ||
497 (Desc.isUnconditionalBranch() &&
498 (AlignBranchType & X86::AlignBranchJmp)) ||
499 (Desc.isCall() && (AlignBranchType & X86::AlignBranchCall)) ||
500 (Desc.isReturn() && (AlignBranchType & X86::AlignBranchRet)) ||
501 (Desc.isIndirectBranch() &&
502 (AlignBranchType & X86::AlignBranchIndirect));
503}
504
/// Insert BoundaryAlignFragment before instructions to align branches.
///
/// Called by the streamer before each instruction is encoded. Records
/// whether the instruction starts right after raw data (consumed later by
/// canPadInst) and, when branch padding is active, inserts a pending
/// MCBoundaryAlignFragment ahead of any branch — or the first half of a
/// fusible pair — selected by needAlign(); emitInstructionEnd() later ties
/// that fragment to its last instruction.
void X86AsmBackend::emitInstructionBegin(MCObjectStreamer &OS,
                                         const MCInst &Inst,
                                         const MCSubtargetInfo &STI) {
  // Used by canPadInst. Done here, because in emitInstructionEnd, the current
  // fragment will have changed.
  IsRightAfterData =
      isRightAfterData(OS.getCurrentFragment(), PrevInstPosition);

  if (!canPadBranches(OS))
    return;

  // NB: PrevInst only valid if canPadBranches is true.
  if (!isMacroFused(PrevInst, Inst))
    // Macro fusion doesn't happen indeed, clear the pending.
    PendingBA = nullptr;

  // When branch padding is enabled (basically the skx102 erratum => unlikely),
  // we call canPadInst (not cheap) twice. However, in the common case, we can
  // avoid unnecessary calls to that, as this is otherwise only used for
  // relaxable fragments.
  if (!canPadInst(Inst, OS))
    return;

  if (PendingBA && PendingBA->getNext() == OS.getCurrentFragment()) {
    // Macro fusion actually happens and there is no other fragment inserted
    // after the previous instruction.
    //
    // Do nothing here since we already inserted a BoudaryAlign fragment when
    // we met the first instruction in the fused pair and we'll tie them
    // together in emitInstructionEnd.
    //
    // Note: When there is at least one fragment, such as MCAlignFragment,
    // inserted after the previous instruction, e.g.
    //
    // \code
    //   cmp %rax %rcx
    //   .align 16
    //   je .Label0
    // \ endcode
    //
    // We will treat the JCC as a unfused branch although it may be fused
    // with the CMP.
    return;
  }

  if (needAlign(Inst) || ((AlignBranchType & X86::AlignBranchFused) &&
                          isFirstMacroFusibleInst(Inst, *MCII))) {
    // If we meet a unfused branch or the first instuction in a fusiable pair,
    // insert a BoundaryAlign fragment.
    PendingBA = OS.getContext().allocFragment<MCBoundaryAlignFragment>(
        AlignBoundary, STI);
    OS.insert(PendingBA);
  }
}
559
/// Set the last fragment to be aligned for the BoundaryAlignFragment.
///
/// Runs after each instruction is emitted: refreshes the per-instruction
/// padding state (PrevInstOpcode / PrevInstPosition, and PrevInst when branch
/// padding applies), and if this instruction needs alignment, closes the
/// pending BoundaryAlign opened by emitInstructionBegin.
void X86AsmBackend::emitInstructionEnd(MCObjectStreamer &OS,
                                       const MCInst &Inst) {
  MCFragment *CF = OS.getCurrentFragment();
  if (auto *F = dyn_cast_or_null<MCRelaxableFragment>(CF))
    F->setAllowAutoPadding(canPadInst(Inst, OS));

  // Update PrevInstOpcode here, canPadInst() reads that.
  PrevInstOpcode = Inst.getOpcode();
  PrevInstPosition = std::make_pair(CF, getSizeForInstFragment(CF));

  if (!canPadBranches(OS))
    return;

  // PrevInst is only needed if canPadBranches. Copying an MCInst isn't cheap.
  PrevInst = Inst;

  if (!needAlign(Inst) || !PendingBA)
    return;

  // Tie the aligned instructions into a pending BoundaryAlign.
  PendingBA->setLastFragment(CF);
  PendingBA = nullptr;

  // We need to ensure that further data isn't added to the current
  // DataFragment, so that we can get the size of instructions later in
  // MCAssembler::relaxBoundaryAlign. The easiest way is to insert a new empty
  // DataFragment.
  if (isa_and_nonnull<MCDataFragment>(CF))
    OS.insert(OS.getContext().allocFragment<MCDataFragment>());

  // Update the maximum alignment on the current section if necessary.
  MCSection *Sec = OS.getCurrentSectionOnly();
  Sec->ensureMinAlignment(AlignBoundary);
}
595
596std::optional<MCFixupKind> X86AsmBackend::getFixupKind(StringRef Name) const {
597 if (STI.getTargetTriple().isOSBinFormatELF()) {
598 unsigned Type;
599 if (STI.getTargetTriple().getArch() == Triple::x86_64) {
601#define ELF_RELOC(X, Y) .Case(#X, Y)
602#include "llvm/BinaryFormat/ELFRelocs/x86_64.def"
603#undef ELF_RELOC
604 .Case("BFD_RELOC_NONE", ELF::R_X86_64_NONE)
605 .Case("BFD_RELOC_8", ELF::R_X86_64_8)
606 .Case("BFD_RELOC_16", ELF::R_X86_64_16)
607 .Case("BFD_RELOC_32", ELF::R_X86_64_32)
608 .Case("BFD_RELOC_64", ELF::R_X86_64_64)
609 .Default(-1u);
610 } else {
612#define ELF_RELOC(X, Y) .Case(#X, Y)
613#include "llvm/BinaryFormat/ELFRelocs/i386.def"
614#undef ELF_RELOC
615 .Case("BFD_RELOC_NONE", ELF::R_386_NONE)
616 .Case("BFD_RELOC_8", ELF::R_386_8)
617 .Case("BFD_RELOC_16", ELF::R_386_16)
618 .Case("BFD_RELOC_32", ELF::R_386_32)
619 .Default(-1u);
620 }
621 if (Type == -1u)
622 return std::nullopt;
623 return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
624 }
626}
627
628const MCFixupKindInfo &X86AsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
629 const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
630 // clang-format off
631 {"reloc_riprel_4byte", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
632 {"reloc_riprel_4byte_movq_load", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
633 {"reloc_riprel_4byte_movq_load_rex2", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
634 {"reloc_riprel_4byte_relax", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
635 {"reloc_riprel_4byte_relax_rex", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
636 {"reloc_riprel_4byte_relax_rex2", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
637 {"reloc_riprel_4byte_relax_evex", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
638 {"reloc_signed_4byte", 0, 32, 0},
639 {"reloc_signed_4byte_relax", 0, 32, 0},
640 {"reloc_global_offset_table", 0, 32, 0},
641 {"reloc_global_offset_table8", 0, 64, 0},
642 {"reloc_branch_4byte_pcrel", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
643 // clang-format on
644 };
645
646 // Fixup kinds from .reloc directive are like R_386_NONE/R_X86_64_NONE. They
647 // do not require any extra processing.
648 if (Kind >= FirstLiteralRelocationKind)
650
651 if (Kind < FirstTargetFixupKind)
653
654 assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
655 "Invalid kind!");
656 assert(Infos[Kind - FirstTargetFixupKind].Name && "Empty fixup name!");
657 return Infos[Kind - FirstTargetFixupKind];
658}
659
660bool X86AsmBackend::shouldForceRelocation(const MCAssembler &,
661 const MCFixup &Fixup, const MCValue &,
662 const MCSubtargetInfo *STI) {
663 return Fixup.getKind() >= FirstLiteralRelocationKind;
664}
665
666static unsigned getFixupKindSize(unsigned Kind) {
667 switch (Kind) {
668 default:
669 llvm_unreachable("invalid fixup kind!");
670 case FK_NONE:
671 return 0;
672 case FK_PCRel_1:
673 case FK_SecRel_1:
674 case FK_Data_1:
675 return 1;
676 case FK_PCRel_2:
677 case FK_SecRel_2:
678 case FK_Data_2:
679 return 2;
680 case FK_PCRel_4:
692 case FK_SecRel_4:
693 case FK_Data_4:
694 return 4;
695 case FK_PCRel_8:
696 case FK_SecRel_8:
697 case FK_Data_8:
699 return 8;
700 }
701}
702
703void X86AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
704 const MCValue &Target,
706 uint64_t Value, bool IsResolved,
707 const MCSubtargetInfo *STI) const {
708 unsigned Kind = Fixup.getKind();
709 if (Kind >= FirstLiteralRelocationKind)
710 return;
711 unsigned Size = getFixupKindSize(Kind);
712
713 assert(Fixup.getOffset() + Size <= Data.size() && "Invalid fixup offset!");
714
715 int64_t SignedValue = static_cast<int64_t>(Value);
716 if ((Target.isAbsolute() || IsResolved) &&
717 getFixupKindInfo(Fixup.getKind()).Flags &
719 // check that PC relative fixup fits into the fixup size.
720 if (Size > 0 && !isIntN(Size * 8, SignedValue))
721 Asm.getContext().reportError(
722 Fixup.getLoc(), "value of " + Twine(SignedValue) +
723 " is too large for field of " + Twine(Size) +
724 ((Size == 1) ? " byte." : " bytes."));
725 } else {
726 // Check that uppper bits are either all zeros or all ones.
727 // Specifically ignore overflow/underflow as long as the leakage is
728 // limited to the lower bits. This is to remain compatible with
729 // other assemblers.
730 assert((Size == 0 || isIntN(Size * 8 + 1, SignedValue)) &&
731 "Value does not fit in the Fixup field");
732 }
733
734 for (unsigned i = 0; i != Size; ++i)
735 Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
736}
737
738bool X86AsmBackend::mayNeedRelaxation(const MCInst &MI,
739 const MCSubtargetInfo &STI) const {
740 unsigned Opcode = MI.getOpcode();
741 unsigned SkipOperands = X86::isCCMPCC(Opcode) ? 2 : 0;
742 return isRelaxableBranch(Opcode) ||
743 (X86::getOpcodeForLongImmediateForm(Opcode) != Opcode &&
744 MI.getOperand(MI.getNumOperands() - 1 - SkipOperands).isExpr());
745}
746
747bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
748 uint64_t Value) const {
749 // Relax if the value is too big for a (signed) i8.
750 return !isInt<8>(Value);
751}
752
753// FIXME: Can tblgen help at all here to verify there aren't other instructions
754// we can relax?
755void X86AsmBackend::relaxInstruction(MCInst &Inst,
756 const MCSubtargetInfo &STI) const {
757 // The only relaxations X86 does is from a 1byte pcrel to a 4byte pcrel.
758 bool Is16BitMode = STI.hasFeature(X86::Is16Bit);
759 unsigned RelaxedOp = getRelaxedOpcode(Inst, Is16BitMode);
760
761 if (RelaxedOp == Inst.getOpcode()) {
764 Inst.dump_pretty(OS);
765 OS << "\n";
766 report_fatal_error("unexpected instruction to relax: " + OS.str());
767 }
768
769 Inst.setOpcode(RelaxedOp);
770}
771
772bool X86AsmBackend::padInstructionViaPrefix(MCRelaxableFragment &RF,
774 unsigned &RemainingSize) const {
775 if (!RF.getAllowAutoPadding())
776 return false;
777 // If the instruction isn't fully relaxed, shifting it around might require a
778 // larger value for one of the fixups then can be encoded. The outer loop
779 // will also catch this before moving to the next instruction, but we need to
780 // prevent padding this single instruction as well.
781 if (mayNeedRelaxation(RF.getInst(), *RF.getSubtargetInfo()))
782 return false;
783
784 const unsigned OldSize = RF.getContents().size();
785 if (OldSize == 15)
786 return false;
787
788 const unsigned MaxPossiblePad = std::min(15 - OldSize, RemainingSize);
789 const unsigned RemainingPrefixSize = [&]() -> unsigned {
791 X86_MC::emitPrefix(Emitter, RF.getInst(), Code, STI);
792 assert(Code.size() < 15 && "The number of prefixes must be less than 15.");
793
794 // TODO: It turns out we need a decent amount of plumbing for the target
795 // specific bits to determine number of prefixes its safe to add. Various
796 // targets (older chips mostly, but also Atom family) encounter decoder
797 // stalls with too many prefixes. For testing purposes, we set the value
798 // externally for the moment.
799 unsigned ExistingPrefixSize = Code.size();
800 if (TargetPrefixMax <= ExistingPrefixSize)
801 return 0;
802 return TargetPrefixMax - ExistingPrefixSize;
803 }();
804 const unsigned PrefixBytesToAdd =
805 std::min(MaxPossiblePad, RemainingPrefixSize);
806 if (PrefixBytesToAdd == 0)
807 return false;
808
809 const uint8_t Prefix = determinePaddingPrefix(RF.getInst());
810
812 Code.append(PrefixBytesToAdd, Prefix);
813 Code.append(RF.getContents().begin(), RF.getContents().end());
814 RF.getContents() = Code;
815
816 // Adjust the fixups for the change in offsets
817 for (auto &F : RF.getFixups()) {
818 F.setOffset(F.getOffset() + PrefixBytesToAdd);
819 }
820
821 RemainingSize -= PrefixBytesToAdd;
822 return true;
823}
824
825bool X86AsmBackend::padInstructionViaRelaxation(MCRelaxableFragment &RF,
827 unsigned &RemainingSize) const {
828 if (!mayNeedRelaxation(RF.getInst(), *RF.getSubtargetInfo()))
829 // TODO: There are lots of other tricks we could apply for increasing
830 // encoding size without impacting performance.
831 return false;
832
833 MCInst Relaxed = RF.getInst();
834 relaxInstruction(Relaxed, *RF.getSubtargetInfo());
835
838 Emitter.encodeInstruction(Relaxed, Code, Fixups, *RF.getSubtargetInfo());
839 const unsigned OldSize = RF.getContents().size();
840 const unsigned NewSize = Code.size();
841 assert(NewSize >= OldSize && "size decrease during relaxation?");
842 unsigned Delta = NewSize - OldSize;
843 if (Delta > RemainingSize)
844 return false;
845 RF.setInst(Relaxed);
846 RF.getContents() = Code;
847 RF.getFixups() = Fixups;
848 RemainingSize -= Delta;
849 return true;
850}
851
852bool X86AsmBackend::padInstructionEncoding(MCRelaxableFragment &RF,
854 unsigned &RemainingSize) const {
855 bool Changed = false;
856 if (RemainingSize != 0)
857 Changed |= padInstructionViaRelaxation(RF, Emitter, RemainingSize);
858 if (RemainingSize != 0)
859 Changed |= padInstructionViaPrefix(RF, Emitter, RemainingSize);
860 return Changed;
861}
862
863void X86AsmBackend::finishLayout(MCAssembler const &Asm) const {
864 // See if we can further relax some instructions to cut down on the number of
865 // nop bytes required for code alignment. The actual win is in reducing
866 // instruction count, not number of bytes. Modern X86-64 can easily end up
867 // decode limited. It is often better to reduce the number of instructions
868 // (i.e. eliminate nops) even at the cost of increasing the size and
869 // complexity of others.
870 if (!X86PadForAlign && !X86PadForBranchAlign)
871 return;
872
873 // The processed regions are delimitered by LabeledFragments. -g may have more
874 // MCSymbols and therefore different relaxation results. X86PadForAlign is
875 // disabled by default to eliminate the -g vs non -g difference.
876 DenseSet<MCFragment *> LabeledFragments;
877 for (const MCSymbol &S : Asm.symbols())
878 LabeledFragments.insert(S.getFragment(false));
879
880 for (MCSection &Sec : Asm) {
881 if (!Sec.isText())
882 continue;
883
885 for (MCSection::iterator I = Sec.begin(), IE = Sec.end(); I != IE; ++I) {
886 MCFragment &F = *I;
887
888 if (LabeledFragments.count(&F))
889 Relaxable.clear();
890
891 if (F.getKind() == MCFragment::FT_Data) // Skip and ignore
892 continue;
893
894 if (F.getKind() == MCFragment::FT_Relaxable) {
895 auto &RF = cast<MCRelaxableFragment>(*I);
896 Relaxable.push_back(&RF);
897 continue;
898 }
899
900 auto canHandle = [](MCFragment &F) -> bool {
901 switch (F.getKind()) {
902 default:
903 return false;
905 return X86PadForAlign;
907 return X86PadForBranchAlign;
908 }
909 };
910 // For any unhandled kind, assume we can't change layout.
911 if (!canHandle(F)) {
912 Relaxable.clear();
913 continue;
914 }
915
916#ifndef NDEBUG
917 const uint64_t OrigOffset = Asm.getFragmentOffset(F);
918#endif
919 const uint64_t OrigSize = Asm.computeFragmentSize(F);
920
921 // To keep the effects local, prefer to relax instructions closest to
922 // the align directive. This is purely about human understandability
923 // of the resulting code. If we later find a reason to expand
924 // particular instructions over others, we can adjust.
925 unsigned RemainingSize = OrigSize;
926 while (!Relaxable.empty() && RemainingSize != 0) {
927 auto &RF = *Relaxable.pop_back_val();
928 // Give the backend a chance to play any tricks it wishes to increase
929 // the encoding size of the given instruction. Target independent code
930 // will try further relaxation, but target's may play further tricks.
931 if (padInstructionEncoding(RF, Asm.getEmitter(), RemainingSize))
932 Sec.setHasLayout(false);
933
934 // If we have an instruction which hasn't been fully relaxed, we can't
935 // skip past it and insert bytes before it. Changing its starting
936 // offset might require a larger negative offset than it can encode.
937 // We don't need to worry about larger positive offsets as none of the
938 // possible offsets between this and our align are visible, and the
939 // ones afterwards aren't changing.
940 if (mayNeedRelaxation(RF.getInst(), *RF.getSubtargetInfo()))
941 break;
942 }
943 Relaxable.clear();
944
945 // BoundaryAlign explicitly tracks it's size (unlike align)
946 if (F.getKind() == MCFragment::FT_BoundaryAlign)
947 cast<MCBoundaryAlignFragment>(F).setSize(RemainingSize);
948
949#ifndef NDEBUG
950 const uint64_t FinalOffset = Asm.getFragmentOffset(F);
951 const uint64_t FinalSize = Asm.computeFragmentSize(F);
952 assert(OrigOffset + OrigSize == FinalOffset + FinalSize &&
953 "can't move start of next fragment!");
954 assert(FinalSize == RemainingSize && "inconsistent size computation?");
955#endif
956
957 // If we're looking at a boundary align, make sure we don't try to pad
958 // its target instructions for some following directive. Doing so would
959 // break the alignment of the current boundary align.
960 if (auto *BF = dyn_cast<MCBoundaryAlignFragment>(&F)) {
961 const MCFragment *LastFragment = BF->getLastFragment();
962 if (!LastFragment)
963 continue;
964 while (&*I != LastFragment)
965 ++I;
966 }
967 }
968 }
969
970 // The layout is done. Mark every fragment as valid.
971 for (MCSection &Section : Asm) {
972 Asm.getFragmentOffset(*Section.curFragList()->Tail);
973 Asm.computeFragmentSize(*Section.curFragList()->Tail);
974 }
975}
976
977unsigned X86AsmBackend::getMaximumNopSize(const MCSubtargetInfo &STI) const {
978 if (STI.hasFeature(X86::Is16Bit))
979 return 4;
980 if (!STI.hasFeature(X86::FeatureNOPL) && !STI.hasFeature(X86::Is64Bit))
981 return 1;
982 if (STI.hasFeature(X86::TuningFast7ByteNOP))
983 return 7;
984 if (STI.hasFeature(X86::TuningFast15ByteNOP))
985 return 15;
986 if (STI.hasFeature(X86::TuningFast11ByteNOP))
987 return 11;
988 // FIXME: handle 32-bit mode
989 // 15-bytes is the longest single NOP instruction, but 10-bytes is
990 // commonly the longest that can be efficiently decoded.
991 return 10;
992}
993
994/// Write a sequence of optimal nops to the output, covering \p Count
995/// bytes.
996/// \return - true on success, false on failure
997bool X86AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
998 const MCSubtargetInfo *STI) const {
999 static const char Nops32Bit[10][11] = {
1000 // nop
1001 "\x90",
1002 // xchg %ax,%ax
1003 "\x66\x90",
1004 // nopl (%[re]ax)
1005 "\x0f\x1f\x00",
1006 // nopl 0(%[re]ax)
1007 "\x0f\x1f\x40\x00",
1008 // nopl 0(%[re]ax,%[re]ax,1)
1009 "\x0f\x1f\x44\x00\x00",
1010 // nopw 0(%[re]ax,%[re]ax,1)
1011 "\x66\x0f\x1f\x44\x00\x00",
1012 // nopl 0L(%[re]ax)
1013 "\x0f\x1f\x80\x00\x00\x00\x00",
1014 // nopl 0L(%[re]ax,%[re]ax,1)
1015 "\x0f\x1f\x84\x00\x00\x00\x00\x00",
1016 // nopw 0L(%[re]ax,%[re]ax,1)
1017 "\x66\x0f\x1f\x84\x00\x00\x00\x00\x00",
1018 // nopw %cs:0L(%[re]ax,%[re]ax,1)
1019 "\x66\x2e\x0f\x1f\x84\x00\x00\x00\x00\x00",
1020 };
1021
1022 // 16-bit mode uses different nop patterns than 32-bit.
1023 static const char Nops16Bit[4][11] = {
1024 // nop
1025 "\x90",
1026 // xchg %eax,%eax
1027 "\x66\x90",
1028 // lea 0(%si),%si
1029 "\x8d\x74\x00",
1030 // lea 0w(%si),%si
1031 "\x8d\xb4\x00\x00",
1032 };
1033
1034 const char(*Nops)[11] =
1035 STI->hasFeature(X86::Is16Bit) ? Nops16Bit : Nops32Bit;
1036
1037 uint64_t MaxNopLength = (uint64_t)getMaximumNopSize(*STI);
1038
1039 // Emit as many MaxNopLength NOPs as needed, then emit a NOP of the remaining
1040 // length.
1041 do {
1042 const uint8_t ThisNopLength = (uint8_t) std::min(Count, MaxNopLength);
1043 const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
1044 for (uint8_t i = 0; i < Prefixes; i++)
1045 OS << '\x66';
1046 const uint8_t Rest = ThisNopLength - Prefixes;
1047 if (Rest != 0)
1048 OS.write(Nops[Rest - 1], Rest);
1049 Count -= ThisNopLength;
1050 } while (Count != 0);
1051
1052 return true;
1053}
1054
1055/* *** */
1056
1057namespace {
1058
1059class ELFX86AsmBackend : public X86AsmBackend {
1060public:
1061 uint8_t OSABI;
1062 ELFX86AsmBackend(const Target &T, uint8_t OSABI, const MCSubtargetInfo &STI)
1063 : X86AsmBackend(T, STI), OSABI(OSABI) {}
1064};
1065
1066class ELFX86_32AsmBackend : public ELFX86AsmBackend {
1067public:
1068 ELFX86_32AsmBackend(const Target &T, uint8_t OSABI,
1069 const MCSubtargetInfo &STI)
1070 : ELFX86AsmBackend(T, OSABI, STI) {}
1071
1072 std::unique_ptr<MCObjectTargetWriter>
1073 createObjectTargetWriter() const override {
1074 return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI, ELF::EM_386);
1075 }
1076};
1077
1078class ELFX86_X32AsmBackend : public ELFX86AsmBackend {
1079public:
1080 ELFX86_X32AsmBackend(const Target &T, uint8_t OSABI,
1081 const MCSubtargetInfo &STI)
1082 : ELFX86AsmBackend(T, OSABI, STI) {}
1083
1084 std::unique_ptr<MCObjectTargetWriter>
1085 createObjectTargetWriter() const override {
1086 return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
1088 }
1089};
1090
1091class ELFX86_IAMCUAsmBackend : public ELFX86AsmBackend {
1092public:
1093 ELFX86_IAMCUAsmBackend(const Target &T, uint8_t OSABI,
1094 const MCSubtargetInfo &STI)
1095 : ELFX86AsmBackend(T, OSABI, STI) {}
1096
1097 std::unique_ptr<MCObjectTargetWriter>
1098 createObjectTargetWriter() const override {
1099 return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
1101 }
1102};
1103
1104class ELFX86_64AsmBackend : public ELFX86AsmBackend {
1105public:
1106 ELFX86_64AsmBackend(const Target &T, uint8_t OSABI,
1107 const MCSubtargetInfo &STI)
1108 : ELFX86AsmBackend(T, OSABI, STI) {}
1109
1110 std::unique_ptr<MCObjectTargetWriter>
1111 createObjectTargetWriter() const override {
1112 return createX86ELFObjectWriter(/*IsELF64*/ true, OSABI, ELF::EM_X86_64);
1113 }
1114};
1115
1116class WindowsX86AsmBackend : public X86AsmBackend {
1117 bool Is64Bit;
1118
1119public:
1120 WindowsX86AsmBackend(const Target &T, bool is64Bit,
1121 const MCSubtargetInfo &STI)
1122 : X86AsmBackend(T, STI)
1123 , Is64Bit(is64Bit) {
1124 }
1125
1126 std::optional<MCFixupKind> getFixupKind(StringRef Name) const override {
1128 .Case("dir32", FK_Data_4)
1129 .Case("secrel32", FK_SecRel_4)
1130 .Case("secidx", FK_SecRel_2)
1132 }
1133
1134 std::unique_ptr<MCObjectTargetWriter>
1135 createObjectTargetWriter() const override {
1136 return createX86WinCOFFObjectWriter(Is64Bit);
1137 }
1138};
1139
namespace CU {

  /// Compact unwind encoding values.
  enum CompactUnwindEncodings {
    /// [RE]BP based frame where [RE]BP is pushed on the stack immediately
    /// after the return address, then [RE]SP is moved to [RE]BP.
    UNWIND_MODE_BP_FRAME = 0x01000000,

    /// A frameless function with a small constant stack size.
    UNWIND_MODE_STACK_IMMD = 0x02000000,

    /// A frameless function with a large constant stack size.
    UNWIND_MODE_STACK_IND = 0x03000000,

    /// No compact unwind encoding is available.
    UNWIND_MODE_DWARF = 0x04000000,

    /// Mask for encoding the frame registers.
    UNWIND_BP_FRAME_REGISTERS = 0x00007FFF,

    /// Mask for encoding the frameless registers.
    UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
  };

} // namespace CU
1165
1166class DarwinX86AsmBackend : public X86AsmBackend {
1167 const MCRegisterInfo &MRI;
1168
1169 /// Number of registers that can be saved in a compact unwind encoding.
1170 enum { CU_NUM_SAVED_REGS = 6 };
1171
1172 mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
1173 Triple TT;
1174 bool Is64Bit;
1175
1176 unsigned OffsetSize; ///< Offset of a "push" instruction.
1177 unsigned MoveInstrSize; ///< Size of a "move" instruction.
1178 unsigned StackDivide; ///< Amount to adjust stack size by.
1179protected:
1180 /// Size of a "push" instruction for the given register.
1181 unsigned PushInstrSize(unsigned Reg) const {
1182 switch (Reg) {
1183 case X86::EBX:
1184 case X86::ECX:
1185 case X86::EDX:
1186 case X86::EDI:
1187 case X86::ESI:
1188 case X86::EBP:
1189 case X86::RBX:
1190 case X86::RBP:
1191 return 1;
1192 case X86::R12:
1193 case X86::R13:
1194 case X86::R14:
1195 case X86::R15:
1196 return 2;
1197 }
1198 return 1;
1199 }
1200
1201private:
1202 /// Get the compact unwind number for a given register. The number
1203 /// corresponds to the enum lists in compact_unwind_encoding.h.
1204 int getCompactUnwindRegNum(unsigned Reg) const {
1205 static const MCPhysReg CU32BitRegs[7] = {
1206 X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
1207 };
1208 static const MCPhysReg CU64BitRegs[] = {
1209 X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
1210 };
1211 const MCPhysReg *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs;
1212 for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
1213 if (*CURegs == Reg)
1214 return Idx;
1215
1216 return -1;
1217 }
1218
1219 /// Return the registers encoded for a compact encoding with a frame
1220 /// pointer.
1221 uint32_t encodeCompactUnwindRegistersWithFrame() const {
1222 // Encode the registers in the order they were saved --- 3-bits per
1223 // register. The list of saved registers is assumed to be in reverse
1224 // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS.
1225 uint32_t RegEnc = 0;
1226 for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) {
1227 unsigned Reg = SavedRegs[i];
1228 if (Reg == 0) break;
1229
1230 int CURegNum = getCompactUnwindRegNum(Reg);
1231 if (CURegNum == -1) return ~0U;
1232
1233 // Encode the 3-bit register number in order, skipping over 3-bits for
1234 // each register.
1235 RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
1236 }
1237
1238 assert((RegEnc & 0x3FFFF) == RegEnc &&
1239 "Invalid compact register encoding!");
1240 return RegEnc;
1241 }
1242
1243 /// Create the permutation encoding used with frameless stacks. It is
1244 /// passed the number of registers to be saved and an array of the registers
1245 /// saved.
1246 uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
1247 // The saved registers are numbered from 1 to 6. In order to encode the
1248 // order in which they were saved, we re-number them according to their
1249 // place in the register order. The re-numbering is relative to the last
1250 // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
1251 // that order:
1252 //
1253 // Orig Re-Num
1254 // ---- ------
1255 // 6 6
1256 // 2 2
1257 // 4 3
1258 // 5 3
1259 //
1260 for (unsigned i = 0; i < RegCount; ++i) {
1261 int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
1262 if (CUReg == -1) return ~0U;
1263 SavedRegs[i] = CUReg;
1264 }
1265
1266 // Reverse the list.
1267 std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);
1268
1269 uint32_t RenumRegs[CU_NUM_SAVED_REGS];
1270 for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i){
1271 unsigned Countless = 0;
1272 for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
1273 if (SavedRegs[j] < SavedRegs[i])
1274 ++Countless;
1275
1276 RenumRegs[i] = SavedRegs[i] - Countless - 1;
1277 }
1278
1279 // Take the renumbered values and encode them into a 10-bit number.
1280 uint32_t permutationEncoding = 0;
1281 switch (RegCount) {
1282 case 6:
1283 permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
1284 + 6 * RenumRegs[2] + 2 * RenumRegs[3]
1285 + RenumRegs[4];
1286 break;
1287 case 5:
1288 permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
1289 + 6 * RenumRegs[3] + 2 * RenumRegs[4]
1290 + RenumRegs[5];
1291 break;
1292 case 4:
1293 permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
1294 + 3 * RenumRegs[4] + RenumRegs[5];
1295 break;
1296 case 3:
1297 permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
1298 + RenumRegs[5];
1299 break;
1300 case 2:
1301 permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
1302 break;
1303 case 1:
1304 permutationEncoding |= RenumRegs[5];
1305 break;
1306 }
1307
1308 assert((permutationEncoding & 0x3FF) == permutationEncoding &&
1309 "Invalid compact register encoding!");
1310 return permutationEncoding;
1311 }
1312
1313public:
1314 DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI,
1315 const MCSubtargetInfo &STI)
1316 : X86AsmBackend(T, STI), MRI(MRI), TT(STI.getTargetTriple()),
1317 Is64Bit(TT.isArch64Bit()) {
1318 memset(SavedRegs, 0, sizeof(SavedRegs));
1319 OffsetSize = Is64Bit ? 8 : 4;
1320 MoveInstrSize = Is64Bit ? 3 : 2;
1321 StackDivide = Is64Bit ? 8 : 4;
1322 }
1323
1324 std::unique_ptr<MCObjectTargetWriter>
1325 createObjectTargetWriter() const override {
1327 uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TT));
1328 return createX86MachObjectWriter(Is64Bit, CPUType, CPUSubType);
1329 }
1330
1331 /// Implementation of algorithm to generate the compact unwind encoding
1332 /// for the CFI instructions.
1333 uint64_t generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI,
1334 const MCContext *Ctxt) const override {
1336 if (Instrs.empty()) return 0;
1337 if (!isDarwinCanonicalPersonality(FI->Personality) &&
1339 return CU::UNWIND_MODE_DWARF;
1340
1341 // Reset the saved registers.
1342 unsigned SavedRegIdx = 0;
1343 memset(SavedRegs, 0, sizeof(SavedRegs));
1344
1345 bool HasFP = false;
1346
1347 // Encode that we are using EBP/RBP as the frame pointer.
1348 uint64_t CompactUnwindEncoding = 0;
1349
1350 unsigned SubtractInstrIdx = Is64Bit ? 3 : 2;
1351 unsigned InstrOffset = 0;
1352 unsigned StackAdjust = 0;
1353 uint64_t StackSize = 0;
1354 int64_t MinAbsOffset = std::numeric_limits<int64_t>::max();
1355
1356 for (const MCCFIInstruction &Inst : Instrs) {
1357 switch (Inst.getOperation()) {
1358 default:
1359 // Any other CFI directives indicate a frame that we aren't prepared
1360 // to represent via compact unwind, so just bail out.
1361 return CU::UNWIND_MODE_DWARF;
1363 // Defines a frame pointer. E.g.
1364 //
1365 // movq %rsp, %rbp
1366 // L0:
1367 // .cfi_def_cfa_register %rbp
1368 //
1369 HasFP = true;
1370
1371 // If the frame pointer is other than esp/rsp, we do not have a way to
1372 // generate a compact unwinding representation, so bail out.
1373 if (*MRI.getLLVMRegNum(Inst.getRegister(), true) !=
1374 (Is64Bit ? X86::RBP : X86::EBP))
1375 return CU::UNWIND_MODE_DWARF;
1376
1377 // Reset the counts.
1378 memset(SavedRegs, 0, sizeof(SavedRegs));
1379 StackAdjust = 0;
1380 SavedRegIdx = 0;
1381 MinAbsOffset = std::numeric_limits<int64_t>::max();
1382 InstrOffset += MoveInstrSize;
1383 break;
1384 }
1386 // Defines a new offset for the CFA. E.g.
1387 //
1388 // With frame:
1389 //
1390 // pushq %rbp
1391 // L0:
1392 // .cfi_def_cfa_offset 16
1393 //
1394 // Without frame:
1395 //
1396 // subq $72, %rsp
1397 // L0:
1398 // .cfi_def_cfa_offset 80
1399 //
1400 StackSize = Inst.getOffset() / StackDivide;
1401 break;
1402 }
1404 // Defines a "push" of a callee-saved register. E.g.
1405 //
1406 // pushq %r15
1407 // pushq %r14
1408 // pushq %rbx
1409 // L0:
1410 // subq $120, %rsp
1411 // L1:
1412 // .cfi_offset %rbx, -40
1413 // .cfi_offset %r14, -32
1414 // .cfi_offset %r15, -24
1415 //
1416 if (SavedRegIdx == CU_NUM_SAVED_REGS)
1417 // If there are too many saved registers, we cannot use a compact
1418 // unwind encoding.
1419 return CU::UNWIND_MODE_DWARF;
1420
1421 MCRegister Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
1422 SavedRegs[SavedRegIdx++] = Reg;
1423 StackAdjust += OffsetSize;
1424 MinAbsOffset = std::min(MinAbsOffset, std::abs(Inst.getOffset()));
1425 InstrOffset += PushInstrSize(Reg);
1426 break;
1427 }
1428 }
1429 }
1430
1431 StackAdjust /= StackDivide;
1432
1433 if (HasFP) {
1434 if ((StackAdjust & 0xFF) != StackAdjust)
1435 // Offset was too big for a compact unwind encoding.
1436 return CU::UNWIND_MODE_DWARF;
1437
1438 // We don't attempt to track a real StackAdjust, so if the saved registers
1439 // aren't adjacent to rbp we can't cope.
1440 if (SavedRegIdx != 0 && MinAbsOffset != 3 * (int)OffsetSize)
1441 return CU::UNWIND_MODE_DWARF;
1442
1443 // Get the encoding of the saved registers when we have a frame pointer.
1444 uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame();
1445 if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;
1446
1447 CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
1448 CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
1449 CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
1450 } else {
1451 SubtractInstrIdx += InstrOffset;
1452 ++StackAdjust;
1453
1454 if ((StackSize & 0xFF) == StackSize) {
1455 // Frameless stack with a small stack size.
1456 CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;
1457
1458 // Encode the stack size.
1459 CompactUnwindEncoding |= (StackSize & 0xFF) << 16;
1460 } else {
1461 if ((StackAdjust & 0x7) != StackAdjust)
1462 // The extra stack adjustments are too big for us to handle.
1463 return CU::UNWIND_MODE_DWARF;
1464
1465 // Frameless stack with an offset too large for us to encode compactly.
1466 CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;
1467
1468 // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
1469 // instruction.
1470 CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;
1471
1472 // Encode any extra stack adjustments (done via push instructions).
1473 CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
1474 }
1475
1476 // Encode the number of registers saved. (Reverse the list first.)
1477 std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]);
1478 CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;
1479
1480 // Get the encoding of the saved registers when we don't have a frame
1481 // pointer.
1482 uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx);
1483 if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;
1484
1485 // Encode the register encoding.
1486 CompactUnwindEncoding |=
1487 RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
1488 }
1489
1490 return CompactUnwindEncoding;
1491 }
1492};
1493
1494} // end anonymous namespace
1495
1497 const MCSubtargetInfo &STI,
1498 const MCRegisterInfo &MRI,
1499 const MCTargetOptions &Options) {
1500 const Triple &TheTriple = STI.getTargetTriple();
1501 if (TheTriple.isOSBinFormatMachO())
1502 return new DarwinX86AsmBackend(T, MRI, STI);
1503
1504 if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
1505 return new WindowsX86AsmBackend(T, false, STI);
1506
1507 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
1508
1509 if (TheTriple.isOSIAMCU())
1510 return new ELFX86_IAMCUAsmBackend(T, OSABI, STI);
1511
1512 return new ELFX86_32AsmBackend(T, OSABI, STI);
1513}
1514
1516 const MCSubtargetInfo &STI,
1517 const MCRegisterInfo &MRI,
1518 const MCTargetOptions &Options) {
1519 const Triple &TheTriple = STI.getTargetTriple();
1520 if (TheTriple.isOSBinFormatMachO())
1521 return new DarwinX86AsmBackend(T, MRI, STI);
1522
1523 if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
1524 return new WindowsX86AsmBackend(T, true, STI);
1525
1526 if (TheTriple.isUEFI()) {
1527 assert(TheTriple.isOSBinFormatCOFF() &&
1528 "Only COFF format is supported in UEFI environment.");
1529 return new WindowsX86AsmBackend(T, true, STI);
1530 }
1531
1532 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
1533
1534 if (TheTriple.isX32())
1535 return new ELFX86_X32AsmBackend(T, OSABI, STI);
1536 return new ELFX86_64AsmBackend(T, OSABI, STI);
1537}
1538
namespace {
/// ELF streamer whose emitInstruction is routed through the X86 backend's
/// branch-alignment hooks via X86_MC::emitInstruction.
class X86ELFStreamer : public MCELFStreamer {
public:
  X86ELFStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
                 std::unique_ptr<MCObjectWriter> OW,
                 std::unique_ptr<MCCodeEmitter> Emitter)
      : MCELFStreamer(Context, std::move(TAB), std::move(OW),
                      std::move(Emitter)) {}

  void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI) override;
};
} // end anonymous namespace
1551
1553 const MCSubtargetInfo &STI) {
1554 auto &Backend = static_cast<X86AsmBackend &>(S.getAssembler().getBackend());
1555 Backend.emitInstructionBegin(S, Inst, STI);
1556 S.MCObjectStreamer::emitInstruction(Inst, STI);
1557 Backend.emitInstructionEnd(S, Inst);
1558}
1559
// Delegate to the shared X86 emission path so backend padding hooks run.
void X86ELFStreamer::emitInstruction(const MCInst &Inst,
                                     const MCSubtargetInfo &STI) {
  X86_MC::emitInstruction(*this, Inst, STI);
}
1564
1566 std::unique_ptr<MCAsmBackend> &&MAB,
1567 std::unique_ptr<MCObjectWriter> &&MOW,
1568 std::unique_ptr<MCCodeEmitter> &&MCE) {
1569 return new X86ELFStreamer(Context, std::move(MAB), std::move(MOW),
1570 std::move(MCE));
1571}
unsigned const MachineRegisterInfo * MRI
dxil DXContainer Global Emitter
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static RegisterPass< DebugifyFunctionPass > DF("debugify-function", "Attach debug info to a function")
std::string Name
uint64_t Size
IRTranslator LLVM IR MI
static LVOptions Options
Definition: LVOptions.cpp:25
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
PowerPC TLS Dynamic Call Fixup
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static MCInstrInfo * createMCInstrInfo()
static unsigned getRelaxedOpcodeBranch(unsigned Opcode, bool Is16BitMode=false)
static X86::SecondMacroFusionInstKind classifySecondInstInMacroFusion(const MCInst &MI, const MCInstrInfo &MCII)
static size_t getSizeForInstFragment(const MCFragment *F)
static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII)
Check if the instruction uses RIP relative addressing.
static bool isRightAfterData(MCFragment *CurrentFragment, const std::pair< MCFragment *, size_t > &PrevInstPosition)
Check if the instruction to be emitted is right after any data.
static bool mayHaveInterruptDelaySlot(unsigned InstOpcode)
X86 has certain instructions which enable interrupts exactly one instruction after the instruction wh...
static bool isFirstMacroFusibleInst(const MCInst &Inst, const MCInstrInfo &MCII)
Check if the instruction is valid as the first instruction in macro fusion.
static X86::CondCode getCondFromBranch(const MCInst &MI, const MCInstrInfo &MCII)
static unsigned getRelaxedOpcode(const MCInst &MI, bool Is16BitMode)
static unsigned getFixupKindSize(unsigned Kind)
static bool isRelaxableBranch(unsigned Opcode)
static bool isPrefix(unsigned Opcode, const MCInstrInfo &MCII)
Check if the instruction is a prefix.
static bool hasVariantSymbol(const MCInst &MI)
Check if the instruction has a variant symbol operand.
static bool is64Bit(const char *name)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:163
Implements a dense probed hash-table based set.
Definition: DenseSet.h:278
Generic interface to target specific assembler backends.
Definition: MCAsmBackend.h:42
virtual bool allowEnhancedRelaxation() const
Return true if this target allows an unrelaxable instruction to be emitted into RelaxableFragment and...
Definition: MCAsmBackend.h:64
virtual unsigned getMaximumNopSize(const MCSubtargetInfo &STI) const
Returns the maximum size of a nop in bytes on this target.
Definition: MCAsmBackend.h:211
virtual bool writeNopData(raw_ostream &OS, uint64_t Count, const MCSubtargetInfo *STI) const =0
Write an (optimal) nop sequence of Count bytes to the given output.
virtual void relaxInstruction(MCInst &Inst, const MCSubtargetInfo &STI) const
Relax the instruction in the given fragment to the next wider instruction.
Definition: MCAsmBackend.h:179
virtual bool mayNeedRelaxation(const MCInst &Inst, const MCSubtargetInfo &STI) const
Check whether the given instruction may need relaxation.
Definition: MCAsmBackend.h:155
virtual bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value) const
Simple predicate for targets where !Resolved implies requiring relaxation.
Definition: MCAsmBackend.h:169
virtual void finishLayout(MCAssembler const &Asm) const
Give backend an opportunity to finish layout after relaxation.
Definition: MCAsmBackend.h:223
virtual bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup, const MCValue &Target, const MCSubtargetInfo *STI)
Hook to check if a relocation is needed for some target specific reason.
Definition: MCAsmBackend.h:96
virtual unsigned getNumFixupKinds() const =0
Get the number of target specific fixup kinds.
virtual const MCFixupKindInfo & getFixupKindInfo(MCFixupKind Kind) const
Get information on a fixup kind.
virtual std::optional< MCFixupKind > getFixupKind(StringRef Name) const
Map a relocation name used in .reloc to a fixup kind.
virtual void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, const MCValue &Target, MutableArrayRef< char > Data, uint64_t Value, bool IsResolved, const MCSubtargetInfo *STI) const =0
Apply the Value for given Fixup into the provided data fragment, at the offset specified by the fixup...
virtual bool allowAutoPadding() const
Return true if this target might automatically pad instructions and thus need to emit padding enable/...
Definition: MCAsmBackend.h:60
MCAsmBackend & getBackend() const
Definition: MCAssembler.h:188
Represents required padding such that a particular other set of fragments does not cross a particular...
Definition: MCFragment.h:532
void setLastFragment(const MCFragment *F)
Definition: MCFragment.h:556
MCCodeEmitter - Generic instruction encoding interface.
Definition: MCCodeEmitter.h:21
Context object for machine code objects.
Definition: MCContext.h:83
bool emitCompactUnwindNonCanonical() const
Definition: MCContext.cpp:942
Fragment for data and encoded instructions.
Definition: MCFragment.h:219
SmallVectorImpl< MCFixup > & getFixups()
Definition: MCFragment.h:200
SmallVectorImpl< char > & getContents()
Definition: MCFragment.h:197
const MCSubtargetInfo * getSubtargetInfo() const
Retrieve the MCSubTargetInfo in effect when the instruction was encoded.
Definition: MCFragment.h:167
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:34
@ SymbolRef
References to labels and assigned expressions.
Definition: MCExpr.h:39
ExprKind getKind() const
Definition: MCExpr.h:78
Encode information on a single operation to perform on a byte sequence (e.g., an encoded instruction)...
Definition: MCFixup.h:71
MCFragment * getNext() const
Definition: MCFragment.h:95
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:185
void dump_pretty(raw_ostream &OS, const MCInstPrinter *Printer=nullptr, StringRef Separator=" ", const MCRegisterInfo *RegInfo=nullptr) const
Dump the MCInst as prettily as possible using the additional MC structures, if given.
Definition: MCInst.cpp:84
unsigned getOpcode() const
Definition: MCInst.h:199
void setOpcode(unsigned Op)
Definition: MCInst.h:198
const MCOperand & getOperand(unsigned i) const
Definition: MCInst.h:207
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Definition: MCInstrDesc.h:317
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:26
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
Streaming object file generation interface.
MCAssembler & getAssembler()
void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI) override
Emit the given Instruction into the current section.
MCRegister getReg() const
Returns the register number.
Definition: MCInst.h:70
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
A relaxable fragment holds on to its MCInst, since it may need to be relaxed during the assembler lay...
Definition: MCFragment.h:234
bool getAllowAutoPadding() const
Definition: MCFragment.h:247
const MCInst & getInst() const
Definition: MCFragment.h:244
void setInst(const MCInst &Value)
Definition: MCFragment.h:245
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Definition: MCSection.h:36
void ensureMinAlignment(Align MinAlignment)
Makes sure that Alignment is at least MinAlignment.
Definition: MCSection.h:150
bool isText() const
Definition: MCSection.h:131
iterator end() const
Definition: MCSection.h:183
void setHasLayout(bool Value)
Definition: MCSection.h:173
iterator begin() const
Definition: MCSection.h:182
Streaming machine code generation interface.
Definition: MCStreamer.h:213
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const Triple & getTargetTriple() const
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
This represents an "assembler immediate".
Definition: MCValue.h:36
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Definition: ArrayRef.h:310
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
bool empty() const
Definition: SmallVector.h:81
size_t size() const
Definition: SmallVector.h:78
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:700
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
bool isX32() const
Tests whether the target is X32.
Definition: Triple.h:1060
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
Definition: Triple.h:743
OSType getOS() const
Get the parsed operating system type of this triple.
Definition: Triple.h:392
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:383
bool isOSBinFormatCOFF() const
Tests whether the OS uses the COFF binary format.
Definition: Triple.h:735
bool isUEFI() const
Tests whether the OS is UEFI.
Definition: Triple.h:630
bool isOSWindows() const
Tests whether the OS is Windows.
Definition: Triple.h:635
bool isOSIAMCU() const
Definition: Triple.h:608
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition: Triple.h:730
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM Value Representation.
Definition: Value.h:74
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:213
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition: DenseSet.h:95
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
raw_ostream & write(unsigned char C)
A raw_ostream that writes to an SmallVector or SmallString.
Definition: raw_ostream.h:691
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
CompactUnwindEncodings
Compact unwind encoding values.
@ EM_386
Definition: ELF.h:137
@ EM_X86_64
Definition: ELF.h:179
@ EM_IAMCU
Definition: ELF.h:140
Expected< uint32_t > getCPUSubType(const Triple &T)
Definition: MachO.cpp:95
Expected< uint32_t > getCPUType(const Triple &T)
Definition: MachO.cpp:77
@ Relaxed
Definition: NVPTX.h:120
Reg
All possible values of the reg field in the ModR/M byte.
@ RawFrmDstSrc
RawFrmDstSrc - This form is for instructions that use the source index register SI/ESI/RSI with a pos...
Definition: X86BaseInfo.h:518
@ RawFrmSrc
RawFrmSrc - This form is for instructions that use the source index register SI/ESI/RSI with a possib...
Definition: X86BaseInfo.h:511
@ RawFrmMemOffs
RawFrmMemOffs - This form is for instructions that store an absolute memory offset as an immediate wi...
Definition: X86BaseInfo.h:508
bool isPrefix(uint64_t TSFlags)
Definition: X86BaseInfo.h:882
int getMemoryOperandNo(uint64_t TSFlags)
Definition: X86BaseInfo.h:1011
unsigned getOperandBias(const MCInstrDesc &Desc)
Compute whether all of the def operands are repeated in the uses and therefore should be skipped.
Definition: X86BaseInfo.h:968
void emitPrefix(MCCodeEmitter &MCE, const MCInst &MI, SmallVectorImpl< char > &CB, const MCSubtargetInfo &STI)
void emitInstruction(MCObjectStreamer &, const MCInst &Inst, const MCSubtargetInfo &STI)
FirstMacroFusionInstKind classifyFirstOpcodeInMacroFusion(unsigned Opcode)
Definition: X86BaseInfo.h:126
@ AddrSegmentReg
Definition: X86BaseInfo.h:34
AlignBranchBoundaryKind
Defines the possible values of the branch boundary alignment mask.
Definition: X86BaseInfo.h:309
@ AlignBranchJmp
Definition: X86BaseInfo.h:313
@ AlignBranchIndirect
Definition: X86BaseInfo.h:316
@ AlignBranchJcc
Definition: X86BaseInfo.h:312
@ AlignBranchCall
Definition: X86BaseInfo.h:314
@ AlignBranchRet
Definition: X86BaseInfo.h:315
@ AlignBranchNone
Definition: X86BaseInfo.h:310
@ AlignBranchFused
Definition: X86BaseInfo.h:311
SecondMacroFusionInstKind
Definition: X86BaseInfo.h:116
EncodingOfSegmentOverridePrefix getSegmentOverridePrefixForReg(MCRegister Reg)
Given a segment register, return the encoding of the segment override prefix for it.
Definition: X86BaseInfo.h:332
FirstMacroFusionInstKind
Definition: X86BaseInfo.h:107
unsigned getOpcodeForLongImmediateForm(unsigned Opcode)
bool isMacroFused(FirstMacroFusionInstKind FirstKind, SecondMacroFusionInstKind SecondKind)
Definition: X86BaseInfo.h:290
@ reloc_global_offset_table8
Definition: X86FixupKinds.h:38
@ reloc_riprel_4byte_movq_load_rex2
Definition: X86FixupKinds.h:19
@ reloc_signed_4byte_relax
Definition: X86FixupKinds.h:33
@ reloc_branch_4byte_pcrel
Definition: X86FixupKinds.h:39
@ NumTargetFixupKinds
Definition: X86FixupKinds.h:42
@ reloc_riprel_4byte_relax
Definition: X86FixupKinds.h:21
@ reloc_riprel_4byte_relax_evex
Definition: X86FixupKinds.h:27
@ reloc_signed_4byte
Definition: X86FixupKinds.h:30
@ reloc_riprel_4byte_relax_rex
Definition: X86FixupKinds.h:23
@ reloc_global_offset_table
Definition: X86FixupKinds.h:35
@ reloc_riprel_4byte_movq_load
Definition: X86FixupKinds.h:18
@ reloc_riprel_4byte
Definition: X86FixupKinds.h:17
@ reloc_riprel_4byte_relax_rex2
Definition: X86FixupKinds.h:25
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:463
NodeAddr< CodeNode * > Code
Definition: RDFGraph.h:388
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
MCAsmBackend * createX86_64AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
std::unique_ptr< MCObjectTargetWriter > createX86WinCOFFObjectWriter(bool Is64Bit)
Construct an X86 Win COFF object writer.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
MCStreamer * createX86ELFStreamer(const Triple &T, MCContext &Context, std::unique_ptr< MCAsmBackend > &&MAB, std::unique_ptr< MCObjectWriter > &&MOW, std::unique_ptr< MCCodeEmitter > &&MCE)
MCFixupKind
Extensible enumeration to represent the type of a fixup.
Definition: MCFixup.h:21
@ FirstTargetFixupKind
Definition: MCFixup.h:45
@ FK_PCRel_4
A four-byte pc relative fixup.
Definition: MCFixup.h:30
@ FK_PCRel_2
A two-byte pc relative fixup.
Definition: MCFixup.h:29
@ FK_SecRel_2
A two-byte section relative fixup.
Definition: MCFixup.h:41
@ FirstLiteralRelocationKind
The range [FirstLiteralRelocationKind, MaxTargetFixupKind) is used for relocations coming from ....
Definition: MCFixup.h:50
@ FK_Data_8
A eight-byte fixup.
Definition: MCFixup.h:26
@ FK_Data_1
A one-byte fixup.
Definition: MCFixup.h:23
@ FK_Data_4
A four-byte fixup.
Definition: MCFixup.h:25
@ FK_SecRel_8
A eight-byte section relative fixup.
Definition: MCFixup.h:43
@ FK_PCRel_8
A eight-byte pc relative fixup.
Definition: MCFixup.h:31
@ FK_NONE
A no-op fixup.
Definition: MCFixup.h:22
@ FK_SecRel_4
A four-byte section relative fixup.
Definition: MCFixup.h:42
@ FK_PCRel_1
A one-byte pc relative fixup.
Definition: MCFixup.h:28
@ FK_SecRel_1
A one-byte section relative fixup.
Definition: MCFixup.h:40
@ FK_Data_2
A two-byte fixup.
Definition: MCFixup.h:24
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition: Error.h:756
std::unique_ptr< MCObjectTargetWriter > createX86MachObjectWriter(bool Is64Bit, uint32_t CPUType, uint32_t CPUSubtype)
Construct an X86 Mach-O object writer.
bool isIntN(unsigned N, int64_t x)
Checks if an signed integer fits into the given (dynamic) bit width.
Definition: MathExtras.h:260
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1873
std::unique_ptr< MCObjectTargetWriter > createX86ELFObjectWriter(bool IsELF64, uint8_t OSABI, uint16_t EMachine)
Construct an X86 ELF object writer.
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition: Alignment.h:111
endianness
Definition: bit.h:70
MCAsmBackend * createX86_32AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Description of the encoding of one expression Op.
const MCSymbol * Personality
Definition: MCDwarf.h:764
std::vector< MCCFIInstruction > Instructions
Definition: MCDwarf.h:766
Target independent information on a fixup kind.
@ FKF_IsPCRel
Is this fixup kind PCrelative? This is used by the assembler backend to evaluate fixup values in a ta...