LLVM 19.0.0git
X86AsmBackend.cpp
Go to the documentation of this file.
1//===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
16#include "llvm/MC/MCAsmLayout.h"
17#include "llvm/MC/MCAssembler.h"
19#include "llvm/MC/MCContext.h"
20#include "llvm/MC/MCDwarf.h"
22#include "llvm/MC/MCExpr.h"
24#include "llvm/MC/MCInst.h"
25#include "llvm/MC/MCInstrInfo.h"
32#include "llvm/MC/MCValue.h"
37
38using namespace llvm;
39
40namespace {
41/// A wrapper for holding a mask of the values from X86::AlignBranchBoundaryKind
42class X86AlignBranchKind {
43private:
44 uint8_t AlignBranchKind = 0;
45
46public:
47 void operator=(const std::string &Val) {
48 if (Val.empty())
49 return;
50 SmallVector<StringRef, 6> BranchTypes;
51 StringRef(Val).split(BranchTypes, '+', -1, false);
52 for (auto BranchType : BranchTypes) {
53 if (BranchType == "fused")
54 addKind(X86::AlignBranchFused);
55 else if (BranchType == "jcc")
56 addKind(X86::AlignBranchJcc);
57 else if (BranchType == "jmp")
58 addKind(X86::AlignBranchJmp);
59 else if (BranchType == "call")
60 addKind(X86::AlignBranchCall);
61 else if (BranchType == "ret")
62 addKind(X86::AlignBranchRet);
63 else if (BranchType == "indirect")
65 else {
66 errs() << "invalid argument " << BranchType.str()
67 << " to -x86-align-branch=; each element must be one of: fused, "
68 "jcc, jmp, call, ret, indirect.(plus separated)\n";
69 }
70 }
71 }
72
73 operator uint8_t() const { return AlignBranchKind; }
74 void addKind(X86::AlignBranchBoundaryKind Value) { AlignBranchKind |= Value; }
75};
76
77X86AlignBranchKind X86AlignBranchKindLoc;
78
79cl::opt<unsigned> X86AlignBranchBoundary(
80 "x86-align-branch-boundary", cl::init(0),
82 "Control how the assembler should align branches with NOP. If the "
83 "boundary's size is not 0, it should be a power of 2 and no less "
84 "than 32. Branches will be aligned to prevent from being across or "
85 "against the boundary of specified size. The default value 0 does not "
86 "align branches."));
87
89 "x86-align-branch",
91 "Specify types of branches to align (plus separated list of types):"
92 "\njcc indicates conditional jumps"
93 "\nfused indicates fused conditional jumps"
94 "\njmp indicates direct unconditional jumps"
95 "\ncall indicates direct and indirect calls"
96 "\nret indicates rets"
97 "\nindirect indicates indirect unconditional jumps"),
98 cl::location(X86AlignBranchKindLoc));
99
100cl::opt<bool> X86AlignBranchWithin32BBoundaries(
101 "x86-branches-within-32B-boundaries", cl::init(false),
102 cl::desc(
103 "Align selected instructions to mitigate negative performance impact "
104 "of Intel's micro code update for errata skx102. May break "
105 "assumptions about labels corresponding to particular instructions, "
106 "and should be used with caution."));
107
108cl::opt<unsigned> X86PadMaxPrefixSize(
109 "x86-pad-max-prefix-size", cl::init(0),
110 cl::desc("Maximum number of prefixes to use for padding"));
111
112cl::opt<bool> X86PadForAlign(
113 "x86-pad-for-align", cl::init(false), cl::Hidden,
114 cl::desc("Pad previous instructions to implement align directives"));
115
116cl::opt<bool> X86PadForBranchAlign(
117 "x86-pad-for-branch-align", cl::init(true), cl::Hidden,
118 cl::desc("Pad previous instructions to implement branch alignment"));
119
120class X86AsmBackend : public MCAsmBackend {
121 const MCSubtargetInfo &STI;
122 std::unique_ptr<const MCInstrInfo> MCII;
123 X86AlignBranchKind AlignBranchType;
124 Align AlignBoundary;
125 unsigned TargetPrefixMax = 0;
126
127 MCInst PrevInst;
128 unsigned PrevInstOpcode = 0;
129 MCBoundaryAlignFragment *PendingBA = nullptr;
130 std::pair<MCFragment *, size_t> PrevInstPosition;
131 bool IsRightAfterData = false;
132
133 uint8_t determinePaddingPrefix(const MCInst &Inst) const;
134 bool isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const;
135 bool needAlign(const MCInst &Inst) const;
136 bool canPadBranches(MCObjectStreamer &OS) const;
137 bool canPadInst(const MCInst &Inst, MCObjectStreamer &OS) const;
138
139public:
140 X86AsmBackend(const Target &T, const MCSubtargetInfo &STI)
141 : MCAsmBackend(llvm::endianness::little), STI(STI),
142 MCII(T.createMCInstrInfo()) {
143 if (X86AlignBranchWithin32BBoundaries) {
144 // At the moment, this defaults to aligning fused branches, unconditional
145 // jumps, and (unfused) conditional jumps with nops. Both the
146 // instructions aligned and the alignment method (nop vs prefix) may
147 // change in the future.
148 AlignBoundary = assumeAligned(32);
149 AlignBranchType.addKind(X86::AlignBranchFused);
150 AlignBranchType.addKind(X86::AlignBranchJcc);
151 AlignBranchType.addKind(X86::AlignBranchJmp);
152 }
153 // Allow overriding defaults set by main flag
154 if (X86AlignBranchBoundary.getNumOccurrences())
155 AlignBoundary = assumeAligned(X86AlignBranchBoundary);
156 if (X86AlignBranch.getNumOccurrences())
157 AlignBranchType = X86AlignBranchKindLoc;
158 if (X86PadMaxPrefixSize.getNumOccurrences())
159 TargetPrefixMax = X86PadMaxPrefixSize;
160 }
161
162 bool allowAutoPadding() const override;
163 bool allowEnhancedRelaxation() const override;
165 const MCSubtargetInfo &STI) override;
166 void emitInstructionEnd(MCObjectStreamer &OS, const MCInst &Inst) override;
167
168 unsigned getNumFixupKinds() const override {
170 }
171
172 std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;
173
174 const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
175
176 bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
177 const MCValue &Target,
178 const MCSubtargetInfo *STI) override;
179
180 void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
182 uint64_t Value, bool IsResolved,
183 const MCSubtargetInfo *STI) const override;
184
185 bool mayNeedRelaxation(const MCInst &Inst,
186 const MCSubtargetInfo &STI) const override;
187
189 const MCRelaxableFragment *DF,
190 const MCAsmLayout &Layout) const override;
191
192 void relaxInstruction(MCInst &Inst,
193 const MCSubtargetInfo &STI) const override;
194
195 bool padInstructionViaRelaxation(MCRelaxableFragment &RF,
197 unsigned &RemainingSize) const;
198
199 bool padInstructionViaPrefix(MCRelaxableFragment &RF, MCCodeEmitter &Emitter,
200 unsigned &RemainingSize) const;
201
202 bool padInstructionEncoding(MCRelaxableFragment &RF, MCCodeEmitter &Emitter,
203 unsigned &RemainingSize) const;
204
205 void finishLayout(MCAssembler const &Asm, MCAsmLayout &Layout) const override;
206
207 unsigned getMaximumNopSize(const MCSubtargetInfo &STI) const override;
208
209 bool writeNopData(raw_ostream &OS, uint64_t Count,
210 const MCSubtargetInfo *STI) const override;
211};
212} // end anonymous namespace
213
214static bool isRelaxableBranch(unsigned Opcode) {
215 return Opcode == X86::JCC_1 || Opcode == X86::JMP_1;
216}
217
218static unsigned getRelaxedOpcodeBranch(unsigned Opcode,
219 bool Is16BitMode = false) {
220 switch (Opcode) {
221 default:
222 llvm_unreachable("invalid opcode for branch");
223 case X86::JCC_1:
224 return (Is16BitMode) ? X86::JCC_2 : X86::JCC_4;
225 case X86::JMP_1:
226 return (Is16BitMode) ? X86::JMP_2 : X86::JMP_4;
227 }
228}
229
230static unsigned getRelaxedOpcode(const MCInst &MI, bool Is16BitMode) {
231 unsigned Opcode = MI.getOpcode();
232 return isRelaxableBranch(Opcode) ? getRelaxedOpcodeBranch(Opcode, Is16BitMode)
234}
235
237 const MCInstrInfo &MCII) {
238 unsigned Opcode = MI.getOpcode();
239 switch (Opcode) {
240 default:
241 return X86::COND_INVALID;
242 case X86::JCC_1: {
243 const MCInstrDesc &Desc = MCII.get(Opcode);
244 return static_cast<X86::CondCode>(
245 MI.getOperand(Desc.getNumOperands() - 1).getImm());
246 }
247 }
248}
249
253 return classifySecondCondCodeInMacroFusion(CC);
254}
255
256/// Check if the instruction uses RIP relative addressing.
257static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII) {
258 unsigned Opcode = MI.getOpcode();
259 const MCInstrDesc &Desc = MCII.get(Opcode);
260 uint64_t TSFlags = Desc.TSFlags;
261 unsigned CurOp = X86II::getOperandBias(Desc);
262 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
263 if (MemoryOperand < 0)
264 return false;
265 unsigned BaseRegNum = MemoryOperand + CurOp + X86::AddrBaseReg;
266 unsigned BaseReg = MI.getOperand(BaseRegNum).getReg();
267 return (BaseReg == X86::RIP);
268}
269
270/// Check if the instruction is a prefix.
271static bool isPrefix(unsigned Opcode, const MCInstrInfo &MCII) {
272 return X86II::isPrefix(MCII.get(Opcode).TSFlags);
273}
274
275/// Check if the instruction is valid as the first instruction in macro fusion.
276static bool isFirstMacroFusibleInst(const MCInst &Inst,
277 const MCInstrInfo &MCII) {
278 // An Intel instruction with RIP relative addressing is not macro fusible.
279 if (isRIPRelative(Inst, MCII))
280 return false;
283 return FIK != X86::FirstMacroFusionInstKind::Invalid;
284}
285
286/// X86 can reduce the bytes of NOP by padding instructions with prefixes to
287/// get a better peformance in some cases. Here, we determine which prefix is
288/// the most suitable.
289///
290/// If the instruction has a segment override prefix, use the existing one.
291/// If the target is 64-bit, use the CS.
292/// If the target is 32-bit,
293/// - If the instruction has a ESP/EBP base register, use SS.
294/// - Otherwise use DS.
295uint8_t X86AsmBackend::determinePaddingPrefix(const MCInst &Inst) const {
296 assert((STI.hasFeature(X86::Is32Bit) || STI.hasFeature(X86::Is64Bit)) &&
297 "Prefixes can be added only in 32-bit or 64-bit mode.");
298 const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
299 uint64_t TSFlags = Desc.TSFlags;
300
301 // Determine where the memory operand starts, if present.
302 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
303 if (MemoryOperand != -1)
304 MemoryOperand += X86II::getOperandBias(Desc);
305
306 unsigned SegmentReg = 0;
307 if (MemoryOperand >= 0) {
308 // Check for explicit segment override on memory operand.
309 SegmentReg = Inst.getOperand(MemoryOperand + X86::AddrSegmentReg).getReg();
310 }
311
312 switch (TSFlags & X86II::FormMask) {
313 default:
314 break;
315 case X86II::RawFrmDstSrc: {
316 // Check segment override opcode prefix as needed (not for %ds).
317 if (Inst.getOperand(2).getReg() != X86::DS)
318 SegmentReg = Inst.getOperand(2).getReg();
319 break;
320 }
321 case X86II::RawFrmSrc: {
322 // Check segment override opcode prefix as needed (not for %ds).
323 if (Inst.getOperand(1).getReg() != X86::DS)
324 SegmentReg = Inst.getOperand(1).getReg();
325 break;
326 }
328 // Check segment override opcode prefix as needed.
329 SegmentReg = Inst.getOperand(1).getReg();
330 break;
331 }
332 }
333
334 if (SegmentReg != 0)
335 return X86::getSegmentOverridePrefixForReg(SegmentReg);
336
337 if (STI.hasFeature(X86::Is64Bit))
338 return X86::CS_Encoding;
339
340 if (MemoryOperand >= 0) {
341 unsigned BaseRegNum = MemoryOperand + X86::AddrBaseReg;
342 unsigned BaseReg = Inst.getOperand(BaseRegNum).getReg();
343 if (BaseReg == X86::ESP || BaseReg == X86::EBP)
344 return X86::SS_Encoding;
345 }
346 return X86::DS_Encoding;
347}
348
349/// Check if the two instructions will be macro-fused on the target cpu.
350bool X86AsmBackend::isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const {
351 const MCInstrDesc &InstDesc = MCII->get(Jcc.getOpcode());
352 if (!InstDesc.isConditionalBranch())
353 return false;
354 if (!isFirstMacroFusibleInst(Cmp, *MCII))
355 return false;
356 const X86::FirstMacroFusionInstKind CmpKind =
358 const X86::SecondMacroFusionInstKind BranchKind =
360 return X86::isMacroFused(CmpKind, BranchKind);
361}
362
363/// Check if the instruction has a variant symbol operand.
364static bool hasVariantSymbol(const MCInst &MI) {
365 for (auto &Operand : MI) {
366 if (!Operand.isExpr())
367 continue;
368 const MCExpr &Expr = *Operand.getExpr();
369 if (Expr.getKind() == MCExpr::SymbolRef &&
370 cast<MCSymbolRefExpr>(Expr).getKind() != MCSymbolRefExpr::VK_None)
371 return true;
372 }
373 return false;
374}
375
376bool X86AsmBackend::allowAutoPadding() const {
377 return (AlignBoundary != Align(1) && AlignBranchType != X86::AlignBranchNone);
378}
379
380bool X86AsmBackend::allowEnhancedRelaxation() const {
381 return allowAutoPadding() && TargetPrefixMax != 0 && X86PadForBranchAlign;
382}
383
384/// X86 has certain instructions which enable interrupts exactly one
385/// instruction *after* the instruction which stores to SS. Return true if the
386/// given instruction may have such an interrupt delay slot.
387static bool mayHaveInterruptDelaySlot(unsigned InstOpcode) {
388 switch (InstOpcode) {
389 case X86::POPSS16:
390 case X86::POPSS32:
391 case X86::STI:
392 return true;
393
394 case X86::MOV16sr:
395 case X86::MOV32sr:
396 case X86::MOV64sr:
397 case X86::MOV16sm:
398 // In fact, this is only the case if the first operand is SS. However, as
399 // segment moves occur extremely rarely, this is just a minor pessimization.
400 return true;
401 }
402 return false;
403}
404
405/// Check if the instruction to be emitted is right after any data.
406static bool
408 const std::pair<MCFragment *, size_t> &PrevInstPosition) {
409 MCFragment *F = CurrentFragment;
410 // Since data is always emitted into a DataFragment, our check strategy is
411 // simple here.
412 // - If the fragment is a DataFragment
413 // - If it's empty (section start or data after align), return false.
414 // - If it's not the fragment where the previous instruction is,
415 // returns true.
416 // - If it's the fragment holding the previous instruction but its
417 // size changed since the previous instruction was emitted into
418 // it, returns true.
419 // - Otherwise returns false.
420 // - If the fragment is not a DataFragment, returns false.
421 if (auto *DF = dyn_cast_or_null<MCDataFragment>(F))
422 return DF->getContents().size() &&
423 (DF != PrevInstPosition.first ||
424 DF->getContents().size() != PrevInstPosition.second);
425
426 return false;
427}
428
429/// \returns the fragment size if it has instructions, otherwise returns 0.
430static size_t getSizeForInstFragment(const MCFragment *F) {
431 if (!F || !F->hasInstructions())
432 return 0;
433 // MCEncodedFragmentWithContents being templated makes this tricky.
434 switch (F->getKind()) {
435 default:
436 llvm_unreachable("Unknown fragment with instructions!");
438 return cast<MCDataFragment>(*F).getContents().size();
440 return cast<MCRelaxableFragment>(*F).getContents().size();
442 return cast<MCCompactEncodedInstFragment>(*F).getContents().size();
443 }
444}
445
446/// Return true if we can insert NOP or prefixes automatically before the
447/// the instruction to be emitted.
448bool X86AsmBackend::canPadInst(const MCInst &Inst, MCObjectStreamer &OS) const {
449 if (hasVariantSymbol(Inst))
450 // Linker may rewrite the instruction with variant symbol operand(e.g.
451 // TLSCALL).
452 return false;
453
454 if (mayHaveInterruptDelaySlot(PrevInstOpcode))
455 // If this instruction follows an interrupt enabling instruction with a one
456 // instruction delay, inserting a nop would change behavior.
457 return false;
458
459 if (isPrefix(PrevInstOpcode, *MCII))
460 // If this instruction follows a prefix, inserting a nop/prefix would change
461 // semantic.
462 return false;
463
464 if (isPrefix(Inst.getOpcode(), *MCII))
465 // If this instruction is a prefix, inserting a prefix would change
466 // semantic.
467 return false;
468
469 if (IsRightAfterData)
470 // If this instruction follows any data, there is no clear
471 // instruction boundary, inserting a nop/prefix would change semantic.
472 return false;
473
474 return true;
475}
476
477bool X86AsmBackend::canPadBranches(MCObjectStreamer &OS) const {
478 if (!OS.getAllowAutoPadding())
479 return false;
480 assert(allowAutoPadding() && "incorrect initialization!");
481
482 // We only pad in text section.
483 if (!OS.getCurrentSectionOnly()->isText())
484 return false;
485
486 // To be Done: Currently don't deal with Bundle cases.
487 if (OS.getAssembler().isBundlingEnabled())
488 return false;
489
490 // Branches only need to be aligned in 32-bit or 64-bit mode.
491 if (!(STI.hasFeature(X86::Is64Bit) || STI.hasFeature(X86::Is32Bit)))
492 return false;
493
494 return true;
495}
496
497/// Check if the instruction operand needs to be aligned.
498bool X86AsmBackend::needAlign(const MCInst &Inst) const {
499 const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
500 return (Desc.isConditionalBranch() &&
501 (AlignBranchType & X86::AlignBranchJcc)) ||
502 (Desc.isUnconditionalBranch() &&
503 (AlignBranchType & X86::AlignBranchJmp)) ||
504 (Desc.isCall() && (AlignBranchType & X86::AlignBranchCall)) ||
505 (Desc.isReturn() && (AlignBranchType & X86::AlignBranchRet)) ||
506 (Desc.isIndirectBranch() &&
507 (AlignBranchType & X86::AlignBranchIndirect));
508}
509
510/// Insert BoundaryAlignFragment before instructions to align branches.
511void X86AsmBackend::emitInstructionBegin(MCObjectStreamer &OS,
512 const MCInst &Inst, const MCSubtargetInfo &STI) {
513 // Used by canPadInst. Done here, because in emitInstructionEnd, the current
514 // fragment will have changed.
515 IsRightAfterData =
516 isRightAfterData(OS.getCurrentFragment(), PrevInstPosition);
517
518 if (!canPadBranches(OS))
519 return;
520
521 // NB: PrevInst only valid if canPadBranches is true.
522 if (!isMacroFused(PrevInst, Inst))
523 // Macro fusion doesn't happen indeed, clear the pending.
524 PendingBA = nullptr;
525
526 // When branch padding is enabled (basically the skx102 erratum => unlikely),
527 // we call canPadInst (not cheap) twice. However, in the common case, we can
528 // avoid unnecessary calls to that, as this is otherwise only used for
529 // relaxable fragments.
530 if (!canPadInst(Inst, OS))
531 return;
532
533 if (PendingBA && PendingBA->getNext() == OS.getCurrentFragment()) {
534 // Macro fusion actually happens and there is no other fragment inserted
535 // after the previous instruction.
536 //
537 // Do nothing here since we already inserted a BoudaryAlign fragment when
538 // we met the first instruction in the fused pair and we'll tie them
539 // together in emitInstructionEnd.
540 //
541 // Note: When there is at least one fragment, such as MCAlignFragment,
542 // inserted after the previous instruction, e.g.
543 //
544 // \code
545 // cmp %rax %rcx
546 // .align 16
547 // je .Label0
548 // \ endcode
549 //
550 // We will treat the JCC as a unfused branch although it may be fused
551 // with the CMP.
552 return;
553 }
554
555 if (needAlign(Inst) || ((AlignBranchType & X86::AlignBranchFused) &&
556 isFirstMacroFusibleInst(Inst, *MCII))) {
557 // If we meet a unfused branch or the first instuction in a fusiable pair,
558 // insert a BoundaryAlign fragment.
559 PendingBA = OS.getContext().allocFragment<MCBoundaryAlignFragment>(
560 AlignBoundary, STI);
561 OS.insert(PendingBA);
562 }
563}
564
565/// Set the last fragment to be aligned for the BoundaryAlignFragment.
566void X86AsmBackend::emitInstructionEnd(MCObjectStreamer &OS,
567 const MCInst &Inst) {
568 MCFragment *CF = OS.getCurrentFragment();
569 if (auto *F = dyn_cast_or_null<MCRelaxableFragment>(CF))
570 F->setAllowAutoPadding(canPadInst(Inst, OS));
571
572 // Update PrevInstOpcode here, canPadInst() reads that.
573 PrevInstOpcode = Inst.getOpcode();
574 PrevInstPosition = std::make_pair(CF, getSizeForInstFragment(CF));
575
576 if (!canPadBranches(OS))
577 return;
578
579 // PrevInst is only needed if canPadBranches. Copying an MCInst isn't cheap.
580 PrevInst = Inst;
581
582 if (!needAlign(Inst) || !PendingBA)
583 return;
584
585 // Tie the aligned instructions into a pending BoundaryAlign.
586 PendingBA->setLastFragment(CF);
587 PendingBA = nullptr;
588
589 // We need to ensure that further data isn't added to the current
590 // DataFragment, so that we can get the size of instructions later in
591 // MCAssembler::relaxBoundaryAlign. The easiest way is to insert a new empty
592 // DataFragment.
593 if (isa_and_nonnull<MCDataFragment>(CF))
594 OS.insert(OS.getContext().allocFragment<MCDataFragment>());
595
596 // Update the maximum alignment on the current section if necessary.
597 MCSection *Sec = OS.getCurrentSectionOnly();
598 Sec->ensureMinAlignment(AlignBoundary);
599}
600
601std::optional<MCFixupKind> X86AsmBackend::getFixupKind(StringRef Name) const {
602 if (STI.getTargetTriple().isOSBinFormatELF()) {
603 unsigned Type;
604 if (STI.getTargetTriple().getArch() == Triple::x86_64) {
606#define ELF_RELOC(X, Y) .Case(#X, Y)
607#include "llvm/BinaryFormat/ELFRelocs/x86_64.def"
608#undef ELF_RELOC
609 .Case("BFD_RELOC_NONE", ELF::R_X86_64_NONE)
610 .Case("BFD_RELOC_8", ELF::R_X86_64_8)
611 .Case("BFD_RELOC_16", ELF::R_X86_64_16)
612 .Case("BFD_RELOC_32", ELF::R_X86_64_32)
613 .Case("BFD_RELOC_64", ELF::R_X86_64_64)
614 .Default(-1u);
615 } else {
617#define ELF_RELOC(X, Y) .Case(#X, Y)
618#include "llvm/BinaryFormat/ELFRelocs/i386.def"
619#undef ELF_RELOC
620 .Case("BFD_RELOC_NONE", ELF::R_386_NONE)
621 .Case("BFD_RELOC_8", ELF::R_386_8)
622 .Case("BFD_RELOC_16", ELF::R_386_16)
623 .Case("BFD_RELOC_32", ELF::R_386_32)
624 .Default(-1u);
625 }
626 if (Type == -1u)
627 return std::nullopt;
628 return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
629 }
631}
632
633const MCFixupKindInfo &X86AsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
634 const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
635 {"reloc_riprel_4byte", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
636 {"reloc_riprel_4byte_movq_load", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
637 {"reloc_riprel_4byte_relax", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
638 {"reloc_riprel_4byte_relax_rex", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
639 {"reloc_signed_4byte", 0, 32, 0},
640 {"reloc_signed_4byte_relax", 0, 32, 0},
641 {"reloc_global_offset_table", 0, 32, 0},
642 {"reloc_global_offset_table8", 0, 64, 0},
643 {"reloc_branch_4byte_pcrel", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
644 };
645
646 // Fixup kinds from .reloc directive are like R_386_NONE/R_X86_64_NONE. They
647 // do not require any extra processing.
648 if (Kind >= FirstLiteralRelocationKind)
650
651 if (Kind < FirstTargetFixupKind)
653
654 assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
655 "Invalid kind!");
656 assert(Infos[Kind - FirstTargetFixupKind].Name && "Empty fixup name!");
657 return Infos[Kind - FirstTargetFixupKind];
658}
659
660bool X86AsmBackend::shouldForceRelocation(const MCAssembler &,
661 const MCFixup &Fixup, const MCValue &,
662 const MCSubtargetInfo *STI) {
663 return Fixup.getKind() >= FirstLiteralRelocationKind;
664}
665
666static unsigned getFixupKindSize(unsigned Kind) {
667 switch (Kind) {
668 default:
669 llvm_unreachable("invalid fixup kind!");
670 case FK_NONE:
671 return 0;
672 case FK_PCRel_1:
673 case FK_SecRel_1:
674 case FK_Data_1:
675 return 1;
676 case FK_PCRel_2:
677 case FK_SecRel_2:
678 case FK_Data_2:
679 return 2;
680 case FK_PCRel_4:
689 case FK_SecRel_4:
690 case FK_Data_4:
691 return 4;
692 case FK_PCRel_8:
693 case FK_SecRel_8:
694 case FK_Data_8:
696 return 8;
697 }
698}
699
700void X86AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
701 const MCValue &Target,
703 uint64_t Value, bool IsResolved,
704 const MCSubtargetInfo *STI) const {
705 unsigned Kind = Fixup.getKind();
706 if (Kind >= FirstLiteralRelocationKind)
707 return;
708 unsigned Size = getFixupKindSize(Kind);
709
710 assert(Fixup.getOffset() + Size <= Data.size() && "Invalid fixup offset!");
711
712 int64_t SignedValue = static_cast<int64_t>(Value);
713 if ((Target.isAbsolute() || IsResolved) &&
714 getFixupKindInfo(Fixup.getKind()).Flags &
716 // check that PC relative fixup fits into the fixup size.
717 if (Size > 0 && !isIntN(Size * 8, SignedValue))
718 Asm.getContext().reportError(
719 Fixup.getLoc(), "value of " + Twine(SignedValue) +
720 " is too large for field of " + Twine(Size) +
721 ((Size == 1) ? " byte." : " bytes."));
722 } else {
723 // Check that uppper bits are either all zeros or all ones.
724 // Specifically ignore overflow/underflow as long as the leakage is
725 // limited to the lower bits. This is to remain compatible with
726 // other assemblers.
727 assert((Size == 0 || isIntN(Size * 8 + 1, SignedValue)) &&
728 "Value does not fit in the Fixup field");
729 }
730
731 for (unsigned i = 0; i != Size; ++i)
732 Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
733}
734
735bool X86AsmBackend::mayNeedRelaxation(const MCInst &MI,
736 const MCSubtargetInfo &STI) const {
737 unsigned Opcode = MI.getOpcode();
738 unsigned SkipOperands = X86::isCCMPCC(Opcode) ? 2 : 0;
739 return isRelaxableBranch(Opcode) ||
740 (X86::getOpcodeForLongImmediateForm(Opcode) != Opcode &&
741 MI.getOperand(MI.getNumOperands() - 1 - SkipOperands).isExpr());
742}
743
744bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
746 const MCRelaxableFragment *DF,
747 const MCAsmLayout &Layout) const {
748 // Relax if the value is too big for a (signed) i8.
749 return !isInt<8>(Value);
750}
751
752// FIXME: Can tblgen help at all here to verify there aren't other instructions
753// we can relax?
754void X86AsmBackend::relaxInstruction(MCInst &Inst,
755 const MCSubtargetInfo &STI) const {
756 // The only relaxations X86 does is from a 1byte pcrel to a 4byte pcrel.
757 bool Is16BitMode = STI.hasFeature(X86::Is16Bit);
758 unsigned RelaxedOp = getRelaxedOpcode(Inst, Is16BitMode);
759
760 if (RelaxedOp == Inst.getOpcode()) {
763 Inst.dump_pretty(OS);
764 OS << "\n";
765 report_fatal_error("unexpected instruction to relax: " + OS.str());
766 }
767
768 Inst.setOpcode(RelaxedOp);
769}
770
771bool X86AsmBackend::padInstructionViaPrefix(MCRelaxableFragment &RF,
773 unsigned &RemainingSize) const {
774 if (!RF.getAllowAutoPadding())
775 return false;
776 // If the instruction isn't fully relaxed, shifting it around might require a
777 // larger value for one of the fixups then can be encoded. The outer loop
778 // will also catch this before moving to the next instruction, but we need to
779 // prevent padding this single instruction as well.
780 if (mayNeedRelaxation(RF.getInst(), *RF.getSubtargetInfo()))
781 return false;
782
783 const unsigned OldSize = RF.getContents().size();
784 if (OldSize == 15)
785 return false;
786
787 const unsigned MaxPossiblePad = std::min(15 - OldSize, RemainingSize);
788 const unsigned RemainingPrefixSize = [&]() -> unsigned {
790 Emitter.emitPrefix(RF.getInst(), Code, STI);
791 assert(Code.size() < 15 && "The number of prefixes must be less than 15.");
792
793 // TODO: It turns out we need a decent amount of plumbing for the target
794 // specific bits to determine number of prefixes its safe to add. Various
795 // targets (older chips mostly, but also Atom family) encounter decoder
796 // stalls with too many prefixes. For testing purposes, we set the value
797 // externally for the moment.
798 unsigned ExistingPrefixSize = Code.size();
799 if (TargetPrefixMax <= ExistingPrefixSize)
800 return 0;
801 return TargetPrefixMax - ExistingPrefixSize;
802 }();
803 const unsigned PrefixBytesToAdd =
804 std::min(MaxPossiblePad, RemainingPrefixSize);
805 if (PrefixBytesToAdd == 0)
806 return false;
807
808 const uint8_t Prefix = determinePaddingPrefix(RF.getInst());
809
811 Code.append(PrefixBytesToAdd, Prefix);
812 Code.append(RF.getContents().begin(), RF.getContents().end());
813 RF.getContents() = Code;
814
815 // Adjust the fixups for the change in offsets
816 for (auto &F : RF.getFixups()) {
817 F.setOffset(F.getOffset() + PrefixBytesToAdd);
818 }
819
820 RemainingSize -= PrefixBytesToAdd;
821 return true;
822}
823
824bool X86AsmBackend::padInstructionViaRelaxation(MCRelaxableFragment &RF,
826 unsigned &RemainingSize) const {
827 if (!mayNeedRelaxation(RF.getInst(), *RF.getSubtargetInfo()))
828 // TODO: There are lots of other tricks we could apply for increasing
829 // encoding size without impacting performance.
830 return false;
831
832 MCInst Relaxed = RF.getInst();
833 relaxInstruction(Relaxed, *RF.getSubtargetInfo());
834
837 Emitter.encodeInstruction(Relaxed, Code, Fixups, *RF.getSubtargetInfo());
838 const unsigned OldSize = RF.getContents().size();
839 const unsigned NewSize = Code.size();
840 assert(NewSize >= OldSize && "size decrease during relaxation?");
841 unsigned Delta = NewSize - OldSize;
842 if (Delta > RemainingSize)
843 return false;
844 RF.setInst(Relaxed);
845 RF.getContents() = Code;
846 RF.getFixups() = Fixups;
847 RemainingSize -= Delta;
848 return true;
849}
850
851bool X86AsmBackend::padInstructionEncoding(MCRelaxableFragment &RF,
853 unsigned &RemainingSize) const {
854 bool Changed = false;
855 if (RemainingSize != 0)
856 Changed |= padInstructionViaRelaxation(RF, Emitter, RemainingSize);
857 if (RemainingSize != 0)
858 Changed |= padInstructionViaPrefix(RF, Emitter, RemainingSize);
859 return Changed;
860}
861
862void X86AsmBackend::finishLayout(MCAssembler const &Asm,
863 MCAsmLayout &Layout) const {
864 // See if we can further relax some instructions to cut down on the number of
865 // nop bytes required for code alignment. The actual win is in reducing
866 // instruction count, not number of bytes. Modern X86-64 can easily end up
867 // decode limited. It is often better to reduce the number of instructions
868 // (i.e. eliminate nops) even at the cost of increasing the size and
869 // complexity of others.
870 if (!X86PadForAlign && !X86PadForBranchAlign)
871 return;
872
873 // The processed regions are delimitered by LabeledFragments. -g may have more
874 // MCSymbols and therefore different relaxation results. X86PadForAlign is
875 // disabled by default to eliminate the -g vs non -g difference.
876 DenseSet<MCFragment *> LabeledFragments;
877 for (const MCSymbol &S : Asm.symbols())
878 LabeledFragments.insert(S.getFragment(false));
879
880 for (MCSection &Sec : Asm) {
881 if (!Sec.isText())
882 continue;
883
885 for (MCSection::iterator I = Sec.begin(), IE = Sec.end(); I != IE; ++I) {
886 MCFragment &F = *I;
887
888 if (LabeledFragments.count(&F))
889 Relaxable.clear();
890
891 if (F.getKind() == MCFragment::FT_Data ||
893 // Skip and ignore
894 continue;
895
896 if (F.getKind() == MCFragment::FT_Relaxable) {
897 auto &RF = cast<MCRelaxableFragment>(*I);
898 Relaxable.push_back(&RF);
899 continue;
900 }
901
902 auto canHandle = [](MCFragment &F) -> bool {
903 switch (F.getKind()) {
904 default:
905 return false;
907 return X86PadForAlign;
909 return X86PadForBranchAlign;
910 }
911 };
912 // For any unhandled kind, assume we can't change layout.
913 if (!canHandle(F)) {
914 Relaxable.clear();
915 continue;
916 }
917
918#ifndef NDEBUG
919 const uint64_t OrigOffset = Layout.getFragmentOffset(&F);
920#endif
921 const uint64_t OrigSize = Asm.computeFragmentSize(Layout, F);
922
923 // To keep the effects local, prefer to relax instructions closest to
924 // the align directive. This is purely about human understandability
925 // of the resulting code. If we later find a reason to expand
926 // particular instructions over others, we can adjust.
927 MCFragment *FirstChangedFragment = nullptr;
928 unsigned RemainingSize = OrigSize;
929 while (!Relaxable.empty() && RemainingSize != 0) {
930 auto &RF = *Relaxable.pop_back_val();
931 // Give the backend a chance to play any tricks it wishes to increase
932 // the encoding size of the given instruction. Target independent code
933 // will try further relaxation, but target's may play further tricks.
934 if (padInstructionEncoding(RF, Asm.getEmitter(), RemainingSize))
935 FirstChangedFragment = &RF;
936
937 // If we have an instruction which hasn't been fully relaxed, we can't
938 // skip past it and insert bytes before it. Changing its starting
939 // offset might require a larger negative offset than it can encode.
940 // We don't need to worry about larger positive offsets as none of the
941 // possible offsets between this and our align are visible, and the
942 // ones afterwards aren't changing.
943 if (mayNeedRelaxation(RF.getInst(), *RF.getSubtargetInfo()))
944 break;
945 }
946 Relaxable.clear();
947
948 if (FirstChangedFragment) {
949 // Make sure the offsets for any fragments in the effected range get
950 // updated. Note that this (conservatively) invalidates the offsets of
951 // those following, but this is not required.
952 Layout.invalidateFragmentsFrom(FirstChangedFragment);
953 }
954
955 // BoundaryAlign explicitly tracks it's size (unlike align)
956 if (F.getKind() == MCFragment::FT_BoundaryAlign)
957 cast<MCBoundaryAlignFragment>(F).setSize(RemainingSize);
958
959#ifndef NDEBUG
960 const uint64_t FinalOffset = Layout.getFragmentOffset(&F);
961 const uint64_t FinalSize = Asm.computeFragmentSize(Layout, F);
962 assert(OrigOffset + OrigSize == FinalOffset + FinalSize &&
963 "can't move start of next fragment!");
964 assert(FinalSize == RemainingSize && "inconsistent size computation?");
965#endif
966
967 // If we're looking at a boundary align, make sure we don't try to pad
968 // its target instructions for some following directive. Doing so would
969 // break the alignment of the current boundary align.
970 if (auto *BF = dyn_cast<MCBoundaryAlignFragment>(&F)) {
971 const MCFragment *LastFragment = BF->getLastFragment();
972 if (!LastFragment)
973 continue;
974 while (&*I != LastFragment)
975 ++I;
976 }
977 }
978 }
979
980 // The layout is done. Mark every fragment as valid.
981 for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) {
982 MCSection &Section = *Layout.getSectionOrder()[i];
983 Layout.getFragmentOffset(&*Section.curFragList()->Tail);
984 Asm.computeFragmentSize(Layout, *Section.curFragList()->Tail);
985 }
986}
987
988unsigned X86AsmBackend::getMaximumNopSize(const MCSubtargetInfo &STI) const {
989 if (STI.hasFeature(X86::Is16Bit))
990 return 4;
991 if (!STI.hasFeature(X86::FeatureNOPL) && !STI.hasFeature(X86::Is64Bit))
992 return 1;
993 if (STI.hasFeature(X86::TuningFast7ByteNOP))
994 return 7;
995 if (STI.hasFeature(X86::TuningFast15ByteNOP))
996 return 15;
997 if (STI.hasFeature(X86::TuningFast11ByteNOP))
998 return 11;
999 // FIXME: handle 32-bit mode
1000 // 15-bytes is the longest single NOP instruction, but 10-bytes is
1001 // commonly the longest that can be efficiently decoded.
1002 return 10;
1003}
1004
1005/// Write a sequence of optimal nops to the output, covering \p Count
1006/// bytes.
1007/// \return - true on success, false on failure
1008bool X86AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
1009 const MCSubtargetInfo *STI) const {
1010 static const char Nops32Bit[10][11] = {
1011 // nop
1012 "\x90",
1013 // xchg %ax,%ax
1014 "\x66\x90",
1015 // nopl (%[re]ax)
1016 "\x0f\x1f\x00",
1017 // nopl 0(%[re]ax)
1018 "\x0f\x1f\x40\x00",
1019 // nopl 0(%[re]ax,%[re]ax,1)
1020 "\x0f\x1f\x44\x00\x00",
1021 // nopw 0(%[re]ax,%[re]ax,1)
1022 "\x66\x0f\x1f\x44\x00\x00",
1023 // nopl 0L(%[re]ax)
1024 "\x0f\x1f\x80\x00\x00\x00\x00",
1025 // nopl 0L(%[re]ax,%[re]ax,1)
1026 "\x0f\x1f\x84\x00\x00\x00\x00\x00",
1027 // nopw 0L(%[re]ax,%[re]ax,1)
1028 "\x66\x0f\x1f\x84\x00\x00\x00\x00\x00",
1029 // nopw %cs:0L(%[re]ax,%[re]ax,1)
1030 "\x66\x2e\x0f\x1f\x84\x00\x00\x00\x00\x00",
1031 };
1032
1033 // 16-bit mode uses different nop patterns than 32-bit.
1034 static const char Nops16Bit[4][11] = {
1035 // nop
1036 "\x90",
1037 // xchg %eax,%eax
1038 "\x66\x90",
1039 // lea 0(%si),%si
1040 "\x8d\x74\x00",
1041 // lea 0w(%si),%si
1042 "\x8d\xb4\x00\x00",
1043 };
1044
1045 const char(*Nops)[11] =
1046 STI->hasFeature(X86::Is16Bit) ? Nops16Bit : Nops32Bit;
1047
1048 uint64_t MaxNopLength = (uint64_t)getMaximumNopSize(*STI);
1049
1050 // Emit as many MaxNopLength NOPs as needed, then emit a NOP of the remaining
1051 // length.
1052 do {
1053 const uint8_t ThisNopLength = (uint8_t) std::min(Count, MaxNopLength);
1054 const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
1055 for (uint8_t i = 0; i < Prefixes; i++)
1056 OS << '\x66';
1057 const uint8_t Rest = ThisNopLength - Prefixes;
1058 if (Rest != 0)
1059 OS.write(Nops[Rest - 1], Rest);
1060 Count -= ThisNopLength;
1061 } while (Count != 0);
1062
1063 return true;
1064}
1065
1066/* *** */
1067
1068namespace {
1069
/// Common base for the ELF flavors of the X86 backend; carries the ELF
/// OS/ABI byte used by the concrete subclasses when creating the object
/// writer.
class ELFX86AsmBackend : public X86AsmBackend {
public:
  uint8_t OSABI; ///< ELF OSABI value forwarded to createX86ELFObjectWriter.
  ELFX86AsmBackend(const Target &T, uint8_t OSABI, const MCSubtargetInfo &STI)
    : X86AsmBackend(T, STI), OSABI(OSABI) {}
};
1076
/// ELF backend for 32-bit x86: emits 32-bit ELF objects with machine type
/// EM_386.
class ELFX86_32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_32AsmBackend(const Target &T, uint8_t OSABI,
                      const MCSubtargetInfo &STI)
    : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI, ELF::EM_386);
  }
};
1088
1089class ELFX86_X32AsmBackend : public ELFX86AsmBackend {
1090public:
1091 ELFX86_X32AsmBackend(const Target &T, uint8_t OSABI,
1092 const MCSubtargetInfo &STI)
1093 : ELFX86AsmBackend(T, OSABI, STI) {}
1094
1095 std::unique_ptr<MCObjectTargetWriter>
1096 createObjectTargetWriter() const override {
1097 return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
1099 }
1100};
1101
1102class ELFX86_IAMCUAsmBackend : public ELFX86AsmBackend {
1103public:
1104 ELFX86_IAMCUAsmBackend(const Target &T, uint8_t OSABI,
1105 const MCSubtargetInfo &STI)
1106 : ELFX86AsmBackend(T, OSABI, STI) {}
1107
1108 std::unique_ptr<MCObjectTargetWriter>
1109 createObjectTargetWriter() const override {
1110 return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
1112 }
1113};
1114
/// ELF backend for 64-bit x86: emits 64-bit ELF objects with machine type
/// EM_X86_64.
class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_64AsmBackend(const Target &T, uint8_t OSABI,
                      const MCSubtargetInfo &STI)
    : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ true, OSABI, ELF::EM_X86_64);
  }
};
1126
1127class WindowsX86AsmBackend : public X86AsmBackend {
1128 bool Is64Bit;
1129
1130public:
1131 WindowsX86AsmBackend(const Target &T, bool is64Bit,
1132 const MCSubtargetInfo &STI)
1133 : X86AsmBackend(T, STI)
1134 , Is64Bit(is64Bit) {
1135 }
1136
1137 std::optional<MCFixupKind> getFixupKind(StringRef Name) const override {
1139 .Case("dir32", FK_Data_4)
1140 .Case("secrel32", FK_SecRel_4)
1141 .Case("secidx", FK_SecRel_2)
1143 }
1144
1145 std::unique_ptr<MCObjectTargetWriter>
1146 createObjectTargetWriter() const override {
1147 return createX86WinCOFFObjectWriter(Is64Bit);
1148 }
1149};
1150
namespace CU {

  /// Compact unwind encoding values.
  enum CompactUnwindEncodings {
    /// [RE]BP based frame where [RE]BP is pushed on the stack immediately
    /// after the return address, then [RE]SP is moved to [RE]BP.
    UNWIND_MODE_BP_FRAME                   = 0x01000000,

    /// A frameless function with a small constant stack size.
    UNWIND_MODE_STACK_IMMD                 = 0x02000000,

    /// A frameless function with a large constant stack size.
    UNWIND_MODE_STACK_IND                  = 0x03000000,

    /// No compact unwind encoding is available.
    UNWIND_MODE_DWARF                      = 0x04000000,

    /// Mask for encoding the frame registers.
    UNWIND_BP_FRAME_REGISTERS              = 0x00007FFF,

    /// Mask for encoding the frameless registers.
    UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
  };

} // namespace CU
1176
1177class DarwinX86AsmBackend : public X86AsmBackend {
1178 const MCRegisterInfo &MRI;
1179
1180 /// Number of registers that can be saved in a compact unwind encoding.
1181 enum { CU_NUM_SAVED_REGS = 6 };
1182
1183 mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
1184 Triple TT;
1185 bool Is64Bit;
1186
1187 unsigned OffsetSize; ///< Offset of a "push" instruction.
1188 unsigned MoveInstrSize; ///< Size of a "move" instruction.
1189 unsigned StackDivide; ///< Amount to adjust stack size by.
1190protected:
1191 /// Size of a "push" instruction for the given register.
1192 unsigned PushInstrSize(unsigned Reg) const {
1193 switch (Reg) {
1194 case X86::EBX:
1195 case X86::ECX:
1196 case X86::EDX:
1197 case X86::EDI:
1198 case X86::ESI:
1199 case X86::EBP:
1200 case X86::RBX:
1201 case X86::RBP:
1202 return 1;
1203 case X86::R12:
1204 case X86::R13:
1205 case X86::R14:
1206 case X86::R15:
1207 return 2;
1208 }
1209 return 1;
1210 }
1211
1212private:
1213 /// Get the compact unwind number for a given register. The number
1214 /// corresponds to the enum lists in compact_unwind_encoding.h.
1215 int getCompactUnwindRegNum(unsigned Reg) const {
1216 static const MCPhysReg CU32BitRegs[7] = {
1217 X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
1218 };
1219 static const MCPhysReg CU64BitRegs[] = {
1220 X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
1221 };
1222 const MCPhysReg *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs;
1223 for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
1224 if (*CURegs == Reg)
1225 return Idx;
1226
1227 return -1;
1228 }
1229
1230 /// Return the registers encoded for a compact encoding with a frame
1231 /// pointer.
1232 uint32_t encodeCompactUnwindRegistersWithFrame() const {
1233 // Encode the registers in the order they were saved --- 3-bits per
1234 // register. The list of saved registers is assumed to be in reverse
1235 // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS.
1236 uint32_t RegEnc = 0;
1237 for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) {
1238 unsigned Reg = SavedRegs[i];
1239 if (Reg == 0) break;
1240
1241 int CURegNum = getCompactUnwindRegNum(Reg);
1242 if (CURegNum == -1) return ~0U;
1243
1244 // Encode the 3-bit register number in order, skipping over 3-bits for
1245 // each register.
1246 RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
1247 }
1248
1249 assert((RegEnc & 0x3FFFF) == RegEnc &&
1250 "Invalid compact register encoding!");
1251 return RegEnc;
1252 }
1253
1254 /// Create the permutation encoding used with frameless stacks. It is
1255 /// passed the number of registers to be saved and an array of the registers
1256 /// saved.
1257 uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
1258 // The saved registers are numbered from 1 to 6. In order to encode the
1259 // order in which they were saved, we re-number them according to their
1260 // place in the register order. The re-numbering is relative to the last
1261 // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
1262 // that order:
1263 //
1264 // Orig Re-Num
1265 // ---- ------
1266 // 6 6
1267 // 2 2
1268 // 4 3
1269 // 5 3
1270 //
1271 for (unsigned i = 0; i < RegCount; ++i) {
1272 int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
1273 if (CUReg == -1) return ~0U;
1274 SavedRegs[i] = CUReg;
1275 }
1276
1277 // Reverse the list.
1278 std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);
1279
1280 uint32_t RenumRegs[CU_NUM_SAVED_REGS];
1281 for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i){
1282 unsigned Countless = 0;
1283 for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
1284 if (SavedRegs[j] < SavedRegs[i])
1285 ++Countless;
1286
1287 RenumRegs[i] = SavedRegs[i] - Countless - 1;
1288 }
1289
1290 // Take the renumbered values and encode them into a 10-bit number.
1291 uint32_t permutationEncoding = 0;
1292 switch (RegCount) {
1293 case 6:
1294 permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
1295 + 6 * RenumRegs[2] + 2 * RenumRegs[3]
1296 + RenumRegs[4];
1297 break;
1298 case 5:
1299 permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
1300 + 6 * RenumRegs[3] + 2 * RenumRegs[4]
1301 + RenumRegs[5];
1302 break;
1303 case 4:
1304 permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
1305 + 3 * RenumRegs[4] + RenumRegs[5];
1306 break;
1307 case 3:
1308 permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
1309 + RenumRegs[5];
1310 break;
1311 case 2:
1312 permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
1313 break;
1314 case 1:
1315 permutationEncoding |= RenumRegs[5];
1316 break;
1317 }
1318
1319 assert((permutationEncoding & 0x3FF) == permutationEncoding &&
1320 "Invalid compact register encoding!");
1321 return permutationEncoding;
1322 }
1323
1324public:
1325 DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI,
1326 const MCSubtargetInfo &STI)
1327 : X86AsmBackend(T, STI), MRI(MRI), TT(STI.getTargetTriple()),
1328 Is64Bit(TT.isArch64Bit()) {
1329 memset(SavedRegs, 0, sizeof(SavedRegs));
1330 OffsetSize = Is64Bit ? 8 : 4;
1331 MoveInstrSize = Is64Bit ? 3 : 2;
1332 StackDivide = Is64Bit ? 8 : 4;
1333 }
1334
1335 std::unique_ptr<MCObjectTargetWriter>
1336 createObjectTargetWriter() const override {
1338 uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TT));
1339 return createX86MachObjectWriter(Is64Bit, CPUType, CPUSubType);
1340 }
1341
1342 /// Implementation of algorithm to generate the compact unwind encoding
1343 /// for the CFI instructions.
1344 uint32_t generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI,
1345 const MCContext *Ctxt) const override {
1347 if (Instrs.empty()) return 0;
1348 if (!isDarwinCanonicalPersonality(FI->Personality) &&
1350 return CU::UNWIND_MODE_DWARF;
1351
1352 // Reset the saved registers.
1353 unsigned SavedRegIdx = 0;
1354 memset(SavedRegs, 0, sizeof(SavedRegs));
1355
1356 bool HasFP = false;
1357
1358 // Encode that we are using EBP/RBP as the frame pointer.
1359 uint32_t CompactUnwindEncoding = 0;
1360
1361 unsigned SubtractInstrIdx = Is64Bit ? 3 : 2;
1362 unsigned InstrOffset = 0;
1363 unsigned StackAdjust = 0;
1364 unsigned StackSize = 0;
1365 int MinAbsOffset = std::numeric_limits<int>::max();
1366
1367 for (const MCCFIInstruction &Inst : Instrs) {
1368 switch (Inst.getOperation()) {
1369 default:
1370 // Any other CFI directives indicate a frame that we aren't prepared
1371 // to represent via compact unwind, so just bail out.
1372 return CU::UNWIND_MODE_DWARF;
1374 // Defines a frame pointer. E.g.
1375 //
1376 // movq %rsp, %rbp
1377 // L0:
1378 // .cfi_def_cfa_register %rbp
1379 //
1380 HasFP = true;
1381
1382 // If the frame pointer is other than esp/rsp, we do not have a way to
1383 // generate a compact unwinding representation, so bail out.
1384 if (*MRI.getLLVMRegNum(Inst.getRegister(), true) !=
1385 (Is64Bit ? X86::RBP : X86::EBP))
1386 return CU::UNWIND_MODE_DWARF;
1387
1388 // Reset the counts.
1389 memset(SavedRegs, 0, sizeof(SavedRegs));
1390 StackAdjust = 0;
1391 SavedRegIdx = 0;
1392 MinAbsOffset = std::numeric_limits<int>::max();
1393 InstrOffset += MoveInstrSize;
1394 break;
1395 }
1397 // Defines a new offset for the CFA. E.g.
1398 //
1399 // With frame:
1400 //
1401 // pushq %rbp
1402 // L0:
1403 // .cfi_def_cfa_offset 16
1404 //
1405 // Without frame:
1406 //
1407 // subq $72, %rsp
1408 // L0:
1409 // .cfi_def_cfa_offset 80
1410 //
1411 StackSize = Inst.getOffset() / StackDivide;
1412 break;
1413 }
1415 // Defines a "push" of a callee-saved register. E.g.
1416 //
1417 // pushq %r15
1418 // pushq %r14
1419 // pushq %rbx
1420 // L0:
1421 // subq $120, %rsp
1422 // L1:
1423 // .cfi_offset %rbx, -40
1424 // .cfi_offset %r14, -32
1425 // .cfi_offset %r15, -24
1426 //
1427 if (SavedRegIdx == CU_NUM_SAVED_REGS)
1428 // If there are too many saved registers, we cannot use a compact
1429 // unwind encoding.
1430 return CU::UNWIND_MODE_DWARF;
1431
1432 unsigned Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
1433 SavedRegs[SavedRegIdx++] = Reg;
1434 StackAdjust += OffsetSize;
1435 MinAbsOffset = std::min(MinAbsOffset, abs(Inst.getOffset()));
1436 InstrOffset += PushInstrSize(Reg);
1437 break;
1438 }
1439 }
1440 }
1441
1442 StackAdjust /= StackDivide;
1443
1444 if (HasFP) {
1445 if ((StackAdjust & 0xFF) != StackAdjust)
1446 // Offset was too big for a compact unwind encoding.
1447 return CU::UNWIND_MODE_DWARF;
1448
1449 // We don't attempt to track a real StackAdjust, so if the saved registers
1450 // aren't adjacent to rbp we can't cope.
1451 if (SavedRegIdx != 0 && MinAbsOffset != 3 * (int)OffsetSize)
1452 return CU::UNWIND_MODE_DWARF;
1453
1454 // Get the encoding of the saved registers when we have a frame pointer.
1455 uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame();
1456 if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;
1457
1458 CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
1459 CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
1460 CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
1461 } else {
1462 SubtractInstrIdx += InstrOffset;
1463 ++StackAdjust;
1464
1465 if ((StackSize & 0xFF) == StackSize) {
1466 // Frameless stack with a small stack size.
1467 CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;
1468
1469 // Encode the stack size.
1470 CompactUnwindEncoding |= (StackSize & 0xFF) << 16;
1471 } else {
1472 if ((StackAdjust & 0x7) != StackAdjust)
1473 // The extra stack adjustments are too big for us to handle.
1474 return CU::UNWIND_MODE_DWARF;
1475
1476 // Frameless stack with an offset too large for us to encode compactly.
1477 CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;
1478
1479 // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
1480 // instruction.
1481 CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;
1482
1483 // Encode any extra stack adjustments (done via push instructions).
1484 CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
1485 }
1486
1487 // Encode the number of registers saved. (Reverse the list first.)
1488 std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]);
1489 CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;
1490
1491 // Get the encoding of the saved registers when we don't have a frame
1492 // pointer.
1493 uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx);
1494 if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;
1495
1496 // Encode the register encoding.
1497 CompactUnwindEncoding |=
1498 RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
1499 }
1500
1501 return CompactUnwindEncoding;
1502 }
1503};
1504
1505} // end anonymous namespace
1506
1508 const MCSubtargetInfo &STI,
1509 const MCRegisterInfo &MRI,
1510 const MCTargetOptions &Options) {
1511 const Triple &TheTriple = STI.getTargetTriple();
1512 if (TheTriple.isOSBinFormatMachO())
1513 return new DarwinX86AsmBackend(T, MRI, STI);
1514
1515 if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
1516 return new WindowsX86AsmBackend(T, false, STI);
1517
1518 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
1519
1520 if (TheTriple.isOSIAMCU())
1521 return new ELFX86_IAMCUAsmBackend(T, OSABI, STI);
1522
1523 return new ELFX86_32AsmBackend(T, OSABI, STI);
1524}
1525
1527 const MCSubtargetInfo &STI,
1528 const MCRegisterInfo &MRI,
1529 const MCTargetOptions &Options) {
1530 const Triple &TheTriple = STI.getTargetTriple();
1531 if (TheTriple.isOSBinFormatMachO())
1532 return new DarwinX86AsmBackend(T, MRI, STI);
1533
1534 if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
1535 return new WindowsX86AsmBackend(T, true, STI);
1536
1537 if (TheTriple.isUEFI()) {
1538 assert(TheTriple.isOSBinFormatCOFF() &&
1539 "Only COFF format is supported in UEFI environment.");
1540 return new WindowsX86AsmBackend(T, true, STI);
1541 }
1542
1543 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
1544
1545 if (TheTriple.isX32())
1546 return new ELFX86_X32AsmBackend(T, OSABI, STI);
1547 return new ELFX86_64AsmBackend(T, OSABI, STI);
1548}
unsigned const MachineRegisterInfo * MRI
dxil DXContainer Global Emitter
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static RegisterPass< DebugifyFunctionPass > DF("debugify-function", "Attach debug info to a function")
std::string Name
uint64_t Size
IRTranslator LLVM IR MI
static LVOptions Options
Definition: LVOptions.cpp:25
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
PowerPC TLS Dynamic Call Fixup
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static MCInstrInfo * createMCInstrInfo()
static unsigned getRelaxedOpcodeBranch(unsigned Opcode, bool Is16BitMode=false)
static X86::SecondMacroFusionInstKind classifySecondInstInMacroFusion(const MCInst &MI, const MCInstrInfo &MCII)
static size_t getSizeForInstFragment(const MCFragment *F)
static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII)
Check if the instruction uses RIP relative addressing.
static bool isRightAfterData(MCFragment *CurrentFragment, const std::pair< MCFragment *, size_t > &PrevInstPosition)
Check if the instruction to be emitted is right after any data.
static bool mayHaveInterruptDelaySlot(unsigned InstOpcode)
X86 has certain instructions which enable interrupts exactly one instruction after the instruction wh...
static bool isFirstMacroFusibleInst(const MCInst &Inst, const MCInstrInfo &MCII)
Check if the instruction is valid as the first instruction in macro fusion.
static X86::CondCode getCondFromBranch(const MCInst &MI, const MCInstrInfo &MCII)
static unsigned getRelaxedOpcode(const MCInst &MI, bool Is16BitMode)
static unsigned getFixupKindSize(unsigned Kind)
static bool isRelaxableBranch(unsigned Opcode)
static bool isPrefix(unsigned Opcode, const MCInstrInfo &MCII)
Check if the instruction is a prefix.
static bool hasVariantSymbol(const MCInst &MI)
Check if the instruction has a variant symbol operand.
static bool is64Bit(const char *name)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
Generic interface to target specific assembler backends.
Definition: MCAsmBackend.h:43
virtual bool allowEnhancedRelaxation() const
Return true if this target allows an unrelaxable instruction to be emitted into RelaxableFragment and...
Definition: MCAsmBackend.h:63
virtual unsigned getMaximumNopSize(const MCSubtargetInfo &STI) const
Returns the maximum size of a nop in bytes on this target.
Definition: MCAsmBackend.h:216
virtual bool writeNopData(raw_ostream &OS, uint64_t Count, const MCSubtargetInfo *STI) const =0
Write an (optimal) nop sequence of Count bytes to the given output.
virtual void relaxInstruction(MCInst &Inst, const MCSubtargetInfo &STI) const
Relax the instruction in the given fragment to the next wider instruction.
Definition: MCAsmBackend.h:186
virtual bool mayNeedRelaxation(const MCInst &Inst, const MCSubtargetInfo &STI) const
Check whether the given instruction may need relaxation.
Definition: MCAsmBackend.h:163
virtual bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value, const MCRelaxableFragment *DF, const MCAsmLayout &Layout) const =0
Simple predicate for targets where !Resolved implies requiring relaxation.
virtual void finishLayout(MCAssembler const &Asm, MCAsmLayout &Layout) const
Give backend an opportunity to finish layout after relaxation.
Definition: MCAsmBackend.h:228
virtual void emitInstructionEnd(MCObjectStreamer &OS, const MCInst &Inst)
Definition: MCAsmBackend.h:70
virtual void emitInstructionBegin(MCObjectStreamer &OS, const MCInst &Inst, const MCSubtargetInfo &STI)
Give the target a chance to manipulate state related to instruction alignment (e.g.
Definition: MCAsmBackend.h:68
virtual bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup, const MCValue &Target, const MCSubtargetInfo *STI)
Hook to check if a relocation is needed for some target specific reason.
Definition: MCAsmBackend.h:102
virtual unsigned getNumFixupKinds() const =0
Get the number of target specific fixup kinds.
virtual const MCFixupKindInfo & getFixupKindInfo(MCFixupKind Kind) const
Get information on a fixup kind.
virtual std::optional< MCFixupKind > getFixupKind(StringRef Name) const
Map a relocation name used in .reloc to a fixup kind.
virtual void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, const MCValue &Target, MutableArrayRef< char > Data, uint64_t Value, bool IsResolved, const MCSubtargetInfo *STI) const =0
Apply the Value for given Fixup into the provided data fragment, at the offset specified by the fixup...
virtual bool allowAutoPadding() const
Return true if this target might automatically pad instructions and thus need to emit padding enable/...
Definition: MCAsmBackend.h:59
Encapsulates the layout of an assembly file at a particular point in time.
Definition: MCAsmLayout.h:28
void invalidateFragmentsFrom(MCFragment *F)
Invalidate the fragments starting with F because it has been resized.
Definition: MCFragment.cpp:43
llvm::SmallVectorImpl< MCSection * > & getSectionOrder()
Definition: MCAsmLayout.h:53
uint64_t getFragmentOffset(const MCFragment *F) const
Get the offset of the given fragment inside its containing section.
Represents required padding such that a particular other set of fragments does not cross a particular...
Definition: MCFragment.h:562
void setLastFragment(const MCFragment *F)
Definition: MCFragment.h:586
MCCodeEmitter - Generic instruction encoding interface.
Definition: MCCodeEmitter.h:21
Context object for machine code objects.
Definition: MCContext.h:83
bool emitCompactUnwindNonCanonical() const
Definition: MCContext.cpp:929
Fragment for data and encoded instructions.
Definition: MCFragment.h:230
SmallVectorImpl< char > & getContents()
Definition: MCFragment.h:187
SmallVectorImpl< MCFixup > & getFixups()
Definition: MCFragment.h:211
const MCSubtargetInfo * getSubtargetInfo() const
Retrieve the MCSubTargetInfo in effect when the instruction was encoded.
Definition: MCFragment.h:164
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:35
@ SymbolRef
References to labels and assigned expressions.
Definition: MCExpr.h:40
ExprKind getKind() const
Definition: MCExpr.h:81
Encode information on a single operation to perform on a byte sequence (e.g., an encoded instruction)...
Definition: MCFixup.h:71
MCFragment * getNext() const
Definition: MCFragment.h:88
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
void dump_pretty(raw_ostream &OS, const MCInstPrinter *Printer=nullptr, StringRef Separator=" ", const MCRegisterInfo *RegInfo=nullptr) const
Dump the MCInst as prettily as possible using the additional MC structures, if given.
Definition: MCInst.cpp:84
unsigned getOpcode() const
Definition: MCInst.h:198
void setOpcode(unsigned Op)
Definition: MCInst.h:197
const MCOperand & getOperand(unsigned i) const
Definition: MCInst.h:206
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Definition: MCInstrDesc.h:317
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:26
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
Streaming object file generation interface.
unsigned getReg() const
Returns the register number.
Definition: MCInst.h:69
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
A relaxable fragment holds on to its MCInst, since it may need to be relaxed during the assembler lay...
Definition: MCFragment.h:260
bool getAllowAutoPadding() const
Definition: MCFragment.h:276
const MCInst & getInst() const
Definition: MCFragment.h:273
void setInst(const MCInst &Value)
Definition: MCFragment.h:274
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Definition: MCSection.h:35
void ensureMinAlignment(Align MinAlignment)
Makes sure that Alignment is at least MinAlignment.
Definition: MCSection.h:157
bool isText() const
Definition: MCSection.h:138
iterator end() const
Definition: MCSection.h:193
iterator begin() const
Definition: MCSection.h:192
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const Triple & getTargetTriple() const
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
This represents an "assembler immediate".
Definition: MCValue.h:36
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Definition: ArrayRef.h:307
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:693
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
bool isX32() const
Tests whether the target is X32.
Definition: Triple.h:1037
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
Definition: Triple.h:732
OSType getOS() const
Get the parsed operating system type of this triple.
Definition: Triple.h:382
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:373
bool isOSBinFormatCOFF() const
Tests whether the OS uses the COFF binary format.
Definition: Triple.h:724
bool isUEFI() const
Tests whether the OS is UEFI.
Definition: Triple.h:619
bool isOSWindows() const
Tests whether the OS is Windows.
Definition: Triple.h:624
bool isOSIAMCU() const
Definition: Triple.h:598
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition: Triple.h:719
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM Value Representation.
Definition: Value.h:74
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition: DenseSet.h:97
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
raw_ostream & write(unsigned char C)
A raw_ostream that writes to an SmallVector or SmallString.
Definition: raw_ostream.h:691
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
CompactUnwindEncodings
Compact unwind encoding values.
@ EM_386
Definition: ELF.h:136
@ EM_X86_64
Definition: ELF.h:178
@ EM_IAMCU
Definition: ELF.h:139
Expected< uint32_t > getCPUSubType(const Triple &T)
Definition: MachO.cpp:95
Expected< uint32_t > getCPUType(const Triple &T)
Definition: MachO.cpp:77
Reg
All possible values of the reg field in the ModR/M byte.
bool isPrefix(uint64_t TSFlags)
Definition: X86BaseInfo.h:895
int getMemoryOperandNo(uint64_t TSFlags)
Definition: X86BaseInfo.h:1024
unsigned getOperandBias(const MCInstrDesc &Desc)
Compute whether all of the def operands are repeated in the uses and therefore should be skipped.
Definition: X86BaseInfo.h:981
@ RawFrmDstSrc
RawFrmDstSrc - This form is for instructions that use the source index register SI/ESI/RSI with a pos...
Definition: X86BaseInfo.h:534
@ RawFrmSrc
RawFrmSrc - This form is for instructions that use the source index register SI/ESI/RSI with a possib...
Definition: X86BaseInfo.h:527
@ RawFrmMemOffs
RawFrmMemOffs - This form is for instructions that store an absolute memory offset as an immediate wi...
Definition: X86BaseInfo.h:524
FirstMacroFusionInstKind classifyFirstOpcodeInMacroFusion(unsigned Opcode)
Definition: X86BaseInfo.h:126
AlignBranchBoundaryKind
Defines the possible values of the branch boundary alignment mask.
Definition: X86BaseInfo.h:325
@ AlignBranchJmp
Definition: X86BaseInfo.h:329
@ AlignBranchIndirect
Definition: X86BaseInfo.h:332
@ AlignBranchJcc
Definition: X86BaseInfo.h:328
@ AlignBranchCall
Definition: X86BaseInfo.h:330
@ AlignBranchRet
Definition: X86BaseInfo.h:331
@ AlignBranchNone
Definition: X86BaseInfo.h:326
@ AlignBranchFused
Definition: X86BaseInfo.h:327
@ AddrSegmentReg
Definition: X86BaseInfo.h:34
SecondMacroFusionInstKind
Definition: X86BaseInfo.h:116
FirstMacroFusionInstKind
Definition: X86BaseInfo.h:107
unsigned getOpcodeForLongImmediateForm(unsigned Opcode)
EncodingOfSegmentOverridePrefix getSegmentOverridePrefixForReg(unsigned Reg)
Given a segment register, return the encoding of the segment override prefix for it.
Definition: X86BaseInfo.h:348
bool isMacroFused(FirstMacroFusionInstKind FirstKind, SecondMacroFusionInstKind SecondKind)
Definition: X86BaseInfo.h:306
@ reloc_global_offset_table8
Definition: X86FixupKinds.h:31
@ reloc_signed_4byte_relax
Definition: X86FixupKinds.h:26
@ reloc_branch_4byte_pcrel
Definition: X86FixupKinds.h:32
@ NumTargetFixupKinds
Definition: X86FixupKinds.h:35
@ reloc_riprel_4byte_relax
Definition: X86FixupKinds.h:19
@ reloc_signed_4byte
Definition: X86FixupKinds.h:23
@ reloc_riprel_4byte_relax_rex
Definition: X86FixupKinds.h:21
@ reloc_global_offset_table
Definition: X86FixupKinds.h:28
@ reloc_riprel_4byte_movq_load
Definition: X86FixupKinds.h:18
@ reloc_riprel_4byte
Definition: X86FixupKinds.h:17
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:463
NodeAddr< CodeNode * > Code
Definition: RDFGraph.h:388
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
APFloat abs(APFloat X)
Returns the absolute value of the argument.
Definition: APFloat.h:1434
MCAsmBackend * createX86_64AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
std::unique_ptr< MCObjectTargetWriter > createX86WinCOFFObjectWriter(bool Is64Bit)
Construct an X86 Win COFF object writer.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
MCFixupKind
Extensible enumeration to represent the type of a fixup.
Definition: MCFixup.h:21
@ FirstTargetFixupKind
Definition: MCFixup.h:45
@ FK_PCRel_4
A four-byte pc relative fixup.
Definition: MCFixup.h:30
@ FK_PCRel_2
A two-byte pc relative fixup.
Definition: MCFixup.h:29
@ FK_SecRel_2
A two-byte section relative fixup.
Definition: MCFixup.h:41
@ FirstLiteralRelocationKind
The range [FirstLiteralRelocationKind, MaxTargetFixupKind) is used for relocations coming from .reloc directives.
Definition: MCFixup.h:50
@ FK_Data_8
An eight-byte fixup.
Definition: MCFixup.h:26
@ FK_Data_1
A one-byte fixup.
Definition: MCFixup.h:23
@ FK_Data_4
A four-byte fixup.
Definition: MCFixup.h:25
@ FK_SecRel_8
An eight-byte section relative fixup.
Definition: MCFixup.h:43
@ FK_PCRel_8
An eight-byte pc relative fixup.
Definition: MCFixup.h:31
@ FK_NONE
A no-op fixup.
Definition: MCFixup.h:22
@ FK_SecRel_4
A four-byte section relative fixup.
Definition: MCFixup.h:42
@ FK_PCRel_1
A one-byte pc relative fixup.
Definition: MCFixup.h:28
@ FK_SecRel_1
A one-byte section relative fixup.
Definition: MCFixup.h:40
@ FK_Data_2
A two-byte fixup.
Definition: MCFixup.h:24
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition: Error.h:756
std::unique_ptr< MCObjectTargetWriter > createX86MachObjectWriter(bool Is64Bit, uint32_t CPUType, uint32_t CPUSubtype)
Construct an X86 Mach-O object writer.
bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
Definition: MathExtras.h:244
std::unique_ptr< MCObjectTargetWriter > createX86ELFObjectWriter(bool IsELF64, uint8_t OSABI, uint16_t EMachine)
Construct an X86 ELF object writer.
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition: Alignment.h:111
endianness
Definition: bit.h:70
MCAsmBackend * createX86_32AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Description of the encoding of one expression Op.
const MCSymbol * Personality
Definition: MCDwarf.h:702
std::vector< MCCFIInstruction > Instructions
Definition: MCDwarf.h:704
Target independent information on a fixup kind.
@ FKF_IsPCRel
Is this fixup kind PCrelative? This is used by the assembler backend to evaluate fixup values in a ta...