// LLVM 19.0.0git — X86AsmBackend.cpp (extracted from the doxygen "documentation
// of this file" page; page navigation text converted to this comment).
1//===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
16#include "llvm/MC/MCAsmLayout.h"
17#include "llvm/MC/MCAssembler.h"
19#include "llvm/MC/MCContext.h"
20#include "llvm/MC/MCDwarf.h"
23#include "llvm/MC/MCExpr.h"
25#include "llvm/MC/MCInst.h"
26#include "llvm/MC/MCInstrInfo.h"
33#include "llvm/MC/MCValue.h"
38
39using namespace llvm;
40
41namespace {
42/// A wrapper for holding a mask of the values from X86::AlignBranchBoundaryKind
43class X86AlignBranchKind {
44private:
45 uint8_t AlignBranchKind = 0;
46
47public:
48 void operator=(const std::string &Val) {
49 if (Val.empty())
50 return;
51 SmallVector<StringRef, 6> BranchTypes;
52 StringRef(Val).split(BranchTypes, '+', -1, false);
53 for (auto BranchType : BranchTypes) {
54 if (BranchType == "fused")
55 addKind(X86::AlignBranchFused);
56 else if (BranchType == "jcc")
57 addKind(X86::AlignBranchJcc);
58 else if (BranchType == "jmp")
59 addKind(X86::AlignBranchJmp);
60 else if (BranchType == "call")
61 addKind(X86::AlignBranchCall);
62 else if (BranchType == "ret")
63 addKind(X86::AlignBranchRet);
64 else if (BranchType == "indirect")
66 else {
67 errs() << "invalid argument " << BranchType.str()
68 << " to -x86-align-branch=; each element must be one of: fused, "
69 "jcc, jmp, call, ret, indirect.(plus separated)\n";
70 }
71 }
72 }
73
74 operator uint8_t() const { return AlignBranchKind; }
75 void addKind(X86::AlignBranchBoundaryKind Value) { AlignBranchKind |= Value; }
76};
77
78X86AlignBranchKind X86AlignBranchKindLoc;
79
80cl::opt<unsigned> X86AlignBranchBoundary(
81 "x86-align-branch-boundary", cl::init(0),
83 "Control how the assembler should align branches with NOP. If the "
84 "boundary's size is not 0, it should be a power of 2 and no less "
85 "than 32. Branches will be aligned to prevent from being across or "
86 "against the boundary of specified size. The default value 0 does not "
87 "align branches."));
88
90 "x86-align-branch",
92 "Specify types of branches to align (plus separated list of types):"
93 "\njcc indicates conditional jumps"
94 "\nfused indicates fused conditional jumps"
95 "\njmp indicates direct unconditional jumps"
96 "\ncall indicates direct and indirect calls"
97 "\nret indicates rets"
98 "\nindirect indicates indirect unconditional jumps"),
99 cl::location(X86AlignBranchKindLoc));
100
101cl::opt<bool> X86AlignBranchWithin32BBoundaries(
102 "x86-branches-within-32B-boundaries", cl::init(false),
103 cl::desc(
104 "Align selected instructions to mitigate negative performance impact "
105 "of Intel's micro code update for errata skx102. May break "
106 "assumptions about labels corresponding to particular instructions, "
107 "and should be used with caution."));
108
109cl::opt<unsigned> X86PadMaxPrefixSize(
110 "x86-pad-max-prefix-size", cl::init(0),
111 cl::desc("Maximum number of prefixes to use for padding"));
112
113cl::opt<bool> X86PadForAlign(
114 "x86-pad-for-align", cl::init(false), cl::Hidden,
115 cl::desc("Pad previous instructions to implement align directives"));
116
117cl::opt<bool> X86PadForBranchAlign(
118 "x86-pad-for-branch-align", cl::init(true), cl::Hidden,
119 cl::desc("Pad previous instructions to implement branch alignment"));
120
121class X86AsmBackend : public MCAsmBackend {
122 const MCSubtargetInfo &STI;
123 std::unique_ptr<const MCInstrInfo> MCII;
124 X86AlignBranchKind AlignBranchType;
125 Align AlignBoundary;
126 unsigned TargetPrefixMax = 0;
127
128 MCInst PrevInst;
129 unsigned PrevInstOpcode = 0;
130 MCBoundaryAlignFragment *PendingBA = nullptr;
131 std::pair<MCFragment *, size_t> PrevInstPosition;
132 bool IsRightAfterData = false;
133
134 uint8_t determinePaddingPrefix(const MCInst &Inst) const;
135 bool isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const;
136 bool needAlign(const MCInst &Inst) const;
137 bool canPadBranches(MCObjectStreamer &OS) const;
138 bool canPadInst(const MCInst &Inst, MCObjectStreamer &OS) const;
139
140public:
141 X86AsmBackend(const Target &T, const MCSubtargetInfo &STI)
142 : MCAsmBackend(llvm::endianness::little), STI(STI),
143 MCII(T.createMCInstrInfo()) {
144 if (X86AlignBranchWithin32BBoundaries) {
145 // At the moment, this defaults to aligning fused branches, unconditional
146 // jumps, and (unfused) conditional jumps with nops. Both the
147 // instructions aligned and the alignment method (nop vs prefix) may
148 // change in the future.
149 AlignBoundary = assumeAligned(32);
150 AlignBranchType.addKind(X86::AlignBranchFused);
151 AlignBranchType.addKind(X86::AlignBranchJcc);
152 AlignBranchType.addKind(X86::AlignBranchJmp);
153 }
154 // Allow overriding defaults set by main flag
155 if (X86AlignBranchBoundary.getNumOccurrences())
156 AlignBoundary = assumeAligned(X86AlignBranchBoundary);
157 if (X86AlignBranch.getNumOccurrences())
158 AlignBranchType = X86AlignBranchKindLoc;
159 if (X86PadMaxPrefixSize.getNumOccurrences())
160 TargetPrefixMax = X86PadMaxPrefixSize;
161 }
162
163 bool allowAutoPadding() const override;
164 bool allowEnhancedRelaxation() const override;
165 void emitInstructionBegin(MCObjectStreamer &OS, const MCInst &Inst,
166 const MCSubtargetInfo &STI);
167 void emitInstructionEnd(MCObjectStreamer &OS, const MCInst &Inst);
168
169 unsigned getNumFixupKinds() const override {
171 }
172
173 std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;
174
175 const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
176
177 bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
178 const MCValue &Target,
179 const MCSubtargetInfo *STI) override;
180
181 void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
183 uint64_t Value, bool IsResolved,
184 const MCSubtargetInfo *STI) const override;
185
186 bool mayNeedRelaxation(const MCInst &Inst,
187 const MCSubtargetInfo &STI) const override;
188
190 const MCRelaxableFragment *DF,
191 const MCAsmLayout &Layout) const override;
192
193 void relaxInstruction(MCInst &Inst,
194 const MCSubtargetInfo &STI) const override;
195
196 bool padInstructionViaRelaxation(MCRelaxableFragment &RF,
198 unsigned &RemainingSize) const;
199
200 bool padInstructionViaPrefix(MCRelaxableFragment &RF, MCCodeEmitter &Emitter,
201 unsigned &RemainingSize) const;
202
203 bool padInstructionEncoding(MCRelaxableFragment &RF, MCCodeEmitter &Emitter,
204 unsigned &RemainingSize) const;
205
206 void finishLayout(MCAssembler const &Asm, MCAsmLayout &Layout) const override;
207
208 unsigned getMaximumNopSize(const MCSubtargetInfo &STI) const override;
209
210 bool writeNopData(raw_ostream &OS, uint64_t Count,
211 const MCSubtargetInfo *STI) const override;
212};
213} // end anonymous namespace
214
215static bool isRelaxableBranch(unsigned Opcode) {
216 return Opcode == X86::JCC_1 || Opcode == X86::JMP_1;
217}
218
219static unsigned getRelaxedOpcodeBranch(unsigned Opcode,
220 bool Is16BitMode = false) {
221 switch (Opcode) {
222 default:
223 llvm_unreachable("invalid opcode for branch");
224 case X86::JCC_1:
225 return (Is16BitMode) ? X86::JCC_2 : X86::JCC_4;
226 case X86::JMP_1:
227 return (Is16BitMode) ? X86::JMP_2 : X86::JMP_4;
228 }
229}
230
231static unsigned getRelaxedOpcode(const MCInst &MI, bool Is16BitMode) {
232 unsigned Opcode = MI.getOpcode();
233 return isRelaxableBranch(Opcode) ? getRelaxedOpcodeBranch(Opcode, Is16BitMode)
235}
236
238 const MCInstrInfo &MCII) {
239 unsigned Opcode = MI.getOpcode();
240 switch (Opcode) {
241 default:
242 return X86::COND_INVALID;
243 case X86::JCC_1: {
244 const MCInstrDesc &Desc = MCII.get(Opcode);
245 return static_cast<X86::CondCode>(
246 MI.getOperand(Desc.getNumOperands() - 1).getImm());
247 }
248 }
249}
250
254 return classifySecondCondCodeInMacroFusion(CC);
255}
256
257/// Check if the instruction uses RIP relative addressing.
258static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII) {
259 unsigned Opcode = MI.getOpcode();
260 const MCInstrDesc &Desc = MCII.get(Opcode);
261 uint64_t TSFlags = Desc.TSFlags;
262 unsigned CurOp = X86II::getOperandBias(Desc);
263 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
264 if (MemoryOperand < 0)
265 return false;
266 unsigned BaseRegNum = MemoryOperand + CurOp + X86::AddrBaseReg;
267 unsigned BaseReg = MI.getOperand(BaseRegNum).getReg();
268 return (BaseReg == X86::RIP);
269}
270
271/// Check if the instruction is a prefix.
272static bool isPrefix(unsigned Opcode, const MCInstrInfo &MCII) {
273 return X86II::isPrefix(MCII.get(Opcode).TSFlags);
274}
275
276/// Check if the instruction is valid as the first instruction in macro fusion.
277static bool isFirstMacroFusibleInst(const MCInst &Inst,
278 const MCInstrInfo &MCII) {
279 // An Intel instruction with RIP relative addressing is not macro fusible.
280 if (isRIPRelative(Inst, MCII))
281 return false;
284 return FIK != X86::FirstMacroFusionInstKind::Invalid;
285}
286
287/// X86 can reduce the bytes of NOP by padding instructions with prefixes to
288/// get a better peformance in some cases. Here, we determine which prefix is
289/// the most suitable.
290///
291/// If the instruction has a segment override prefix, use the existing one.
292/// If the target is 64-bit, use the CS.
293/// If the target is 32-bit,
294/// - If the instruction has a ESP/EBP base register, use SS.
295/// - Otherwise use DS.
296uint8_t X86AsmBackend::determinePaddingPrefix(const MCInst &Inst) const {
297 assert((STI.hasFeature(X86::Is32Bit) || STI.hasFeature(X86::Is64Bit)) &&
298 "Prefixes can be added only in 32-bit or 64-bit mode.");
299 const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
300 uint64_t TSFlags = Desc.TSFlags;
301
302 // Determine where the memory operand starts, if present.
303 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
304 if (MemoryOperand != -1)
305 MemoryOperand += X86II::getOperandBias(Desc);
306
307 unsigned SegmentReg = 0;
308 if (MemoryOperand >= 0) {
309 // Check for explicit segment override on memory operand.
310 SegmentReg = Inst.getOperand(MemoryOperand + X86::AddrSegmentReg).getReg();
311 }
312
313 switch (TSFlags & X86II::FormMask) {
314 default:
315 break;
316 case X86II::RawFrmDstSrc: {
317 // Check segment override opcode prefix as needed (not for %ds).
318 if (Inst.getOperand(2).getReg() != X86::DS)
319 SegmentReg = Inst.getOperand(2).getReg();
320 break;
321 }
322 case X86II::RawFrmSrc: {
323 // Check segment override opcode prefix as needed (not for %ds).
324 if (Inst.getOperand(1).getReg() != X86::DS)
325 SegmentReg = Inst.getOperand(1).getReg();
326 break;
327 }
329 // Check segment override opcode prefix as needed.
330 SegmentReg = Inst.getOperand(1).getReg();
331 break;
332 }
333 }
334
335 if (SegmentReg != 0)
336 return X86::getSegmentOverridePrefixForReg(SegmentReg);
337
338 if (STI.hasFeature(X86::Is64Bit))
339 return X86::CS_Encoding;
340
341 if (MemoryOperand >= 0) {
342 unsigned BaseRegNum = MemoryOperand + X86::AddrBaseReg;
343 unsigned BaseReg = Inst.getOperand(BaseRegNum).getReg();
344 if (BaseReg == X86::ESP || BaseReg == X86::EBP)
345 return X86::SS_Encoding;
346 }
347 return X86::DS_Encoding;
348}
349
350/// Check if the two instructions will be macro-fused on the target cpu.
351bool X86AsmBackend::isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const {
352 const MCInstrDesc &InstDesc = MCII->get(Jcc.getOpcode());
353 if (!InstDesc.isConditionalBranch())
354 return false;
355 if (!isFirstMacroFusibleInst(Cmp, *MCII))
356 return false;
357 const X86::FirstMacroFusionInstKind CmpKind =
359 const X86::SecondMacroFusionInstKind BranchKind =
361 return X86::isMacroFused(CmpKind, BranchKind);
362}
363
364/// Check if the instruction has a variant symbol operand.
365static bool hasVariantSymbol(const MCInst &MI) {
366 for (auto &Operand : MI) {
367 if (!Operand.isExpr())
368 continue;
369 const MCExpr &Expr = *Operand.getExpr();
370 if (Expr.getKind() == MCExpr::SymbolRef &&
371 cast<MCSymbolRefExpr>(Expr).getKind() != MCSymbolRefExpr::VK_None)
372 return true;
373 }
374 return false;
375}
376
377bool X86AsmBackend::allowAutoPadding() const {
378 return (AlignBoundary != Align(1) && AlignBranchType != X86::AlignBranchNone);
379}
380
381bool X86AsmBackend::allowEnhancedRelaxation() const {
382 return allowAutoPadding() && TargetPrefixMax != 0 && X86PadForBranchAlign;
383}
384
385/// X86 has certain instructions which enable interrupts exactly one
386/// instruction *after* the instruction which stores to SS. Return true if the
387/// given instruction may have such an interrupt delay slot.
388static bool mayHaveInterruptDelaySlot(unsigned InstOpcode) {
389 switch (InstOpcode) {
390 case X86::POPSS16:
391 case X86::POPSS32:
392 case X86::STI:
393 return true;
394
395 case X86::MOV16sr:
396 case X86::MOV32sr:
397 case X86::MOV64sr:
398 case X86::MOV16sm:
399 // In fact, this is only the case if the first operand is SS. However, as
400 // segment moves occur extremely rarely, this is just a minor pessimization.
401 return true;
402 }
403 return false;
404}
405
406/// Check if the instruction to be emitted is right after any data.
407static bool
409 const std::pair<MCFragment *, size_t> &PrevInstPosition) {
410 MCFragment *F = CurrentFragment;
411 // Since data is always emitted into a DataFragment, our check strategy is
412 // simple here.
413 // - If the fragment is a DataFragment
414 // - If it's empty (section start or data after align), return false.
415 // - If it's not the fragment where the previous instruction is,
416 // returns true.
417 // - If it's the fragment holding the previous instruction but its
418 // size changed since the previous instruction was emitted into
419 // it, returns true.
420 // - Otherwise returns false.
421 // - If the fragment is not a DataFragment, returns false.
422 if (auto *DF = dyn_cast_or_null<MCDataFragment>(F))
423 return DF->getContents().size() &&
424 (DF != PrevInstPosition.first ||
425 DF->getContents().size() != PrevInstPosition.second);
426
427 return false;
428}
429
430/// \returns the fragment size if it has instructions, otherwise returns 0.
431static size_t getSizeForInstFragment(const MCFragment *F) {
432 if (!F || !F->hasInstructions())
433 return 0;
434 // MCEncodedFragmentWithContents being templated makes this tricky.
435 switch (F->getKind()) {
436 default:
437 llvm_unreachable("Unknown fragment with instructions!");
439 return cast<MCDataFragment>(*F).getContents().size();
441 return cast<MCRelaxableFragment>(*F).getContents().size();
443 return cast<MCCompactEncodedInstFragment>(*F).getContents().size();
444 }
445}
446
447/// Return true if we can insert NOP or prefixes automatically before the
448/// the instruction to be emitted.
449bool X86AsmBackend::canPadInst(const MCInst &Inst, MCObjectStreamer &OS) const {
450 if (hasVariantSymbol(Inst))
451 // Linker may rewrite the instruction with variant symbol operand(e.g.
452 // TLSCALL).
453 return false;
454
455 if (mayHaveInterruptDelaySlot(PrevInstOpcode))
456 // If this instruction follows an interrupt enabling instruction with a one
457 // instruction delay, inserting a nop would change behavior.
458 return false;
459
460 if (isPrefix(PrevInstOpcode, *MCII))
461 // If this instruction follows a prefix, inserting a nop/prefix would change
462 // semantic.
463 return false;
464
465 if (isPrefix(Inst.getOpcode(), *MCII))
466 // If this instruction is a prefix, inserting a prefix would change
467 // semantic.
468 return false;
469
470 if (IsRightAfterData)
471 // If this instruction follows any data, there is no clear
472 // instruction boundary, inserting a nop/prefix would change semantic.
473 return false;
474
475 return true;
476}
477
478bool X86AsmBackend::canPadBranches(MCObjectStreamer &OS) const {
479 if (!OS.getAllowAutoPadding())
480 return false;
481 assert(allowAutoPadding() && "incorrect initialization!");
482
483 // We only pad in text section.
484 if (!OS.getCurrentSectionOnly()->isText())
485 return false;
486
487 // To be Done: Currently don't deal with Bundle cases.
488 if (OS.getAssembler().isBundlingEnabled())
489 return false;
490
491 // Branches only need to be aligned in 32-bit or 64-bit mode.
492 if (!(STI.hasFeature(X86::Is64Bit) || STI.hasFeature(X86::Is32Bit)))
493 return false;
494
495 return true;
496}
497
498/// Check if the instruction operand needs to be aligned.
499bool X86AsmBackend::needAlign(const MCInst &Inst) const {
500 const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
501 return (Desc.isConditionalBranch() &&
502 (AlignBranchType & X86::AlignBranchJcc)) ||
503 (Desc.isUnconditionalBranch() &&
504 (AlignBranchType & X86::AlignBranchJmp)) ||
505 (Desc.isCall() && (AlignBranchType & X86::AlignBranchCall)) ||
506 (Desc.isReturn() && (AlignBranchType & X86::AlignBranchRet)) ||
507 (Desc.isIndirectBranch() &&
508 (AlignBranchType & X86::AlignBranchIndirect));
509}
510
/// Insert BoundaryAlignFragment before instructions to align branches.
void X86AsmBackend::emitInstructionBegin(MCObjectStreamer &OS,
                                         const MCInst &Inst, const MCSubtargetInfo &STI) {
  // Used by canPadInst. Done here, because in emitInstructionEnd, the current
  // fragment will have changed.
  IsRightAfterData =
      isRightAfterData(OS.getCurrentFragment(), PrevInstPosition);

  if (!canPadBranches(OS))
    return;

  // NB: PrevInst only valid if canPadBranches is true.
  if (!isMacroFused(PrevInst, Inst))
    // Macro fusion doesn't happen indeed, clear the pending.
    PendingBA = nullptr;

  // When branch padding is enabled (basically the skx102 erratum => unlikely),
  // we call canPadInst (not cheap) twice. However, in the common case, we can
  // avoid unnecessary calls to that, as this is otherwise only used for
  // relaxable fragments.
  if (!canPadInst(Inst, OS))
    return;

  if (PendingBA && PendingBA->getNext() == OS.getCurrentFragment()) {
    // Macro fusion actually happens and there is no other fragment inserted
    // after the previous instruction.
    //
    // Do nothing here since we already inserted a BoudaryAlign fragment when
    // we met the first instruction in the fused pair and we'll tie them
    // together in emitInstructionEnd.
    //
    // Note: When there is at least one fragment, such as MCAlignFragment,
    // inserted after the previous instruction, e.g.
    //
    // \code
    //   cmp %rax %rcx
    //   .align 16
    //   je .Label0
    // \ endcode
    //
    // We will treat the JCC as a unfused branch although it may be fused
    // with the CMP.
    return;
  }

  if (needAlign(Inst) || ((AlignBranchType & X86::AlignBranchFused) &&
                          isFirstMacroFusibleInst(Inst, *MCII))) {
    // If we meet a unfused branch or the first instuction in a fusiable pair,
    // insert a BoundaryAlign fragment.
    // The fragment's final size is fixed up later, in emitInstructionEnd /
    // finishLayout, once the span of instructions it protects is known.
    PendingBA = OS.getContext().allocFragment<MCBoundaryAlignFragment>(
        AlignBoundary, STI);
    OS.insert(PendingBA);
  }
}
565
/// Set the last fragment to be aligned for the BoundaryAlignFragment.
void X86AsmBackend::emitInstructionEnd(MCObjectStreamer &OS,
                                       const MCInst &Inst) {
  MCFragment *CF = OS.getCurrentFragment();
  if (auto *F = dyn_cast_or_null<MCRelaxableFragment>(CF))
    F->setAllowAutoPadding(canPadInst(Inst, OS));

  // Update PrevInstOpcode here, canPadInst() reads that.
  // NOTE: order matters — canPadInst above must see the *previous*
  // instruction's opcode, so these updates happen only after that call.
  PrevInstOpcode = Inst.getOpcode();
  PrevInstPosition = std::make_pair(CF, getSizeForInstFragment(CF));

  if (!canPadBranches(OS))
    return;

  // PrevInst is only needed if canPadBranches. Copying an MCInst isn't cheap.
  PrevInst = Inst;

  if (!needAlign(Inst) || !PendingBA)
    return;

  // Tie the aligned instructions into a pending BoundaryAlign.
  PendingBA->setLastFragment(CF);
  PendingBA = nullptr;

  // We need to ensure that further data isn't added to the current
  // DataFragment, so that we can get the size of instructions later in
  // MCAssembler::relaxBoundaryAlign. The easiest way is to insert a new empty
  // DataFragment.
  if (isa_and_nonnull<MCDataFragment>(CF))
    OS.insert(OS.getContext().allocFragment<MCDataFragment>());

  // Update the maximum alignment on the current section if necessary.
  MCSection *Sec = OS.getCurrentSectionOnly();
  Sec->ensureMinAlignment(AlignBoundary);
}
601
602std::optional<MCFixupKind> X86AsmBackend::getFixupKind(StringRef Name) const {
603 if (STI.getTargetTriple().isOSBinFormatELF()) {
604 unsigned Type;
605 if (STI.getTargetTriple().getArch() == Triple::x86_64) {
607#define ELF_RELOC(X, Y) .Case(#X, Y)
608#include "llvm/BinaryFormat/ELFRelocs/x86_64.def"
609#undef ELF_RELOC
610 .Case("BFD_RELOC_NONE", ELF::R_X86_64_NONE)
611 .Case("BFD_RELOC_8", ELF::R_X86_64_8)
612 .Case("BFD_RELOC_16", ELF::R_X86_64_16)
613 .Case("BFD_RELOC_32", ELF::R_X86_64_32)
614 .Case("BFD_RELOC_64", ELF::R_X86_64_64)
615 .Default(-1u);
616 } else {
618#define ELF_RELOC(X, Y) .Case(#X, Y)
619#include "llvm/BinaryFormat/ELFRelocs/i386.def"
620#undef ELF_RELOC
621 .Case("BFD_RELOC_NONE", ELF::R_386_NONE)
622 .Case("BFD_RELOC_8", ELF::R_386_8)
623 .Case("BFD_RELOC_16", ELF::R_386_16)
624 .Case("BFD_RELOC_32", ELF::R_386_32)
625 .Default(-1u);
626 }
627 if (Type == -1u)
628 return std::nullopt;
629 return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
630 }
632}
633
634const MCFixupKindInfo &X86AsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
635 const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
636 {"reloc_riprel_4byte", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
637 {"reloc_riprel_4byte_movq_load", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
638 {"reloc_riprel_4byte_relax", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
639 {"reloc_riprel_4byte_relax_rex", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
640 {"reloc_signed_4byte", 0, 32, 0},
641 {"reloc_signed_4byte_relax", 0, 32, 0},
642 {"reloc_global_offset_table", 0, 32, 0},
643 {"reloc_global_offset_table8", 0, 64, 0},
644 {"reloc_branch_4byte_pcrel", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
645 };
646
647 // Fixup kinds from .reloc directive are like R_386_NONE/R_X86_64_NONE. They
648 // do not require any extra processing.
649 if (Kind >= FirstLiteralRelocationKind)
651
652 if (Kind < FirstTargetFixupKind)
654
655 assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
656 "Invalid kind!");
657 assert(Infos[Kind - FirstTargetFixupKind].Name && "Empty fixup name!");
658 return Infos[Kind - FirstTargetFixupKind];
659}
660
661bool X86AsmBackend::shouldForceRelocation(const MCAssembler &,
662 const MCFixup &Fixup, const MCValue &,
663 const MCSubtargetInfo *STI) {
664 return Fixup.getKind() >= FirstLiteralRelocationKind;
665}
666
667static unsigned getFixupKindSize(unsigned Kind) {
668 switch (Kind) {
669 default:
670 llvm_unreachable("invalid fixup kind!");
671 case FK_NONE:
672 return 0;
673 case FK_PCRel_1:
674 case FK_SecRel_1:
675 case FK_Data_1:
676 return 1;
677 case FK_PCRel_2:
678 case FK_SecRel_2:
679 case FK_Data_2:
680 return 2;
681 case FK_PCRel_4:
690 case FK_SecRel_4:
691 case FK_Data_4:
692 return 4;
693 case FK_PCRel_8:
694 case FK_SecRel_8:
695 case FK_Data_8:
697 return 8;
698 }
699}
700
701void X86AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
702 const MCValue &Target,
704 uint64_t Value, bool IsResolved,
705 const MCSubtargetInfo *STI) const {
706 unsigned Kind = Fixup.getKind();
707 if (Kind >= FirstLiteralRelocationKind)
708 return;
709 unsigned Size = getFixupKindSize(Kind);
710
711 assert(Fixup.getOffset() + Size <= Data.size() && "Invalid fixup offset!");
712
713 int64_t SignedValue = static_cast<int64_t>(Value);
714 if ((Target.isAbsolute() || IsResolved) &&
715 getFixupKindInfo(Fixup.getKind()).Flags &
717 // check that PC relative fixup fits into the fixup size.
718 if (Size > 0 && !isIntN(Size * 8, SignedValue))
719 Asm.getContext().reportError(
720 Fixup.getLoc(), "value of " + Twine(SignedValue) +
721 " is too large for field of " + Twine(Size) +
722 ((Size == 1) ? " byte." : " bytes."));
723 } else {
724 // Check that uppper bits are either all zeros or all ones.
725 // Specifically ignore overflow/underflow as long as the leakage is
726 // limited to the lower bits. This is to remain compatible with
727 // other assemblers.
728 assert((Size == 0 || isIntN(Size * 8 + 1, SignedValue)) &&
729 "Value does not fit in the Fixup field");
730 }
731
732 for (unsigned i = 0; i != Size; ++i)
733 Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
734}
735
736bool X86AsmBackend::mayNeedRelaxation(const MCInst &MI,
737 const MCSubtargetInfo &STI) const {
738 unsigned Opcode = MI.getOpcode();
739 unsigned SkipOperands = X86::isCCMPCC(Opcode) ? 2 : 0;
740 return isRelaxableBranch(Opcode) ||
741 (X86::getOpcodeForLongImmediateForm(Opcode) != Opcode &&
742 MI.getOperand(MI.getNumOperands() - 1 - SkipOperands).isExpr());
743}
744
745bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
747 const MCRelaxableFragment *DF,
748 const MCAsmLayout &Layout) const {
749 // Relax if the value is too big for a (signed) i8.
750 return !isInt<8>(Value);
751}
752
753// FIXME: Can tblgen help at all here to verify there aren't other instructions
754// we can relax?
755void X86AsmBackend::relaxInstruction(MCInst &Inst,
756 const MCSubtargetInfo &STI) const {
757 // The only relaxations X86 does is from a 1byte pcrel to a 4byte pcrel.
758 bool Is16BitMode = STI.hasFeature(X86::Is16Bit);
759 unsigned RelaxedOp = getRelaxedOpcode(Inst, Is16BitMode);
760
761 if (RelaxedOp == Inst.getOpcode()) {
764 Inst.dump_pretty(OS);
765 OS << "\n";
766 report_fatal_error("unexpected instruction to relax: " + OS.str());
767 }
768
769 Inst.setOpcode(RelaxedOp);
770}
771
772bool X86AsmBackend::padInstructionViaPrefix(MCRelaxableFragment &RF,
774 unsigned &RemainingSize) const {
775 if (!RF.getAllowAutoPadding())
776 return false;
777 // If the instruction isn't fully relaxed, shifting it around might require a
778 // larger value for one of the fixups then can be encoded. The outer loop
779 // will also catch this before moving to the next instruction, but we need to
780 // prevent padding this single instruction as well.
781 if (mayNeedRelaxation(RF.getInst(), *RF.getSubtargetInfo()))
782 return false;
783
784 const unsigned OldSize = RF.getContents().size();
785 if (OldSize == 15)
786 return false;
787
788 const unsigned MaxPossiblePad = std::min(15 - OldSize, RemainingSize);
789 const unsigned RemainingPrefixSize = [&]() -> unsigned {
791 X86_MC::emitPrefix(Emitter, RF.getInst(), Code, STI);
792 assert(Code.size() < 15 && "The number of prefixes must be less than 15.");
793
794 // TODO: It turns out we need a decent amount of plumbing for the target
795 // specific bits to determine number of prefixes its safe to add. Various
796 // targets (older chips mostly, but also Atom family) encounter decoder
797 // stalls with too many prefixes. For testing purposes, we set the value
798 // externally for the moment.
799 unsigned ExistingPrefixSize = Code.size();
800 if (TargetPrefixMax <= ExistingPrefixSize)
801 return 0;
802 return TargetPrefixMax - ExistingPrefixSize;
803 }();
804 const unsigned PrefixBytesToAdd =
805 std::min(MaxPossiblePad, RemainingPrefixSize);
806 if (PrefixBytesToAdd == 0)
807 return false;
808
809 const uint8_t Prefix = determinePaddingPrefix(RF.getInst());
810
812 Code.append(PrefixBytesToAdd, Prefix);
813 Code.append(RF.getContents().begin(), RF.getContents().end());
814 RF.getContents() = Code;
815
816 // Adjust the fixups for the change in offsets
817 for (auto &F : RF.getFixups()) {
818 F.setOffset(F.getOffset() + PrefixBytesToAdd);
819 }
820
821 RemainingSize -= PrefixBytesToAdd;
822 return true;
823}
824
825bool X86AsmBackend::padInstructionViaRelaxation(MCRelaxableFragment &RF,
827 unsigned &RemainingSize) const {
828 if (!mayNeedRelaxation(RF.getInst(), *RF.getSubtargetInfo()))
829 // TODO: There are lots of other tricks we could apply for increasing
830 // encoding size without impacting performance.
831 return false;
832
833 MCInst Relaxed = RF.getInst();
834 relaxInstruction(Relaxed, *RF.getSubtargetInfo());
835
838 Emitter.encodeInstruction(Relaxed, Code, Fixups, *RF.getSubtargetInfo());
839 const unsigned OldSize = RF.getContents().size();
840 const unsigned NewSize = Code.size();
841 assert(NewSize >= OldSize && "size decrease during relaxation?");
842 unsigned Delta = NewSize - OldSize;
843 if (Delta > RemainingSize)
844 return false;
845 RF.setInst(Relaxed);
846 RF.getContents() = Code;
847 RF.getFixups() = Fixups;
848 RemainingSize -= Delta;
849 return true;
850}
851
852bool X86AsmBackend::padInstructionEncoding(MCRelaxableFragment &RF,
854 unsigned &RemainingSize) const {
855 bool Changed = false;
856 if (RemainingSize != 0)
857 Changed |= padInstructionViaRelaxation(RF, Emitter, RemainingSize);
858 if (RemainingSize != 0)
859 Changed |= padInstructionViaPrefix(RF, Emitter, RemainingSize);
860 return Changed;
861}
862
863void X86AsmBackend::finishLayout(MCAssembler const &Asm,
864 MCAsmLayout &Layout) const {
865 // See if we can further relax some instructions to cut down on the number of
866 // nop bytes required for code alignment. The actual win is in reducing
867 // instruction count, not number of bytes. Modern X86-64 can easily end up
868 // decode limited. It is often better to reduce the number of instructions
869 // (i.e. eliminate nops) even at the cost of increasing the size and
870 // complexity of others.
871 if (!X86PadForAlign && !X86PadForBranchAlign)
872 return;
873
874 // The processed regions are delimitered by LabeledFragments. -g may have more
875 // MCSymbols and therefore different relaxation results. X86PadForAlign is
876 // disabled by default to eliminate the -g vs non -g difference.
877 DenseSet<MCFragment *> LabeledFragments;
878 for (const MCSymbol &S : Asm.symbols())
879 LabeledFragments.insert(S.getFragment(false));
880
881 for (MCSection &Sec : Asm) {
882 if (!Sec.isText())
883 continue;
884
886 for (MCSection::iterator I = Sec.begin(), IE = Sec.end(); I != IE; ++I) {
887 MCFragment &F = *I;
888
889 if (LabeledFragments.count(&F))
890 Relaxable.clear();
891
892 if (F.getKind() == MCFragment::FT_Data ||
894 // Skip and ignore
895 continue;
896
897 if (F.getKind() == MCFragment::FT_Relaxable) {
898 auto &RF = cast<MCRelaxableFragment>(*I);
899 Relaxable.push_back(&RF);
900 continue;
901 }
902
903 auto canHandle = [](MCFragment &F) -> bool {
904 switch (F.getKind()) {
905 default:
906 return false;
908 return X86PadForAlign;
910 return X86PadForBranchAlign;
911 }
912 };
913 // For any unhandled kind, assume we can't change layout.
914 if (!canHandle(F)) {
915 Relaxable.clear();
916 continue;
917 }
918
919#ifndef NDEBUG
920 const uint64_t OrigOffset = Layout.getFragmentOffset(&F);
921#endif
922 const uint64_t OrigSize = Asm.computeFragmentSize(Layout, F);
923
924 // To keep the effects local, prefer to relax instructions closest to
925 // the align directive. This is purely about human understandability
926 // of the resulting code. If we later find a reason to expand
927 // particular instructions over others, we can adjust.
928 MCFragment *FirstChangedFragment = nullptr;
929 unsigned RemainingSize = OrigSize;
930 while (!Relaxable.empty() && RemainingSize != 0) {
931 auto &RF = *Relaxable.pop_back_val();
932 // Give the backend a chance to play any tricks it wishes to increase
933 // the encoding size of the given instruction. Target independent code
934 // will try further relaxation, but target's may play further tricks.
935 if (padInstructionEncoding(RF, Asm.getEmitter(), RemainingSize))
936 FirstChangedFragment = &RF;
937
938 // If we have an instruction which hasn't been fully relaxed, we can't
939 // skip past it and insert bytes before it. Changing its starting
940 // offset might require a larger negative offset than it can encode.
941 // We don't need to worry about larger positive offsets as none of the
942 // possible offsets between this and our align are visible, and the
943 // ones afterwards aren't changing.
944 if (mayNeedRelaxation(RF.getInst(), *RF.getSubtargetInfo()))
945 break;
946 }
947 Relaxable.clear();
948
949 if (FirstChangedFragment) {
950 // Make sure the offsets for any fragments in the effected range get
951 // updated. Note that this (conservatively) invalidates the offsets of
952 // those following, but this is not required.
953 Layout.invalidateFragmentsFrom(FirstChangedFragment);
954 }
955
956 // BoundaryAlign explicitly tracks it's size (unlike align)
957 if (F.getKind() == MCFragment::FT_BoundaryAlign)
958 cast<MCBoundaryAlignFragment>(F).setSize(RemainingSize);
959
960#ifndef NDEBUG
961 const uint64_t FinalOffset = Layout.getFragmentOffset(&F);
962 const uint64_t FinalSize = Asm.computeFragmentSize(Layout, F);
963 assert(OrigOffset + OrigSize == FinalOffset + FinalSize &&
964 "can't move start of next fragment!");
965 assert(FinalSize == RemainingSize && "inconsistent size computation?");
966#endif
967
968 // If we're looking at a boundary align, make sure we don't try to pad
969 // its target instructions for some following directive. Doing so would
970 // break the alignment of the current boundary align.
971 if (auto *BF = dyn_cast<MCBoundaryAlignFragment>(&F)) {
972 const MCFragment *LastFragment = BF->getLastFragment();
973 if (!LastFragment)
974 continue;
975 while (&*I != LastFragment)
976 ++I;
977 }
978 }
979 }
980
981 // The layout is done. Mark every fragment as valid.
982 for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) {
983 MCSection &Section = *Layout.getSectionOrder()[i];
984 Layout.getFragmentOffset(&*Section.curFragList()->Tail);
985 Asm.computeFragmentSize(Layout, *Section.curFragList()->Tail);
986 }
987}
988
989unsigned X86AsmBackend::getMaximumNopSize(const MCSubtargetInfo &STI) const {
990 if (STI.hasFeature(X86::Is16Bit))
991 return 4;
992 if (!STI.hasFeature(X86::FeatureNOPL) && !STI.hasFeature(X86::Is64Bit))
993 return 1;
994 if (STI.hasFeature(X86::TuningFast7ByteNOP))
995 return 7;
996 if (STI.hasFeature(X86::TuningFast15ByteNOP))
997 return 15;
998 if (STI.hasFeature(X86::TuningFast11ByteNOP))
999 return 11;
1000 // FIXME: handle 32-bit mode
1001 // 15-bytes is the longest single NOP instruction, but 10-bytes is
1002 // commonly the longest that can be efficiently decoded.
1003 return 10;
1004}
1005
1006/// Write a sequence of optimal nops to the output, covering \p Count
1007/// bytes.
1008/// \return - true on success, false on failure
1009bool X86AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
1010 const MCSubtargetInfo *STI) const {
1011 static const char Nops32Bit[10][11] = {
1012 // nop
1013 "\x90",
1014 // xchg %ax,%ax
1015 "\x66\x90",
1016 // nopl (%[re]ax)
1017 "\x0f\x1f\x00",
1018 // nopl 0(%[re]ax)
1019 "\x0f\x1f\x40\x00",
1020 // nopl 0(%[re]ax,%[re]ax,1)
1021 "\x0f\x1f\x44\x00\x00",
1022 // nopw 0(%[re]ax,%[re]ax,1)
1023 "\x66\x0f\x1f\x44\x00\x00",
1024 // nopl 0L(%[re]ax)
1025 "\x0f\x1f\x80\x00\x00\x00\x00",
1026 // nopl 0L(%[re]ax,%[re]ax,1)
1027 "\x0f\x1f\x84\x00\x00\x00\x00\x00",
1028 // nopw 0L(%[re]ax,%[re]ax,1)
1029 "\x66\x0f\x1f\x84\x00\x00\x00\x00\x00",
1030 // nopw %cs:0L(%[re]ax,%[re]ax,1)
1031 "\x66\x2e\x0f\x1f\x84\x00\x00\x00\x00\x00",
1032 };
1033
1034 // 16-bit mode uses different nop patterns than 32-bit.
1035 static const char Nops16Bit[4][11] = {
1036 // nop
1037 "\x90",
1038 // xchg %eax,%eax
1039 "\x66\x90",
1040 // lea 0(%si),%si
1041 "\x8d\x74\x00",
1042 // lea 0w(%si),%si
1043 "\x8d\xb4\x00\x00",
1044 };
1045
1046 const char(*Nops)[11] =
1047 STI->hasFeature(X86::Is16Bit) ? Nops16Bit : Nops32Bit;
1048
1049 uint64_t MaxNopLength = (uint64_t)getMaximumNopSize(*STI);
1050
1051 // Emit as many MaxNopLength NOPs as needed, then emit a NOP of the remaining
1052 // length.
1053 do {
1054 const uint8_t ThisNopLength = (uint8_t) std::min(Count, MaxNopLength);
1055 const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
1056 for (uint8_t i = 0; i < Prefixes; i++)
1057 OS << '\x66';
1058 const uint8_t Rest = ThisNopLength - Prefixes;
1059 if (Rest != 0)
1060 OS.write(Nops[Rest - 1], Rest);
1061 Count -= ThisNopLength;
1062 } while (Count != 0);
1063
1064 return true;
1065}
1066
1067/* *** */
1068
1069namespace {
1070
/// Common base for the ELF-flavored X86 backends; records the ELF OSABI
/// byte that the concrete subclasses pass to the object writer.
class ELFX86AsmBackend : public X86AsmBackend {
public:
  uint8_t OSABI; // ELF OSABI identification byte derived from the target OS.
  ELFX86AsmBackend(const Target &T, uint8_t OSABI, const MCSubtargetInfo &STI)
      : X86AsmBackend(T, STI), OSABI(OSABI) {}
};
1077
/// 32-bit (i386) ELF backend.
class ELFX86_32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_32AsmBackend(const Target &T, uint8_t OSABI,
                      const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  // Creates an ELF32 object writer with machine type EM_386.
  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI, ELF::EM_386);
  }
};
1089
/// X32 (ILP32 on x86-64) ELF backend: ELF32 container.
class ELFX86_X32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_X32AsmBackend(const Target &T, uint8_t OSABI,
                       const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    // NOTE(review): the machine-type argument line appears to have been
    // lost in extraction (presumably ELF::EM_X86_64) — restore before
    // compiling.
    return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
  }
};
1102
/// Intel MCU (IAMCU) ELF backend: ELF32 container.
class ELFX86_IAMCUAsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_IAMCUAsmBackend(const Target &T, uint8_t OSABI,
                         const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    // NOTE(review): the machine-type argument line appears to have been
    // lost in extraction (presumably ELF::EM_IAMCU) — restore before
    // compiling.
    return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
  }
};
1115
/// 64-bit ELF backend (ELF64 container, machine type EM_X86_64).
class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_64AsmBackend(const Target &T, uint8_t OSABI,
                      const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ true, OSABI, ELF::EM_X86_64);
  }
};
1127
/// COFF (Windows) flavor of the X86 backend.
class WindowsX86AsmBackend : public X86AsmBackend {
  bool Is64Bit; // Selects the 64-bit COFF object writer.

public:
  WindowsX86AsmBackend(const Target &T, bool is64Bit,
                       const MCSubtargetInfo &STI)
    : X86AsmBackend(T, STI)
    , Is64Bit(is64Bit) {
  }

  /// Map .reloc directive relocation names to COFF fixup kinds.
  std::optional<MCFixupKind> getFixupKind(StringRef Name) const override {
    // NOTE(review): the StringSwitch head line and its trailing
    // .Default(...) line appear to have been lost in extraction —
    // restore before compiling.
      .Case("dir32", FK_Data_4)
      .Case("secrel32", FK_SecRel_4)
      .Case("secidx", FK_SecRel_2)
  }

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86WinCOFFObjectWriter(Is64Bit);
  }
};
1151
namespace CU {

  /// Compact unwind encoding values.
  // NOTE(review): the enum header line (e.g.
  // 'enum CompactUnwindEncodings {') appears to have been lost in
  // extraction — restore before compiling.
    /// [RE]BP based frame where [RE]BP is pushed on the stack immediately after
    /// the return address, then [RE]SP is moved to [RE]BP.
    UNWIND_MODE_BP_FRAME = 0x01000000,

    /// A frameless function with a small constant stack size.
    UNWIND_MODE_STACK_IMMD = 0x02000000,

    /// A frameless function with a large constant stack size.
    UNWIND_MODE_STACK_IND = 0x03000000,

    /// No compact unwind encoding is available.
    UNWIND_MODE_DWARF = 0x04000000,

    /// Mask for encoding the frame registers.
    UNWIND_BP_FRAME_REGISTERS = 0x00007FFF,

    /// Mask for encoding the frameless registers.
    UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
  };

} // namespace CU
1177
/// Mach-O (Darwin) flavor of the X86 backend. Besides object writing, it
/// computes the compact unwind encoding derived from a frame's CFI
/// instructions.
class DarwinX86AsmBackend : public X86AsmBackend {
  const MCRegisterInfo &MRI;

  /// Number of registers that can be saved in a compact unwind encoding.
  enum { CU_NUM_SAVED_REGS = 6 };

  // Scratch list of callee-saved registers collected while scanning CFI;
  // mutable because generateCompactUnwindEncoding() is const.
  mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
  Triple TT;
  bool Is64Bit;

  unsigned OffsetSize; ///< Offset of a "push" instruction.
  unsigned MoveInstrSize; ///< Size of a "move" instruction.
  unsigned StackDivide; ///< Amount to adjust stack size by.
protected:
  /// Size of a "push" instruction for the given register.
  unsigned PushInstrSize(unsigned Reg) const {
    switch (Reg) {
      case X86::EBX:
      case X86::ECX:
      case X86::EDX:
      case X86::EDI:
      case X86::ESI:
      case X86::EBP:
      case X86::RBX:
      case X86::RBP:
        return 1;
      case X86::R12:
      case X86::R13:
      case X86::R14:
      case X86::R15:
        // R12-R15 need a REX prefix, so their push is two bytes.
        return 2;
    }
    // Default for any other register.
    return 1;
  }

private:
  /// Get the compact unwind number for a given register. The number
  /// corresponds to the enum lists in compact_unwind_encoding.h.
  /// Returns -1 when the register has no compact unwind number.
  int getCompactUnwindRegNum(unsigned Reg) const {
    static const MCPhysReg CU32BitRegs[7] = {
      X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
    };
    static const MCPhysReg CU64BitRegs[] = {
      X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
    };
    const MCPhysReg *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs;
    // Numbers are 1-based; the tables are 0-terminated.
    for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
      if (*CURegs == Reg)
        return Idx;

    return -1;
  }

  /// Return the registers encoded for a compact encoding with a frame
  /// pointer. Returns ~0U when a saved register cannot be encoded.
  uint32_t encodeCompactUnwindRegistersWithFrame() const {
    // Encode the registers in the order they were saved --- 3-bits per
    // register. The list of saved registers is assumed to be in reverse
    // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS.
    uint32_t RegEnc = 0;
    for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) {
      unsigned Reg = SavedRegs[i];
      if (Reg == 0) break;

      int CURegNum = getCompactUnwindRegNum(Reg);
      if (CURegNum == -1) return ~0U;

      // Encode the 3-bit register number in order, skipping over 3-bits for
      // each register.
      RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
    }

    assert((RegEnc & 0x3FFFF) == RegEnc &&
           "Invalid compact register encoding!");
    return RegEnc;
  }

  /// Create the permutation encoding used with frameless stacks. It is
  /// passed the number of registers to be saved and an array of the registers
  /// saved. Returns ~0U when a register cannot be encoded.
  uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
    // The saved registers are numbered from 1 to 6. In order to encode the
    // order in which they were saved, we re-number them according to their
    // place in the register order. The re-numbering is relative to the last
    // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
    // that order:
    //
    //    Orig  Re-Num
    //    ----  ------
    //     6       6
    //     2       2
    //     4       3
    //     5       3
    //
    for (unsigned i = 0; i < RegCount; ++i) {
      int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
      if (CUReg == -1) return ~0U;
      SavedRegs[i] = CUReg;
    }

    // Reverse the list.
    std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);

    // RenumRegs[i] is the rank of SavedRegs[i] among the still-unused
    // compact unwind numbers (Lehmer-code style factoradic digits).
    uint32_t RenumRegs[CU_NUM_SAVED_REGS];
    for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i){
      unsigned Countless = 0;
      for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
        if (SavedRegs[j] < SavedRegs[i])
          ++Countless;

      RenumRegs[i] = SavedRegs[i] - Countless - 1;
    }

    // Take the renumbered values and encode them into a 10-bit number.
    uint32_t permutationEncoding = 0;
    switch (RegCount) {
    case 6:
      permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
                             + 6 * RenumRegs[2] + 2 * RenumRegs[3]
                             + RenumRegs[4];
      break;
    case 5:
      permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
                             + 6 * RenumRegs[3] + 2 * RenumRegs[4]
                             + RenumRegs[5];
      break;
    case 4:
      permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
                             + 3 * RenumRegs[4] + RenumRegs[5];
      break;
    case 3:
      permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
                             + RenumRegs[5];
      break;
    case 2:
      permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
      break;
    case 1:
      permutationEncoding |= RenumRegs[5];
      break;
    }

    assert((permutationEncoding & 0x3FF) == permutationEncoding &&
           "Invalid compact register encoding!");
    return permutationEncoding;
  }

public:
  DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                      const MCSubtargetInfo &STI)
      : X86AsmBackend(T, STI), MRI(MRI), TT(STI.getTargetTriple()),
        Is64Bit(TT.isArch64Bit()) {
    memset(SavedRegs, 0, sizeof(SavedRegs));
    // Instruction-size constants used while walking the prologue's CFI.
    OffsetSize = Is64Bit ? 8 : 4;
    MoveInstrSize = Is64Bit ? 3 : 2;
    StackDivide = Is64Bit ? 8 : 4;
  }

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    // NOTE(review): the line defining CPUType (presumably via
    // cantFail(MachO::getCPUType(TT))) appears to have been lost in
    // extraction — restore before compiling.
    uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TT));
    return createX86MachObjectWriter(Is64Bit, CPUType, CPUSubType);
  }

  /// Implementation of algorithm to generate the compact unwind encoding
  /// for the CFI instructions.
  uint32_t generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI,
                                         const MCContext *Ctxt) const override {
    // NOTE(review): the declaration of Instrs (the frame's CFI instruction
    // list, presumably FI->Instructions) appears to have been lost in
    // extraction — restore before compiling.
    if (Instrs.empty()) return 0;
    // NOTE(review): the second operand of this '&&' is missing (extraction
    // artifact); the condition is left as found.
    if (!isDarwinCanonicalPersonality(FI->Personality) &&
      return CU::UNWIND_MODE_DWARF;

    // Reset the saved registers.
    unsigned SavedRegIdx = 0;
    memset(SavedRegs, 0, sizeof(SavedRegs));

    bool HasFP = false;

    // Encode that we are using EBP/RBP as the frame pointer.
    uint32_t CompactUnwindEncoding = 0;

    unsigned SubtractInstrIdx = Is64Bit ? 3 : 2;
    unsigned InstrOffset = 0;
    unsigned StackAdjust = 0;
    unsigned StackSize = 0;
    int MinAbsOffset = std::numeric_limits<int>::max();

    for (const MCCFIInstruction &Inst : Instrs) {
      switch (Inst.getOperation()) {
      default:
        // Any other CFI directives indicate a frame that we aren't prepared
        // to represent via compact unwind, so just bail out.
        return CU::UNWIND_MODE_DWARF;
      // NOTE(review): a 'case' label (the def_cfa_register operation)
      // appears to have been lost in extraction — restore before compiling.
        // Defines a frame pointer. E.g.
        //
        //     movq %rsp, %rbp
        //  L0:
        //     .cfi_def_cfa_register %rbp
        //
        HasFP = true;

        // If the frame pointer is other than esp/rsp, we do not have a way to
        // generate a compact unwinding representation, so bail out.
        if (*MRI.getLLVMRegNum(Inst.getRegister(), true) !=
            (Is64Bit ? X86::RBP : X86::EBP))
          return CU::UNWIND_MODE_DWARF;

        // Reset the counts.
        memset(SavedRegs, 0, sizeof(SavedRegs));
        StackAdjust = 0;
        SavedRegIdx = 0;
        MinAbsOffset = std::numeric_limits<int>::max();
        InstrOffset += MoveInstrSize;
        break;
      }
      // NOTE(review): a 'case' label (the def_cfa_offset operation)
      // appears to have been lost in extraction — restore before compiling.
        // Defines a new offset for the CFA. E.g.
        //
        //  With frame:
        //
        //     pushq %rbp
        //  L0:
        //     .cfi_def_cfa_offset 16
        //
        //  Without frame:
        //
        //     subq $72, %rsp
        //  L0:
        //     .cfi_def_cfa_offset 80
        //
        StackSize = Inst.getOffset() / StackDivide;
        break;
      }
      // NOTE(review): a 'case' label (the register-offset/save operation)
      // appears to have been lost in extraction — restore before compiling.
        // Defines a "push" of a callee-saved register. E.g.
        //
        //     pushq %r15
        //     pushq %r14
        //     pushq %rbx
        //  L0:
        //     subq $120, %rsp
        //  L1:
        //     .cfi_offset %rbx, -40
        //     .cfi_offset %r14, -32
        //     .cfi_offset %r15, -24
        //
        if (SavedRegIdx == CU_NUM_SAVED_REGS)
          // If there are too many saved registers, we cannot use a compact
          // unwind encoding.
          return CU::UNWIND_MODE_DWARF;

        unsigned Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        SavedRegs[SavedRegIdx++] = Reg;
        StackAdjust += OffsetSize;
        MinAbsOffset = std::min(MinAbsOffset, abs(Inst.getOffset()));
        InstrOffset += PushInstrSize(Reg);
        break;
      }
      }
    }

    StackAdjust /= StackDivide;

    if (HasFP) {
      if ((StackAdjust & 0xFF) != StackAdjust)
        // Offset was too big for a compact unwind encoding.
        return CU::UNWIND_MODE_DWARF;

      // We don't attempt to track a real StackAdjust, so if the saved registers
      // aren't adjacent to rbp we can't cope.
      if (SavedRegIdx != 0 && MinAbsOffset != 3 * (int)OffsetSize)
        return CU::UNWIND_MODE_DWARF;

      // Get the encoding of the saved registers when we have a frame pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame();
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
      CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
      CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
    } else {
      SubtractInstrIdx += InstrOffset;
      ++StackAdjust;

      if ((StackSize & 0xFF) == StackSize) {
        // Frameless stack with a small stack size.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;

        // Encode the stack size.
        CompactUnwindEncoding |= (StackSize & 0xFF) << 16;
      } else {
        if ((StackAdjust & 0x7) != StackAdjust)
          // The extra stack adjustments are too big for us to handle.
          return CU::UNWIND_MODE_DWARF;

        // Frameless stack with an offset too large for us to encode compactly.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;

        // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
        // instruction.
        CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;

        // Encode any extra stack adjustments (done via push instructions).
        CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
      }

      // Encode the number of registers saved. (Reverse the list first.)
      std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]);
      CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;

      // Get the encoding of the saved registers when we don't have a frame
      // pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx);
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      // Encode the register encoding.
      CompactUnwindEncoding |=
        RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
    }

    return CompactUnwindEncoding;
  }
};
1505
1506} // end anonymous namespace
1507
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  // Dispatch on the object-file format / OS recorded in the target triple.
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO())
    return new DarwinX86AsmBackend(T, MRI, STI);

  if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
    return new WindowsX86AsmBackend(T, false, STI);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());

  // Intel MCU (IAMCU) uses its own ELF machine type.
  if (TheTriple.isOSIAMCU())
    return new ELFX86_IAMCUAsmBackend(T, OSABI, STI);

  return new ELFX86_32AsmBackend(T, OSABI, STI);
}
1526
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  // Dispatch on the object-file format / OS recorded in the target triple.
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO())
    return new DarwinX86AsmBackend(T, MRI, STI);

  if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
    return new WindowsX86AsmBackend(T, true, STI);

  // UEFI targets are COFF-only; they reuse the Windows backend.
  if (TheTriple.isUEFI()) {
    assert(TheTriple.isOSBinFormatCOFF() &&
         "Only COFF format is supported in UEFI environment.");
    return new WindowsX86AsmBackend(T, true, STI);
  }

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());

  // x86-64 with 32-bit pointers (x32) gets its own ELF backend.
  if (TheTriple.isX32())
    return new ELFX86_X32AsmBackend(T, OSABI, STI);
  return new ELFX86_64AsmBackend(T, OSABI, STI);
}
1550
namespace {
/// ELF streamer specialization whose emitInstruction is overridden (see the
/// out-of-line definition below) to route through the X86-specific helper.
class X86ELFStreamer : public MCELFStreamer {
public:
  X86ELFStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
                 std::unique_ptr<MCObjectWriter> OW,
                 std::unique_ptr<MCCodeEmitter> Emitter)
      : MCELFStreamer(Context, std::move(TAB), std::move(OW),
                      std::move(Emitter)) {}

  void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI) override;
};
} // end anonymous namespace
1563
                             const MCSubtargetInfo &STI) {
  auto &Backend = static_cast<X86AsmBackend &>(S.getAssembler().getBackend());
  // Bracket the emission with the backend's hooks so any branch-alignment
  // padding can be inserted around this instruction.
  Backend.emitInstructionBegin(S, Inst, STI);
  S.MCObjectStreamer::emitInstruction(Inst, STI);
  Backend.emitInstructionEnd(S, Inst);
}
1571
void X86ELFStreamer::emitInstruction(const MCInst &Inst,
                                     const MCSubtargetInfo &STI) {
  // Delegate to the shared X86 helper, which wraps the emission with the
  // backend's emitInstructionBegin/End padding hooks.
  X86_MC::emitInstruction(*this, Inst, STI);
}
1576
                                       std::unique_ptr<MCAsmBackend> &&MAB,
                                       std::unique_ptr<MCObjectWriter> &&MOW,
                                       std::unique_ptr<MCCodeEmitter> &&MCE) {
  // Construct the X86-specific ELF streamer; ownership of the backend,
  // object writer and code emitter transfers to the streamer.
  return new X86ELFStreamer(Context, std::move(MAB), std::move(MOW),
                            std::move(MCE));
}
unsigned const MachineRegisterInfo * MRI
dxil DXContainer Global Emitter
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static RegisterPass< DebugifyFunctionPass > DF("debugify-function", "Attach debug info to a function")
std::string Name
uint64_t Size
IRTranslator LLVM IR MI
static LVOptions Options
Definition: LVOptions.cpp:25
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
PowerPC TLS Dynamic Call Fixup
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static MCInstrInfo * createMCInstrInfo()
static unsigned getRelaxedOpcodeBranch(unsigned Opcode, bool Is16BitMode=false)
static X86::SecondMacroFusionInstKind classifySecondInstInMacroFusion(const MCInst &MI, const MCInstrInfo &MCII)
static size_t getSizeForInstFragment(const MCFragment *F)
static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII)
Check if the instruction uses RIP relative addressing.
static bool isRightAfterData(MCFragment *CurrentFragment, const std::pair< MCFragment *, size_t > &PrevInstPosition)
Check if the instruction to be emitted is right after any data.
static bool mayHaveInterruptDelaySlot(unsigned InstOpcode)
X86 has certain instructions which enable interrupts exactly one instruction after the instruction wh...
static bool isFirstMacroFusibleInst(const MCInst &Inst, const MCInstrInfo &MCII)
Check if the instruction is valid as the first instruction in macro fusion.
static X86::CondCode getCondFromBranch(const MCInst &MI, const MCInstrInfo &MCII)
static unsigned getRelaxedOpcode(const MCInst &MI, bool Is16BitMode)
static unsigned getFixupKindSize(unsigned Kind)
static bool isRelaxableBranch(unsigned Opcode)
static bool isPrefix(unsigned Opcode, const MCInstrInfo &MCII)
Check if the instruction is a prefix.
static bool hasVariantSymbol(const MCInst &MI)
Check if the instruction has a variant symbol operand.
static bool is64Bit(const char *name)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
Generic interface to target specific assembler backends.
Definition: MCAsmBackend.h:43
virtual bool allowEnhancedRelaxation() const
Return true if this target allows an unrelaxable instruction to be emitted into RelaxableFragment and...
Definition: MCAsmBackend.h:63
virtual unsigned getMaximumNopSize(const MCSubtargetInfo &STI) const
Returns the maximum size of a nop in bytes on this target.
Definition: MCAsmBackend.h:209
virtual bool writeNopData(raw_ostream &OS, uint64_t Count, const MCSubtargetInfo *STI) const =0
Write an (optimal) nop sequence of Count bytes to the given output.
virtual void relaxInstruction(MCInst &Inst, const MCSubtargetInfo &STI) const
Relax the instruction in the given fragment to the next wider instruction.
Definition: MCAsmBackend.h:179
virtual bool mayNeedRelaxation(const MCInst &Inst, const MCSubtargetInfo &STI) const
Check whether the given instruction may need relaxation.
Definition: MCAsmBackend.h:156
virtual bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value, const MCRelaxableFragment *DF, const MCAsmLayout &Layout) const =0
Simple predicate for targets where !Resolved implies requiring relaxation.
virtual void finishLayout(MCAssembler const &Asm, MCAsmLayout &Layout) const
Give backend an opportunity to finish layout after relaxation.
Definition: MCAsmBackend.h:221
virtual bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup, const MCValue &Target, const MCSubtargetInfo *STI)
Hook to check if a relocation is needed for some target specific reason.
Definition: MCAsmBackend.h:95
virtual unsigned getNumFixupKinds() const =0
Get the number of target specific fixup kinds.
virtual const MCFixupKindInfo & getFixupKindInfo(MCFixupKind Kind) const
Get information on a fixup kind.
virtual std::optional< MCFixupKind > getFixupKind(StringRef Name) const
Map a relocation name used in .reloc to a fixup kind.
virtual void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, const MCValue &Target, MutableArrayRef< char > Data, uint64_t Value, bool IsResolved, const MCSubtargetInfo *STI) const =0
Apply the Value for given Fixup into the provided data fragment, at the offset specified by the fixup...
virtual bool allowAutoPadding() const
Return true if this target might automatically pad instructions and thus need to emit padding enable/...
Definition: MCAsmBackend.h:59
Encapsulates the layout of an assembly file at a particular point in time.
Definition: MCAsmLayout.h:28
void invalidateFragmentsFrom(MCFragment *F)
Invalidate the fragments starting with F because it has been resized.
Definition: MCFragment.cpp:43
llvm::SmallVectorImpl< MCSection * > & getSectionOrder()
Definition: MCAsmLayout.h:53
uint64_t getFragmentOffset(const MCFragment *F) const
Get the offset of the given fragment inside its containing section.
MCAsmBackend & getBackend() const
Definition: MCAssembler.h:330
Represents required padding such that a particular other set of fragments does not cross a particular...
Definition: MCFragment.h:564
void setLastFragment(const MCFragment *F)
Definition: MCFragment.h:588
MCCodeEmitter - Generic instruction encoding interface.
Definition: MCCodeEmitter.h:21
Context object for machine code objects.
Definition: MCContext.h:83
bool emitCompactUnwindNonCanonical() const
Definition: MCContext.cpp:940
Fragment for data and encoded instructions.
Definition: MCFragment.h:232
SmallVectorImpl< char > & getContents()
Definition: MCFragment.h:189
SmallVectorImpl< MCFixup > & getFixups()
Definition: MCFragment.h:213
const MCSubtargetInfo * getSubtargetInfo() const
Retrieve the MCSubTargetInfo in effect when the instruction was encoded.
Definition: MCFragment.h:166
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:35
@ SymbolRef
References to labels and assigned expressions.
Definition: MCExpr.h:40
ExprKind getKind() const
Definition: MCExpr.h:81
Encode information on a single operation to perform on a byte sequence (e.g., an encoded instruction)...
Definition: MCFixup.h:71
MCFragment * getNext() const
Definition: MCFragment.h:90
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
void dump_pretty(raw_ostream &OS, const MCInstPrinter *Printer=nullptr, StringRef Separator=" ", const MCRegisterInfo *RegInfo=nullptr) const
Dump the MCInst as prettily as possible using the additional MC structures, if given.
Definition: MCInst.cpp:84
unsigned getOpcode() const
Definition: MCInst.h:198
void setOpcode(unsigned Op)
Definition: MCInst.h:197
const MCOperand & getOperand(unsigned i) const
Definition: MCInst.h:206
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Definition: MCInstrDesc.h:317
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:26
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
Streaming object file generation interface.
MCAssembler & getAssembler()
void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI) override
Emit the given Instruction into the current section.
unsigned getReg() const
Returns the register number.
Definition: MCInst.h:69
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
A relaxable fragment holds on to its MCInst, since it may need to be relaxed during the assembler lay...
Definition: MCFragment.h:262
bool getAllowAutoPadding() const
Definition: MCFragment.h:278
const MCInst & getInst() const
Definition: MCFragment.h:275
void setInst(const MCInst &Value)
Definition: MCFragment.h:276
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Definition: MCSection.h:36
void ensureMinAlignment(Align MinAlignment)
Makes sure that Alignment is at least MinAlignment.
Definition: MCSection.h:162
bool isText() const
Definition: MCSection.h:143
iterator end() const
Definition: MCSection.h:198
iterator begin() const
Definition: MCSection.h:197
Streaming machine code generation interface.
Definition: MCStreamer.h:213
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const Triple & getTargetTriple() const
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
This represents an "assembler immediate".
Definition: MCValue.h:36
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Definition: ArrayRef.h:307
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:693
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
bool isX32() const
Tests whether the target is X32.
Definition: Triple.h:1037
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
Definition: Triple.h:732
OSType getOS() const
Get the parsed operating system type of this triple.
Definition: Triple.h:382
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:373
bool isOSBinFormatCOFF() const
Tests whether the OS uses the COFF binary format.
Definition: Triple.h:724
bool isUEFI() const
Tests whether the OS is UEFI.
Definition: Triple.h:619
bool isOSWindows() const
Tests whether the OS is Windows.
Definition: Triple.h:624
bool isOSIAMCU() const
Definition: Triple.h:598
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition: Triple.h:719
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM Value Representation.
Definition: Value.h:74
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition: DenseSet.h:97
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
raw_ostream & write(unsigned char C)
A raw_ostream that writes to an SmallVector or SmallString.
Definition: raw_ostream.h:691
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
CompactUnwindEncodings
Compact unwind encoding values.
@ EM_386
Definition: ELF.h:136
@ EM_X86_64
Definition: ELF.h:178
@ EM_IAMCU
Definition: ELF.h:139
Expected< uint32_t > getCPUSubType(const Triple &T)
Definition: MachO.cpp:95
Expected< uint32_t > getCPUType(const Triple &T)
Definition: MachO.cpp:77
Reg
All possible values of the reg field in the ModR/M byte.
bool isPrefix(uint64_t TSFlags)
Definition: X86BaseInfo.h:895
int getMemoryOperandNo(uint64_t TSFlags)
Definition: X86BaseInfo.h:1024
unsigned getOperandBias(const MCInstrDesc &Desc)
Compute whether all of the def operands are repeated in the uses and therefore should be skipped.
Definition: X86BaseInfo.h:981
@ RawFrmDstSrc
RawFrmDstSrc - This form is for instructions that use the source index register SI/ESI/RSI with a pos...
Definition: X86BaseInfo.h:534
@ RawFrmSrc
RawFrmSrc - This form is for instructions that use the source index register SI/ESI/RSI with a possib...
Definition: X86BaseInfo.h:527
@ RawFrmMemOffs
RawFrmMemOffs - This form is for instructions that store an absolute memory offset as an immediate wi...
Definition: X86BaseInfo.h:524
void emitPrefix(MCCodeEmitter &MCE, const MCInst &MI, SmallVectorImpl< char > &CB, const MCSubtargetInfo &STI)
void emitInstruction(MCObjectStreamer &, const MCInst &Inst, const MCSubtargetInfo &STI)
FirstMacroFusionInstKind classifyFirstOpcodeInMacroFusion(unsigned Opcode)
Definition: X86BaseInfo.h:126
AlignBranchBoundaryKind
Defines the possible values of the branch boundary alignment mask.
Definition: X86BaseInfo.h:325
@ AlignBranchJmp
Definition: X86BaseInfo.h:329
@ AlignBranchIndirect
Definition: X86BaseInfo.h:332
@ AlignBranchJcc
Definition: X86BaseInfo.h:328
@ AlignBranchCall
Definition: X86BaseInfo.h:330
@ AlignBranchRet
Definition: X86BaseInfo.h:331
@ AlignBranchNone
Definition: X86BaseInfo.h:326
@ AlignBranchFused
Definition: X86BaseInfo.h:327
@ AddrSegmentReg
Definition: X86BaseInfo.h:34
SecondMacroFusionInstKind
Definition: X86BaseInfo.h:116
FirstMacroFusionInstKind
Definition: X86BaseInfo.h:107
unsigned getOpcodeForLongImmediateForm(unsigned Opcode)
EncodingOfSegmentOverridePrefix getSegmentOverridePrefixForReg(unsigned Reg)
Given a segment register, return the encoding of the segment override prefix for it.
Definition: X86BaseInfo.h:348
bool isMacroFused(FirstMacroFusionInstKind FirstKind, SecondMacroFusionInstKind SecondKind)
Definition: X86BaseInfo.h:306
@ reloc_global_offset_table8
Definition: X86FixupKinds.h:31
@ reloc_signed_4byte_relax
Definition: X86FixupKinds.h:26
@ reloc_branch_4byte_pcrel
Definition: X86FixupKinds.h:32
@ NumTargetFixupKinds
Definition: X86FixupKinds.h:35
@ reloc_riprel_4byte_relax
Definition: X86FixupKinds.h:19
@ reloc_signed_4byte
Definition: X86FixupKinds.h:23
@ reloc_riprel_4byte_relax_rex
Definition: X86FixupKinds.h:21
@ reloc_global_offset_table
Definition: X86FixupKinds.h:28
@ reloc_riprel_4byte_movq_load
Definition: X86FixupKinds.h:18
@ reloc_riprel_4byte
Definition: X86FixupKinds.h:17
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:463
NodeAddr< CodeNode * > Code
Definition: RDFGraph.h:388
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
APFloat abs(APFloat X)
Returns the absolute value of the argument.
Definition: APFloat.h:1434
MCAsmBackend * createX86_64AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
std::unique_ptr< MCObjectTargetWriter > createX86WinCOFFObjectWriter(bool Is64Bit)
Construct an X86 Win COFF object writer.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
MCStreamer * createX86ELFStreamer(const Triple &T, MCContext &Context, std::unique_ptr< MCAsmBackend > &&MAB, std::unique_ptr< MCObjectWriter > &&MOW, std::unique_ptr< MCCodeEmitter > &&MCE)
MCFixupKind
Extensible enumeration to represent the type of a fixup.
Definition: MCFixup.h:21
@ FirstTargetFixupKind
Definition: MCFixup.h:45
@ FK_PCRel_4
A four-byte pc relative fixup.
Definition: MCFixup.h:30
@ FK_PCRel_2
A two-byte pc relative fixup.
Definition: MCFixup.h:29
@ FK_SecRel_2
A two-byte section relative fixup.
Definition: MCFixup.h:41
@ FirstLiteralRelocationKind
The range [FirstLiteralRelocationKind, MaxTargetFixupKind) is used for relocations coming from ....
Definition: MCFixup.h:50
@ FK_Data_8
An eight-byte fixup.
Definition: MCFixup.h:26
@ FK_Data_1
A one-byte fixup.
Definition: MCFixup.h:23
@ FK_Data_4
A four-byte fixup.
Definition: MCFixup.h:25
@ FK_SecRel_8
An eight-byte section relative fixup.
Definition: MCFixup.h:43
@ FK_PCRel_8
An eight-byte pc relative fixup.
Definition: MCFixup.h:31
@ FK_NONE
A no-op fixup.
Definition: MCFixup.h:22
@ FK_SecRel_4
A four-byte section relative fixup.
Definition: MCFixup.h:42
@ FK_PCRel_1
A one-byte pc relative fixup.
Definition: MCFixup.h:28
@ FK_SecRel_1
A one-byte section relative fixup.
Definition: MCFixup.h:40
@ FK_Data_2
A two-byte fixup.
Definition: MCFixup.h:24
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition: Error.h:756
std::unique_ptr< MCObjectTargetWriter > createX86MachObjectWriter(bool Is64Bit, uint32_t CPUType, uint32_t CPUSubtype)
Construct an X86 Mach-O object writer.
bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
Definition: MathExtras.h:244
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1849
std::unique_ptr< MCObjectTargetWriter > createX86ELFObjectWriter(bool IsELF64, uint8_t OSABI, uint16_t EMachine)
Construct an X86 ELF object writer.
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition: Alignment.h:111
endianness
Definition: bit.h:70
MCAsmBackend * createX86_32AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Description of the encoding of one expression Op.
const MCSymbol * Personality
Definition: MCDwarf.h:702
std::vector< MCCFIInstruction > Instructions
Definition: MCDwarf.h:704
Target independent information on a fixup kind.
@ FKF_IsPCRel
Is this fixup kind PCrelative? This is used by the assembler backend to evaluate fixup values in a ta...