LLVM 23.0.0git
X86AsmBackend.cpp
Go to the documentation of this file.
1//===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
17#include "llvm/MC/MCAssembler.h"
19#include "llvm/MC/MCContext.h"
20#include "llvm/MC/MCDwarf.h"
23#include "llvm/MC/MCExpr.h"
24#include "llvm/MC/MCInst.h"
25#include "llvm/MC/MCInstrInfo.h"
29#include "llvm/MC/MCSection.h"
31#include "llvm/MC/MCValue.h"
36
37using namespace llvm;
38
39namespace {
40/// A wrapper for holding a mask of the values from X86::AlignBranchBoundaryKind
41class X86AlignBranchKind {
42private:
43 uint8_t AlignBranchKind = 0;
44
45public:
46 void operator=(const std::string &Val) {
47 if (Val.empty())
48 return;
49 SmallVector<StringRef, 6> BranchTypes;
50 StringRef(Val).split(BranchTypes, '+', -1, false);
51 for (auto BranchType : BranchTypes) {
52 if (BranchType == "fused")
53 addKind(X86::AlignBranchFused);
54 else if (BranchType == "jcc")
55 addKind(X86::AlignBranchJcc);
56 else if (BranchType == "jmp")
57 addKind(X86::AlignBranchJmp);
58 else if (BranchType == "call")
59 addKind(X86::AlignBranchCall);
60 else if (BranchType == "ret")
61 addKind(X86::AlignBranchRet);
62 else if (BranchType == "indirect")
64 else {
65 errs() << "invalid argument " << BranchType.str()
66 << " to -x86-align-branch=; each element must be one of: fused, "
67 "jcc, jmp, call, ret, indirect.(plus separated)\n";
68 }
69 }
70 }
71
72 operator uint8_t() const { return AlignBranchKind; }
73 void addKind(X86::AlignBranchBoundaryKind Value) { AlignBranchKind |= Value; }
74};
75
76X86AlignBranchKind X86AlignBranchKindLoc;
77
78cl::opt<unsigned> X86AlignBranchBoundary(
79 "x86-align-branch-boundary", cl::init(0),
81 "Control how the assembler should align branches with NOP. If the "
82 "boundary's size is not 0, it should be a power of 2 and no less "
83 "than 32. Branches will be aligned to prevent from being across or "
84 "against the boundary of specified size. The default value 0 does not "
85 "align branches."));
86
88 "x86-align-branch",
90 "Specify types of branches to align (plus separated list of types):"
91 "\njcc indicates conditional jumps"
92 "\nfused indicates fused conditional jumps"
93 "\njmp indicates direct unconditional jumps"
94 "\ncall indicates direct and indirect calls"
95 "\nret indicates rets"
96 "\nindirect indicates indirect unconditional jumps"),
97 cl::location(X86AlignBranchKindLoc));
98
99cl::opt<bool> X86AlignBranchWithin32BBoundaries(
100 "x86-branches-within-32B-boundaries", cl::init(false),
101 cl::desc(
102 "Align selected instructions to mitigate negative performance impact "
103 "of Intel's micro code update for errata skx102. May break "
104 "assumptions about labels corresponding to particular instructions, "
105 "and should be used with caution."));
106
107cl::opt<unsigned> X86PadMaxPrefixSize(
108 "x86-pad-max-prefix-size", cl::init(0),
109 cl::desc("Maximum number of prefixes to use for padding"));
110
111cl::opt<bool> X86PadForAlign(
112 "x86-pad-for-align", cl::init(false), cl::Hidden,
113 cl::desc("Pad previous instructions to implement align directives"));
114
115cl::opt<bool> X86PadForBranchAlign(
116 "x86-pad-for-branch-align", cl::init(true), cl::Hidden,
117 cl::desc("Pad previous instructions to implement branch alignment"));
118
119class X86AsmBackend : public MCAsmBackend {
120 const MCSubtargetInfo &STI;
121 std::unique_ptr<const MCInstrInfo> MCII;
122 X86AlignBranchKind AlignBranchType;
123 Align AlignBoundary;
124 unsigned TargetPrefixMax = 0;
125
126 MCInst PrevInst;
127 unsigned PrevInstOpcode = 0;
128 MCBoundaryAlignFragment *PendingBA = nullptr;
129 std::pair<MCFragment *, size_t> PrevInstPosition;
130
131 uint8_t determinePaddingPrefix(const MCInst &Inst) const;
132 bool isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const;
133 bool needAlign(const MCInst &Inst) const;
134 bool canPadBranches(MCObjectStreamer &OS) const;
135 bool canPadInst(const MCInst &Inst, MCObjectStreamer &OS) const;
136
137public:
138 X86AsmBackend(const Target &T, const MCSubtargetInfo &STI)
139 : MCAsmBackend(llvm::endianness::little), STI(STI),
140 MCII(T.createMCInstrInfo()) {
141 if (X86AlignBranchWithin32BBoundaries) {
142 // At the moment, this defaults to aligning fused branches, unconditional
143 // jumps, and (unfused) conditional jumps with nops. Both the
144 // instructions aligned and the alignment method (nop vs prefix) may
145 // change in the future.
146 AlignBoundary = assumeAligned(32);
147 AlignBranchType.addKind(X86::AlignBranchFused);
148 AlignBranchType.addKind(X86::AlignBranchJcc);
149 AlignBranchType.addKind(X86::AlignBranchJmp);
150 }
151 // Allow overriding defaults set by main flag
152 if (X86AlignBranchBoundary.getNumOccurrences())
153 AlignBoundary = assumeAligned(X86AlignBranchBoundary);
154 if (X86AlignBranch.getNumOccurrences())
155 AlignBranchType = X86AlignBranchKindLoc;
156 if (X86PadMaxPrefixSize.getNumOccurrences())
157 TargetPrefixMax = X86PadMaxPrefixSize;
158
159 AllowAutoPadding =
160 AlignBoundary != Align(1) && AlignBranchType != X86::AlignBranchNone;
161 AllowEnhancedRelaxation =
162 AllowAutoPadding && TargetPrefixMax != 0 && X86PadForBranchAlign;
163 }
164
165 void emitInstructionBegin(MCObjectStreamer &OS, const MCInst &Inst,
166 const MCSubtargetInfo &STI);
167 void emitInstructionEnd(MCObjectStreamer &OS, const MCInst &Inst);
168
169
170 std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;
171
172 MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const override;
173
174 std::optional<bool> evaluateFixup(const MCFragment &, MCFixup &, MCValue &,
175 uint64_t &) override;
176 void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
177 uint8_t *Data, uint64_t Value, bool IsResolved) override;
178
179 bool mayNeedRelaxation(unsigned Opcode, ArrayRef<MCOperand> Operands,
180 const MCSubtargetInfo &STI) const override;
181
182 bool fixupNeedsRelaxationAdvanced(const MCFragment &, const MCFixup &,
183 const MCValue &, uint64_t,
184 bool) const override;
185
186 void relaxInstruction(MCInst &Inst,
187 const MCSubtargetInfo &STI) const override;
188
189 bool padInstructionViaRelaxation(MCFragment &RF, MCCodeEmitter &Emitter,
190 unsigned &RemainingSize) const;
191
192 bool padInstructionViaPrefix(MCFragment &RF, MCCodeEmitter &Emitter,
193 unsigned &RemainingSize) const;
194
195 bool padInstructionEncoding(MCFragment &RF, MCCodeEmitter &Emitter,
196 unsigned &RemainingSize) const;
197
198 bool finishLayout() const override;
199
200 unsigned getMaximumNopSize(const MCSubtargetInfo &STI) const override;
201
202 bool writeNopData(raw_ostream &OS, uint64_t Count,
203 const MCSubtargetInfo *STI) const override;
204};
205} // end anonymous namespace
206
207static bool isRelaxableBranch(unsigned Opcode) {
208 return Opcode == X86::JCC_1 || Opcode == X86::JMP_1;
209}
210
211static unsigned getRelaxedOpcodeBranch(unsigned Opcode,
212 bool Is16BitMode = false) {
213 switch (Opcode) {
214 default:
215 llvm_unreachable("invalid opcode for branch");
216 case X86::JCC_1:
217 return (Is16BitMode) ? X86::JCC_2 : X86::JCC_4;
218 case X86::JMP_1:
219 return (Is16BitMode) ? X86::JMP_2 : X86::JMP_4;
220 }
221}
222
223static unsigned getRelaxedOpcode(const MCInst &MI, bool Is16BitMode) {
224 unsigned Opcode = MI.getOpcode();
225 return isRelaxableBranch(Opcode) ? getRelaxedOpcodeBranch(Opcode, Is16BitMode)
227}
228
230 const MCInstrInfo &MCII) {
231 unsigned Opcode = MI.getOpcode();
232 switch (Opcode) {
233 default:
234 return X86::COND_INVALID;
235 case X86::JCC_1: {
236 const MCInstrDesc &Desc = MCII.get(Opcode);
237 return static_cast<X86::CondCode>(
238 MI.getOperand(Desc.getNumOperands() - 1).getImm());
239 }
240 }
241}
242
246 return classifySecondCondCodeInMacroFusion(CC);
247}
248
249/// Check if the instruction uses RIP relative addressing.
250static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII) {
251 unsigned Opcode = MI.getOpcode();
252 const MCInstrDesc &Desc = MCII.get(Opcode);
253 uint64_t TSFlags = Desc.TSFlags;
254 unsigned CurOp = X86II::getOperandBias(Desc);
255 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
256 if (MemoryOperand < 0)
257 return false;
258 unsigned BaseRegNum = MemoryOperand + CurOp + X86::AddrBaseReg;
259 MCRegister BaseReg = MI.getOperand(BaseRegNum).getReg();
260 return (BaseReg == X86::RIP);
261}
262
263/// Check if the instruction is a prefix.
264static bool isPrefix(unsigned Opcode, const MCInstrInfo &MCII) {
265 return X86II::isPrefix(MCII.get(Opcode).TSFlags);
266}
267
268/// Check if the instruction is valid as the first instruction in macro fusion.
269static bool isFirstMacroFusibleInst(const MCInst &Inst,
270 const MCInstrInfo &MCII) {
271 // An Intel instruction with RIP relative addressing is not macro fusible.
272 if (isRIPRelative(Inst, MCII))
273 return false;
277}
278
279/// X86 can reduce the bytes of NOP by padding instructions with prefixes to
280/// get a better peformance in some cases. Here, we determine which prefix is
281/// the most suitable.
282///
283/// If the instruction has a segment override prefix, use the existing one.
284/// If the target is 64-bit, use the CS.
285/// If the target is 32-bit,
286/// - If the instruction has a ESP/EBP base register, use SS.
287/// - Otherwise use DS.
288uint8_t X86AsmBackend::determinePaddingPrefix(const MCInst &Inst) const {
289 assert((STI.hasFeature(X86::Is32Bit) || STI.hasFeature(X86::Is64Bit)) &&
290 "Prefixes can be added only in 32-bit or 64-bit mode.");
291 const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
292 uint64_t TSFlags = Desc.TSFlags;
293
294 // Determine where the memory operand starts, if present.
295 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
296 if (MemoryOperand != -1)
297 MemoryOperand += X86II::getOperandBias(Desc);
298
299 MCRegister SegmentReg;
300 if (MemoryOperand >= 0) {
301 // Check for explicit segment override on memory operand.
302 SegmentReg = Inst.getOperand(MemoryOperand + X86::AddrSegmentReg).getReg();
303 }
304
305 switch (TSFlags & X86II::FormMask) {
306 default:
307 break;
308 case X86II::RawFrmDstSrc: {
309 // Check segment override opcode prefix as needed (not for %ds).
310 if (Inst.getOperand(2).getReg() != X86::DS)
311 SegmentReg = Inst.getOperand(2).getReg();
312 break;
313 }
314 case X86II::RawFrmSrc: {
315 // Check segment override opcode prefix as needed (not for %ds).
316 if (Inst.getOperand(1).getReg() != X86::DS)
317 SegmentReg = Inst.getOperand(1).getReg();
318 break;
319 }
321 // Check segment override opcode prefix as needed.
322 SegmentReg = Inst.getOperand(1).getReg();
323 break;
324 }
325 }
326
327 if (SegmentReg)
328 return X86::getSegmentOverridePrefixForReg(SegmentReg);
329
330 if (STI.hasFeature(X86::Is64Bit))
331 return X86::CS_Encoding;
332
333 if (MemoryOperand >= 0) {
334 unsigned BaseRegNum = MemoryOperand + X86::AddrBaseReg;
335 MCRegister BaseReg = Inst.getOperand(BaseRegNum).getReg();
336 if (BaseReg == X86::ESP || BaseReg == X86::EBP)
337 return X86::SS_Encoding;
338 }
339 return X86::DS_Encoding;
340}
341
342/// Check if the two instructions will be macro-fused on the target cpu.
343bool X86AsmBackend::isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const {
344 const MCInstrDesc &InstDesc = MCII->get(Jcc.getOpcode());
345 if (!InstDesc.isConditionalBranch())
346 return false;
347 if (!isFirstMacroFusibleInst(Cmp, *MCII))
348 return false;
349 const X86::FirstMacroFusionInstKind CmpKind =
351 const X86::SecondMacroFusionInstKind BranchKind =
353 return X86::isMacroFused(CmpKind, BranchKind);
354}
355
356/// Check if the instruction has a variant symbol operand.
357static bool hasVariantSymbol(const MCInst &MI) {
358 for (auto &Operand : MI) {
359 if (!Operand.isExpr())
360 continue;
361 const MCExpr &Expr = *Operand.getExpr();
362 if (Expr.getKind() == MCExpr::SymbolRef &&
363 cast<MCSymbolRefExpr>(&Expr)->getSpecifier())
364 return true;
365 }
366 return false;
367}
368
369/// X86 has certain instructions which enable interrupts exactly one
370/// instruction *after* the instruction which stores to SS. Return true if the
371/// given instruction may have such an interrupt delay slot.
372static bool mayHaveInterruptDelaySlot(unsigned InstOpcode) {
373 switch (InstOpcode) {
374 case X86::POPSS16:
375 case X86::POPSS32:
376 case X86::STI:
377 return true;
378
379 case X86::MOV16sr:
380 case X86::MOV32sr:
381 case X86::MOV64sr:
382 case X86::MOV16sm:
383 // In fact, this is only the case if the first operand is SS. However, as
384 // segment moves occur extremely rarely, this is just a minor pessimization.
385 return true;
386 }
387 return false;
388}
389
390/// Return true if we can insert NOP or prefixes automatically before the
391/// the instruction to be emitted.
392bool X86AsmBackend::canPadInst(const MCInst &Inst, MCObjectStreamer &OS) const {
393 if (hasVariantSymbol(Inst))
394 // Linker may rewrite the instruction with variant symbol operand(e.g.
395 // TLSCALL).
396 return false;
397
398 if (mayHaveInterruptDelaySlot(PrevInstOpcode))
399 // If this instruction follows an interrupt enabling instruction with a one
400 // instruction delay, inserting a nop would change behavior.
401 return false;
402
403 if (isPrefix(PrevInstOpcode, *MCII))
404 // If this instruction follows a prefix, inserting a nop/prefix would change
405 // semantic.
406 return false;
407
408 if (isPrefix(Inst.getOpcode(), *MCII))
409 // If this instruction is a prefix, inserting a prefix would change
410 // semantic.
411 return false;
412
413 // If this instruction follows any data, there is no clear instruction
414 // boundary, inserting a nop/prefix would change semantic.
415 auto Offset = OS.getCurFragSize();
416 if (Offset && (OS.getCurrentFragment() != PrevInstPosition.first ||
417 Offset != PrevInstPosition.second))
418 return false;
419
420 return true;
421}
422
423bool X86AsmBackend::canPadBranches(MCObjectStreamer &OS) const {
424 if (!OS.getAllowAutoPadding())
425 return false;
426 assert(allowAutoPadding() && "incorrect initialization!");
427
428 // We only pad in text section.
429 if (!OS.getCurrentSectionOnly()->isText())
430 return false;
431
432 // Branches only need to be aligned in 32-bit or 64-bit mode.
433 if (!(STI.hasFeature(X86::Is64Bit) || STI.hasFeature(X86::Is32Bit)))
434 return false;
435
436 return true;
437}
438
439/// Check if the instruction operand needs to be aligned.
440bool X86AsmBackend::needAlign(const MCInst &Inst) const {
441 const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
442 return (Desc.isConditionalBranch() &&
443 (AlignBranchType & X86::AlignBranchJcc)) ||
444 (Desc.isUnconditionalBranch() &&
445 (AlignBranchType & X86::AlignBranchJmp)) ||
446 (Desc.isCall() && (AlignBranchType & X86::AlignBranchCall)) ||
447 (Desc.isReturn() && (AlignBranchType & X86::AlignBranchRet)) ||
448 (Desc.isIndirectBranch() &&
449 (AlignBranchType & X86::AlignBranchIndirect));
450}
451
453 const MCSubtargetInfo &STI) {
454 bool AutoPadding = S.getAllowAutoPadding();
455 if (LLVM_LIKELY(!AutoPadding && !X86PadForAlign)) {
456 S.MCObjectStreamer::emitInstruction(Inst, STI);
457 return;
458 }
459
460 auto &Backend = static_cast<X86AsmBackend &>(S.getAssembler().getBackend());
461 Backend.emitInstructionBegin(S, Inst, STI);
462 S.MCObjectStreamer::emitInstruction(Inst, STI);
463 Backend.emitInstructionEnd(S, Inst);
464}
465
466/// Insert BoundaryAlignFragment before instructions to align branches.
467void X86AsmBackend::emitInstructionBegin(MCObjectStreamer &OS,
468 const MCInst &Inst, const MCSubtargetInfo &STI) {
469 bool CanPadInst = canPadInst(Inst, OS);
470 if (CanPadInst)
472
473 if (!canPadBranches(OS))
474 return;
475
476 // NB: PrevInst only valid if canPadBranches is true.
477 if (!isMacroFused(PrevInst, Inst))
478 // Macro fusion doesn't happen indeed, clear the pending.
479 PendingBA = nullptr;
480
481 // When branch padding is enabled (basically the skx102 erratum => unlikely),
482 // we call canPadInst (not cheap) twice. However, in the common case, we can
483 // avoid unnecessary calls to that, as this is otherwise only used for
484 // relaxable fragments.
485 if (!CanPadInst)
486 return;
487
488 if (PendingBA) {
489 auto *NextFragment = PendingBA->getNext();
490 assert(NextFragment && "NextFragment should not be null");
491 if (NextFragment == OS.getCurrentFragment())
492 return;
493 // We eagerly create an empty fragment when inserting a fragment
494 // with a variable-size tail.
495 if (NextFragment->getNext() == OS.getCurrentFragment())
496 return;
497
498 // Macro fusion actually happens and there is no other fragment inserted
499 // after the previous instruction.
500 //
501 // Do nothing here since we already inserted a BoudaryAlign fragment when
502 // we met the first instruction in the fused pair and we'll tie them
503 // together in emitInstructionEnd.
504 //
505 // Note: When there is at least one fragment, such as MCAlignFragment,
506 // inserted after the previous instruction, e.g.
507 //
508 // \code
509 // cmp %rax %rcx
510 // .align 16
511 // je .Label0
512 // \ endcode
513 //
514 // We will treat the JCC as a unfused branch although it may be fused
515 // with the CMP.
516 return;
517 }
518
519 if (needAlign(Inst) || ((AlignBranchType & X86::AlignBranchFused) &&
520 isFirstMacroFusibleInst(Inst, *MCII))) {
521 // If we meet a unfused branch or the first instuction in a fusiable pair,
522 // insert a BoundaryAlign fragment.
523 PendingBA =
524 OS.newSpecialFragment<MCBoundaryAlignFragment>(AlignBoundary, STI);
525 }
526}
527
528/// Set the last fragment to be aligned for the BoundaryAlignFragment.
529void X86AsmBackend::emitInstructionEnd(MCObjectStreamer &OS,
530 const MCInst &Inst) {
531 // Update PrevInstOpcode here, canPadInst() reads that.
532 MCFragment *CF = OS.getCurrentFragment();
533 PrevInstOpcode = Inst.getOpcode();
534 PrevInstPosition = std::make_pair(CF, OS.getCurFragSize());
535
536 if (!canPadBranches(OS))
537 return;
538
539 // PrevInst is only needed if canPadBranches. Copying an MCInst isn't cheap.
540 PrevInst = Inst;
541
542 if (!needAlign(Inst) || !PendingBA)
543 return;
544
545 // Tie the aligned instructions into a pending BoundaryAlign.
546 PendingBA->setLastFragment(CF);
547 PendingBA = nullptr;
548
549 // We need to ensure that further data isn't added to the current
550 // DataFragment, so that we can get the size of instructions later in
551 // MCAssembler::relaxBoundaryAlign. The easiest way is to insert a new empty
552 // DataFragment.
553 OS.newFragment();
554
555 // Update the maximum alignment on the current section if necessary.
556 CF->getParent()->ensureMinAlignment(AlignBoundary);
557}
558
559std::optional<MCFixupKind> X86AsmBackend::getFixupKind(StringRef Name) const {
560 if (STI.getTargetTriple().isOSBinFormatELF()) {
561 unsigned Type;
562 if (STI.getTargetTriple().isX86_64()) {
563 Type = llvm::StringSwitch<unsigned>(Name)
564#define ELF_RELOC(X, Y) .Case(#X, Y)
565#include "llvm/BinaryFormat/ELFRelocs/x86_64.def"
566#undef ELF_RELOC
567 .Case("BFD_RELOC_NONE", ELF::R_X86_64_NONE)
568 .Case("BFD_RELOC_8", ELF::R_X86_64_8)
569 .Case("BFD_RELOC_16", ELF::R_X86_64_16)
570 .Case("BFD_RELOC_32", ELF::R_X86_64_32)
571 .Case("BFD_RELOC_64", ELF::R_X86_64_64)
572 .Default(-1u);
573 } else {
574 Type = llvm::StringSwitch<unsigned>(Name)
575#define ELF_RELOC(X, Y) .Case(#X, Y)
576#include "llvm/BinaryFormat/ELFRelocs/i386.def"
577#undef ELF_RELOC
578 .Case("BFD_RELOC_NONE", ELF::R_386_NONE)
579 .Case("BFD_RELOC_8", ELF::R_386_8)
580 .Case("BFD_RELOC_16", ELF::R_386_16)
581 .Case("BFD_RELOC_32", ELF::R_386_32)
582 .Default(-1u);
583 }
584 if (Type == -1u)
585 return std::nullopt;
586 return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
587 }
588 return MCAsmBackend::getFixupKind(Name);
589}
590
591MCFixupKindInfo X86AsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
592 const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
593 // clang-format off
594 {"reloc_riprel_4byte", 0, 32, 0},
595 {"reloc_riprel_4byte_movq_load", 0, 32, 0},
596 {"reloc_riprel_4byte_movq_load_rex2", 0, 32, 0},
597 {"reloc_riprel_4byte_relax", 0, 32, 0},
598 {"reloc_riprel_4byte_relax_rex", 0, 32, 0},
599 {"reloc_riprel_4byte_relax_rex2", 0, 32, 0},
600 {"reloc_riprel_4byte_relax_evex", 0, 32, 0},
601 {"reloc_signed_4byte", 0, 32, 0},
602 {"reloc_signed_4byte_relax", 0, 32, 0},
603 {"reloc_global_offset_table", 0, 32, 0},
604 {"reloc_branch_4byte_pcrel", 0, 32, 0},
605 // clang-format on
606 };
607
608 // Fixup kinds from .reloc directive are like R_386_NONE/R_X86_64_NONE. They
609 // do not require any extra processing.
610 if (mc::isRelocation(Kind))
611 return {};
612
613 if (Kind < FirstTargetFixupKind)
615
617 "Invalid kind!");
618 assert(Infos[Kind - FirstTargetFixupKind].Name && "Empty fixup name!");
619 return Infos[Kind - FirstTargetFixupKind];
620}
621
622static unsigned getFixupKindSize(unsigned Kind) {
623 switch (Kind) {
624 default:
625 llvm_unreachable("invalid fixup kind!");
626 case FK_NONE:
627 return 0;
628 case FK_SecRel_1:
629 case FK_Data_1:
630 return 1;
631 case FK_SecRel_2:
632 case FK_Data_2:
633 return 2;
645 case FK_SecRel_4:
646 case FK_Data_4:
647 return 4;
648 case FK_SecRel_8:
649 case FK_Data_8:
650 return 8;
651 }
652}
653
654constexpr char GotSymName[] = "_GLOBAL_OFFSET_TABLE_";
655
656// Adjust PC-relative fixup offsets, which are calculated from the start of the
657// next instruction.
658std::optional<bool> X86AsmBackend::evaluateFixup(const MCFragment &,
659 MCFixup &Fixup,
660 MCValue &Target, uint64_t &) {
661 if (Fixup.isPCRel()) {
662 switch (Fixup.getKind()) {
663 case FK_Data_1:
664 Target.setConstant(Target.getConstant() - 1);
665 break;
666 case FK_Data_2:
667 Target.setConstant(Target.getConstant() - 2);
668 break;
669 default: {
670 Target.setConstant(Target.getConstant() - 4);
671 auto *Add = Target.getAddSym();
672 // If this is a pc-relative load off _GLOBAL_OFFSET_TABLE_:
673 // leaq _GLOBAL_OFFSET_TABLE_(%rip), %r15
674 // this needs to be a GOTPC32 relocation.
675 if (Add && Add->getName() == GotSymName)
676 Fixup = MCFixup::create(Fixup.getOffset(), Fixup.getValue(),
678 } break;
679 }
680 }
681 // Use default handling for `Value` and `IsResolved`.
682 return {};
683}
684
685void X86AsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
686 const MCValue &Target, uint8_t *Data,
687 uint64_t Value, bool IsResolved) {
688 // Force relocation when there is a specifier. This might be too conservative
689 // - GAS doesn't emit a relocation for call local@plt; local:.
690 if (Target.getSpecifier())
691 IsResolved = false;
692 maybeAddReloc(F, Fixup, Target, Value, IsResolved);
693
694 auto Kind = Fixup.getKind();
695 if (mc::isRelocation(Kind))
696 return;
697 unsigned Size = getFixupKindSize(Kind);
698
699 assert(Fixup.getOffset() + Size <= F.getSize() && "Invalid fixup offset!");
700
701 // Check fixup value overflow similar to GAS (fixups emitted as RELA
702 // relocations have a value of 0).
703 // - Unknown signedness: the range (-2^N, 2^N) is allowed,
704 // accommodating intN_t, uintN_t, and a non-positive value type.
705 // - Signed (intN_t): the range [-2^(N-1), 2^(N-1)) is allowed.
706 //
707 // Currently only resolved PC-relative fixups are treated as signed. GAS
708 // treats more as signed (e.g. unresolved R_X86_64_32S).
709 // Unresolved fixups have unknown signedness to allow `jmp foo+0xffffffff`.
710 if (Size && Size < 8) {
711 bool Signed = IsResolved && Fixup.isPCRel();
712 uint64_t Mask = ~uint64_t(0) << (Size * 8 - (Signed ? 1 : 0));
713 if ((Value & Mask) && (Signed ? (Value & Mask) != Mask : (-Value & Mask)))
714 getContext().reportError(Fixup.getLoc(),
715 "value of " + Twine(int64_t(Value)) +
716 " is too large for field of " + Twine(Size) +
717 (Size == 1 ? " byte" : " bytes"));
718 }
719
720 for (unsigned i = 0; i != Size; ++i)
721 Data[i] = uint8_t(Value >> (i * 8));
722}
723
724bool X86AsmBackend::mayNeedRelaxation(unsigned Opcode,
725 ArrayRef<MCOperand> Operands,
726 const MCSubtargetInfo &STI) const {
727 unsigned SkipOperands = X86::isCCMPCC(Opcode) ? 2 : 0;
728 return isRelaxableBranch(Opcode) ||
729 (X86::getOpcodeForLongImmediateForm(Opcode) != Opcode &&
730 Operands[Operands.size() - 1 - SkipOperands].isExpr());
731}
732
733bool X86AsmBackend::fixupNeedsRelaxationAdvanced(const MCFragment &,
734 const MCFixup &Fixup,
735 const MCValue &Target,
736 uint64_t Value,
737 bool Resolved) const {
738 // If resolved, relax if the value is too big for a (signed) i8.
739 //
740 // Currently, `jmp local@plt` relaxes JMP even if the offset is small,
741 // different from gas.
742 if (Resolved)
743 return !isInt<8>(Value) || Target.getSpecifier();
744
745 // Otherwise, relax unless there is a @ABS8 specifier.
746 if (Fixup.getKind() == FK_Data_1 && Target.getAddSym() &&
747 Target.getSpecifier() == X86::S_ABS8)
748 return false;
749 return true;
750}
751
752// FIXME: Can tblgen help at all here to verify there aren't other instructions
753// we can relax?
754void X86AsmBackend::relaxInstruction(MCInst &Inst,
755 const MCSubtargetInfo &STI) const {
756 // The only relaxations X86 does is from a 1byte pcrel to a 4byte pcrel.
757 bool Is16BitMode = STI.hasFeature(X86::Is16Bit);
758 unsigned RelaxedOp = getRelaxedOpcode(Inst, Is16BitMode);
759 assert(RelaxedOp != Inst.getOpcode());
760 Inst.setOpcode(RelaxedOp);
761}
762
763bool X86AsmBackend::padInstructionViaPrefix(MCFragment &RF,
764 MCCodeEmitter &Emitter,
765 unsigned &RemainingSize) const {
766 if (!RF.getAllowAutoPadding())
767 return false;
768 // If the instruction isn't fully relaxed, shifting it around might require a
769 // larger value for one of the fixups then can be encoded. The outer loop
770 // will also catch this before moving to the next instruction, but we need to
771 // prevent padding this single instruction as well.
772 if (mayNeedRelaxation(RF.getOpcode(), RF.getOperands(),
773 *RF.getSubtargetInfo()))
774 return false;
775
776 const unsigned OldSize = RF.getVarSize();
777 if (OldSize == 15)
778 return false;
779
780 const unsigned MaxPossiblePad = std::min(15 - OldSize, RemainingSize);
781 const unsigned RemainingPrefixSize = [&]() -> unsigned {
782 SmallString<15> Code;
783 X86_MC::emitPrefix(Emitter, RF.getInst(), Code, STI);
784 assert(Code.size() < 15 && "The number of prefixes must be less than 15.");
785
786 // TODO: It turns out we need a decent amount of plumbing for the target
787 // specific bits to determine number of prefixes its safe to add. Various
788 // targets (older chips mostly, but also Atom family) encounter decoder
789 // stalls with too many prefixes. For testing purposes, we set the value
790 // externally for the moment.
791 unsigned ExistingPrefixSize = Code.size();
792 if (TargetPrefixMax <= ExistingPrefixSize)
793 return 0;
794 return TargetPrefixMax - ExistingPrefixSize;
795 }();
796 const unsigned PrefixBytesToAdd =
797 std::min(MaxPossiblePad, RemainingPrefixSize);
798 if (PrefixBytesToAdd == 0)
799 return false;
800
801 const uint8_t Prefix = determinePaddingPrefix(RF.getInst());
802
803 SmallString<256> Code;
804 Code.append(PrefixBytesToAdd, Prefix);
805 Code.append(RF.getVarContents().begin(), RF.getVarContents().end());
806 RF.setVarContents(Code);
807
808 // Adjust the fixups for the change in offsets
809 for (auto &F : RF.getVarFixups())
810 F.setOffset(PrefixBytesToAdd + F.getOffset());
811
812 RemainingSize -= PrefixBytesToAdd;
813 return true;
814}
815
816bool X86AsmBackend::padInstructionViaRelaxation(MCFragment &RF,
817 MCCodeEmitter &Emitter,
818 unsigned &RemainingSize) const {
819 if (!mayNeedRelaxation(RF.getOpcode(), RF.getOperands(),
820 *RF.getSubtargetInfo()))
821 // TODO: There are lots of other tricks we could apply for increasing
822 // encoding size without impacting performance.
823 return false;
824
825 MCInst Relaxed = RF.getInst();
826 relaxInstruction(Relaxed, *RF.getSubtargetInfo());
827
829 SmallString<15> Code;
830 Emitter.encodeInstruction(Relaxed, Code, Fixups, *RF.getSubtargetInfo());
831 const unsigned OldSize = RF.getVarContents().size();
832 const unsigned NewSize = Code.size();
833 assert(NewSize >= OldSize && "size decrease during relaxation?");
834 unsigned Delta = NewSize - OldSize;
835 if (Delta > RemainingSize)
836 return false;
837 RF.setInst(Relaxed);
838 RF.setVarContents(Code);
839 RF.setVarFixups(Fixups);
840 RemainingSize -= Delta;
841 return true;
842}
843
844bool X86AsmBackend::padInstructionEncoding(MCFragment &RF,
845 MCCodeEmitter &Emitter,
846 unsigned &RemainingSize) const {
847 bool Changed = false;
848 if (RemainingSize != 0)
849 Changed |= padInstructionViaRelaxation(RF, Emitter, RemainingSize);
850 if (RemainingSize != 0)
851 Changed |= padInstructionViaPrefix(RF, Emitter, RemainingSize);
852 return Changed;
853}
854
855bool X86AsmBackend::finishLayout() const {
856 // See if we can further relax some instructions to cut down on the number of
857 // nop bytes required for code alignment. The actual win is in reducing
858 // instruction count, not number of bytes. Modern X86-64 can easily end up
859 // decode limited. It is often better to reduce the number of instructions
860 // (i.e. eliminate nops) even at the cost of increasing the size and
861 // complexity of others.
862 if (!X86PadForAlign && !X86PadForBranchAlign)
863 return false;
864
865 // The processed regions are delimitered by LabeledFragments. -g may have more
866 // MCSymbols and therefore different relaxation results. X86PadForAlign is
867 // disabled by default to eliminate the -g vs non -g difference.
868 DenseSet<MCFragment *> LabeledFragments;
869 for (const MCSymbol &S : Asm->symbols())
870 LabeledFragments.insert(S.getFragment());
871
872 bool Changed = false;
873 for (MCSection &Sec : *Asm) {
874 if (!Sec.isText())
875 continue;
876
878 for (MCSection::iterator I = Sec.begin(), IE = Sec.end(); I != IE; ++I) {
879 MCFragment &F = *I;
880
881 if (LabeledFragments.count(&F))
882 Relaxable.clear();
883
884 if (F.getKind() == MCFragment::FT_Data) // Skip and ignore
885 continue;
886
887 if (F.getKind() == MCFragment::FT_Relaxable) {
888 auto &RF = cast<MCFragment>(*I);
889 Relaxable.push_back(&RF);
890 continue;
891 }
892
893 auto canHandle = [](MCFragment &F) -> bool {
894 switch (F.getKind()) {
895 default:
896 return false;
898 return X86PadForAlign;
900 return X86PadForBranchAlign;
901 }
902 };
903 // For any unhandled kind, assume we can't change layout.
904 if (!canHandle(F)) {
905 Relaxable.clear();
906 continue;
907 }
908
909 // To keep the effects local, prefer to relax instructions closest to
910 // the align directive. This is purely about human understandability
911 // of the resulting code. If we later find a reason to expand
912 // particular instructions over others, we can adjust.
913 unsigned RemainingSize = Asm->computeFragmentSize(F) - F.getFixedSize();
914 while (!Relaxable.empty() && RemainingSize != 0) {
915 auto &RF = *Relaxable.pop_back_val();
916 // Give the backend a chance to play any tricks it wishes to increase
917 // the encoding size of the given instruction. Target independent code
918 // will try further relaxation, but target's may play further tricks.
919 Changed |= padInstructionEncoding(RF, Asm->getEmitter(), RemainingSize);
920
921 // If we have an instruction which hasn't been fully relaxed, we can't
922 // skip past it and insert bytes before it. Changing its starting
923 // offset might require a larger negative offset than it can encode.
924 // We don't need to worry about larger positive offsets as none of the
925 // possible offsets between this and our align are visible, and the
926 // ones afterwards aren't changing.
927 if (mayNeedRelaxation(RF.getOpcode(), RF.getOperands(),
928 *RF.getSubtargetInfo()))
929 break;
930 }
931 Relaxable.clear();
932
933 // If we're looking at a boundary align, make sure we don't try to pad
934 // its target instructions for some following directive. Doing so would
935 // break the alignment of the current boundary align.
936 if (auto *BF = dyn_cast<MCBoundaryAlignFragment>(&F)) {
937 cast<MCBoundaryAlignFragment>(F).setSize(RemainingSize);
938 Changed = true;
939 const MCFragment *LastFragment = BF->getLastFragment();
940 if (!LastFragment)
941 continue;
942 while (&*I != LastFragment)
943 ++I;
944 }
945 }
946 }
947
948 return Changed;
949}
950
951unsigned X86AsmBackend::getMaximumNopSize(const MCSubtargetInfo &STI) const {
952 if (STI.hasFeature(X86::Is16Bit))
953 return 4;
954 if (!STI.hasFeature(X86::FeatureNOPL) && !STI.hasFeature(X86::Is64Bit))
955 return 1;
956 if (STI.hasFeature(X86::TuningFast7ByteNOP))
957 return 7;
958 if (STI.hasFeature(X86::TuningFast15ByteNOP))
959 return 15;
960 if (STI.hasFeature(X86::TuningFast11ByteNOP))
961 return 11;
962 // FIXME: handle 32-bit mode
963 // 15-bytes is the longest single NOP instruction, but 10-bytes is
964 // commonly the longest that can be efficiently decoded.
965 return 10;
966}
967
968/// Write a sequence of optimal nops to the output, covering \p Count
969/// bytes.
970/// \return - true on success, false on failure
971bool X86AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
972 const MCSubtargetInfo *STI) const {
973 static const char Nops32Bit[10][11] = {
974 // nop
975 "\x90",
976 // xchg %ax,%ax
977 "\x66\x90",
978 // nopl (%[re]ax)
979 "\x0f\x1f\x00",
980 // nopl 0(%[re]ax)
981 "\x0f\x1f\x40\x00",
982 // nopl 0(%[re]ax,%[re]ax,1)
983 "\x0f\x1f\x44\x00\x00",
984 // nopw 0(%[re]ax,%[re]ax,1)
985 "\x66\x0f\x1f\x44\x00\x00",
986 // nopl 0L(%[re]ax)
987 "\x0f\x1f\x80\x00\x00\x00\x00",
988 // nopl 0L(%[re]ax,%[re]ax,1)
989 "\x0f\x1f\x84\x00\x00\x00\x00\x00",
990 // nopw 0L(%[re]ax,%[re]ax,1)
991 "\x66\x0f\x1f\x84\x00\x00\x00\x00\x00",
992 // nopw %cs:0L(%[re]ax,%[re]ax,1)
993 "\x66\x2e\x0f\x1f\x84\x00\x00\x00\x00\x00",
994 };
995
996 // 16-bit mode uses different nop patterns than 32-bit.
997 static const char Nops16Bit[4][11] = {
998 // nop
999 "\x90",
1000 // xchg %eax,%eax
1001 "\x66\x90",
1002 // lea 0(%si),%si
1003 "\x8d\x74\x00",
1004 // lea 0w(%si),%si
1005 "\x8d\xb4\x00\x00",
1006 };
1007
1008 const char(*Nops)[11] =
1009 STI->hasFeature(X86::Is16Bit) ? Nops16Bit : Nops32Bit;
1010
1011 uint64_t MaxNopLength = (uint64_t)getMaximumNopSize(*STI);
1012
1013 // Emit as many MaxNopLength NOPs as needed, then emit a NOP of the remaining
1014 // length.
1015 do {
1016 const uint8_t ThisNopLength = (uint8_t) std::min(Count, MaxNopLength);
1017 const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
1018 for (uint8_t i = 0; i < Prefixes; i++)
1019 OS << '\x66';
1020 const uint8_t Rest = ThisNopLength - Prefixes;
1021 if (Rest != 0)
1022 OS.write(Nops[Rest - 1], Rest);
1023 Count -= ThisNopLength;
1024 } while (Count != 0);
1025
1026 return true;
1027}
1028
1029/* *** */
1030
1031namespace {
1032
/// Common base for the ELF flavors of the X86 backend; records the ELF
/// OS/ABI byte that the object writer will emit into e_ident.
class ELFX86AsmBackend : public X86AsmBackend {
public:
  uint8_t OSABI;
  ELFX86AsmBackend(const Target &T, uint8_t OSABI, const MCSubtargetInfo &STI)
      : X86AsmBackend(T, STI), OSABI(OSABI) {}
};
1039
/// Backend producing 32-bit ELF objects with the i386 machine type.
class ELFX86_32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_32AsmBackend(const Target &T, uint8_t OSABI,
                      const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI, ELF::EM_386);
  }
};
1051
1052class ELFX86_X32AsmBackend : public ELFX86AsmBackend {
1053public:
1054 ELFX86_X32AsmBackend(const Target &T, uint8_t OSABI,
1055 const MCSubtargetInfo &STI)
1056 : ELFX86AsmBackend(T, OSABI, STI) {}
1057
1058 std::unique_ptr<MCObjectTargetWriter>
1059 createObjectTargetWriter() const override {
1060 return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
1062 }
1063};
1064
1065class ELFX86_IAMCUAsmBackend : public ELFX86AsmBackend {
1066public:
1067 ELFX86_IAMCUAsmBackend(const Target &T, uint8_t OSABI,
1068 const MCSubtargetInfo &STI)
1069 : ELFX86AsmBackend(T, OSABI, STI) {}
1070
1071 std::unique_ptr<MCObjectTargetWriter>
1072 createObjectTargetWriter() const override {
1073 return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
1075 }
1076};
1077
/// Backend producing 64-bit ELF objects with the x86-64 machine type.
class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_64AsmBackend(const Target &T, uint8_t OSABI,
                      const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ true, OSABI, ELF::EM_X86_64);
  }
};
1089
1090class WindowsX86AsmBackend : public X86AsmBackend {
1091 bool Is64Bit;
1092
1093public:
1094 WindowsX86AsmBackend(const Target &T, bool is64Bit,
1095 const MCSubtargetInfo &STI)
1096 : X86AsmBackend(T, STI)
1097 , Is64Bit(is64Bit) {
1098 }
1099
1100 std::optional<MCFixupKind> getFixupKind(StringRef Name) const override {
1101 return StringSwitch<std::optional<MCFixupKind>>(Name)
1102 .Case("dir32", FK_Data_4)
1103 .Case("secrel32", FK_SecRel_4)
1104 .Case("secidx", FK_SecRel_2)
1105 .Default(MCAsmBackend::getFixupKind(Name));
1106 }
1107
1108 std::unique_ptr<MCObjectTargetWriter>
1109 createObjectTargetWriter() const override {
1110 return createX86WinCOFFObjectWriter(Is64Bit);
1111 }
1112};
1113
namespace CU {

  /// Compact unwind encoding values.
  enum CompactUnwindEncodings {
    /// [RE]BP based frame where [RE]BP is pushed on the stack immediately after
    /// the return address, then [RE]SP is moved to [RE]BP.
    UNWIND_MODE_BP_FRAME = 0x01000000,

    /// A frameless function with a small constant stack size.
    UNWIND_MODE_STACK_IMMD = 0x02000000,

    /// A frameless function with a large constant stack size.
    UNWIND_MODE_STACK_IND = 0x03000000,

    /// No compact unwind encoding is available.
    UNWIND_MODE_DWARF = 0x04000000,

    /// Mask for encoding the frame registers.
    UNWIND_BP_FRAME_REGISTERS = 0x00007FFF,

    /// Mask for encoding the frameless registers.
    UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
  };

} // namespace CU
1139
1140class DarwinX86AsmBackend : public X86AsmBackend {
1141 const MCRegisterInfo &MRI;
1142
1143 /// Number of registers that can be saved in a compact unwind encoding.
1144 enum { CU_NUM_SAVED_REGS = 6 };
1145
1146 mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
1147 Triple TT;
1148 bool Is64Bit;
1149
1150 unsigned OffsetSize; ///< Offset of a "push" instruction.
1151 unsigned MoveInstrSize; ///< Size of a "move" instruction.
1152 unsigned StackDivide; ///< Amount to adjust stack size by.
1153protected:
1154 /// Size of a "push" instruction for the given register.
1155 unsigned PushInstrSize(MCRegister Reg) const {
1156 switch (Reg.id()) {
1157 case X86::EBX:
1158 case X86::ECX:
1159 case X86::EDX:
1160 case X86::EDI:
1161 case X86::ESI:
1162 case X86::EBP:
1163 case X86::RBX:
1164 case X86::RBP:
1165 return 1;
1166 case X86::R12:
1167 case X86::R13:
1168 case X86::R14:
1169 case X86::R15:
1170 return 2;
1171 }
1172 return 1;
1173 }
1174
1175private:
1176 /// Get the compact unwind number for a given register. The number
1177 /// corresponds to the enum lists in compact_unwind_encoding.h.
1178 int getCompactUnwindRegNum(unsigned Reg) const {
1179 static const MCPhysReg CU32BitRegs[7] = {
1180 X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
1181 };
1182 static const MCPhysReg CU64BitRegs[] = {
1183 X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
1184 };
1185 const MCPhysReg *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs;
1186 for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
1187 if (*CURegs == Reg)
1188 return Idx;
1189
1190 return -1;
1191 }
1192
1193 /// Return the registers encoded for a compact encoding with a frame
1194 /// pointer.
1195 uint32_t encodeCompactUnwindRegistersWithFrame() const {
1196 // Encode the registers in the order they were saved --- 3-bits per
1197 // register. The list of saved registers is assumed to be in reverse
1198 // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS.
1199 uint32_t RegEnc = 0;
1200 for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) {
1201 unsigned Reg = SavedRegs[i];
1202 if (Reg == 0) break;
1203
1204 int CURegNum = getCompactUnwindRegNum(Reg);
1205 if (CURegNum == -1) return ~0U;
1206
1207 // Encode the 3-bit register number in order, skipping over 3-bits for
1208 // each register.
1209 RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
1210 }
1211
1212 assert((RegEnc & 0x3FFFF) == RegEnc &&
1213 "Invalid compact register encoding!");
1214 return RegEnc;
1215 }
1216
1217 /// Create the permutation encoding used with frameless stacks. It is
1218 /// passed the number of registers to be saved and an array of the registers
1219 /// saved.
1220 uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
1221 // The saved registers are numbered from 1 to 6. In order to encode the
1222 // order in which they were saved, we re-number them according to their
1223 // place in the register order. The re-numbering is relative to the last
1224 // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
1225 // that order:
1226 //
1227 // Orig Re-Num
1228 // ---- ------
1229 // 6 6
1230 // 2 2
1231 // 4 3
1232 // 5 3
1233 //
1234 for (unsigned i = 0; i < RegCount; ++i) {
1235 int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
1236 if (CUReg == -1) return ~0U;
1237 SavedRegs[i] = CUReg;
1238 }
1239
1240 // Reverse the list.
1241 std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);
1242
1243 uint32_t RenumRegs[CU_NUM_SAVED_REGS];
1244 for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i){
1245 unsigned Countless = 0;
1246 for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
1247 if (SavedRegs[j] < SavedRegs[i])
1248 ++Countless;
1249
1250 RenumRegs[i] = SavedRegs[i] - Countless - 1;
1251 }
1252
1253 // Take the renumbered values and encode them into a 10-bit number.
1254 uint32_t permutationEncoding = 0;
1255 switch (RegCount) {
1256 case 6:
1257 permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
1258 + 6 * RenumRegs[2] + 2 * RenumRegs[3]
1259 + RenumRegs[4];
1260 break;
1261 case 5:
1262 permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
1263 + 6 * RenumRegs[3] + 2 * RenumRegs[4]
1264 + RenumRegs[5];
1265 break;
1266 case 4:
1267 permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
1268 + 3 * RenumRegs[4] + RenumRegs[5];
1269 break;
1270 case 3:
1271 permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
1272 + RenumRegs[5];
1273 break;
1274 case 2:
1275 permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
1276 break;
1277 case 1:
1278 permutationEncoding |= RenumRegs[5];
1279 break;
1280 }
1281
1282 assert((permutationEncoding & 0x3FF) == permutationEncoding &&
1283 "Invalid compact register encoding!");
1284 return permutationEncoding;
1285 }
1286
1287public:
1288 DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI,
1289 const MCSubtargetInfo &STI)
1290 : X86AsmBackend(T, STI), MRI(MRI), TT(STI.getTargetTriple()),
1291 Is64Bit(TT.isX86_64()) {
1292 memset(SavedRegs, 0, sizeof(SavedRegs));
1293 OffsetSize = Is64Bit ? 8 : 4;
1294 MoveInstrSize = Is64Bit ? 3 : 2;
1295 StackDivide = Is64Bit ? 8 : 4;
1296 }
1297
1298 std::unique_ptr<MCObjectTargetWriter>
1299 createObjectTargetWriter() const override {
1300 uint32_t CPUType = cantFail(MachO::getCPUType(TT));
1301 uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TT));
1302 return createX86MachObjectWriter(Is64Bit, CPUType, CPUSubType);
1303 }
1304
1305 /// Implementation of algorithm to generate the compact unwind encoding
1306 /// for the CFI instructions.
1307 uint64_t generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI,
1308 const MCContext *Ctxt) const override {
1310 if (Instrs.empty()) return 0;
1311 if (!isDarwinCanonicalPersonality(FI->Personality) &&
1313 return CU::UNWIND_MODE_DWARF;
1314
1315 // Reset the saved registers.
1316 unsigned SavedRegIdx = 0;
1317 memset(SavedRegs, 0, sizeof(SavedRegs));
1318
1319 bool HasFP = false;
1320
1321 // Encode that we are using EBP/RBP as the frame pointer.
1322 uint64_t CompactUnwindEncoding = 0;
1323
1324 unsigned SubtractInstrIdx = Is64Bit ? 3 : 2;
1325 unsigned InstrOffset = 0;
1326 unsigned StackAdjust = 0;
1327 uint64_t StackSize = 0;
1328 int64_t MinAbsOffset = std::numeric_limits<int64_t>::max();
1329
1330 for (const MCCFIInstruction &Inst : Instrs) {
1331 switch (Inst.getOperation()) {
1332 default:
1333 // Any other CFI directives indicate a frame that we aren't prepared
1334 // to represent via compact unwind, so just bail out.
1335 return CU::UNWIND_MODE_DWARF;
1337 // Defines a frame pointer. E.g.
1338 //
1339 // movq %rsp, %rbp
1340 // L0:
1341 // .cfi_def_cfa_register %rbp
1342 //
1343 HasFP = true;
1344
1345 // If the frame pointer is other than esp/rsp, we do not have a way to
1346 // generate a compact unwinding representation, so bail out.
1347 if (*MRI.getLLVMRegNum(Inst.getRegister(), true) !=
1348 (Is64Bit ? X86::RBP : X86::EBP))
1349 return CU::UNWIND_MODE_DWARF;
1350
1351 // Reset the counts.
1352 memset(SavedRegs, 0, sizeof(SavedRegs));
1353 StackAdjust = 0;
1354 SavedRegIdx = 0;
1355 MinAbsOffset = std::numeric_limits<int64_t>::max();
1356 InstrOffset += MoveInstrSize;
1357 break;
1358 }
1360 // Defines a new offset for the CFA. E.g.
1361 //
1362 // With frame:
1363 //
1364 // pushq %rbp
1365 // L0:
1366 // .cfi_def_cfa_offset 16
1367 //
1368 // Without frame:
1369 //
1370 // subq $72, %rsp
1371 // L0:
1372 // .cfi_def_cfa_offset 80
1373 //
1374 StackSize = Inst.getOffset() / StackDivide;
1375 break;
1376 }
1378 // Defines a "push" of a callee-saved register. E.g.
1379 //
1380 // pushq %r15
1381 // pushq %r14
1382 // pushq %rbx
1383 // L0:
1384 // subq $120, %rsp
1385 // L1:
1386 // .cfi_offset %rbx, -40
1387 // .cfi_offset %r14, -32
1388 // .cfi_offset %r15, -24
1389 //
1390 if (SavedRegIdx == CU_NUM_SAVED_REGS)
1391 // If there are too many saved registers, we cannot use a compact
1392 // unwind encoding.
1393 return CU::UNWIND_MODE_DWARF;
1394
1395 MCRegister Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
1396 SavedRegs[SavedRegIdx++] = Reg.id();
1397 StackAdjust += OffsetSize;
1398 MinAbsOffset = std::min(MinAbsOffset, std::abs(Inst.getOffset()));
1399 InstrOffset += PushInstrSize(Reg);
1400 break;
1401 }
1402 }
1403 }
1404
1405 StackAdjust /= StackDivide;
1406
1407 if (HasFP) {
1408 if ((StackAdjust & 0xFF) != StackAdjust)
1409 // Offset was too big for a compact unwind encoding.
1410 return CU::UNWIND_MODE_DWARF;
1411
1412 // We don't attempt to track a real StackAdjust, so if the saved registers
1413 // aren't adjacent to rbp we can't cope.
1414 if (SavedRegIdx != 0 && MinAbsOffset != 3 * (int)OffsetSize)
1415 return CU::UNWIND_MODE_DWARF;
1416
1417 // Get the encoding of the saved registers when we have a frame pointer.
1418 uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame();
1419 if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;
1420
1421 CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
1422 CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
1423 CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
1424 } else {
1425 SubtractInstrIdx += InstrOffset;
1426 ++StackAdjust;
1427
1428 if ((StackSize & 0xFF) == StackSize) {
1429 // Frameless stack with a small stack size.
1430 CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;
1431
1432 // Encode the stack size.
1433 CompactUnwindEncoding |= (StackSize & 0xFF) << 16;
1434 } else {
1435 if ((StackAdjust & 0x7) != StackAdjust)
1436 // The extra stack adjustments are too big for us to handle.
1437 return CU::UNWIND_MODE_DWARF;
1438
1439 // Frameless stack with an offset too large for us to encode compactly.
1440 CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;
1441
1442 // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
1443 // instruction.
1444 CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;
1445
1446 // Encode any extra stack adjustments (done via push instructions).
1447 CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
1448 }
1449
1450 // Encode the number of registers saved. (Reverse the list first.)
1451 std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]);
1452 CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;
1453
1454 // Get the encoding of the saved registers when we don't have a frame
1455 // pointer.
1456 uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx);
1457 if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;
1458
1459 // Encode the register encoding.
1460 CompactUnwindEncoding |=
1461 RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
1462 }
1463
1464 return CompactUnwindEncoding;
1465 }
1466};
1467
1468} // end anonymous namespace
1469
1471 const MCSubtargetInfo &STI,
1472 const MCRegisterInfo &MRI,
1473 const MCTargetOptions &Options) {
1474 const Triple &TheTriple = STI.getTargetTriple();
1475 if (TheTriple.isOSBinFormatMachO())
1476 return new DarwinX86AsmBackend(T, MRI, STI);
1477
1478 if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
1479 return new WindowsX86AsmBackend(T, false, STI);
1480
1481 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
1482
1483 if (TheTriple.isOSIAMCU())
1484 return new ELFX86_IAMCUAsmBackend(T, OSABI, STI);
1485
1486 return new ELFX86_32AsmBackend(T, OSABI, STI);
1487}
1488
1490 const MCSubtargetInfo &STI,
1491 const MCRegisterInfo &MRI,
1492 const MCTargetOptions &Options) {
1493 const Triple &TheTriple = STI.getTargetTriple();
1494 if (TheTriple.isOSBinFormatMachO())
1495 return new DarwinX86AsmBackend(T, MRI, STI);
1496
1497 if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
1498 return new WindowsX86AsmBackend(T, true, STI);
1499
1500 if (TheTriple.isUEFI()) {
1501 assert(TheTriple.isOSBinFormatCOFF() &&
1502 "Only COFF format is supported in UEFI environment.");
1503 return new WindowsX86AsmBackend(T, true, STI);
1504 }
1505
1506 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
1507
1508 if (TheTriple.isX32())
1509 return new ELFX86_X32AsmBackend(T, OSABI, STI);
1510 return new ELFX86_64AsmBackend(T, OSABI, STI);
1511}
1512
namespace {
/// ELF streamer that routes instruction emission through the X86-specific
/// helper (X86_MC::emitInstruction) instead of the generic MCELFStreamer
/// path.
class X86ELFStreamer : public MCELFStreamer {
public:
  X86ELFStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
                 std::unique_ptr<MCObjectWriter> OW,
                 std::unique_ptr<MCCodeEmitter> Emitter)
      : MCELFStreamer(Context, std::move(TAB), std::move(OW),
                      std::move(Emitter)) {}

  void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI) override;
};
} // end anonymous namespace
1525
// Delegate to the X86-specific emission helper so target emission logic is
// shared with other streamer flavors.
void X86ELFStreamer::emitInstruction(const MCInst &Inst,
                                     const MCSubtargetInfo &STI) {
  X86_MC::emitInstruction(*this, Inst, STI);
}
1530
1532 std::unique_ptr<MCAsmBackend> &&MAB,
1533 std::unique_ptr<MCObjectWriter> &&MOW,
1534 std::unique_ptr<MCCodeEmitter> &&MCE) {
1535 return new X86ELFStreamer(Context, std::move(MAB), std::move(MOW),
1536 std::move(MCE));
1537}
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
#define LLVM_LIKELY(EXPR)
Definition Compiler.h:335
dxil DXContainer Global Emitter
IRTranslator LLVM IR MI
static LVOptions Options
Definition LVOptions.cpp:25
static unsigned getRelaxedOpcode(unsigned Opcode)
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
#define T
PowerPC TLS Dynamic Call Fixup
if(PassOpts->AAPipeline)
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static MCInstrInfo * createMCInstrInfo()
static unsigned getRelaxedOpcodeBranch(unsigned Opcode, bool Is16BitMode=false)
static X86::SecondMacroFusionInstKind classifySecondInstInMacroFusion(const MCInst &MI, const MCInstrInfo &MCII)
static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII)
Check if the instruction uses RIP relative addressing.
static bool mayHaveInterruptDelaySlot(unsigned InstOpcode)
X86 has certain instructions which enable interrupts exactly one instruction after the instruction wh...
static bool isFirstMacroFusibleInst(const MCInst &Inst, const MCInstrInfo &MCII)
Check if the instruction is valid as the first instruction in macro fusion.
constexpr char GotSymName[]
static X86::CondCode getCondFromBranch(const MCInst &MI, const MCInstrInfo &MCII)
static unsigned getRelaxedOpcode(const MCInst &MI, bool Is16BitMode)
static unsigned getFixupKindSize(unsigned Kind)
static bool isRelaxableBranch(unsigned Opcode)
static bool isPrefix(unsigned Opcode, const MCInstrInfo &MCII)
Check if the instruction is a prefix.
static bool hasVariantSymbol(const MCInst &MI)
Check if the instruction has a variant symbol operand.
static bool is64Bit(const char *name)
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
Generic interface to target specific assembler backends.
virtual MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const
Get information on a fixup kind.
virtual std::optional< MCFixupKind > getFixupKind(StringRef Name) const
Map a relocation name used in .reloc to a fixup kind.
Represents required padding such that a particular other set of fragments does not cross a particular...
Definition MCSection.h:483
void setLastFragment(const MCFragment *F)
Definition MCSection.h:505
Context object for machine code objects.
Definition MCContext.h:83
LLVM_ABI bool emitCompactUnwindNonCanonical() const
Base class for the full range of assembler expressions which are needed for parsing.
Definition MCExpr.h:34
@ SymbolRef
References to labels and assigned expressions.
Definition MCExpr.h:43
ExprKind getKind() const
Definition MCExpr.h:85
Encode information on a single operation to perform on a byte sequence (e.g., an encoded instruction)...
Definition MCFixup.h:61
static MCFixup create(uint32_t Offset, const MCExpr *Value, MCFixupKind Kind, bool PCRel=false)
Consider bit fields if we need more flags.
Definition MCFixup.h:86
bool getAllowAutoPadding() const
Definition MCSection.h:195
void setAllowAutoPadding(bool V)
Definition MCSection.h:196
MCInst getInst() const
Definition MCSection.h:677
unsigned getOpcode() const
Definition MCSection.h:231
MCSection * getParent() const
Definition MCSection.h:167
LLVM_ABI void setVarFixups(ArrayRef< MCFixup > Fixups)
Definition MCSection.cpp:87
MCFragment * getNext() const
Definition MCSection.h:163
ArrayRef< MCOperand > getOperands() const
Definition MCSection.h:672
size_t getVarSize() const
Definition MCSection.h:210
LLVM_ABI void setVarContents(ArrayRef< char > Contents)
Definition MCSection.cpp:61
MutableArrayRef< char > getVarContents()
Definition MCSection.h:643
const MCSubtargetInfo * getSubtargetInfo() const
Retrieve the MCSubTargetInfo in effect when the instruction was encoded.
Definition MCSection.h:183
MutableArrayRef< MCFixup > getVarFixups()
Definition MCSection.h:663
void setInst(const MCInst &Inst)
Definition MCSection.h:686
Instances of this class represent a single low-level machine instruction.
Definition MCInst.h:188
unsigned getOpcode() const
Definition MCInst.h:202
void setOpcode(unsigned Op)
Definition MCInst.h:201
const MCOperand & getOperand(unsigned i) const
Definition MCInst.h:210
Describe properties that are true of each instruction in the target description file.
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Interface to description of machine instruction set.
Definition MCInstrInfo.h:27
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition MCInstrInfo.h:90
Streaming object file generation interface.
FT * newSpecialFragment(Args &&...args)
MCAssembler & getAssembler()
MCRegister getReg() const
Returns the register number.
Definition MCInst.h:73
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
void ensureMinAlignment(Align MinAlignment)
Makes sure that Alignment is at least MinAlignment.
Definition MCSection.h:604
bool isText() const
Definition MCSection.h:587
Streaming machine code generation interface.
Definition MCStreamer.h:220
MCFragment * getCurrentFragment() const
Definition MCStreamer.h:432
size_t getCurFragSize() const
Definition MCStreamer.h:441
bool getAllowAutoPadding() const
Definition MCStreamer.h:329
MCSection * getCurrentSectionOnly() const
Definition MCStreamer.h:421
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const Triple & getTargetTriple() const
iterator end() const
Definition ArrayRef.h:343
iterator begin() const
Definition ArrayRef.h:342
constexpr unsigned id() const
Definition Register.h:100
void push_back(const T &Elt)
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
bool isX86_64() const
Tests whether the target is x86 (64-bit).
Definition Triple.h:1146
bool isX32() const
Tests whether the target is X32.
Definition Triple.h:1176
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
Definition Triple.h:804
OSType getOS() const
Get the parsed operating system type of this triple.
Definition Triple.h:427
bool isOSBinFormatCOFF() const
Tests whether the OS uses the COFF binary format.
Definition Triple.h:796
bool isUEFI() const
Tests whether the OS is UEFI.
Definition Triple.h:695
bool isOSWindows() const
Tests whether the OS is Windows.
Definition Triple.h:700
bool isOSIAMCU() const
Definition Triple.h:673
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition Triple.h:791
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition DenseSet.h:180
raw_ostream & write(unsigned char C)
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ EM_386
Definition ELF.h:141
@ EM_X86_64
Definition ELF.h:183
@ EM_IAMCU
Definition ELF.h:144
LLVM_ABI Expected< uint32_t > getCPUSubType(const Triple &T)
Definition MachO.cpp:101
LLVM_ABI Expected< uint32_t > getCPUType(const Triple &T)
Definition MachO.cpp:81
VE::Fixups getFixupKind(uint8_t S)
bool isPrefix(uint64_t TSFlags)
@ RawFrmDstSrc
RawFrmDstSrc - This form is for instructions that use the source index register SI/ESI/RSI with a pos...
@ RawFrmSrc
RawFrmSrc - This form is for instructions that use the source index register SI/ESI/RSI with a possib...
@ RawFrmMemOffs
RawFrmMemOffs - This form is for instructions that store an absolute memory offset as an immediate wi...
int getMemoryOperandNo(uint64_t TSFlags)
unsigned getOperandBias(const MCInstrDesc &Desc)
Compute whether all of the def operands are repeated in the uses and therefore should be skipped.
void emitPrefix(MCCodeEmitter &MCE, const MCInst &MI, SmallVectorImpl< char > &CB, const MCSubtargetInfo &STI)
void emitInstruction(MCObjectStreamer &, const MCInst &Inst, const MCSubtargetInfo &STI)
FirstMacroFusionInstKind classifyFirstOpcodeInMacroFusion(unsigned Opcode)
AlignBranchBoundaryKind
Defines the possible values of the branch boundary alignment mask.
@ AlignBranchIndirect
SecondMacroFusionInstKind
EncodingOfSegmentOverridePrefix getSegmentOverridePrefixForReg(MCRegister Reg)
Given a segment register, return the encoding of the segment override prefix for it.
FirstMacroFusionInstKind
unsigned getOpcodeForLongImmediateForm(unsigned Opcode)
bool isMacroFused(FirstMacroFusionInstKind FirstKind, SecondMacroFusionInstKind SecondKind)
@ reloc_riprel_4byte_movq_load_rex2
@ reloc_signed_4byte_relax
@ reloc_branch_4byte_pcrel
@ NumTargetFixupKinds
@ reloc_riprel_4byte_relax
@ reloc_riprel_4byte_relax_evex
@ reloc_riprel_4byte_relax_rex
@ reloc_global_offset_table
@ reloc_riprel_4byte_movq_load
@ reloc_riprel_4byte_relax_rex2
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
bool isRelocation(MCFixupKind FixupKind)
Definition MCFixup.h:130
NodeAddr< CodeNode * > Code
Definition RDFGraph.h:388
Context & getContext() const
Definition BasicBlock.h:99
BaseReg
Stack frame base register. Bit 0 of FREInfo.Info.
Definition SFrame.h:77
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
MCAsmBackend * createX86_64AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
std::unique_ptr< MCObjectTargetWriter > createX86WinCOFFObjectWriter(bool Is64Bit)
Construct an X86 Win COFF object writer.
Op::Description Desc
uint16_t MCFixupKind
Extensible enumeration to represent the type of a fixup.
Definition MCFixup.h:22
MCStreamer * createX86ELFStreamer(const Triple &T, MCContext &Context, std::unique_ptr< MCAsmBackend > &&MAB, std::unique_ptr< MCObjectWriter > &&MOW, std::unique_ptr< MCCodeEmitter > &&MCE)
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ FirstTargetFixupKind
Definition MCFixup.h:44
@ FK_SecRel_2
A two-byte section relative fixup.
Definition MCFixup.h:40
@ FirstLiteralRelocationKind
Definition MCFixup.h:29
@ FK_Data_8
A eight-byte fixup.
Definition MCFixup.h:37
@ FK_Data_1
A one-byte fixup.
Definition MCFixup.h:34
@ FK_Data_4
A four-byte fixup.
Definition MCFixup.h:36
@ FK_SecRel_8
A eight-byte section relative fixup.
Definition MCFixup.h:42
@ FK_NONE
A no-op fixup.
Definition MCFixup.h:33
@ FK_SecRel_4
A four-byte section relative fixup.
Definition MCFixup.h:41
@ FK_SecRel_1
A one-byte section relative fixup.
Definition MCFixup.h:39
@ FK_Data_2
A two-byte fixup.
Definition MCFixup.h:35
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition Error.h:769
FunctionAddr VTableAddr uintptr_t uintptr_t Data
Definition InstrProf.h:189
std::unique_ptr< MCObjectTargetWriter > createX86MachObjectWriter(bool Is64Bit, uint32_t CPUType, uint32_t CPUSubtype)
Construct an X86 Mach-O object writer.
@ Add
Sum of integers.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
ArrayRef(const T &OneElt) -> ArrayRef< T >
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1915
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
std::unique_ptr< MCObjectTargetWriter > createX86ELFObjectWriter(bool IsELF64, uint8_t OSABI, uint16_t EMachine)
Construct an X86 ELF object writer.
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition Alignment.h:100
endianness
Definition bit.h:71
MCAsmBackend * createX86_32AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:870
const MCSymbol * Personality
Definition MCDwarf.h:767
std::vector< MCCFIInstruction > Instructions
Definition MCDwarf.h:769