LLVM 20.0.0git
MCAssembler.cpp
//===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/MC/MCAssembler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCCodeView.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCFragment.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <tuple>
#include <utility>

using namespace llvm;

namespace llvm {
class MCSubtargetInfo;
}

#define DEBUG_TYPE "assembler"

namespace {
namespace stats {

STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
STATISTIC(EmittedRelaxableFragments,
          "Number of emitted assembler fragments - relaxable");
STATISTIC(EmittedDataFragments,
          "Number of emitted assembler fragments - data");
STATISTIC(EmittedAlignFragments,
          "Number of emitted assembler fragments - align");
STATISTIC(EmittedFillFragments,
          "Number of emitted assembler fragments - fill");
STATISTIC(EmittedNopsFragments, "Number of emitted assembler fragments - nops");
STATISTIC(EmittedOrgFragments, "Number of emitted assembler fragments - org");
STATISTIC(evaluateFixup, "Number of evaluated fixups");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
STATISTIC(RelaxedInstructions, "Number of relaxed instructions");

} // end namespace stats
} // end anonymous namespace

// FIXME FIXME FIXME: There are a number of places in this file where we
// convert what is a 64-bit assembler value used for computation into a value
// in the object file, which may truncate it. We should detect that truncation
// where invalid and report errors back.

/* *** */

MCAssembler::MCAssembler(MCContext &Context,
                         std::unique_ptr<MCAsmBackend> Backend,
                         std::unique_ptr<MCCodeEmitter> Emitter,
                         std::unique_ptr<MCObjectWriter> Writer)
    : Context(Context), Backend(std::move(Backend)),
      Emitter(std::move(Emitter)), Writer(std::move(Writer)) {}

void MCAssembler::reset() {
  RelaxAll = false;
  Sections.clear();
  Symbols.clear();
  ThumbFuncs.clear();
  BundleAlignSize = 0;

  // reset objects owned by us
  if (getBackendPtr())
    getBackendPtr()->reset();
  if (getEmitterPtr())
    getEmitterPtr()->reset();
  if (Writer)
    Writer->reset();
}

bool MCAssembler::registerSection(MCSection &Section) {
  if (Section.isRegistered())
    return false;
  assert(Section.curFragList()->Head && "allocInitialFragment not called");
  Sections.push_back(&Section);
  Section.setIsRegistered(true);
  return true;
}

bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {
  if (ThumbFuncs.count(Symbol))
    return true;

  if (!Symbol->isVariable())
    return false;

  const MCExpr *Expr = Symbol->getVariableValue();

  MCValue V;
  if (!Expr->evaluateAsRelocatable(V, nullptr, nullptr))
    return false;

  if (V.getSymB() || V.getRefKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbolRefExpr *Ref = V.getSymA();
  if (!Ref)
    return false;

  if (Ref->getKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbol &Sym = Ref->getSymbol();
  if (!isThumbFunc(&Sym))
    return false;

  ThumbFuncs.insert(Symbol); // Cache it.
  return true;
}

bool MCAssembler::evaluateFixup(const MCFixup &Fixup, const MCFragment *DF,
                                MCValue &Target, const MCSubtargetInfo *STI,
                                uint64_t &Value, bool &WasForced) const {
  ++stats::evaluateFixup;

  // FIXME: This code has some duplication with recordRelocation. We should
  // probably merge the two into a single callback that tries to evaluate a
  // fixup and records a relocation if one is needed.

  // On error claim to have completely evaluated the fixup, to prevent any
  // further processing from being done.
  const MCExpr *Expr = Fixup.getValue();
  MCContext &Ctx = getContext();
  Value = 0;
  WasForced = false;
  if (!Expr->evaluateAsRelocatable(Target, this, &Fixup)) {
    Ctx.reportError(Fixup.getLoc(), "expected relocatable expression");
    return true;
  }
  if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
    if (RefB->getKind() != MCSymbolRefExpr::VK_None) {
      Ctx.reportError(Fixup.getLoc(),
                      "unsupported subtraction of qualified symbol");
      return true;
    }
  }

  assert(getBackendPtr() && "Expected assembler backend");
  bool IsTarget = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags &
                  MCFixupKindInfo::FKF_IsTarget;

  if (IsTarget)
    return getBackend().evaluateTargetFixup(*this, Fixup, DF, Target, STI,
                                            Value, WasForced);

  unsigned FixupFlags = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags;
  bool IsPCRel = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags &
                 MCFixupKindInfo::FKF_IsPCRel;

  bool IsResolved = false;
  if (IsPCRel) {
    if (Target.getSymB()) {
      IsResolved = false;
    } else if (!Target.getSymA()) {
      IsResolved = false;
    } else {
      const MCSymbolRefExpr *A = Target.getSymA();
      const MCSymbol &SA = A->getSymbol();
      if (A->getKind() != MCSymbolRefExpr::VK_None || SA.isUndefined()) {
        IsResolved = false;
      } else {
        IsResolved = (FixupFlags & MCFixupKindInfo::FKF_Constant) ||
                     getWriter().isSymbolRefDifferenceFullyResolvedImpl(
                         *this, SA, *DF, false, true);
      }
    }
  } else {
    IsResolved = Target.isAbsolute();
  }

  Value = Target.getConstant();

  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    const MCSymbol &Sym = A->getSymbol();
    if (Sym.isDefined())
      Value += getSymbolOffset(Sym);
  }
  if (const MCSymbolRefExpr *B = Target.getSymB()) {
    const MCSymbol &Sym = B->getSymbol();
    if (Sym.isDefined())
      Value -= getSymbolOffset(Sym);
  }

  bool ShouldAlignPC = getBackend().getFixupKindInfo(Fixup.getKind()).Flags &
                       MCFixupKindInfo::FKF_IsAlignedDownTo32Bits;
  assert((ShouldAlignPC ? IsPCRel : true) &&
         "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!");

  if (IsPCRel) {
    uint64_t Offset = getFragmentOffset(*DF) + Fixup.getOffset();

    // A number of ARM fixups in Thumb mode require that the effective PC
    // address be determined as the 32-bit aligned version of the actual offset.
    if (ShouldAlignPC) Offset &= ~0x3;
    Value -= Offset;
  }

  // Let the backend force a relocation if needed.
  if (IsResolved &&
      getBackend().shouldForceRelocation(*this, Fixup, Target, STI)) {
    IsResolved = false;
    WasForced = true;
  }

  // A linker relaxation target may emit ADD/SUB relocations for A-B+C. Let
  // recordRelocation handle non-VK_None cases like A@plt-B+C.
  if (!IsResolved && Target.getSymA() && Target.getSymB() &&
      Target.getSymA()->getKind() == MCSymbolRefExpr::VK_None &&
      getBackend().handleAddSubRelocations(*this, *DF, Fixup, Target, Value))
    return true;

  return IsResolved;
}
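// Worked example (illustrative numbers): for a PC-relative fixup against a
// local symbol defined in the same section, the code above computes roughly
//   Value = Target.getConstant() + getSymbolOffset(A) - (FragmentOffset + FixupOffset)
// e.g. a zero addend with getSymbolOffset(A) = 0x40, FragmentOffset = 0x10 and
// FixupOffset = 1 yields Value = 0x40 - 0x11 = 0x2F; IsResolved can still be
// flipped to false afterwards if the backend's shouldForceRelocation() insists
// on emitting a relocation.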
uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
  assert(getBackendPtr() && "Requires assembler backend");
  switch (F.getKind()) {
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(F).getContents().size();
  case MCFragment::FT_Relaxable:
    return cast<MCRelaxableFragment>(F).getContents().size();
  case MCFragment::FT_Fill: {
    auto &FF = cast<MCFillFragment>(F);
    int64_t NumValues = 0;
    if (!FF.getNumValues().evaluateKnownAbsolute(NumValues, *this)) {
      getContext().reportError(FF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }
    int64_t Size = NumValues * FF.getValueSize();
    if (Size < 0) {
      getContext().reportError(FF.getLoc(), "invalid number of bytes");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Nops:
    return cast<MCNopsFragment>(F).getNumBytes();

  case MCFragment::FT_LEB:
    return cast<MCLEBFragment>(F).getContents().size();

  case MCFragment::FT_BoundaryAlign:
    return cast<MCBoundaryAlignFragment>(F).getSize();

  case MCFragment::FT_SymbolId:
    return 4;

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    unsigned Offset = getFragmentOffset(AF);
    unsigned Size = offsetToAlignment(Offset, AF.getAlignment());

    // Insert extra Nops for code alignment if the target defines the
    // shouldInsertExtraNopBytesForCodeAlign target hook.
    if (AF.getParent()->useCodeAlign() && AF.hasEmitNops() &&
        getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size))
      return Size;

    // If we are padding with nops, force the padding to be larger than the
    // minimum nop size.
    if (Size > 0 && AF.hasEmitNops()) {
      while (Size % getBackend().getMinimumNopSize())
        Size += AF.getAlignment().value();
    }
    if (Size > AF.getMaxBytesToEmit())
      return 0;
    return Size;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);
    MCValue Value;
    if (!OF.getOffset().evaluateAsValue(Value, *this)) {
      getContext().reportError(OF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }

    uint64_t FragmentOffset = getFragmentOffset(OF);
    int64_t TargetLocation = Value.getConstant();
    if (const MCSymbolRefExpr *A = Value.getSymA()) {
      uint64_t Val;
      if (!getSymbolOffset(A->getSymbol(), Val)) {
        getContext().reportError(OF.getLoc(), "expected absolute expression");
        return 0;
      }
      TargetLocation += Val;
    }
    int64_t Size = TargetLocation - FragmentOffset;
    if (Size < 0 || Size >= 0x40000000) {
      getContext().reportError(
          OF.getLoc(), "invalid .org offset '" + Twine(TargetLocation) +
                           "' (at offset '" + Twine(FragmentOffset) + "')");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Dwarf:
    return cast<MCDwarfLineAddrFragment>(F).getContents().size();
  case MCFragment::FT_DwarfFrame:
    return cast<MCDwarfCallFrameFragment>(F).getContents().size();
  case MCFragment::FT_CVInlineLines:
    return cast<MCCVInlineLineTableFragment>(F).getContents().size();
  case MCFragment::FT_CVDefRange:
    return cast<MCCVDefRangeFragment>(F).getContents().size();
  case MCFragment::FT_PseudoProbe:
    return cast<MCPseudoProbeAddrFragment>(F).getContents().size();
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  llvm_unreachable("invalid fragment kind");
}
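// Worked example (illustrative numbers): for ".org 0x100" evaluated in a
// fragment that starts at offset 0x40, TargetLocation is 0x100 and the
// fragment's size is 0x100 - 0x40 = 0xC0 bytes; a target location behind the
// current offset (negative Size) or an absurdly large one (>= 0x40000000) is
// rejected with the "invalid .org offset" diagnostic above.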
// Compute the amount of padding required before the fragment \p F to
// obey bundling restrictions, where \p FOffset is the fragment's offset in
// its section and \p FSize is the fragment's size.
static uint64_t computeBundlePadding(unsigned BundleSize,
                                     const MCEncodedFragment *F,
                                     uint64_t FOffset, uint64_t FSize) {
  uint64_t OffsetInBundle = FOffset & (BundleSize - 1);
  uint64_t EndOfFragment = OffsetInBundle + FSize;

  // There are two kinds of bundling restrictions:
  //
  // 1) For alignToBundleEnd(), add padding to ensure that the fragment will
  //    *end* on a bundle boundary.
  // 2) Otherwise, check if the fragment would cross a bundle boundary. If it
  //    would, add padding until the end of the bundle so that the fragment
  //    will start in a new one.
  if (F->alignToBundleEnd()) {
    // Three possibilities here:
    //
    // A) The fragment just happens to end at a bundle boundary, so we're good.
    // B) The fragment ends before the current bundle boundary: pad it just
    //    enough to reach the boundary.
    // C) The fragment ends after the current bundle boundary: pad it until it
    //    reaches the end of the next bundle boundary.
    //
    // Note: this code could be made shorter with some modulo trickery, but it's
    // intentionally kept in its more explicit form for simplicity.
    if (EndOfFragment == BundleSize)
      return 0;
    else if (EndOfFragment < BundleSize)
      return BundleSize - EndOfFragment;
    else { // EndOfFragment > BundleSize
      return 2 * BundleSize - EndOfFragment;
    }
  } else if (OffsetInBundle > 0 && EndOfFragment > BundleSize)
    return BundleSize - OffsetInBundle;
  else
    return 0;
}
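// Worked example (illustrative numbers): with BundleSize = 16, a fragment at
// FOffset = 12 with FSize = 8 has OffsetInBundle = 12 and EndOfFragment = 20,
// so it would cross the bundle boundary and receives 16 - 12 = 4 bytes of
// padding to start in the next bundle. An alignToBundleEnd() fragment at
// FOffset = 3 with FSize = 5 (EndOfFragment = 8) instead receives 16 - 8 = 8
// bytes so that it ends exactly on the boundary.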
void MCAssembler::layoutBundle(MCFragment *Prev, MCFragment *F) const {
  // If bundling is enabled and this fragment has instructions in it, it has to
  // obey the bundling restrictions. With padding, we'll have:
  //
  //
  //        BundlePadding
  //             |||
  // -------------------------------------
  //   Prev  |##########|       F        |
  // -------------------------------------
  //                    ^
  //                    |
  //                    F->Offset
  //
  // The fragment's offset will point to after the padding, and its computed
  // size won't include the padding.
  //
  // ".align N" is an example of a directive that introduces multiple
  // fragments. We could add a special case to handle ".align N" by emitting
  // within-fragment padding (which would produce less padding when N is less
  // than the bundle size), but for now we don't.
  //
  assert(isa<MCEncodedFragment>(F) &&
         "Only MCEncodedFragment implementations have instructions");
  MCEncodedFragment *EF = cast<MCEncodedFragment>(F);
  uint64_t FSize = computeFragmentSize(*EF);

  if (FSize > getBundleAlignSize())
    report_fatal_error("Fragment can't be larger than a bundle size");

  uint64_t RequiredBundlePadding =
      computeBundlePadding(getBundleAlignSize(), EF, EF->Offset, FSize);
  if (RequiredBundlePadding > UINT8_MAX)
    report_fatal_error("Padding cannot exceed 255 bytes");
  EF->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding));
  EF->Offset += RequiredBundlePadding;
  if (auto *DF = dyn_cast_or_null<MCDataFragment>(Prev))
    if (DF->getContents().empty())
      DF->Offset = EF->Offset;
}

void MCAssembler::ensureValid(MCSection &Sec) const {
  if (Sec.hasLayout())
    return;
  Sec.setHasLayout(true);
  MCFragment *Prev = nullptr;
  uint64_t Offset = 0;
  for (MCFragment &F : Sec) {
    F.Offset = Offset;
    if (isBundlingEnabled() && F.hasInstructions()) {
      layoutBundle(Prev, &F);
      Offset = F.Offset;
    }
    Offset += computeFragmentSize(F);
    Prev = &F;
  }
}

uint64_t MCAssembler::getFragmentOffset(const MCFragment &F) const {
  ensureValid(*F.getParent());
  return F.Offset;
}
// Simple getSymbolOffset helper for the non-variable case.
static bool getLabelOffset(const MCAssembler &Asm, const MCSymbol &S,
                           bool ReportError, uint64_t &Val) {
  if (!S.getFragment()) {
    if (ReportError)
      report_fatal_error("unable to evaluate offset to undefined symbol '" +
                         S.getName() + "'");
    return false;
  }
  Val = Asm.getFragmentOffset(*S.getFragment()) + S.getOffset();
  return true;
}

static bool getSymbolOffsetImpl(const MCAssembler &Asm, const MCSymbol &S,
                                bool ReportError, uint64_t &Val) {
  if (!S.isVariable())
    return getLabelOffset(Asm, S, ReportError, Val);

  // If SD is a variable, evaluate it.
  MCValue Target;
  if (!S.getVariableValue()->evaluateAsValue(Target, Asm))
    report_fatal_error("unable to evaluate offset for variable '" +
                       S.getName() + "'");

  uint64_t Offset = Target.getConstant();

  const MCSymbolRefExpr *A = Target.getSymA();
  if (A) {
    uint64_t ValA;
    // FIXME: On most platforms, `Target`'s component symbols are labels from
    // having been simplified during evaluation, but on Mach-O they can be
    // variables due to PR19203. This, and the line below for `B` can be
    // restored to call `getLabelOffset` when PR19203 is fixed.
    if (!getSymbolOffsetImpl(Asm, A->getSymbol(), ReportError, ValA))
      return false;
    Offset += ValA;
  }

  const MCSymbolRefExpr *B = Target.getSymB();
  if (B) {
    uint64_t ValB;
    if (!getSymbolOffsetImpl(Asm, B->getSymbol(), ReportError, ValB))
      return false;
    Offset -= ValB;
  }

  Val = Offset;
  return true;
}

bool MCAssembler::getSymbolOffset(const MCSymbol &S, uint64_t &Val) const {
  return getSymbolOffsetImpl(*this, S, false, Val);
}

uint64_t MCAssembler::getSymbolOffset(const MCSymbol &S) const {
  uint64_t Val;
  getSymbolOffsetImpl(*this, S, true, Val);
  return Val;
}
const MCSymbol *MCAssembler::getBaseSymbol(const MCSymbol &Symbol) const {
  assert(HasLayout);
  if (!Symbol.isVariable())
    return &Symbol;

  const MCExpr *Expr = Symbol.getVariableValue();
  MCValue Value;
  if (!Expr->evaluateAsValue(Value, *this)) {
    getContext().reportError(Expr->getLoc(),
                             "expression could not be evaluated");
    return nullptr;
  }

  const MCSymbolRefExpr *RefB = Value.getSymB();
  if (RefB) {
    getContext().reportError(
        Expr->getLoc(),
        Twine("symbol '") + RefB->getSymbol().getName() +
            "' could not be evaluated in a subtraction expression");
    return nullptr;
  }

  const MCSymbolRefExpr *A = Value.getSymA();
  if (!A)
    return nullptr;

  const MCSymbol &ASym = A->getSymbol();
  if (ASym.isCommon()) {
    getContext().reportError(Expr->getLoc(),
                             "Common symbol '" + ASym.getName() +
                                 "' cannot be used in assignment expr");
    return nullptr;
  }

  return &ASym;
}

uint64_t MCAssembler::getSectionAddressSize(const MCSection &Sec) const {
  assert(HasLayout);
  // The size is the last fragment's end offset.
  const MCFragment &F = *Sec.curFragList()->Tail;
  return getFragmentOffset(F) + computeFragmentSize(F);
}

uint64_t MCAssembler::getSectionFileSize(const MCSection &Sec) const {
  // Virtual sections have no file size.
  if (Sec.isVirtualSection())
    return 0;
  return getSectionAddressSize(Sec);
}

bool MCAssembler::registerSymbol(const MCSymbol &Symbol) {
  bool Changed = !Symbol.isRegistered();
  if (Changed) {
    Symbol.setIsRegistered(true);
    Symbols.push_back(&Symbol);
  }
  return Changed;
}

void MCAssembler::writeFragmentPadding(raw_ostream &OS,
                                       const MCEncodedFragment &EF,
                                       uint64_t FSize) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // Should NOP padding be written out before this fragment?
  unsigned BundlePadding = EF.getBundlePadding();
  if (BundlePadding > 0) {
    assert(isBundlingEnabled() &&
           "Writing bundle padding with disabled bundling");
    assert(EF.hasInstructions() &&
           "Writing bundle padding for a fragment without instructions");

    unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize);
    const MCSubtargetInfo *STI = EF.getSubtargetInfo();
    if (EF.alignToBundleEnd() && TotalLength > getBundleAlignSize()) {
      // If the padding itself crosses a bundle boundary, it must be emitted
      // in 2 pieces, since even nop instructions must not cross boundaries.
      //             v--------------v   <- BundleAlignSize
      //        v---------v             <- BundlePadding
      // ----------------------------
      // | Prev |####|####|    F    |
      // ----------------------------
      //        ^-------------------^   <- TotalLength
      unsigned DistanceToBoundary = TotalLength - getBundleAlignSize();
      if (!getBackend().writeNopData(OS, DistanceToBoundary, STI))
        report_fatal_error("unable to write NOP sequence of " +
                           Twine(DistanceToBoundary) + " bytes");
      BundlePadding -= DistanceToBoundary;
    }
    if (!getBackend().writeNopData(OS, BundlePadding, STI))
      report_fatal_error("unable to write NOP sequence of " +
                         Twine(BundlePadding) + " bytes");
  }
}
/// Write the fragment \p F to the output file.
static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
                          const MCFragment &F) {
  // FIXME: Embed in fragments instead?
  uint64_t FragmentSize = Asm.computeFragmentSize(F);

  llvm::endianness Endian = Asm.getBackend().Endian;

  if (const MCEncodedFragment *EF = dyn_cast<MCEncodedFragment>(&F))
    Asm.writeFragmentPadding(OS, *EF, FragmentSize);

  // This variable (and its dummy usage) is to participate in the assert at
  // the end of the function.
  uint64_t Start = OS.tell();
  (void) Start;

  ++stats::EmittedFragments;

  switch (F.getKind()) {
  case MCFragment::FT_Align: {
    ++stats::EmittedAlignFragments;
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");

    uint64_t Count = FragmentSize / AF.getValueSize();

    // FIXME: This error shouldn't actually occur (the front end should emit
    // multiple .align directives to enforce the semantics it wants), but is
    // severe enough that we want to report it. How to handle this?
    if (Count * AF.getValueSize() != FragmentSize)
      report_fatal_error("undefined .align directive, value size '" +
                         Twine(AF.getValueSize()) +
                         "' is not a divisor of padding size '" +
                         Twine(FragmentSize) + "'");

    // See if we are aligning with nops, and if so do that first to try to fill
    // the Count bytes. Then if that did not fill any bytes or there are any
    // bytes left to fill use the Value and ValueSize to fill the rest.
    // If we are aligning with nops, ask that target to emit the right data.
    if (AF.hasEmitNops()) {
      if (!Asm.getBackend().writeNopData(OS, Count, AF.getSubtargetInfo()))
        report_fatal_error("unable to write nop sequence of " +
                           Twine(Count) + " bytes");
      break;
    }

    // Otherwise, write out in multiples of the value size.
    for (uint64_t i = 0; i != Count; ++i) {
      switch (AF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OS << char(AF.getValue()); break;
      case 2:
        support::endian::write<uint16_t>(OS, AF.getValue(), Endian);
        break;
      case 4:
        support::endian::write<uint32_t>(OS, AF.getValue(), Endian);
        break;
      case 8:
        support::endian::write<uint64_t>(OS, AF.getValue(), Endian);
        break;
      }
    }
    break;
  }

  case MCFragment::FT_Data:
    ++stats::EmittedDataFragments;
    OS << cast<MCDataFragment>(F).getContents();
    break;

  case MCFragment::FT_Relaxable:
    ++stats::EmittedRelaxableFragments;
    OS << cast<MCRelaxableFragment>(F).getContents();
    break;

  case MCFragment::FT_Fill: {
    ++stats::EmittedFillFragments;
    const MCFillFragment &FF = cast<MCFillFragment>(F);
    uint64_t V = FF.getValue();
    unsigned VSize = FF.getValueSize();
    const unsigned MaxChunkSize = 16;
    char Data[MaxChunkSize];
    assert(0 < VSize && VSize <= MaxChunkSize && "Illegal fragment fill size");
    // Duplicate V into Data as byte vector to reduce number of
    // writes done. As such, do endian conversion here.
    for (unsigned I = 0; I != VSize; ++I) {
      unsigned index = Endian == llvm::endianness::little ? I : (VSize - I - 1);
      Data[I] = uint8_t(V >> (index * 8));
    }
    for (unsigned I = VSize; I < MaxChunkSize; ++I)
      Data[I] = Data[I - VSize];

    // Set to largest multiple of VSize in Data.
    const unsigned NumPerChunk = MaxChunkSize / VSize;
    // Set ChunkSize to largest multiple of VSize in Data
    const unsigned ChunkSize = VSize * NumPerChunk;

    // Do copies by chunk.
    StringRef Ref(Data, ChunkSize);
    for (uint64_t I = 0, E = FragmentSize / ChunkSize; I != E; ++I)
      OS << Ref;

    // do remainder if needed.
    unsigned TrailingCount = FragmentSize % ChunkSize;
    if (TrailingCount)
      OS.write(Data, TrailingCount);
    break;
  }

  case MCFragment::FT_Nops: {
    ++stats::EmittedNopsFragments;
    const MCNopsFragment &NF = cast<MCNopsFragment>(F);

    int64_t NumBytes = NF.getNumBytes();
    int64_t ControlledNopLength = NF.getControlledNopLength();
    int64_t MaximumNopLength =
        Asm.getBackend().getMaximumNopSize(*NF.getSubtargetInfo());

    assert(NumBytes > 0 && "Expected positive NOPs fragment size");
    assert(ControlledNopLength >= 0 && "Expected non-negative NOP size");

    if (ControlledNopLength > MaximumNopLength) {
      Asm.getContext().reportError(NF.getLoc(),
                                   "illegal NOP size " +
                                       std::to_string(ControlledNopLength) +
                                       ". (expected within [0, " +
                                       std::to_string(MaximumNopLength) + "])");
      // Clamp the NOP length as reportError does not stop the execution
      // immediately.
      ControlledNopLength = MaximumNopLength;
    }

    // Use maximum value if the size of each NOP is not specified
    if (!ControlledNopLength)
      ControlledNopLength = MaximumNopLength;

    while (NumBytes) {
      uint64_t NumBytesToEmit =
          (uint64_t)std::min(NumBytes, ControlledNopLength);
      assert(NumBytesToEmit && "try to emit empty NOP instruction");
      if (!Asm.getBackend().writeNopData(OS, NumBytesToEmit,
                                         NF.getSubtargetInfo())) {
        report_fatal_error("unable to write nop sequence of the remaining " +
                           Twine(NumBytesToEmit) + " bytes");
        break;
      }
      NumBytes -= NumBytesToEmit;
    }
    break;
  }

  case MCFragment::FT_LEB: {
    const MCLEBFragment &LF = cast<MCLEBFragment>(F);
    OS << LF.getContents();
    break;
  }

  case MCFragment::FT_BoundaryAlign: {
    const MCBoundaryAlignFragment &BF = cast<MCBoundaryAlignFragment>(F);
    if (!Asm.getBackend().writeNopData(OS, FragmentSize, BF.getSubtargetInfo()))
      report_fatal_error("unable to write nop sequence of " +
                         Twine(FragmentSize) + " bytes");
    break;
  }

  case MCFragment::FT_SymbolId: {
    const MCSymbolIdFragment &SF = cast<MCSymbolIdFragment>(F);
    support::endian::write<uint32_t>(OS, SF.getSymbol()->getIndex(), Endian);
    break;
  }

  case MCFragment::FT_Org: {
    ++stats::EmittedOrgFragments;
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
      OS << char(OF.getValue());

    break;
  }

  case MCFragment::FT_Dwarf: {
    const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_DwarfFrame: {
    const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F);
    OS << CF.getContents();
    break;
  }
  case MCFragment::FT_CVInlineLines: {
    const auto &OF = cast<MCCVInlineLineTableFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_CVDefRange: {
    const auto &DRF = cast<MCCVDefRangeFragment>(F);
    OS << DRF.getContents();
    break;
  }
  case MCFragment::FT_PseudoProbe: {
    const MCPseudoProbeAddrFragment &PF = cast<MCPseudoProbeAddrFragment>(F);
    OS << PF.getContents();
    break;
  }
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  assert(OS.tell() - Start == FragmentSize &&
         "The stream should advance by fragment size");
}
void MCAssembler::writeSectionData(raw_ostream &OS,
                                   const MCSection *Sec) const {
  assert(getBackendPtr() && "Expected assembler backend");

  // Ignore virtual sections.
  if (Sec->isVirtualSection()) {
    assert(getSectionFileSize(*Sec) == 0 && "Invalid size for section!");

    // Check that contents are only things legal inside a virtual section.
    for (const MCFragment &F : *Sec) {
      switch (F.getKind()) {
      default: llvm_unreachable("Invalid fragment in virtual section!");
      case MCFragment::FT_Data: {
        // Check that we aren't trying to write a non-zero contents (or fixups)
        // into a virtual section. This is to support clients which use standard
        // directives to fill the contents of virtual sections.
        const MCDataFragment &DF = cast<MCDataFragment>(F);
        if (DF.fixup_begin() != DF.fixup_end())
          getContext().reportError(SMLoc(), Sec->getVirtualSectionKind() +
                                                " section '" + Sec->getName() +
                                                "' cannot have fixups");
        for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i)
          if (DF.getContents()[i]) {
            getContext().reportError(SMLoc(),
                                     Sec->getVirtualSectionKind() +
                                         " section '" + Sec->getName() +
                                         "' cannot have non-zero initializers");
            break;
          }
        break;
      }
      case MCFragment::FT_Align:
        // Check that we aren't trying to write a non-zero value into a virtual
        // section.
        assert((cast<MCAlignFragment>(F).getValueSize() == 0 ||
                cast<MCAlignFragment>(F).getValue() == 0) &&
               "Invalid align in virtual section!");
        break;
      case MCFragment::FT_Fill:
        assert((cast<MCFillFragment>(F).getValue() == 0) &&
               "Invalid fill in virtual section!");
        break;
      case MCFragment::FT_Org:
        break;
      }
    }

    return;
  }

  uint64_t Start = OS.tell();
  (void)Start;

  for (const MCFragment &F : *Sec)
    writeFragment(OS, *this, F);

  assert(getContext().hadError() ||
         OS.tell() - Start == getSectionAddressSize(*Sec));
}

std::tuple<MCValue, uint64_t, bool>
MCAssembler::handleFixup(MCFragment &F, const MCFixup &Fixup,
                         const MCSubtargetInfo *STI) {
  // Evaluate the fixup.
  MCValue Target;
  uint64_t FixedValue;
  bool WasForced;
  bool IsResolved =
      evaluateFixup(Fixup, &F, Target, STI, FixedValue, WasForced);
  if (!IsResolved) {
    // The fixup was unresolved, we need a relocation. Inform the object
    // writer of the relocation, and give it an opportunity to adjust the
    // fixup value if need be.
    getWriter().recordRelocation(*this, &F, Fixup, Target, FixedValue);
  }
  return std::make_tuple(Target, FixedValue, IsResolved);
}
void MCAssembler::layout() {
  assert(getBackendPtr() && "Expected assembler backend");
  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - pre-layout\n--\n";
      dump(); });

  // Assign section ordinals.
  unsigned SectionIndex = 0;
  for (MCSection &Sec : *this) {
    Sec.setOrdinal(SectionIndex++);

    // Chain together fragments from all subsections.
    if (Sec.Subsections.size() > 1) {
      MCDummyFragment Dummy;
      MCFragment *Tail = &Dummy;
      for (auto &[_, List] : Sec.Subsections) {
        assert(List.Head);
        Tail->Next = List.Head;
        Tail = List.Tail;
      }
      Sec.Subsections.clear();
      Sec.Subsections.push_back({0u, {Dummy.getNext(), Tail}});
      Sec.CurFragList = &Sec.Subsections[0].second;

      unsigned FragmentIndex = 0;
      for (MCFragment &Frag : Sec)
        Frag.setLayoutOrder(FragmentIndex++);
    }
  }

  // Layout until everything fits.
  this->HasLayout = true;
  while (layoutOnce()) {
    if (getContext().hadError())
      return;
    // Size of fragments in one section can depend on the size of fragments in
    // another. If any fragment has changed size, we have to re-layout (and
    // as a result possibly further relax) all.
    for (MCSection &Sec : *this)
      Sec.setHasLayout(false);
  }

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - post-relaxation\n--\n";
      dump(); });

  // Finalize the layout, including fragment lowering.
  getBackend().finishLayout(*this);

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - final-layout\n--\n";
      dump(); });

  // Allow the object writer a chance to perform post-layout binding (for
  // example, to set the index fields in the symbol data).
  getWriter().executePostLayoutBinding(*this);

  // Evaluate and apply the fixups, generating relocation entries as necessary.
  for (MCSection &Sec : *this) {
    for (MCFragment &Frag : Sec) {
      ArrayRef<MCFixup> Fixups;
      MutableArrayRef<char> Contents;
      const MCSubtargetInfo *STI = nullptr;

      // Process MCAlignFragment and MCEncodedFragmentWithFixups here.
      switch (Frag.getKind()) {
      default:
        continue;
      case MCFragment::FT_Align: {
        MCAlignFragment &AF = cast<MCAlignFragment>(Frag);
        // Insert fixup type for code alignment if the target defines the
        // shouldInsertFixupForCodeAlign target hook.
        if (Sec.useCodeAlign() && AF.hasEmitNops())
          getBackend().shouldInsertFixupForCodeAlign(*this, AF);
        continue;
      }
      case MCFragment::FT_Data: {
        MCDataFragment &DF = cast<MCDataFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        STI = DF.getSubtargetInfo();
        assert(!DF.hasInstructions() || STI != nullptr);
        break;
      }
      case MCFragment::FT_Relaxable: {
        MCRelaxableFragment &RF = cast<MCRelaxableFragment>(Frag);
        Fixups = RF.getFixups();
        Contents = RF.getContents();
        STI = RF.getSubtargetInfo();
        assert(!RF.hasInstructions() || STI != nullptr);
        break;
      }
      case MCFragment::FT_CVDefRange: {
        MCCVDefRangeFragment &CF = cast<MCCVDefRangeFragment>(Frag);
        Fixups = CF.getFixups();
        Contents = CF.getContents();
        break;
      }
      case MCFragment::FT_Dwarf: {
        MCDwarfLineAddrFragment &DF = cast<MCDwarfLineAddrFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        break;
      }
      case MCFragment::FT_DwarfFrame: {
        MCDwarfCallFrameFragment &DF = cast<MCDwarfCallFrameFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        break;
      }
      case MCFragment::FT_LEB: {
        auto &LF = cast<MCLEBFragment>(Frag);
        Fixups = LF.getFixups();
        Contents = LF.getContents();
        break;
      }
      case MCFragment::FT_PseudoProbe: {
        MCPseudoProbeAddrFragment &PF = cast<MCPseudoProbeAddrFragment>(Frag);
        Fixups = PF.getFixups();
        Contents = PF.getContents();
        break;
      }
      }
      for (const MCFixup &Fixup : Fixups) {
        uint64_t FixedValue;
        bool IsResolved;
        MCValue Target;
        std::tie(Target, FixedValue, IsResolved) =
            handleFixup(Frag, Fixup, STI);
        getBackend().applyFixup(*this, Fixup, Target, Contents, FixedValue,
                                IsResolved, STI);
      }
    }
  }
}

void MCAssembler::Finish() {
  layout();

  // Write the object file.
  stats::ObjectBytes += getWriter().writeObject(*this);

  HasLayout = false;
}
bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
                                       const MCRelaxableFragment *DF) const {
  assert(getBackendPtr() && "Expected assembler backend");
  MCValue Target;
  uint64_t Value;
  bool WasForced;
  bool Resolved = evaluateFixup(Fixup, DF, Target, DF->getSubtargetInfo(),
                                Value, WasForced);
  if (Target.getSymA() &&
      Target.getSymA()->getKind() == MCSymbolRefExpr::VK_X86_ABS8 &&
      Fixup.getKind() == FK_Data_1)
    return false;
  return getBackend().fixupNeedsRelaxationAdvanced(*this, Fixup, Resolved,
                                                   Value, DF, WasForced);
}

bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // If this inst doesn't ever need relaxation, ignore it. This occurs when we
  // are intentionally pushing out inst fragments, or because we relaxed a
  // previous instruction to one that doesn't need relaxation.
  if (!getBackend().mayNeedRelaxation(F->getInst(), *F->getSubtargetInfo()))
    return false;

  for (const MCFixup &Fixup : F->getFixups())
    if (fixupNeedsRelaxation(Fixup, F))
      return true;

  return false;
}

bool MCAssembler::relaxInstruction(MCRelaxableFragment &F) {
  assert(getEmitterPtr() &&
         "Expected CodeEmitter defined for relaxInstruction");
  if (!fragmentNeedsRelaxation(&F))
    return false;

  ++stats::RelaxedInstructions;

  // FIXME-PERF: We could immediately lower out instructions if we can tell
  // they are fully resolved, to avoid retesting on later passes.

  // Relax the fragment.

  MCInst Relaxed = F.getInst();
  getBackend().relaxInstruction(Relaxed, *F.getSubtargetInfo());

  // Encode the new instruction.
  F.setInst(Relaxed);
  F.getFixups().clear();
  F.getContents().clear();
  getEmitter().encodeInstruction(Relaxed, F.getContents(), F.getFixups(),
                                 *F.getSubtargetInfo());
  return true;
}

bool MCAssembler::relaxLEB(MCLEBFragment &LF) {
  const unsigned OldSize = static_cast<unsigned>(LF.getContents().size());
  unsigned PadTo = OldSize;
  int64_t Value;
  SmallVectorImpl<char> &Data = LF.getContents();
  LF.getFixups().clear();
  // Use evaluateKnownAbsolute for Mach-O as a hack: .subsections_via_symbols
  // requires that .uleb128 A-B is foldable where A and B reside in different
  // fragments. This is used by __gcc_except_table.
  bool Abs = getWriter().getSubsectionsViaSymbols()
                 ? LF.getValue().evaluateKnownAbsolute(Value, *this)
                 : LF.getValue().evaluateAsAbsolute(Value, *this);
  if (!Abs) {
    bool Relaxed, UseZeroPad;
    std::tie(Relaxed, UseZeroPad) = getBackend().relaxLEB128(*this, LF, Value);
    if (!Relaxed) {
      getContext().reportError(LF.getValue().getLoc(),
                               Twine(LF.isSigned() ? ".s" : ".u") +
                                   "leb128 expression is not absolute");
      LF.setValue(MCConstantExpr::create(0, Context));
    }
    uint8_t Tmp[10]; // maximum size: ceil(64/7)
    PadTo = std::max(PadTo, encodeULEB128(uint64_t(Value), Tmp));
    if (UseZeroPad)
      Value = 0;
  }
  Data.clear();
  raw_svector_ostream OSE(Data);
  // The compiler can generate EH table assembly that is impossible to assemble
  // without either adding padding to an LEB fragment or adding extra padding
  // to a later alignment fragment. To accommodate such tables, relaxation can
  // only increase an LEB fragment size here, not decrease it. See PR35809.
  if (LF.isSigned())
    encodeSLEB128(Value, OSE, PadTo);
  else
    encodeULEB128(Value, OSE, PadTo);
  return OldSize != LF.getContents().size();
}
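// Worked example (illustrative numbers): ".uleb128 624485" encodes to the
// three bytes 0xe5 0x8e 0x26. If an earlier relaxation round already reserved
// more bytes (OldSize > 3), PadTo keeps the encoding at that width, since an
// LEB fragment may grow but must never shrink across relaxation iterations
// (see the PR35809 note above).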
/// Check if the branch crosses the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch crosses the boundary.
static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size,
                             Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (StartAddr >> Log2(BoundaryAlignment)) !=
         ((EndAddr - 1) >> Log2(BoundaryAlignment));
}

/// Check if the branch is against the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch is against the boundary.
static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size,
                              Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (EndAddr & (BoundaryAlignment.value() - 1)) == 0;
}

/// Check if the branch needs padding.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch needs padding.
static bool needPadding(uint64_t StartAddr, uint64_t Size,
                        Align BoundaryAlignment) {
  return mayCrossBoundary(StartAddr, Size, BoundaryAlignment) ||
         isAgainstBoundary(StartAddr, Size, BoundaryAlignment);
}
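// Worked example (illustrative numbers): with a 32-byte boundary, a 4-byte
// branch at address 30 occupies bytes 30..33 and touches two 32-byte windows
// (30 >> 5 == 0 vs. 33 >> 5 == 1), so it may cross the boundary; a 4-byte
// branch at address 28 ends exactly at 32 (32 & 31 == 0), so it is against
// the boundary. Both cases report that padding is needed.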
bool MCAssembler::relaxBoundaryAlign(MCBoundaryAlignFragment &BF) {
  // A BoundaryAlignFragment that doesn't need to align any fragment should not
  // be relaxed.
  if (!BF.getLastFragment())
    return false;

  uint64_t AlignedOffset = getFragmentOffset(BF);
  uint64_t AlignedSize = 0;
  for (const MCFragment *F = BF.getNext();; F = F->getNext()) {
    AlignedSize += computeFragmentSize(*F);
    if (F == BF.getLastFragment())
      break;
  }

  Align BoundaryAlignment = BF.getAlignment();
  uint64_t NewSize = needPadding(AlignedOffset, AlignedSize, BoundaryAlignment)
                         ? offsetToAlignment(AlignedOffset, BoundaryAlignment)
                         : 0U;
  if (NewSize == BF.getSize())
    return false;
  BF.setSize(NewSize);
  return true;
}

bool MCAssembler::relaxDwarfLineAddr(MCDwarfLineAddrFragment &DF) {
  bool WasRelaxed;
  if (getBackend().relaxDwarfLineAddr(*this, DF, WasRelaxed))
    return WasRelaxed;

  MCContext &Context = getContext();
  uint64_t OldSize = DF.getContents().size();
  int64_t AddrDelta;
  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, *this);
  assert(Abs && "We created a line delta with an invalid expression");
  (void)Abs;
  int64_t LineDelta;
  LineDelta = DF.getLineDelta();
  SmallVectorImpl<char> &Data = DF.getContents();
  Data.clear();
  DF.getFixups().clear();

  MCDwarfLineAddr::encode(Context, getDWARFLinetableParams(), LineDelta,
                          AddrDelta, Data);
  return OldSize != Data.size();
}

bool MCAssembler::relaxDwarfCallFrameFragment(MCDwarfCallFrameFragment &DF) {
  bool WasRelaxed;
  if (getBackend().relaxDwarfCFA(*this, DF, WasRelaxed))
    return WasRelaxed;

  MCContext &Context = getContext();
  int64_t Value;
  bool Abs = DF.getAddrDelta().evaluateAsAbsolute(Value, *this);
  if (!Abs) {
    getContext().reportError(DF.getAddrDelta().getLoc(),
                             "invalid CFI advance_loc expression");
    DF.setAddrDelta(MCConstantExpr::create(0, Context));
    return false;
  }

  SmallVectorImpl<char> &Data = DF.getContents();
  uint64_t OldSize = Data.size();
  Data.clear();
  DF.getFixups().clear();

  MCDwarfFrameEmitter::encodeAdvanceLoc(Context, Value, Data);
  return OldSize != Data.size();
}
bool MCAssembler::relaxCVInlineLineTable(MCCVInlineLineTableFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeInlineLineTable(*this, F);
  return OldSize != F.getContents().size();
}

bool MCAssembler::relaxCVDefRange(MCCVDefRangeFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeDefRange(*this, F);
  return OldSize != F.getContents().size();
}

bool MCAssembler::relaxPseudoProbeAddr(MCPseudoProbeAddrFragment &PF) {
  uint64_t OldSize = PF.getContents().size();
  int64_t AddrDelta;
  bool Abs = PF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, *this);
  assert(Abs && "We created a pseudo probe with an invalid expression");
  (void)Abs;
  SmallVectorImpl<char> &Data = PF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  PF.getFixups().clear();

  // AddrDelta is a signed integer
  encodeSLEB128(AddrDelta, OSE, OldSize);
  return OldSize != Data.size();
}

bool MCAssembler::relaxFragment(MCFragment &F) {
  switch (F.getKind()) {
  default:
    return false;
  case MCFragment::FT_Relaxable:
    assert(!getRelaxAll() &&
           "Did not expect a MCRelaxableFragment in RelaxAll mode");
    return relaxInstruction(cast<MCRelaxableFragment>(F));
  case MCFragment::FT_Dwarf:
    return relaxDwarfLineAddr(cast<MCDwarfLineAddrFragment>(F));
  case MCFragment::FT_DwarfFrame:
    return relaxDwarfCallFrameFragment(cast<MCDwarfCallFrameFragment>(F));
  case MCFragment::FT_LEB:
    return relaxLEB(cast<MCLEBFragment>(F));
  case MCFragment::FT_BoundaryAlign:
    return relaxBoundaryAlign(cast<MCBoundaryAlignFragment>(F));
  case MCFragment::FT_CVInlineLines:
    return relaxCVInlineLineTable(cast<MCCVInlineLineTableFragment>(F));
  case MCFragment::FT_CVDefRange:
    return relaxCVDefRange(cast<MCCVDefRangeFragment>(F));
  case MCFragment::FT_PseudoProbe:
    return relaxPseudoProbeAddr(cast<MCPseudoProbeAddrFragment>(F));
  }
}

bool MCAssembler::layoutOnce() {
  ++stats::RelaxationSteps;

  bool Changed = false;
  for (MCSection &Sec : *this)
    for (MCFragment &Frag : Sec)
      if (relaxFragment(Frag))
        Changed = true;
  return Changed;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MCAssembler::dump() const {
  raw_ostream &OS = errs();

  OS << "<MCAssembler\n";
  OS << "  Sections:[\n    ";
  bool First = true;
  for (const MCSection &Sec : *this) {
    if (First)
      First = false;
    else
      OS << ",\n    ";
    Sec.dump();
  }
  OS << "],\n";
  OS << "  Symbols:[";

  First = true;
  for (const MCSymbol &Sym : symbols()) {
    if (First)
      First = false;
    else
      OS << ",\n           ";
    OS << "(";
    Sym.dump();
    OS << ", Index:" << Sym.getIndex() << ", ";
    OS << ")";
  }
  OS << "]>\n";
}
#endif