//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

namespace {

class AArch64AsmBackend : public MCAsmBackend {
  static const unsigned PCRelFlagVal =
      MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;
  Triple TheTriple;

public:
  AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
      : MCAsmBackend(IsLittleEndian ? support::little : support::big),
        TheTriple(TT) {}

  unsigned getNumFixupKinds() const override {
    return AArch64::NumTargetFixupKinds;
  }

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
        // This table *must* be in the order that the fixup_* kinds are defined
        // in AArch64FixupKinds.h.
        //
        // Name                             Offset (bits) Size (bits) Flags
        {"fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_add_imm12", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
        {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_movw", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal},
        {"fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal},
        {"fixup_aarch64_tlsdesc_call", 0, 0, 0}};

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }
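
  // Illustrative note (added by the editor, not part of the original source):
  // the Offset/Size pair in the table above describes where the fixup's bits
  // land in the 32-bit instruction word. For example, fixup_aarch64_add_imm12
  // has Offset 10 and Size 12, so the resolved 12-bit immediate is shifted
  // left by 10 in applyFixup() and ORed into bits [21:10] of the ADD/SUB
  // (immediate) encoding.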

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;

  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override;
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;
  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count) const override;

  void HandleAssemblerFlag(MCAssemblerFlag Flag) {}

  unsigned getPointerSize() const { return 8; }

  unsigned getFixupKindContainerSizeInBytes(unsigned Kind) const;

  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target) override;
};

} // end anonymous namespace

/// The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case AArch64::fixup_aarch64_tlsdesc_call:
    return 0;

  case FK_Data_1:
    return 1;

  case AArch64::fixup_aarch64_movw:
  case FK_Data_2:
  case FK_SecRel_2:
    return 2;

  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    return 3;

  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
  case FK_Data_4:
  case FK_SecRel_4:
    return 4;

  case FK_Data_8:
    return 8;
  }
}

static unsigned AdrImmBits(unsigned Value) {
  unsigned lo2 = Value & 0x3;
  unsigned hi19 = (Value & 0x1ffffc) >> 2;
  return (hi19 << 5) | (lo2 << 29);
}
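
// Worked example (added for illustration): the ADR/ADRP immediate is split
// into immlo (bits 30:29) and immhi (bits 23:5) of the instruction word.
// For Value = 0x1235: lo2 = 0x1 and hi19 = 0x48d, so AdrImmBits returns
// (0x48d << 5) | (0x1 << 29) = 0x200091a0, ready to be ORed into the opcode.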

static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext &Ctx, const Triple &TheTriple,
                                 bool IsResolved) {
  unsigned Kind = Fixup.getKind();
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    if (SignedValue > 2097151 || SignedValue < -2097152)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    assert(!IsResolved);
    if (TheTriple.isOSBinFormatCOFF())
      return AdrImmBits(Value & 0x1fffffULL);
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 21-bit immediate
    if (SignedValue > 2097151 || SignedValue < -2097152)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate
    if (Value >= 0x1000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 2
    if (Value >= 0x2000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (Value >= 0x4000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (Value >= 0x8000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x7)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (Value >= 0x10000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0xf)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
    return Value >> 4;
  case AArch64::fixup_aarch64_movw:
    Ctx.reportError(Fixup.getLoc(),
                    "no resolvable MOVZ/MOVK fixups supported yet");
    return Value;
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit immediate
    if (SignedValue > 32767 || SignedValue < -32768)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Signed 28-bit immediate
    if (SignedValue > 134217727 || SignedValue < -134217728)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_SecRel_2:
  case FK_SecRel_4:
    return Value;
  }
}
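
// Worked example (added for illustration): for fixup_aarch64_ldst_imm12_scale8
// with Value = 0x50, the value is 8-byte aligned and below 0x8000, so
// adjustFixupValue returns 0x50 >> 3 = 0xa, which applyFixup() then shifts
// into place by Info.TargetOffset (10). A Value of 0x54 would instead report
// "fixup must be 8-byte aligned".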

/// getFixupKindContainerSizeInBytes - The number of bytes of the container
/// involved in a big-endian fixup, or 0 if the item is little endian.
unsigned AArch64AsmBackend::getFixupKindContainerSizeInBytes(unsigned Kind) const {
  if (Endian == support::little)
    return 0;

  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;
  case FK_Data_8:
    return 8;

  case AArch64::fixup_aarch64_tlsdesc_call:
  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Instructions are always little endian
    return 0;
  }
}

void AArch64AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                   const MCValue &Target,
                                   MutableArrayRef<char> Data, uint64_t Value,
                                   bool IsResolved,
                                   const MCSubtargetInfo *STI) const {
  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  if (!Value)
    return; // Doesn't change encoding.
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  MCContext &Ctx = Asm.getContext();
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Value, Ctx, TheTriple, IsResolved);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FullSizeInBytes = getFixupKindContainerSizeInBytes(Fixup.getKind());

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  if (FullSizeInBytes == 0) {
    // Handle as little-endian
    for (unsigned i = 0; i != NumBytes; ++i) {
      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  } else {
    // Handle as big-endian
    assert((Offset + FullSizeInBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FullSizeInBytes && "Invalid fixup size!");
    for (unsigned i = 0; i != NumBytes; ++i) {
      unsigned Idx = FullSizeInBytes - 1 - i;
      Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }
}
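
// Worked example (added for illustration): a fixup_aarch64_pcrel_branch26 on a
// little-endian target touches NumBytes = 4 bytes, so Data[Offset..Offset+3]
// are ORed with the low four bytes of the adjusted, shifted value in
// little-endian order, filling in the imm26 field that the encoder left as
// zero in the B/BL instruction.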

bool AArch64AsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                          const MCSubtargetInfo &STI) const {
  return false;
}

bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                             uint64_t Value,
                                             const MCRelaxableFragment *DF,
                                             const MCAsmLayout &Layout) const {
  // FIXME: This isn't correct for AArch64. Just moving the "generic" logic
  // into the targets for now.
  //
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

void AArch64AsmBackend::relaxInstruction(const MCInst &Inst,
                                         const MCSubtargetInfo &STI,
                                         MCInst &Res) const {
  llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented");
}

bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;
  for (uint64_t i = 0; i != Count; ++i)
    support::endian::write<uint32_t>(OS, 0xd503201f, Endian);
  return true;
}
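
// Worked example (added for illustration): asked to pad 10 bytes, writeNopData
// emits 10 % 4 = 2 zero bytes followed by 10 / 4 = 2 NOP instructions
// (0xd503201f each), for a total of 10 bytes of padding.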

bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                              const MCFixup &Fixup,
                                              const MCValue &Target) {
  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //  there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset. At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21)
    return true;

  AArch64MCExpr::VariantKind RefKind =
      static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
  AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
  // LDR GOT relocations need a relocation
  if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_ldr_pcrel_imm19 &&
      SymLoc == AArch64MCExpr::VK_GOT)
    return true;
  return false;
}

namespace {

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return address remains in LR throughout the function.
  UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,

  /// No compact unwind encoding available. Instead the low 23 bits of
  /// the compact unwind encoding are the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF
  /// info for a function.
  UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If any non-volatile
  /// registers are saved, they are copied into the stack frame in pairs in a
  /// contiguous range right below the saved FP/LR pair. Any subset of the
  /// five X pairs and four D pairs can be saved, but the memory layout must be
  /// in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};

} // end CU namespace
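
// Illustrative example (added by the editor, not part of the original source):
// a frame-based function whose prologue saves x19/x20 and d8/d9 would be
// described as
//   UNWIND_ARM64_MODE_FRAME | UNWIND_ARM64_FRAME_X19_X20_PAIR |
//   UNWIND_ARM64_FRAME_D8_D9_PAIR
// i.e. 0x04000000 | 0x00000001 | 0x00000100 = 0x04000101, which is what
// generateCompactUnwindEncoding() below computes from the corresponding
// .cfi_def_cfa / .cfi_offset directives.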

// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;

  /// Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }
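
  // Worked example (added for illustration): a frameless function with a
  // 64-byte stack adjustment encodes 64 / 16 = 4 in bits [23:12], i.e.
  // encodeStackAdjustment(64) == 0x4000, which is then ORed together with
  // UNWIND_ARM64_MODE_FRAMELESS.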

public:
  DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
                          const MCRegisterInfo &MRI)
      : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64MachObjectWriter(MachO::CPU_TYPE_ARM64,
                                         MachO::CPU_SUBTYPE_ARM64_ALL);
  }

  /// Generate the compact unwind encoding from the CFI directives.
  uint32_t generateCompactUnwindEncoding(
      ArrayRef<MCCFIInstruction> Instrs) const override {
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;

    bool HasFP = false;
    unsigned StackSize = 0;

    uint32_t CompactUnwindEncoding = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer.
        unsigned XReg =
            getXRegFromWReg(MRI.getLLVMRegNum(Inst.getRegister(), true));

        // Other CFA registers than FP are not supported by compact unwind.
        // Fallback on DWARF.
        // FIXME: When opt-remarks are supported in MC, add a remark to notify
        // the user.
        if (XReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        assert(XReg == AArch64::FP && "Invalid frame pointer!");
        assert(i + 2 < e && "Insufficient CFI instructions to define a frame!");

        const MCCFIInstruction &LRPush = Instrs[++i];
        assert(LRPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Link register not pushed!");
        const MCCFIInstruction &FPPush = Instrs[++i];
        assert(FPPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Frame pointer not pushed!");

        unsigned LRReg = MRI.getLLVMRegNum(LRPush.getRegister(), true);
        unsigned FPReg = MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        assert(LRReg == AArch64::LR && FPReg == AArch64::FP &&
               "Pushing invalid registers for frame!");

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        assert(StackSize == 0 && "We already have the CFA offset!");
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        unsigned Reg1 = MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        unsigned Reg2 = MRI.getLLVMRegNum(Inst2.getRegister(), true);

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers.

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair   = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
};

} // end anonymous namespace

namespace {

class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsILP32;

  ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
                       bool IsLittleEndian, bool IsILP32)
      : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
        IsILP32(IsILP32) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64ELFObjectWriter(OSABI, IsILP32);
  }
};

}

namespace {
class COFFAArch64AsmBackend : public AArch64AsmBackend {
public:
  COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
      : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64WinCOFFObjectWriter();
  }
};
}

MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO())
    return new DarwinAArch64AsmBackend(T, TheTriple, MRI);

  if (TheTriple.isOSBinFormatCOFF())
    return new COFFAArch64AsmBackend(T, TheTriple);

  assert(TheTriple.isOSBinFormatELF() && "Invalid target");

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = Options.getABIName() == "ilp32";
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true,
                                  IsILP32);
}
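
// Context note (added by the editor, a sketch of typical usage rather than
// anything stated in this file): these factory functions are normally
// registered with the TargetRegistry in the AArch64 MCTargetDesc
// initialization (e.g. via TargetRegistry::RegisterMCAsmBackend), so clients
// usually obtain an AArch64 backend through Target::createMCAsmBackend rather
// than by calling them directly.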

MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  assert(TheTriple.isOSBinFormatELF() &&
         "Big endian is only supported for ELF targets!");
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = Options.getABIName() == "ilp32";
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false,
                                  IsILP32);
}
|