File: llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
Warning: line 606, column 9: Value stored to 'LRReg' is never read
1 | //===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | #include "MCTargetDesc/AArch64FixupKinds.h" |
10 | #include "MCTargetDesc/AArch64MCExpr.h" |
11 | #include "MCTargetDesc/AArch64MCTargetDesc.h" |
12 | #include "Utils/AArch64BaseInfo.h" |
13 | #include "llvm/ADT/Triple.h" |
14 | #include "llvm/BinaryFormat/MachO.h" |
15 | #include "llvm/MC/MCAsmBackend.h" |
16 | #include "llvm/MC/MCAssembler.h" |
17 | #include "llvm/MC/MCContext.h" |
18 | #include "llvm/MC/MCDirectives.h" |
19 | #include "llvm/MC/MCELFObjectWriter.h" |
20 | #include "llvm/MC/MCFixupKindInfo.h" |
21 | #include "llvm/MC/MCObjectWriter.h" |
22 | #include "llvm/MC/MCRegisterInfo.h" |
23 | #include "llvm/MC/MCSectionELF.h" |
24 | #include "llvm/MC/MCSectionMachO.h" |
25 | #include "llvm/MC/MCTargetOptions.h" |
26 | #include "llvm/MC/MCValue.h" |
27 | #include "llvm/Support/EndianStream.h" |
28 | #include "llvm/Support/ErrorHandling.h" |
29 | #include "llvm/Support/TargetRegistry.h" |
30 | using namespace llvm; |
31 | |
32 | namespace { |
33 | |
34 | class AArch64AsmBackend : public MCAsmBackend { |
35 | static const unsigned PCRelFlagVal = |
36 | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel; |
37 | protected: |
38 | Triple TheTriple; |
39 | |
40 | public: |
41 | AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian) |
42 | : MCAsmBackend(IsLittleEndian ? support::little : support::big), |
43 | TheTriple(TT) {} |
44 | |
45 | unsigned getNumFixupKinds() const override { |
46 | return AArch64::NumTargetFixupKinds; |
47 | } |
48 | |
49 | Optional<MCFixupKind> getFixupKind(StringRef Name) const override; |
50 | |
51 | const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override { |
52 | const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = { |
53 | // This table *must* be in the order that the fixup_* kinds are defined |
54 | // in AArch64FixupKinds.h. |
55 | // |
56 | // Name Offset (bits) Size (bits) Flags |
57 | {"fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal}, |
58 | {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal}, |
59 | {"fixup_aarch64_add_imm12", 10, 12, 0}, |
60 | {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0}, |
61 | {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0}, |
62 | {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0}, |
63 | {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0}, |
64 | {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0}, |
65 | {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal}, |
66 | {"fixup_aarch64_movw", 5, 16, 0}, |
67 | {"fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal}, |
68 | {"fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal}, |
69 | {"fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal}, |
70 | {"fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal}}; |
71 | |
72 | // Fixup kinds from .reloc directive are like R_AARCH64_NONE. They do not |
73 | // require any extra processing. |
74 | if (Kind >= FirstLiteralRelocationKind) |
75 | return MCAsmBackend::getFixupKindInfo(FK_NONE); |
76 | |
77 | if (Kind < FirstTargetFixupKind) |
78 | return MCAsmBackend::getFixupKindInfo(Kind); |
79 | |
80 | assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() && |
81 | "Invalid kind!"); |
82 | return Infos[Kind - FirstTargetFixupKind]; |
83 | } |
84 | |
85 | void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, |
86 | const MCValue &Target, MutableArrayRef<char> Data, |
87 | uint64_t Value, bool IsResolved, |
88 | const MCSubtargetInfo *STI) const override; |
89 | |
90 | bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value, |
91 | const MCRelaxableFragment *DF, |
92 | const MCAsmLayout &Layout) const override; |
93 | void relaxInstruction(MCInst &Inst, |
94 | const MCSubtargetInfo &STI) const override; |
95 | bool writeNopData(raw_ostream &OS, uint64_t Count) const override; |
96 | |
97 | unsigned getFixupKindContainereSizeInBytes(unsigned Kind) const; |
98 | |
99 | bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup, |
100 | const MCValue &Target) override; |
101 | }; |
102 | |
103 | } // end anonymous namespace |
104 | |
105 | /// The number of bytes the fixup may change. |
106 | static unsigned getFixupKindNumBytes(unsigned Kind) { |
107 | switch (Kind) { |
108 | default: |
109 | llvm_unreachable("Unknown fixup kind!"); |
110 | |
111 | case FK_Data_1: |
112 | return 1; |
113 | |
114 | case FK_Data_2: |
115 | case FK_SecRel_2: |
116 | return 2; |
117 | |
118 | case AArch64::fixup_aarch64_movw: |
119 | case AArch64::fixup_aarch64_pcrel_branch14: |
120 | case AArch64::fixup_aarch64_add_imm12: |
121 | case AArch64::fixup_aarch64_ldst_imm12_scale1: |
122 | case AArch64::fixup_aarch64_ldst_imm12_scale2: |
123 | case AArch64::fixup_aarch64_ldst_imm12_scale4: |
124 | case AArch64::fixup_aarch64_ldst_imm12_scale8: |
125 | case AArch64::fixup_aarch64_ldst_imm12_scale16: |
126 | case AArch64::fixup_aarch64_ldr_pcrel_imm19: |
127 | case AArch64::fixup_aarch64_pcrel_branch19: |
128 | return 3; |
129 | |
130 | case AArch64::fixup_aarch64_pcrel_adr_imm21: |
131 | case AArch64::fixup_aarch64_pcrel_adrp_imm21: |
132 | case AArch64::fixup_aarch64_pcrel_branch26: |
133 | case AArch64::fixup_aarch64_pcrel_call26: |
134 | case FK_Data_4: |
135 | case FK_SecRel_4: |
136 | return 4; |
137 | |
138 | case FK_Data_8: |
139 | return 8; |
140 | } |
141 | } |
142 | |
143 | static unsigned AdrImmBits(unsigned Value) { |
144 | unsigned lo2 = Value & 0x3; |
145 | unsigned hi19 = (Value & 0x1ffffc) >> 2; |
146 | return (hi19 << 5) | (lo2 << 29); |
147 | } |
148 | |
149 | static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target, |
150 | uint64_t Value, MCContext &Ctx, |
151 | const Triple &TheTriple, bool IsResolved) { |
152 | int64_t SignedValue = static_cast<int64_t>(Value); |
153 | switch (Fixup.getTargetKind()) { |
154 | default: |
155 | llvm_unreachable("Unknown fixup kind!"); |
156 | case AArch64::fixup_aarch64_pcrel_adr_imm21: |
157 | if (SignedValue > 2097151 || SignedValue < -2097152) |
158 | Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); |
159 | return AdrImmBits(Value & 0x1fffffULL); |
160 | case AArch64::fixup_aarch64_pcrel_adrp_imm21: |
161 | assert(!IsResolved); |
162 | if (TheTriple.isOSBinFormatCOFF()) |
163 | return AdrImmBits(Value & 0x1fffffULL); |
164 | return AdrImmBits((Value & 0x1fffff000ULL) >> 12); |
165 | case AArch64::fixup_aarch64_ldr_pcrel_imm19: |
166 | case AArch64::fixup_aarch64_pcrel_branch19: |
167 | // Signed 21-bit immediate |
168 | if (SignedValue > 2097151 || SignedValue < -2097152) |
169 | Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); |
170 | if (Value & 0x3) |
171 | Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned"); |
172 | // Low two bits are not encoded. |
173 | return (Value >> 2) & 0x7ffff; |
174 | case AArch64::fixup_aarch64_add_imm12: |
175 | case AArch64::fixup_aarch64_ldst_imm12_scale1: |
176 | if (TheTriple.isOSBinFormatCOFF() && !IsResolved) |
177 | Value &= 0xfff; |
178 | // Unsigned 12-bit immediate |
179 | if (Value >= 0x1000) |
180 | Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); |
181 | return Value; |
182 | case AArch64::fixup_aarch64_ldst_imm12_scale2: |
183 | if (TheTriple.isOSBinFormatCOFF() && !IsResolved) |
184 | Value &= 0xfff; |
185 | // Unsigned 12-bit immediate which gets multiplied by 2 |
186 | if (Value >= 0x2000) |
187 | Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); |
188 | if (Value & 0x1) |
189 | Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned"); |
190 | return Value >> 1; |
191 | case AArch64::fixup_aarch64_ldst_imm12_scale4: |
192 | if (TheTriple.isOSBinFormatCOFF() && !IsResolved) |
193 | Value &= 0xfff; |
194 | // Unsigned 12-bit immediate which gets multiplied by 4 |
195 | if (Value >= 0x4000) |
196 | Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); |
197 | if (Value & 0x3) |
198 | Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned"); |
199 | return Value >> 2; |
200 | case AArch64::fixup_aarch64_ldst_imm12_scale8: |
201 | if (TheTriple.isOSBinFormatCOFF() && !IsResolved) |
202 | Value &= 0xfff; |
203 | // Unsigned 12-bit immediate which gets multiplied by 8 |
204 | if (Value >= 0x8000) |
205 | Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); |
206 | if (Value & 0x7) |
207 | Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned"); |
208 | return Value >> 3; |
209 | case AArch64::fixup_aarch64_ldst_imm12_scale16: |
210 | if (TheTriple.isOSBinFormatCOFF() && !IsResolved) |
211 | Value &= 0xfff; |
212 | // Unsigned 12-bit immediate which gets multiplied by 16 |
213 | if (Value >= 0x10000) |
214 | Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); |
215 | if (Value & 0xf) |
216 | Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned"); |
217 | return Value >> 4; |
218 | case AArch64::fixup_aarch64_movw: { |
219 | AArch64MCExpr::VariantKind RefKind = |
220 | static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind()); |
221 | if (AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_ABS && |
222 | AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_SABS) { |
223 | if (!RefKind) { |
224 | // The fixup is an expression |
225 | if (SignedValue > 0xFFFF || SignedValue < -0xFFFF) |
226 | Ctx.reportError(Fixup.getLoc(), |
227 | "fixup value out of range [-0xFFFF, 0xFFFF]"); |
228 | |
229 | // Invert the negative immediate because it will feed into a MOVN. |
230 | if (SignedValue < 0) |
231 | SignedValue = ~SignedValue; |
232 | Value = static_cast<uint64_t>(SignedValue); |
233 | } else |
234 | // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't |
235 | // ever be resolved in the assembler. |
236 | Ctx.reportError(Fixup.getLoc(), |
237 | "relocation for a thread-local variable points to an " |
238 | "absolute symbol"); |
239 | return Value; |
240 | } |
241 | |
242 | if (!IsResolved) { |
243 | // FIXME: Figure out when this can actually happen, and verify our |
244 | // behavior. |
245 | Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet " |
246 | "implemented"); |
247 | return Value; |
248 | } |
249 | |
250 | if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) { |
251 | switch (AArch64MCExpr::getAddressFrag(RefKind)) { |
252 | case AArch64MCExpr::VK_G0: |
253 | break; |
254 | case AArch64MCExpr::VK_G1: |
255 | SignedValue = SignedValue >> 16; |
256 | break; |
257 | case AArch64MCExpr::VK_G2: |
258 | SignedValue = SignedValue >> 32; |
259 | break; |
260 | case AArch64MCExpr::VK_G3: |
261 | SignedValue = SignedValue >> 48; |
262 | break; |
263 | default: |
264 | llvm_unreachable("Variant kind doesn't correspond to fixup"); |
265 | } |
266 | |
267 | } else { |
268 | switch (AArch64MCExpr::getAddressFrag(RefKind)) { |
269 | case AArch64MCExpr::VK_G0: |
270 | break; |
271 | case AArch64MCExpr::VK_G1: |
272 | Value = Value >> 16; |
273 | break; |
274 | case AArch64MCExpr::VK_G2: |
275 | Value = Value >> 32; |
276 | break; |
277 | case AArch64MCExpr::VK_G3: |
278 | Value = Value >> 48; |
279 | break; |
280 | default: |
281 | llvm_unreachable("Variant kind doesn't correspond to fixup"); |
282 | } |
283 | } |
284 | |
285 | if (RefKind & AArch64MCExpr::VK_NC) { |
286 | Value &= 0xFFFF; |
287 | } |
288 | else if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) { |
289 | if (SignedValue > 0xFFFF || SignedValue < -0xFFFF) |
290 | Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); |
291 | |
292 | // Invert the negative immediate because it will feed into a MOVN. |
293 | if (SignedValue < 0) |
294 | SignedValue = ~SignedValue; |
295 | Value = static_cast<uint64_t>(SignedValue); |
296 | } |
297 | else if (Value > 0xFFFF) { |
298 | Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); |
299 | } |
300 | return Value; |
301 | } |
302 | case AArch64::fixup_aarch64_pcrel_branch14: |
303 | // Signed 16-bit immediate |
304 | if (SignedValue > 32767 || SignedValue < -32768) |
305 | Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); |
306 | // Low two bits are not encoded (4-byte alignment assumed). |
307 | if (Value & 0x3) |
308 | Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned"); |
309 | return (Value >> 2) & 0x3fff; |
310 | case AArch64::fixup_aarch64_pcrel_branch26: |
311 | case AArch64::fixup_aarch64_pcrel_call26: |
312 | // Signed 28-bit immediate |
313 | if (SignedValue > 134217727 || SignedValue < -134217728) |
314 | Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); |
315 | // Low two bits are not encoded (4-byte alignment assumed). |
316 | if (Value & 0x3) |
317 | Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned"); |
318 | return (Value >> 2) & 0x3ffffff; |
319 | case FK_Data_1: |
320 | case FK_Data_2: |
321 | case FK_Data_4: |
322 | case FK_Data_8: |
323 | case FK_SecRel_2: |
324 | case FK_SecRel_4: |
325 | return Value; |
326 | } |
327 | } |
328 | |
329 | Optional<MCFixupKind> AArch64AsmBackend::getFixupKind(StringRef Name) const { |
330 | if (!TheTriple.isOSBinFormatELF()) |
331 | return None; |
332 | |
333 | unsigned Type = llvm::StringSwitch<unsigned>(Name) |
334 | #define ELF_RELOC(X, Y) .Case(#X, Y) |
335 | #include "llvm/BinaryFormat/ELFRelocs/AArch64.def" |
336 | #undef ELF_RELOC |
337 | .Case("BFD_RELOC_NONE", ELF::R_AARCH64_NONE) |
338 | .Case("BFD_RELOC_16", ELF::R_AARCH64_ABS16) |
339 | .Case("BFD_RELOC_32", ELF::R_AARCH64_ABS32) |
340 | .Case("BFD_RELOC_64", ELF::R_AARCH64_ABS64) |
341 | .Default(-1u); |
342 | if (Type == -1u) |
343 | return None; |
344 | return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type); |
345 | } |
346 | |
347 | /// getFixupKindContainereSizeInBytes - The number of bytes of the |
348 | /// container involved in a big-endian fixup, or 0 if the item is little-endian. |
349 | unsigned AArch64AsmBackend::getFixupKindContainereSizeInBytes(unsigned Kind) const { |
350 | if (Endian == support::little) |
351 | return 0; |
352 | |
353 | switch (Kind) { |
354 | default: |
355 | llvm_unreachable("Unknown fixup kind!"); |
356 | |
357 | case FK_Data_1: |
358 | return 1; |
359 | case FK_Data_2: |
360 | return 2; |
361 | case FK_Data_4: |
362 | return 4; |
363 | case FK_Data_8: |
364 | return 8; |
365 | |
366 | case AArch64::fixup_aarch64_movw: |
367 | case AArch64::fixup_aarch64_pcrel_branch14: |
368 | case AArch64::fixup_aarch64_add_imm12: |
369 | case AArch64::fixup_aarch64_ldst_imm12_scale1: |
370 | case AArch64::fixup_aarch64_ldst_imm12_scale2: |
371 | case AArch64::fixup_aarch64_ldst_imm12_scale4: |
372 | case AArch64::fixup_aarch64_ldst_imm12_scale8: |
373 | case AArch64::fixup_aarch64_ldst_imm12_scale16: |
374 | case AArch64::fixup_aarch64_ldr_pcrel_imm19: |
375 | case AArch64::fixup_aarch64_pcrel_branch19: |
376 | case AArch64::fixup_aarch64_pcrel_adr_imm21: |
377 | case AArch64::fixup_aarch64_pcrel_adrp_imm21: |
378 | case AArch64::fixup_aarch64_pcrel_branch26: |
379 | case AArch64::fixup_aarch64_pcrel_call26: |
380 | // Instructions are always little endian |
381 | return 0; |
382 | } |
383 | } |
384 | |
385 | void AArch64AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, |
386 | const MCValue &Target, |
387 | MutableArrayRef<char> Data, uint64_t Value, |
388 | bool IsResolved, |
389 | const MCSubtargetInfo *STI) const { |
390 | if (!Value) |
391 | return; // Doesn't change encoding. |
392 | unsigned Kind = Fixup.getKind(); |
393 | if (Kind >= FirstLiteralRelocationKind) |
394 | return; |
395 | unsigned NumBytes = getFixupKindNumBytes(Kind); |
396 | MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind()); |
397 | MCContext &Ctx = Asm.getContext(); |
398 | int64_t SignedValue = static_cast<int64_t>(Value); |
399 | // Apply any target-specific value adjustments. |
400 | Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved); |
401 | |
402 | // Shift the value into position. |
403 | Value <<= Info.TargetOffset; |
404 | |
405 | unsigned Offset = Fixup.getOffset(); |
406 | assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!"); |
407 | |
408 | // Used to point to big endian bytes. |
409 | unsigned FulleSizeInBytes = getFixupKindContainereSizeInBytes(Fixup.getKind()); |
410 | |
411 | // For each byte of the fragment that the fixup touches, mask in the |
412 | // bits from the fixup value. |
413 | if (FulleSizeInBytes == 0) { |
414 | // Handle as little-endian |
415 | for (unsigned i = 0; i != NumBytes; ++i) { |
416 | Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff); |
417 | } |
418 | } else { |
419 | // Handle as big-endian |
420 | assert((Offset + FulleSizeInBytes) <= Data.size() && "Invalid fixup size!"); |
421 | assert(NumBytes <= FulleSizeInBytes && "Invalid fixup size!"); |
422 | for (unsigned i = 0; i != NumBytes; ++i) { |
423 | unsigned Idx = FulleSizeInBytes - 1 - i; |
424 | Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff); |
425 | } |
426 | } |
427 | |
428 | // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to |
429 | // handle this more cleanly. This may affect the output of -show-mc-encoding. |
430 | AArch64MCExpr::VariantKind RefKind = |
431 | static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind()); |
432 | if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS || |
433 | (!RefKind && Fixup.getTargetKind() == AArch64::fixup_aarch64_movw)) { |
434 | // If the immediate is negative, generate MOVN else MOVZ. |
435 | // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ. |
436 | if (SignedValue < 0) |
437 | Data[Offset + 3] &= ~(1 << 6); |
438 | else |
439 | Data[Offset + 3] |= (1 << 6); |
440 | } |
441 | } |
442 | |
443 | bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, |
444 | uint64_t Value, |
445 | const MCRelaxableFragment *DF, |
446 | const MCAsmLayout &Layout) const { |
447 | // FIXME: This isn't correct for AArch64. Just moving the "generic" logic |
448 | // into the targets for now. |
449 | // |
450 | // Relax if the value is too big for a (signed) i8. |
451 | return int64_t(Value) != int64_t(int8_t(Value)); |
452 | } |
453 | |
454 | void AArch64AsmBackend::relaxInstruction(MCInst &Inst, |
455 | const MCSubtargetInfo &STI) const { |
456 | llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented"); |
457 | } |
458 | |
459 | bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const { |
460 | // If the count is not 4-byte aligned, we must be writing data into the text |
461 | // section (otherwise we have unaligned instructions, and thus have far |
462 | // bigger problems), so just write zeros instead. |
463 | OS.write_zeros(Count % 4); |
464 | |
465 | // We are properly aligned, so write NOPs as requested. |
466 | Count /= 4; |
467 | for (uint64_t i = 0; i != Count; ++i) |
468 | support::endian::write<uint32_t>(OS, 0xd503201f, Endian); |
469 | return true; |
470 | } |
471 | |
472 | bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm, |
473 | const MCFixup &Fixup, |
474 | const MCValue &Target) { |
475 | unsigned Kind = Fixup.getKind(); |
476 | if (Kind >= FirstLiteralRelocationKind) |
477 | return true; |
478 | |
479 | // The ADRP instruction adds some multiple of 0x1000 to the current PC & |
480 | // ~0xfff. This means that the required offset to reach a symbol can vary by |
481 | // up to one step depending on where the ADRP is in memory. For example: |
482 | // |
483 | // ADRP x0, there |
484 | // there: |
485 | // |
486 | // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and |
487 | // we'll need that as an offset. At any other address "there" will be in the |
488 | // same page as the ADRP and the instruction should encode 0x0. Assuming the |
489 | // section isn't 0x1000-aligned, we therefore need to delegate this decision |
490 | // to the linker -- a relocation! |
491 | if (Kind == AArch64::fixup_aarch64_pcrel_adrp_imm21) |
492 | return true; |
493 | |
494 | return false; |
495 | } |
496 | |
497 | namespace { |
498 | |
499 | namespace CU { |
500 | |
501 | /// Compact unwind encoding values. |
502 | enum CompactUnwindEncodings { |
503 | /// A "frameless" leaf function, where no non-volatile registers are |
504 | /// saved. The return address remains in LR throughout the function. |
505 | UNWIND_ARM64_MODE_FRAMELESS = 0x02000000, |
506 | |
507 | /// No compact unwind encoding available. Instead the low 23-bits of |
508 | /// the compact unwind encoding is the offset of the DWARF FDE in the |
509 | /// __eh_frame section. This mode is never used in object files. It is only |
510 | /// generated by the linker in final linked images, which have only DWARF info |
511 | /// for a function. |
512 | UNWIND_ARM64_MODE_DWARF = 0x03000000, |
513 | |
514 | /// This is a standard arm64 prologue where FP/LR are immediately |
515 | /// pushed on the stack, then SP is copied to FP. If there are any |
516 | /// non-volatile registers saved, they are copied into the stack frame in pairs |
517 | /// in a contiguous range right below the saved FP/LR pair. Any subset of the |
518 | /// five X pairs and four D pairs can be saved, but the memory layout must be |
519 | /// in register number order. |
520 | UNWIND_ARM64_MODE_FRAME = 0x04000000, |
521 | |
522 | /// Frame register pair encodings. |
523 | UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001, |
524 | UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002, |
525 | UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004, |
526 | UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008, |
527 | UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010, |
528 | UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100, |
529 | UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200, |
530 | UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400, |
531 | UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800 |
532 | }; |
533 | |
534 | } // end CU namespace |
535 | |
536 | // FIXME: This should be in a separate file. |
537 | class DarwinAArch64AsmBackend : public AArch64AsmBackend { |
538 | const MCRegisterInfo &MRI; |
539 | |
540 | /// Encode compact unwind stack adjustment for frameless functions. |
541 | /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h. |
542 | /// The stack size always needs to be 16 byte aligned. |
543 | uint32_t encodeStackAdjustment(uint32_t StackSize) const { |
544 | return (StackSize / 16) << 12; |
545 | } |
546 | |
547 | public: |
548 | DarwinAArch64AsmBackend(const Target &T, const Triple &TT, |
549 | const MCRegisterInfo &MRI) |
550 | : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI) {} |
551 | |
552 | std::unique_ptr<MCObjectTargetWriter> |
553 | createObjectTargetWriter() const override { |
554 | uint32_t CPUType = cantFail(MachO::getCPUType(TheTriple)); |
555 | uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TheTriple)); |
556 | return createAArch64MachObjectWriter(CPUType, CPUSubType, |
557 | TheTriple.isArch32Bit()); |
558 | } |
559 | |
560 | /// Generate the compact unwind encoding from the CFI directives. |
561 | uint32_t generateCompactUnwindEncoding( |
562 | ArrayRef<MCCFIInstruction> Instrs) const override { |
563 | if (Instrs.empty()) |
564 | return CU::UNWIND_ARM64_MODE_FRAMELESS; |
565 | |
566 | bool HasFP = false; |
567 | unsigned StackSize = 0; |
568 | |
569 | uint32_t CompactUnwindEncoding = 0; |
570 | int CurOffset = 0; |
571 | for (size_t i = 0, e = Instrs.size(); i != e; ++i) { |
572 | const MCCFIInstruction &Inst = Instrs[i]; |
573 | |
574 | switch (Inst.getOperation()) { |
575 | default: |
576 | // Cannot handle this directive: bail out. |
577 | return CU::UNWIND_ARM64_MODE_DWARF; |
578 | case MCCFIInstruction::OpDefCfa: { |
579 | // Defines a frame pointer. |
580 | unsigned XReg = |
581 | getXRegFromWReg(*MRI.getLLVMRegNum(Inst.getRegister(), true)); |
582 | |
583 | // Other CFA registers than FP are not supported by compact unwind. |
584 | // Fallback on DWARF. |
585 | // FIXME: When opt-remarks are supported in MC, add a remark to notify |
586 | // the user. |
587 | if (XReg != AArch64::FP) |
588 | return CU::UNWIND_ARM64_MODE_DWARF; |
589 | |
590 | assert(XReg == AArch64::FP && "Invalid frame pointer!"); |
591 | assert(i + 2 < e && "Insufficient CFI instructions to define a frame!"); |
592 | |
593 | const MCCFIInstruction &LRPush = Instrs[++i]; |
594 | assert(LRPush.getOperation() == MCCFIInstruction::OpOffset && |
595 | "Link register not pushed!"); |
596 | const MCCFIInstruction &FPPush = Instrs[++i]; |
597 | assert(FPPush.getOperation() == MCCFIInstruction::OpOffset && |
598 | "Frame pointer not pushed!"); |
599 | |
600 | assert(FPPush.getOffset() + 8 == LRPush.getOffset()); |
601 | CurOffset = FPPush.getOffset(); |
602 | |
603 | unsigned LRReg = *MRI.getLLVMRegNum(LRPush.getRegister(), true); |
604 | unsigned FPReg = *MRI.getLLVMRegNum(FPPush.getRegister(), true); |
605 | |
606 | LRReg = getXRegFromWReg(LRReg); |
Value stored to 'LRReg' is never read | |
607 | FPReg = getXRegFromWReg(FPReg); |
608 | |
609 | assert(LRReg == AArch64::LR && FPReg == AArch64::FP && |
610 | "Pushing invalid registers for frame!"); |
611 | |
612 | // Indicate that the function has a frame. |
613 | CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME; |
614 | HasFP = true; |
615 | break; |
616 | } |
617 | case MCCFIInstruction::OpDefCfaOffset: { |
618 | assert(StackSize == 0 && "We already have the CFA offset!"); |
619 | StackSize = std::abs(Inst.getOffset()); |
620 | break; |
621 | } |
622 | case MCCFIInstruction::OpOffset: { |
623 | // Registers are saved in pairs. We expect there to be two consecutive |
624 | // `.cfi_offset' instructions with the appropriate registers specified. |
625 | unsigned Reg1 = *MRI.getLLVMRegNum(Inst.getRegister(), true); |
626 | if (i + 1 == e) |
627 | return CU::UNWIND_ARM64_MODE_DWARF; |
628 | |
629 | if (CurOffset != 0 && Inst.getOffset() != CurOffset - 8) |
630 | return CU::UNWIND_ARM64_MODE_DWARF; |
631 | CurOffset = Inst.getOffset(); |
632 | |
633 | const MCCFIInstruction &Inst2 = Instrs[++i]; |
634 | if (Inst2.getOperation() != MCCFIInstruction::OpOffset) |
635 | return CU::UNWIND_ARM64_MODE_DWARF; |
636 | unsigned Reg2 = *MRI.getLLVMRegNum(Inst2.getRegister(), true); |
637 | |
638 | if (Inst2.getOffset() != CurOffset - 8) |
639 | return CU::UNWIND_ARM64_MODE_DWARF; |
640 | CurOffset = Inst2.getOffset(); |
641 | |
642 | // N.B. The encodings must be in register number order, and the X |
643 | // registers before the D registers. |
644 | |
645 | // X19/X20 pair = 0x00000001, |
646 | // X21/X22 pair = 0x00000002, |
647 | // X23/X24 pair = 0x00000004, |
648 | // X25/X26 pair = 0x00000008, |
649 | // X27/X28 pair = 0x00000010 |
650 | Reg1 = getXRegFromWReg(Reg1); |
651 | Reg2 = getXRegFromWReg(Reg2); |
652 | |
653 | if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 && |
654 | (CompactUnwindEncoding & 0xF1E) == 0) |
655 | CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR; |
656 | else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 && |
657 | (CompactUnwindEncoding & 0xF1C) == 0) |
658 | CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR; |
659 | else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 && |
660 | (CompactUnwindEncoding & 0xF18) == 0) |
661 | CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR; |
662 | else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 && |
663 | (CompactUnwindEncoding & 0xF10) == 0) |
664 | CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR; |
665 | else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 && |
666 | (CompactUnwindEncoding & 0xF00) == 0) |
667 | CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR; |
668 | else { |
669 | Reg1 = getDRegFromBReg(Reg1); |
670 | Reg2 = getDRegFromBReg(Reg2); |
671 | |
672 | // D8/D9 pair = 0x00000100, |
673 | // D10/D11 pair = 0x00000200, |
674 | // D12/D13 pair = 0x00000400, |
675 | // D14/D15 pair = 0x00000800 |
676 | if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 && |
677 | (CompactUnwindEncoding & 0xE00) == 0) |
678 | CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR; |
679 | else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 && |
680 | (CompactUnwindEncoding & 0xC00) == 0) |
681 | CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR; |
682 | else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 && |
683 | (CompactUnwindEncoding & 0x800) == 0) |
684 | CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR; |
685 | else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15) |
686 | CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR; |
687 | else |
688 | // A pair was pushed which we cannot handle. |
689 | return CU::UNWIND_ARM64_MODE_DWARF; |
690 | } |
691 | |
692 | break; |
693 | } |
694 | } |
695 | } |
696 | |
697 | if (!HasFP) { |
698 | // With compact unwind info we can only represent stack adjustments of up |
699 | // to 65520 bytes. |
700 | if (StackSize > 65520) |
701 | return CU::UNWIND_ARM64_MODE_DWARF; |
702 | |
703 | CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS; |
704 | CompactUnwindEncoding |= encodeStackAdjustment(StackSize); |
705 | } |
706 | |
707 | return CompactUnwindEncoding; |
708 | } |
709 | }; |
710 | |
711 | } // end anonymous namespace |
712 | |
713 | namespace { |
714 | |
715 | class ELFAArch64AsmBackend : public AArch64AsmBackend { |
716 | public: |
717 | uint8_t OSABI; |
718 | bool IsILP32; |
719 | |
720 | ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI, |
721 | bool IsLittleEndian, bool IsILP32) |
722 | : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI), |
723 | IsILP32(IsILP32) {} |
724 | |
725 | std::unique_ptr<MCObjectTargetWriter> |
726 | createObjectTargetWriter() const override { |
727 | return createAArch64ELFObjectWriter(OSABI, IsILP32); |
728 | } |
729 | }; |
730 | |
731 | } |
732 | |
733 | namespace { |
734 | class COFFAArch64AsmBackend : public AArch64AsmBackend { |
735 | public: |
736 | COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple) |
737 | : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {} |
738 | |
739 | std::unique_ptr<MCObjectTargetWriter> |
740 | createObjectTargetWriter() const override { |
741 | return createAArch64WinCOFFObjectWriter(); |
742 | } |
743 | }; |
744 | } |
745 | |
746 | MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T, |
747 | const MCSubtargetInfo &STI, |
748 | const MCRegisterInfo &MRI, |
749 | const MCTargetOptions &Options) { |
750 | const Triple &TheTriple = STI.getTargetTriple(); |
751 | if (TheTriple.isOSBinFormatMachO()) { |
752 | return new DarwinAArch64AsmBackend(T, TheTriple, MRI); |
753 | } |
754 | |
755 | if (TheTriple.isOSBinFormatCOFF()) |
756 | return new COFFAArch64AsmBackend(T, TheTriple); |
757 | |
758 | assert(TheTriple.isOSBinFormatELF() && "Invalid target"); |
759 | |
760 | uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); |
761 | bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32; |
762 | return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true, |
763 | IsILP32); |
764 | } |
765 | |
766 | MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T, |
767 | const MCSubtargetInfo &STI, |
768 | const MCRegisterInfo &MRI, |
769 | const MCTargetOptions &Options) { |
770 | const Triple &TheTriple = STI.getTargetTriple(); |
771 | assert(TheTriple.isOSBinFormatELF() && |
772 | "Big endian is only supported for ELF targets!"); |
773 | uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); |
774 | bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32; |
775 | return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false, |
776 | IsILP32); |
777 | } |