AArch64AsmPrinter.cpp
1//===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains a printer that converts from our internal representation
10// of machine-dependent LLVM code to the AArch64 assembly language.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AArch64.h"
15#include "AArch64MCInstLower.h"
17#include "AArch64RegisterInfo.h"
18#include "AArch64Subtarget.h"
29#include "llvm/ADT/StringRef.h"
30#include "llvm/ADT/Twine.h"
44#include "llvm/IR/DataLayout.h"
46#include "llvm/MC/MCAsmInfo.h"
47#include "llvm/MC/MCContext.h"
48#include "llvm/MC/MCInst.h"
52#include "llvm/MC/MCStreamer.h"
53#include "llvm/MC/MCSymbol.h"
62#include <algorithm>
63#include <cassert>
64#include <cstdint>
65#include <map>
66#include <memory>
67
68using namespace llvm;
69
70#define DEBUG_TYPE "asm-printer"
71
72namespace {
73
74class AArch64AsmPrinter : public AsmPrinter {
75 AArch64MCInstLower MCInstLowering;
76 FaultMaps FM;
77 const AArch64Subtarget *STI;
78 bool ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = false;
79
80public:
81 AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
82 : AsmPrinter(TM, std::move(Streamer)), MCInstLowering(OutContext, *this),
83 FM(*this) {}
84
85 StringRef getPassName() const override { return "AArch64 Assembly Printer"; }
86
87 /// Wrapper for MCInstLowering.lowerOperand() for the
88 /// tblgen'erated pseudo lowering.
89 bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
90 return MCInstLowering.lowerOperand(MO, MCOp);
91 }
92
93 void emitStartOfAsmFile(Module &M) override;
94 void emitJumpTableInfo() override;
95 std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
96 codeview::JumpTableEntrySize>
97 getCodeViewJumpTableInfo(int JTI, const MachineInstr *BranchInstr,
98 const MCSymbol *BranchLabel) const override;
99
100 void emitFunctionEntryLabel() override;
101
102 void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);
103
104 void LowerMOPS(MCStreamer &OutStreamer, const MachineInstr &MI);
105
106 void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
107 const MachineInstr &MI);
108 void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
109 const MachineInstr &MI);
110 void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
111 const MachineInstr &MI);
112 void LowerFAULTING_OP(const MachineInstr &MI);
113
114 void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
115 void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
116 void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);
117 void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, bool Typed);
118
119 typedef std::tuple<unsigned, bool, uint32_t, bool, uint64_t>
120 HwasanMemaccessTuple;
121 std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
122 void LowerKCFI_CHECK(const MachineInstr &MI);
123 void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
124 void emitHwasanMemaccessSymbols(Module &M);
125
126 void emitSled(const MachineInstr &MI, SledKind Kind);
127
128 /// tblgen'erated driver function for lowering simple MI->MC
129 /// pseudo instructions.
130 bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
131 const MachineInstr *MI);
132
133 void emitInstruction(const MachineInstr *MI) override;
134
135 void emitFunctionHeaderComment() override;
136
137 void getAnalysisUsage(AnalysisUsage &AU) const override {
138 AsmPrinter::getAnalysisUsage(AU);
139 AU.setPreservesAll();
140 }
141
142 bool runOnMachineFunction(MachineFunction &MF) override {
143 AArch64FI = MF.getInfo<AArch64FunctionInfo>();
144 STI = &MF.getSubtarget<AArch64Subtarget>();
145
146 SetupMachineFunction(MF);
147
148 if (STI->isTargetCOFF()) {
149 bool Local = MF.getFunction().hasLocalLinkage();
150 COFF::SymbolStorageClass Scl =
151 Local ? COFF::IMAGE_SYM_CLASS_STATIC : COFF::IMAGE_SYM_CLASS_EXTERNAL;
152 int Type =
153 COFF::IMAGE_SYM_DTYPE_FUNCTION << COFF::SCT_COMPLEX_TYPE_SHIFT;
154
155 OutStreamer->beginCOFFSymbolDef(CurrentFnSym);
156 OutStreamer->emitCOFFSymbolStorageClass(Scl);
157 OutStreamer->emitCOFFSymbolType(Type);
158 OutStreamer->endCOFFSymbolDef();
159 }
160
161 // Emit the rest of the function body.
162 emitFunctionBody();
163
164 // Emit the XRay table for this function.
165 emitXRayTable();
166
167 // We didn't modify anything.
168 return false;
169 }
170
171 const MCExpr *lowerConstant(const Constant *CV) override;
172
173private:
174 void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
175 bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
176 bool printAsmRegInClass(const MachineOperand &MO,
177 const TargetRegisterClass *RC, unsigned AltName,
178 raw_ostream &O);
179
180 bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
181 const char *ExtraCode, raw_ostream &O) override;
182 bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
183 const char *ExtraCode, raw_ostream &O) override;
184
185 void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
186
187 void emitFunctionBodyEnd() override;
188
189 MCSymbol *GetCPISymbol(unsigned CPID) const override;
190 void emitEndOfAsmFile(Module &M) override;
191
192 AArch64FunctionInfo *AArch64FI = nullptr;
193
194 /// Emit the LOHs contained in AArch64FI.
195 void emitLOHs();
196
197 /// Emit instruction to set float register to zero.
198 void emitFMov0(const MachineInstr &MI);
199
200 using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;
201
202 MInstToMCSymbol LOHInstToLabel;
203
204 bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const override {
205 return ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags;
206 }
207
208 const MCSubtargetInfo *getIFuncMCSubtargetInfo() const override {
209 assert(STI);
210 return STI;
211 }
212 void emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
213 MCSymbol *LazyPointer) override;
214 void emitMachOIFuncStubHelperBody(Module &M, const GlobalIFunc &GI,
215 MCSymbol *LazyPointer) override;
216};
217
218} // end anonymous namespace
219
220void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
221 const Triple &TT = TM.getTargetTriple();
222
223 if (TT.isOSBinFormatCOFF()) {
224 // Emit an absolute @feat.00 symbol
225 MCSymbol *S = MMI->getContext().getOrCreateSymbol(StringRef("@feat.00"));
226 OutStreamer->beginCOFFSymbolDef(S);
227 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_STATIC);
228 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_NULL);
229 OutStreamer->endCOFFSymbolDef();
230 int64_t Feat00Value = 0;
231
232 if (M.getModuleFlag("cfguard")) {
233 // Object is CFG-aware.
234 Feat00Value |= COFF::Feat00Flags::GuardCF;
235 }
236
237 if (M.getModuleFlag("ehcontguard")) {
238 // Object also has EHCont.
239 Feat00Value |= COFF::Feat00Flags::GuardEHCont;
240 }
241
242 if (M.getModuleFlag("ms-kernel")) {
243 // Object is compiled with /kernel.
244 Feat00Value |= COFF::Feat00Flags::Kernel;
245 }
246
247 OutStreamer->emitSymbolAttribute(S, MCSA_Global);
248 OutStreamer->emitAssignment(
249 S, MCConstantExpr::create(Feat00Value, MMI->getContext()));
250 }
251
252 if (!TT.isOSBinFormatELF())
253 return;
254
255 // Assemble feature flags that may require creation of a note section.
256 unsigned Flags = 0;
257 if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
258 M.getModuleFlag("branch-target-enforcement")))
259 if (BTE->getZExtValue())
260 Flags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
261
262 if (const auto *GCS = mdconst::extract_or_null<ConstantInt>(
263 M.getModuleFlag("guarded-control-stack")))
264 if (GCS->getZExtValue())
265 Flags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_GCS;
266
267 if (const auto *Sign = mdconst::extract_or_null<ConstantInt>(
268 M.getModuleFlag("sign-return-address")))
269 if (Sign->getZExtValue())
270 Flags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_PAC;
271
272 uint64_t PAuthABIPlatform = -1;
273 if (const auto *PAP = mdconst::extract_or_null<ConstantInt>(
274 M.getModuleFlag("aarch64-elf-pauthabi-platform")))
275 PAuthABIPlatform = PAP->getZExtValue();
276 uint64_t PAuthABIVersion = -1;
277 if (const auto *PAV = mdconst::extract_or_null<ConstantInt>(
278 M.getModuleFlag("aarch64-elf-pauthabi-version")))
279 PAuthABIVersion = PAV->getZExtValue();
280
281 // Emit a .note.gnu.property section with the flags.
282 auto *TS =
283 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
284 TS->emitNoteSection(Flags, PAuthABIPlatform, PAuthABIVersion);
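// Illustrative note (not part of the upstream source): for a module that sets
// both "branch-target-enforcement" and "sign-return-address", Flags would be
// ELF::GNU_PROPERTY_AARCH64_FEATURE_1_BTI | ELF::GNU_PROPERTY_AARCH64_FEATURE_1_PAC,
// so the emitted .note.gnu.property carries a GNU_PROPERTY_AARCH64_FEATURE_1_AND
// property with those two bits set; the PAuth ABI platform/version values are
// passed through unchanged.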
285}
286
287void AArch64AsmPrinter::emitFunctionHeaderComment() {
288 const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
289 std::optional<std::string> OutlinerString = FI->getOutliningStyle();
290 if (OutlinerString != std::nullopt)
291 OutStreamer->getCommentOS() << ' ' << OutlinerString;
292}
293
294void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI)
295{
296 const Function &F = MF->getFunction();
297 if (F.hasFnAttribute("patchable-function-entry")) {
298 unsigned Num;
299 if (F.getFnAttribute("patchable-function-entry")
300 .getValueAsString()
301 .getAsInteger(10, Num))
302 return;
303 emitNops(Num);
304 return;
305 }
306
307 emitSled(MI, SledKind::FUNCTION_ENTER);
308}
309
310void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI) {
311 emitSled(MI, SledKind::FUNCTION_EXIT);
312}
313
314void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI) {
315 emitSled(MI, SledKind::TAIL_CALL);
316}
317
318void AArch64AsmPrinter::emitSled(const MachineInstr &MI, SledKind Kind) {
319 static const int8_t NoopsInSledCount = 7;
320 // We want to emit the following pattern:
321 //
322 // .Lxray_sled_N:
323 // ALIGN
324 // B #32
325 // ; 7 NOP instructions (28 bytes)
326 // .tmpN
327 //
328 // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
329 // over the full 32 bytes (8 instructions) with the following pattern:
330 //
331 // STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
332 // LDR W17, #12 ; W17 := function ID
333 // LDR X16,#12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
334 // BLR X16 ; call the tracing trampoline
335 // ;DATA: 32 bits of function ID
336 // ;DATA: lower 32 bits of the address of the trampoline
337 // ;DATA: higher 32 bits of the address of the trampoline
338 // LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
339 //
340 OutStreamer->emitCodeAlignment(Align(4), &getSubtargetInfo());
341 auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
342 OutStreamer->emitLabel(CurSled);
343 auto Target = OutContext.createTempSymbol();
344
345 // Emit "B #32" instruction, which jumps over the next 28 bytes.
346 // The operand has to be the number of 4-byte instructions to jump over,
347 // including the current instruction.
348 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));
349
350 for (int8_t I = 0; I < NoopsInSledCount; I++)
351 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
352
353 OutStreamer->emitLabel(Target);
354 recordSled(CurSled, MI, Kind, 2);
355}
356
357// Emit the following code for Intrinsic::{xray_customevent,xray_typedevent}
358// (built-in functions __xray_customevent/__xray_typedevent).
359//
360// .Lxray_event_sled_N:
361// b 1f
362// save x0 and x1 (and also x2 for TYPED_EVENT_CALL)
363// set up x0 and x1 (and also x2 for TYPED_EVENT_CALL)
364// bl __xray_CustomEvent or __xray_TypedEvent
365// restore x0 and x1 (and also x2 for TYPED_EVENT_CALL)
366// 1:
367//
368// There are 6 instructions for EVENT_CALL and 9 for TYPED_EVENT_CALL.
369//
370// Then record a sled of kind CUSTOM_EVENT or TYPED_EVENT.
371// After patching, b .+N will become a nop.
372void AArch64AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
373 bool Typed) {
374 auto &O = *OutStreamer;
375 MCSymbol *CurSled = OutContext.createTempSymbol("xray_sled_", true);
376 O.emitLabel(CurSled);
377 MCInst MovX0Op0 = MCInstBuilder(AArch64::ORRXrs)
378 .addReg(AArch64::X0)
379 .addReg(AArch64::XZR)
380 .addReg(MI.getOperand(0).getReg())
381 .addImm(0);
382 MCInst MovX1Op1 = MCInstBuilder(AArch64::ORRXrs)
383 .addReg(AArch64::X1)
384 .addReg(AArch64::XZR)
385 .addReg(MI.getOperand(1).getReg())
386 .addImm(0);
387 bool MachO = TM.getTargetTriple().isOSBinFormatMachO();
388 const MCExpr *Sym = MCSymbolRefExpr::create(
389 OutContext.getOrCreateSymbol(
390 Twine(MachO ? "_" : "") +
391 (Typed ? "__xray_TypedEvent" : "__xray_CustomEvent")),
392 OutContext);
393 if (Typed) {
394 O.AddComment("Begin XRay typed event");
395 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(9));
396 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
397 .addReg(AArch64::SP)
398 .addReg(AArch64::X0)
399 .addReg(AArch64::X1)
400 .addReg(AArch64::SP)
401 .addImm(-4));
402 EmitToStreamer(O, MCInstBuilder(AArch64::STRXui)
403 .addReg(AArch64::X2)
404 .addReg(AArch64::SP)
405 .addImm(2));
406 EmitToStreamer(O, MovX0Op0);
407 EmitToStreamer(O, MovX1Op1);
408 EmitToStreamer(O, MCInstBuilder(AArch64::ORRXrs)
409 .addReg(AArch64::X2)
410 .addReg(AArch64::XZR)
411 .addReg(MI.getOperand(2).getReg())
412 .addImm(0));
413 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
414 EmitToStreamer(O, MCInstBuilder(AArch64::LDRXui)
415 .addReg(AArch64::X2)
416 .addReg(AArch64::SP)
417 .addImm(2));
418 O.AddComment("End XRay typed event");
419 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
420 .addReg(AArch64::SP)
421 .addReg(AArch64::X0)
422 .addReg(AArch64::X1)
423 .addReg(AArch64::SP)
424 .addImm(4));
425
426 recordSled(CurSled, MI, SledKind::TYPED_EVENT, 2);
427 } else {
428 O.AddComment("Begin XRay custom event");
429 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(6));
430 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
431 .addReg(AArch64::SP)
432 .addReg(AArch64::X0)
433 .addReg(AArch64::X1)
434 .addReg(AArch64::SP)
435 .addImm(-2));
436 EmitToStreamer(O, MovX0Op0);
437 EmitToStreamer(O, MovX1Op1);
438 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
439 O.AddComment("End XRay custom event");
440 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
441 .addReg(AArch64::SP)
442 .addReg(AArch64::X0)
443 .addReg(AArch64::X1)
444 .addReg(AArch64::SP)
445 .addImm(2));
446
447 recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 2);
448 }
449}
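// Rough shape of the custom-event sled emitted above (illustrative; mirrors
// the comment before this function, with <op0>/<op1> standing for whatever
// registers operands 0 and 1 of the pseudo hold):
//
//   .Lxray_sled_N:
//     b    #24                  ; skips the 5 instructions below until patched
//     stp  x0, x1, [sp, #-16]!
//     mov  x0, <op0>
//     mov  x1, <op1>
//     bl   __xray_CustomEvent   ; "___xray_CustomEvent" on Mach-O
//     ldp  x0, x1, [sp], #16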
450
451void AArch64AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
452 Register AddrReg = MI.getOperand(0).getReg();
453 assert(std::next(MI.getIterator())->isCall() &&
454 "KCFI_CHECK not followed by a call instruction");
455 assert(std::next(MI.getIterator())->getOperand(0).getReg() == AddrReg &&
456 "KCFI_CHECK call target doesn't match call operand");
457
458 // Default to using the intra-procedure-call temporary registers for
459 // comparing the hashes.
460 unsigned ScratchRegs[] = {AArch64::W16, AArch64::W17};
461 if (AddrReg == AArch64::XZR) {
462 // Checking XZR makes no sense. Instead of emitting a load, zero
463 // ScratchRegs[0] and use it for the ESR AddrIndex below.
464 AddrReg = getXRegFromWReg(ScratchRegs[0]);
465 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ORRXrs)
466 .addReg(AddrReg)
467 .addReg(AArch64::XZR)
468 .addReg(AArch64::XZR)
469 .addImm(0));
470 } else {
471 // If one of the scratch registers is used for the call target (e.g.
472 // with AArch64::TCRETURNriBTI), we can clobber another caller-saved
473 // temporary register instead (in this case, AArch64::W9) as the check
474 // is immediately followed by the call instruction.
475 for (auto &Reg : ScratchRegs) {
476 if (Reg == getWRegFromXReg(AddrReg)) {
477 Reg = AArch64::W9;
478 break;
479 }
480 }
481 assert(ScratchRegs[0] != AddrReg && ScratchRegs[1] != AddrReg &&
482 "Invalid scratch registers for KCFI_CHECK");
483
484 // Adjust the offset for patchable-function-prefix. This assumes that
485 // patchable-function-prefix is the same for all functions.
486 int64_t PrefixNops = 0;
487 (void)MI.getMF()
488 ->getFunction()
489 .getFnAttribute("patchable-function-prefix")
490 .getValueAsString()
491 .getAsInteger(10, PrefixNops);
492
493 // Load the target function type hash.
494 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDURWi)
495 .addReg(ScratchRegs[0])
496 .addReg(AddrReg)
497 .addImm(-(PrefixNops * 4 + 4)));
498 }
499
500 // Load the expected type hash.
501 const int64_t Type = MI.getOperand(1).getImm();
502 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::MOVKWi)
503 .addReg(ScratchRegs[1])
504 .addReg(ScratchRegs[1])
505 .addImm(Type & 0xFFFF)
506 .addImm(0));
507 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::MOVKWi)
508 .addReg(ScratchRegs[1])
509 .addReg(ScratchRegs[1])
510 .addImm((Type >> 16) & 0xFFFF)
511 .addImm(16));
512
513 // Compare the hashes and trap if there's a mismatch.
514 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSWrs)
515 .addReg(AArch64::WZR)
516 .addReg(ScratchRegs[0])
517 .addReg(ScratchRegs[1])
518 .addImm(0));
519
520 MCSymbol *Pass = OutContext.createTempSymbol();
521 EmitToStreamer(*OutStreamer,
522 MCInstBuilder(AArch64::Bcc)
523 .addImm(AArch64CC::EQ)
524 .addExpr(MCSymbolRefExpr::create(Pass, OutContext)));
525
526 // The base ESR is 0x8000 and the register information is encoded in bits
527 // 0-9 as follows:
528 // - 0-4: n, where the register Xn contains the target address
529 // - 5-9: m, where the register Wm contains the expected type hash
530 // Where n, m are in [0, 30].
531 unsigned TypeIndex = ScratchRegs[1] - AArch64::W0;
532 unsigned AddrIndex;
533 switch (AddrReg) {
534 default:
535 AddrIndex = AddrReg - AArch64::X0;
536 break;
537 case AArch64::FP:
538 AddrIndex = 29;
539 break;
540 case AArch64::LR:
541 AddrIndex = 30;
542 break;
543 }
544
545 assert(AddrIndex < 31 && TypeIndex < 31);
546
547 unsigned ESR = 0x8000 | ((TypeIndex & 31) << 5) | (AddrIndex & 31);
548 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BRK).addImm(ESR));
549 OutStreamer->emitLabel(Pass);
550}
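// Hedged sketch of the sequence LowerKCFI_CHECK produces (the register and the
// BRK immediate below are an assumed example, not taken from the source): for a
// call through x3, expected hash Type, and no prefix nops, roughly
//
//   ldur w16, [x3, #-4]                          ; hash stored before the callee
//   movk w17, #(Type & 0xffff)
//   movk w17, #((Type >> 16) & 0xffff), lsl #16
//   cmp  w16, w17
//   b.eq .Lpass
//   brk  #0x8223                                 ; 0x8000 | (17 << 5) | 3
// .Lpass: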
551
552void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
553 Register Reg = MI.getOperand(0).getReg();
554 bool IsShort =
555 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES) ||
556 (MI.getOpcode() ==
557 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
558 uint32_t AccessInfo = MI.getOperand(1).getImm();
559 bool IsFixedShadow =
560 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW) ||
561 (MI.getOpcode() ==
562 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
563 uint64_t FixedShadowOffset = IsFixedShadow ? MI.getOperand(2).getImm() : 0;
564
565 MCSymbol *&Sym = HwasanMemaccessSymbols[HwasanMemaccessTuple(
566 Reg, IsShort, AccessInfo, IsFixedShadow, FixedShadowOffset)];
567 if (!Sym) {
568 // FIXME: Make this work on non-ELF.
569 if (!TM.getTargetTriple().isOSBinFormatELF())
570 report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");
571
572 std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
573 utostr(AccessInfo);
574 if (IsFixedShadow)
575 SymName += "_fixed_" + utostr(FixedShadowOffset);
576 if (IsShort)
577 SymName += "_short_v2";
578 Sym = OutContext.getOrCreateSymbol(SymName);
579 }
580
581 EmitToStreamer(*OutStreamer,
582 MCInstBuilder(AArch64::BL)
583 .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
584}
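// For illustration (assumed values, not from the source): a short-granules
// check of X1 with an AccessInfo of 18 branches to a weak, hidden helper named
// "__hwasan_check_x1_18_short_v2", which emitHwasanMemaccessSymbols() below
// emits once per distinct (register, access-info, shadow) tuple.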
585
586void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
587 if (HwasanMemaccessSymbols.empty())
588 return;
589
590 const Triple &TT = TM.getTargetTriple();
591 assert(TT.isOSBinFormatELF());
592 std::unique_ptr<MCSubtargetInfo> STI(
593 TM.getTarget().createMCSubtargetInfo(TT.str(), "", ""));
594 assert(STI && "Unable to create subtarget info");
595
596 MCSymbol *HwasanTagMismatchV1Sym =
597 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");
598 MCSymbol *HwasanTagMismatchV2Sym =
599 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");
600
601 const MCSymbolRefExpr *HwasanTagMismatchV1Ref =
602 MCSymbolRefExpr::create(HwasanTagMismatchV1Sym, OutContext);
603 const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
604 MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);
605
606 for (auto &P : HwasanMemaccessSymbols) {
607 unsigned Reg = std::get<0>(P.first);
608 bool IsShort = std::get<1>(P.first);
609 uint32_t AccessInfo = std::get<2>(P.first);
610 bool IsFixedShadow = std::get<3>(P.first);
611 uint64_t FixedShadowOffset = std::get<4>(P.first);
612 const MCSymbolRefExpr *HwasanTagMismatchRef =
613 IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
614 MCSymbol *Sym = P.second;
615
616 bool HasMatchAllTag =
617 (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
618 uint8_t MatchAllTag =
619 (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
620 unsigned Size =
621 1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
622 bool CompileKernel =
623 (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;
624
625 OutStreamer->switchSection(OutContext.getELFSection(
626 ".text.hot", ELF::SHT_PROGBITS,
627 ELF::SHF_EXECINSTR | ELF::SHF_ALLOC | ELF::SHF_GROUP, 0, Sym->getName(),
628 /*IsComdat=*/true));
629
630 OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
631 OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
632 OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
633 OutStreamer->emitLabel(Sym);
634
635 OutStreamer->emitInstruction(MCInstBuilder(AArch64::SBFMXri)
636 .addReg(AArch64::X16)
637 .addReg(Reg)
638 .addImm(4)
639 .addImm(55),
640 *STI);
641
642 if (IsFixedShadow) {
643 // AArch64 makes it difficult to embed large constants in the code.
644 // Fortuitously, kShadowBaseAlignment == 32, so we use the 32-bit
645 // left-shift option in the MOV instruction. Combined with the 16-bit
646 // immediate, this is enough to represent any offset up to 2**48.
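// Worked example (illustrative): a fixed shadow base of 0x300000000 gives
// FixedShadowOffset >> 32 == 0x3, so the pair below assembles to roughly
// "movz x17, #0x3, lsl #32" followed by "ldrb w16, [x17, x16]".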
647 OutStreamer->emitInstruction(MCInstBuilder(AArch64::MOVZXi)
648 .addReg(AArch64::X17)
649 .addImm(FixedShadowOffset >> 32)
650 .addImm(32),
651 *STI);
652 OutStreamer->emitInstruction(MCInstBuilder(AArch64::LDRBBroX)
653 .addReg(AArch64::W16)
654 .addReg(AArch64::X17)
655 .addReg(AArch64::X16)
656 .addImm(0)
657 .addImm(0),
658 *STI);
659 } else {
660 OutStreamer->emitInstruction(
661 MCInstBuilder(AArch64::LDRBBroX)
662 .addReg(AArch64::W16)
663 .addReg(IsShort ? AArch64::X20 : AArch64::X9)
664 .addReg(AArch64::X16)
665 .addImm(0)
666 .addImm(0),
667 *STI);
668 }
669
670 OutStreamer->emitInstruction(
671 MCInstBuilder(AArch64::SUBSXrs)
672 .addReg(AArch64::XZR)
673 .addReg(AArch64::X16)
674 .addReg(Reg)
675 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 56)),
676 *STI);
677 MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
678 OutStreamer->emitInstruction(
679 MCInstBuilder(AArch64::Bcc)
680 .addImm(AArch64CC::NE)
681 .addExpr(MCSymbolRefExpr::create(HandleMismatchOrPartialSym,
682 OutContext)),
683 *STI);
684 MCSymbol *ReturnSym = OutContext.createTempSymbol();
685 OutStreamer->emitLabel(ReturnSym);
686 OutStreamer->emitInstruction(
687 MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI);
688 OutStreamer->emitLabel(HandleMismatchOrPartialSym);
689
690 if (HasMatchAllTag) {
691 OutStreamer->emitInstruction(MCInstBuilder(AArch64::UBFMXri)
692 .addReg(AArch64::X17)
693 .addReg(Reg)
694 .addImm(56)
695 .addImm(63),
696 *STI);
697 OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSXri)
698 .addReg(AArch64::XZR)
699 .addReg(AArch64::X17)
700 .addImm(MatchAllTag)
701 .addImm(0),
702 *STI);
703 OutStreamer->emitInstruction(
704 MCInstBuilder(AArch64::Bcc)
705 .addImm(AArch64CC::EQ)
706 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)),
707 *STI);
708 }
709
710 if (IsShort) {
711 OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSWri)
712 .addReg(AArch64::WZR)
713 .addReg(AArch64::W16)
714 .addImm(15)
715 .addImm(0),
716 *STI);
717 MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
718 OutStreamer->emitInstruction(
719 MCInstBuilder(AArch64::Bcc)
720 .addImm(AArch64CC::HI)
721 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
722 *STI);
723
724 OutStreamer->emitInstruction(
725 MCInstBuilder(AArch64::ANDXri)
726 .addReg(AArch64::X17)
727 .addReg(Reg)
728 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)),
729 *STI);
730 if (Size != 1)
731 OutStreamer->emitInstruction(MCInstBuilder(AArch64::ADDXri)
732 .addReg(AArch64::X17)
733 .addReg(AArch64::X17)
734 .addImm(Size - 1)
735 .addImm(0),
736 *STI);
737 OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSWrs)
738 .addReg(AArch64::WZR)
739 .addReg(AArch64::W16)
740 .addReg(AArch64::W17)
741 .addImm(0),
742 *STI);
743 OutStreamer->emitInstruction(
744 MCInstBuilder(AArch64::Bcc)
745 .addImm(AArch64CC::LS)
746 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
747 *STI);
748
749 OutStreamer->emitInstruction(
750 MCInstBuilder(AArch64::ORRXri)
751 .addReg(AArch64::X16)
752 .addReg(Reg)
753 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)),
754 *STI);
755 OutStreamer->emitInstruction(MCInstBuilder(AArch64::LDRBBui)
756 .addReg(AArch64::W16)
757 .addReg(AArch64::X16)
758 .addImm(0),
759 *STI);
760 OutStreamer->emitInstruction(
761 MCInstBuilder(AArch64::SUBSXrs)
762 .addReg(AArch64::XZR)
763 .addReg(AArch64::X16)
764 .addReg(Reg)
765 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 56)),
766 *STI);
767 OutStreamer->emitInstruction(
768 MCInstBuilder(AArch64::Bcc)
769 .addImm(AArch64CC::EQ)
770 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)),
771 *STI);
772
773 OutStreamer->emitLabel(HandleMismatchSym);
774 }
775
776 OutStreamer->emitInstruction(MCInstBuilder(AArch64::STPXpre)
777 .addReg(AArch64::SP)
778 .addReg(AArch64::X0)
779 .addReg(AArch64::X1)
780 .addReg(AArch64::SP)
781 .addImm(-32),
782 *STI);
783 OutStreamer->emitInstruction(MCInstBuilder(AArch64::STPXi)
784 .addReg(AArch64::FP)
785 .addReg(AArch64::LR)
786 .addReg(AArch64::SP)
787 .addImm(29),
788 *STI);
789
790 if (Reg != AArch64::X0)
791 OutStreamer->emitInstruction(MCInstBuilder(AArch64::ORRXrs)
792 .addReg(AArch64::X0)
793 .addReg(AArch64::XZR)
794 .addReg(Reg)
795 .addImm(0),
796 *STI);
797 OutStreamer->emitInstruction(
798 MCInstBuilder(AArch64::MOVZXi)
799 .addReg(AArch64::X1)
800 .addImm(AccessInfo & HWASanAccessInfo::RuntimeMask)
801 .addImm(0),
802 *STI);
803
804 if (CompileKernel) {
805 // The Linux kernel's dynamic loader doesn't support GOT relative
806 // relocations, but it doesn't support late binding either, so just call
807 // the function directly.
808 OutStreamer->emitInstruction(
809 MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef), *STI);
810 } else {
811 // Intentionally load the GOT entry and branch to it, rather than possibly
812 // late binding the function, which may clobber the registers before we
813 // have a chance to save them.
814 OutStreamer->emitInstruction(
815 MCInstBuilder(AArch64::ADRP)
816 .addReg(AArch64::X16)
817 .addExpr(AArch64MCExpr::create(
818 HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_PAGE,
819 OutContext)),
820 *STI);
821 OutStreamer->emitInstruction(
822 MCInstBuilder(AArch64::LDRXui)
823 .addReg(AArch64::X16)
824 .addReg(AArch64::X16)
825 .addExpr(AArch64MCExpr::create(
826 HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_LO12,
827 OutContext)),
828 *STI);
829 OutStreamer->emitInstruction(
830 MCInstBuilder(AArch64::BR).addReg(AArch64::X16), *STI);
831 }
832 }
833}
834
835void AArch64AsmPrinter::emitEndOfAsmFile(Module &M) {
836 emitHwasanMemaccessSymbols(M);
837
838 const Triple &TT = TM.getTargetTriple();
839 if (TT.isOSBinFormatMachO()) {
840 // Funny Darwin hack: This flag tells the linker that no global symbols
841 // contain code that falls through to other global symbols (e.g. the obvious
842 // implementation of multiple entry points). If this doesn't occur, the
843 // linker can safely perform dead code stripping. Since LLVM never
844 // generates code that does this, it is always safe to set.
845 OutStreamer->emitAssemblerFlag(MCAF_SubsectionsViaSymbols);
846 }
847
848 // Emit stack and fault map information.
849 FM.serializeToFaultMapSection();
850
851}
852
853void AArch64AsmPrinter::emitLOHs() {
854 SmallVector<MCSymbol *, 3> MCArgs;
855
856 for (const auto &D : AArch64FI->getLOHContainer()) {
857 for (const MachineInstr *MI : D.getArgs()) {
858 MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
859 assert(LabelIt != LOHInstToLabel.end() &&
860 "Label hasn't been inserted for LOH related instruction");
861 MCArgs.push_back(LabelIt->second);
862 }
863 OutStreamer->emitLOHDirective(D.getKind(), MCArgs);
864 MCArgs.clear();
865 }
866}
867
868void AArch64AsmPrinter::emitFunctionBodyEnd() {
869 if (!AArch64FI->getLOHRelated().empty())
870 emitLOHs();
871}
872
873/// GetCPISymbol - Return the symbol for the specified constant pool entry.
874MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
875 // Darwin uses a linker-private symbol name for constant-pools (to
876 // avoid addends on the relocation?), ELF has no such concept and
877 // uses a normal private symbol.
878 if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
879 return OutContext.getOrCreateSymbol(
880 Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
881 Twine(getFunctionNumber()) + "_" + Twine(CPID));
882
883 return AsmPrinter::GetCPISymbol(CPID);
884}
885
886void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
887 raw_ostream &O) {
888 const MachineOperand &MO = MI->getOperand(OpNum);
889 switch (MO.getType()) {
890 default:
891 llvm_unreachable("<unknown operand type>");
892 case MachineOperand::MO_Register: {
893 Register Reg = MO.getReg();
894 assert(Reg.isPhysical());
895 assert(!MO.getSubReg() && "Subregs should be eliminated!");
896 O << AArch64InstPrinter::getRegisterName(Reg);
897 break;
898 }
899 case MachineOperand::MO_Immediate: {
900 O << MO.getImm();
901 break;
902 }
903 case MachineOperand::MO_GlobalAddress: {
904 PrintSymbolOperand(MO, O);
905 break;
906 }
907 case MachineOperand::MO_BlockAddress: {
908 MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
909 Sym->print(O, MAI);
910 break;
911 }
912 }
913}
914
915bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
916 raw_ostream &O) {
917 Register Reg = MO.getReg();
918 switch (Mode) {
919 default:
920 return true; // Unknown mode.
921 case 'w':
922 Reg = getWRegFromXReg(Reg);
923 break;
924 case 'x':
925 Reg = getXRegFromWReg(Reg);
926 break;
927 case 't':
928 Reg = getXRegFromXRegTuple(Reg);
929 break;
930 }
931
932 O << AArch64InstPrinter::getRegisterName(Reg);
933 return false;
934}
935
936// Prints the register in MO using class RC using the offset in the
937// new register class. This should not be used for cross class
938// printing.
939bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
940 const TargetRegisterClass *RC,
941 unsigned AltName, raw_ostream &O) {
942 assert(MO.isReg() && "Should only get here with a register!");
943 const TargetRegisterInfo *RI = STI->getRegisterInfo();
944 Register Reg = MO.getReg();
945 unsigned RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
946 if (!RI->regsOverlap(RegToPrint, Reg))
947 return true;
948 O << AArch64InstPrinter::getRegisterName(RegToPrint, AltName);
949 return false;
950}
951
952bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
953 const char *ExtraCode, raw_ostream &O) {
954 const MachineOperand &MO = MI->getOperand(OpNum);
955
956 // First try the generic code, which knows about modifiers like 'c' and 'n'.
957 if (!AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O))
958 return false;
959
960 // Does this asm operand have a single letter operand modifier?
961 if (ExtraCode && ExtraCode[0]) {
962 if (ExtraCode[1] != 0)
963 return true; // Unknown modifier.
964
965 switch (ExtraCode[0]) {
966 default:
967 return true; // Unknown modifier.
968 case 'w': // Print W register
969 case 'x': // Print X register
970 if (MO.isReg())
971 return printAsmMRegister(MO, ExtraCode[0], O);
972 if (MO.isImm() && MO.getImm() == 0) {
973 unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
974 O << AArch64InstPrinter::getRegisterName(Reg);
975 return false;
976 }
977 printOperand(MI, OpNum, O);
978 return false;
979 case 'b': // Print B register.
980 case 'h': // Print H register.
981 case 's': // Print S register.
982 case 'd': // Print D register.
983 case 'q': // Print Q register.
984 case 'z': // Print Z register.
985 if (MO.isReg()) {
986 const TargetRegisterClass *RC;
987 switch (ExtraCode[0]) {
988 case 'b':
989 RC = &AArch64::FPR8RegClass;
990 break;
991 case 'h':
992 RC = &AArch64::FPR16RegClass;
993 break;
994 case 's':
995 RC = &AArch64::FPR32RegClass;
996 break;
997 case 'd':
998 RC = &AArch64::FPR64RegClass;
999 break;
1000 case 'q':
1001 RC = &AArch64::FPR128RegClass;
1002 break;
1003 case 'z':
1004 RC = &AArch64::ZPRRegClass;
1005 break;
1006 default:
1007 return true;
1008 }
1009 return printAsmRegInClass(MO, RC, AArch64::NoRegAltName, O);
1010 }
1011 printOperand(MI, OpNum, O);
1012 return false;
1013 }
1014 }
1015
1016 // According to ARM, we should emit x and v registers unless we have a
1017 // modifier.
1018 if (MO.isReg()) {
1019 Register Reg = MO.getReg();
1020
1021 // If this is a w or x register, print an x register.
1022 if (AArch64::GPR32allRegClass.contains(Reg) ||
1023 AArch64::GPR64allRegClass.contains(Reg))
1024 return printAsmMRegister(MO, 'x', O);
1025
1026 // If this is an x register tuple, print an x register.
1027 if (AArch64::GPR64x8ClassRegClass.contains(Reg))
1028 return printAsmMRegister(MO, 't', O);
1029
1030 unsigned AltName = AArch64::NoRegAltName;
1031 const TargetRegisterClass *RegClass;
1032 if (AArch64::ZPRRegClass.contains(Reg)) {
1033 RegClass = &AArch64::ZPRRegClass;
1034 } else if (AArch64::PPRRegClass.contains(Reg)) {
1035 RegClass = &AArch64::PPRRegClass;
1036 } else if (AArch64::PNRRegClass.contains(Reg)) {
1037 RegClass = &AArch64::PNRRegClass;
1038 } else {
1039 RegClass = &AArch64::FPR128RegClass;
1040 AltName = AArch64::vreg;
1041 }
1042
1043 // If this is a b, h, s, d, or q register, print it as a v register.
1044 return printAsmRegInClass(MO, RegClass, AltName, O);
1045 }
1046
1047 printOperand(MI, OpNum, O);
1048 return false;
1049}
1050
1051bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
1052 unsigned OpNum,
1053 const char *ExtraCode,
1054 raw_ostream &O) {
1055 if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
1056 return true; // Unknown modifier.
1057
1058 const MachineOperand &MO = MI->getOperand(OpNum);
1059 assert(MO.isReg() && "unexpected inline asm memory operand");
1060 O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
1061 return false;
1062}
1063
1064void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
1065 raw_ostream &OS) {
1066 unsigned NOps = MI->getNumOperands();
1067 assert(NOps == 4);
1068 OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
1069 // cast away const; DIetc do not take const operands for some reason.
1070 OS << MI->getDebugVariable()->getName();
1071 OS << " <- ";
1072 // Frame address. Currently handles register +- offset only.
1073 assert(MI->isIndirectDebugValue());
1074 OS << '[';
1075 for (unsigned I = 0, E = std::distance(MI->debug_operands().begin(),
1076 MI->debug_operands().end());
1077 I < E; ++I) {
1078 if (I != 0)
1079 OS << ", ";
1080 printOperand(MI, I, OS);
1081 }
1082 OS << ']';
1083 OS << "+";
1084 printOperand(MI, NOps - 2, OS);
1085}
1086
1087void AArch64AsmPrinter::emitJumpTableInfo() {
1088 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
1089 if (!MJTI) return;
1090
1091 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
1092 if (JT.empty()) return;
1093
1094 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
1095 MCSection *ReadOnlySec = TLOF.getSectionForJumpTable(MF->getFunction(), TM);
1096 OutStreamer->switchSection(ReadOnlySec);
1097
1098 auto AFI = MF->getInfo<AArch64FunctionInfo>();
1099 for (unsigned JTI = 0, e = JT.size(); JTI != e; ++JTI) {
1100 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
1101
1102 // If this jump table was deleted, ignore it.
1103 if (JTBBs.empty()) continue;
1104
1105 unsigned Size = AFI->getJumpTableEntrySize(JTI);
1106 emitAlignment(Align(Size));
1107 OutStreamer->emitLabel(GetJTISymbol(JTI));
1108
1109 const MCSymbol *BaseSym = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1110 const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);
1111
1112 for (auto *JTBB : JTBBs) {
1113 const MCExpr *Value =
1114 MCSymbolRefExpr::create(JTBB->getSymbol(), OutContext);
1115
1116 // Each entry is:
1117 // .byte/.hword (LBB - Lbase)>>2
1118 // or plain:
1119 // .word LBB - Lbase
1120 Value = MCBinaryExpr::createSub(Value, Base, OutContext);
1121 if (Size != 4)
1122 Value = MCBinaryExpr::createLShr(
1123 Value, MCConstantExpr::create(2, OutContext), OutContext);
1124
1125 OutStreamer->emitValue(Value, Size);
1126 }
1127 }
1128}
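// Illustrative output for a byte-sized jump table (label names assumed):
//
//   .p2align 0
// .LJTI0_0:
//   .byte (.LBB0_2-.LBB0_1)>>2
//   .byte (.LBB0_3-.LBB0_1)>>2
//
// where .LBB0_1 stands for the PC-relative base symbol recorded via
// getJumpTableEntryPCRelSymbol(); 4-byte entries are plain ".word LBB - Lbase"
// without the shift.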
1129
1130 std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
1131 codeview::JumpTableEntrySize>
1132AArch64AsmPrinter::getCodeViewJumpTableInfo(int JTI,
1133 const MachineInstr *BranchInstr,
1134 const MCSymbol *BranchLabel) const {
1135 const auto AFI = MF->getInfo<AArch64FunctionInfo>();
1136 const auto Base = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1137 codeview::JumpTableEntrySize EntrySize;
1138 switch (AFI->getJumpTableEntrySize(JTI)) {
1139 case 1:
1140 EntrySize = codeview::JumpTableEntrySize::UInt8ShiftLeft;
1141 break;
1142 case 2:
1143 EntrySize = codeview::JumpTableEntrySize::UInt16ShiftLeft;
1144 break;
1145 case 4:
1146 EntrySize = codeview::JumpTableEntrySize::Int32;
1147 break;
1148 default:
1149 llvm_unreachable("Unexpected jump table entry size");
1150 }
1151 return std::make_tuple(Base, 0, BranchLabel, EntrySize);
1152}
1153
1154void AArch64AsmPrinter::emitFunctionEntryLabel() {
1155 if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
1156 MF->getFunction().getCallingConv() ==
1157 CallingConv::AArch64_SVE_VectorCall ||
1158 MF->getInfo<AArch64FunctionInfo>()->isSVECC()) {
1159 auto *TS =
1160 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
1161 TS->emitDirectiveVariantPCS(CurrentFnSym);
1162 }
1163
1164 AsmPrinter::emitFunctionEntryLabel();
1165
1166 if (TM.getTargetTriple().isWindowsArm64EC() &&
1167 !MF->getFunction().hasLocalLinkage()) {
1168 // For ARM64EC targets, a function definition's name is mangled differently
1169 // from the normal symbol, emit required aliases here.
1170 auto emitFunctionAlias = [&](MCSymbol *Src, MCSymbol *Dst) {
1171 OutStreamer->emitSymbolAttribute(Src, MCSA_WeakAntiDep);
1172 OutStreamer->emitAssignment(
1173 Src, MCSymbolRefExpr::create(Dst, MCSymbolRefExpr::VK_WEAKREF,
1174 MMI->getContext()));
1175 };
1176
1177 auto getSymbolFromMetadata = [&](StringRef Name) {
1178 MCSymbol *Sym = nullptr;
1179 if (MDNode *Node = MF->getFunction().getMetadata(Name)) {
1180 StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
1181 Sym = MMI->getContext().getOrCreateSymbol(NameStr);
1182 }
1183 return Sym;
1184 };
1185
1186 if (MCSymbol *UnmangledSym =
1187 getSymbolFromMetadata("arm64ec_unmangled_name")) {
1188 MCSymbol *ECMangledSym = getSymbolFromMetadata("arm64ec_ecmangled_name");
1189
1190 if (ECMangledSym) {
1191 // An external function, emit the alias from the unmangled symbol to
1192 // mangled symbol name and the alias from the mangled symbol to guest
1193 // exit thunk.
1194 emitFunctionAlias(UnmangledSym, ECMangledSym);
1195 emitFunctionAlias(ECMangledSym, CurrentFnSym);
1196 } else {
1197 // A function implementation, emit the alias from the unmangled symbol
1198 // to mangled symbol name.
1199 emitFunctionAlias(UnmangledSym, CurrentFnSym);
1200 }
1201 }
1202 }
1203}
1204
1205/// Small jump tables contain an unsigned byte or half, representing the offset
1206/// from the lowest-addressed possible destination to the desired basic
1207/// block. Since all instructions are 4-byte aligned, this is further compressed
1208/// by counting in instructions rather than bytes (i.e. divided by 4). So, to
1209/// materialize the correct destination we need:
1210///
1211/// adr xDest, .LBB0_0
1212/// ldrb wScratch, [xTable, xEntry] (with "lsl #1" for ldrh).
1213/// add xDest, xDest, xScratch (with "lsl #2" for smaller entries)
1214void AArch64AsmPrinter::LowerJumpTableDest(llvm::MCStreamer &OutStreamer,
1215 const llvm::MachineInstr &MI) {
1216 Register DestReg = MI.getOperand(0).getReg();
1217 Register ScratchReg = MI.getOperand(1).getReg();
1218 Register ScratchRegW =
1219 STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
1220 Register TableReg = MI.getOperand(2).getReg();
1221 Register EntryReg = MI.getOperand(3).getReg();
1222 int JTIdx = MI.getOperand(4).getIndex();
1223 int Size = AArch64FI->getJumpTableEntrySize(JTIdx);
1224
1225 // This has to be first because the compression pass bases its reachability
1226 // calculations on the start of the JumpTableDest instruction.
1227 auto Label =
1228 MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);
1229
1230 // If we don't already have a symbol to use as the base, use the ADR
1231 // instruction itself.
1232 if (!Label) {
1233 Label = MF->getContext().createTempSymbol();
1234 AArch64FI->setJumpTableEntryInfo(JTIdx, Size, Label);
1235 OutStreamer.emitLabel(Label);
1236 }
1237
1238 auto LabelExpr = MCSymbolRefExpr::create(Label, MF->getContext());
1239 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
1240 .addReg(DestReg)
1241 .addExpr(LabelExpr));
1242
1243 // Load the number of instruction-steps to offset from the label.
1244 unsigned LdrOpcode;
1245 switch (Size) {
1246 case 1: LdrOpcode = AArch64::LDRBBroX; break;
1247 case 2: LdrOpcode = AArch64::LDRHHroX; break;
1248 case 4: LdrOpcode = AArch64::LDRSWroX; break;
1249 default:
1250 llvm_unreachable("Unknown jump table size");
1251 }
1252
1253 EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
1254 .addReg(Size == 4 ? ScratchReg : ScratchRegW)
1255 .addReg(TableReg)
1256 .addReg(EntryReg)
1257 .addImm(0)
1258 .addImm(Size == 1 ? 0 : 1));
1259
1260 // Add to the already materialized base label address, multiplying by 4 if
1261 // compressed.
1262 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1263 .addReg(DestReg)
1264 .addReg(DestReg)
1265 .addReg(ScratchReg)
1266 .addImm(Size == 4 ? 0 : 2));
1267}
1268
1269void AArch64AsmPrinter::LowerMOPS(llvm::MCStreamer &OutStreamer,
1270 const llvm::MachineInstr &MI) {
1271 unsigned Opcode = MI.getOpcode();
1272 assert(STI->hasMOPS());
1273 assert(STI->hasMTE() || Opcode != AArch64::MOPSMemorySetTaggingPseudo);
1274
1275 const auto Ops = [Opcode]() -> std::array<unsigned, 3> {
1276 if (Opcode == AArch64::MOPSMemoryCopyPseudo)
1277 return {AArch64::CPYFP, AArch64::CPYFM, AArch64::CPYFE};
1278 if (Opcode == AArch64::MOPSMemoryMovePseudo)
1279 return {AArch64::CPYP, AArch64::CPYM, AArch64::CPYE};
1280 if (Opcode == AArch64::MOPSMemorySetPseudo)
1281 return {AArch64::SETP, AArch64::SETM, AArch64::SETE};
1282 if (Opcode == AArch64::MOPSMemorySetTaggingPseudo)
1283 return {AArch64::SETGP, AArch64::SETGM, AArch64::MOPSSETGE};
1284 llvm_unreachable("Unhandled memory operation pseudo");
1285 }();
1286 const bool IsSet = Opcode == AArch64::MOPSMemorySetPseudo ||
1287 Opcode == AArch64::MOPSMemorySetTaggingPseudo;
1288
1289 for (auto Op : Ops) {
1290 int i = 0;
1291 auto MCIB = MCInstBuilder(Op);
1292 // Destination registers
1293 MCIB.addReg(MI.getOperand(i++).getReg());
1294 MCIB.addReg(MI.getOperand(i++).getReg());
1295 if (!IsSet)
1296 MCIB.addReg(MI.getOperand(i++).getReg());
1297 // Input registers
1298 MCIB.addReg(MI.getOperand(i++).getReg());
1299 MCIB.addReg(MI.getOperand(i++).getReg());
1300 MCIB.addReg(MI.getOperand(i++).getReg());
1301
1302 EmitToStreamer(OutStreamer, MCIB);
1303 }
1304}
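// Sketch (assumed operands, not from the source): MOPSMemoryCopyPseudo with
// destination x0, source x1 and size x2 expands to the three-instruction MOPS
// sequence
//
//   cpyfp [x0]!, [x1]!, x2!
//   cpyfm [x0]!, [x1]!, x2!
//   cpyfe [x0]!, [x1]!, x2!
//
// while the set pseudos expand to setp/setm/sete (setgp/setgm/setge when MTE
// tagging is requested).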
1305
1306void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
1307 const MachineInstr &MI) {
1308 unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();
1309
1310 auto &Ctx = OutStreamer.getContext();
1311 MCSymbol *MILabel = Ctx.createTempSymbol();
1312 OutStreamer.emitLabel(MILabel);
1313
1314 SM.recordStackMap(*MILabel, MI);
1315 assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1316
1317 // Scan ahead to trim the shadow.
1318 const MachineBasicBlock &MBB = *MI.getParent();
1319 MachineBasicBlock::const_iterator MII(MI);
1320 ++MII;
1321 while (NumNOPBytes > 0) {
1322 if (MII == MBB.end() || MII->isCall() ||
1323 MII->getOpcode() == AArch64::DBG_VALUE ||
1324 MII->getOpcode() == TargetOpcode::PATCHPOINT ||
1325 MII->getOpcode() == TargetOpcode::STACKMAP)
1326 break;
1327 ++MII;
1328 NumNOPBytes -= 4;
1329 }
1330
1331 // Emit nops.
1332 for (unsigned i = 0; i < NumNOPBytes; i += 4)
1333 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1334}
1335
1336// Lower a patchpoint of the form:
1337// [<def>], <id>, <numBytes>, <target>, <numArgs>
1338void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1339 const MachineInstr &MI) {
1340 auto &Ctx = OutStreamer.getContext();
1341 MCSymbol *MILabel = Ctx.createTempSymbol();
1342 OutStreamer.emitLabel(MILabel);
1343 SM.recordPatchPoint(*MILabel, MI);
1344
1345 PatchPointOpers Opers(&MI);
1346
1347 int64_t CallTarget = Opers.getCallTarget().getImm();
1348 unsigned EncodedBytes = 0;
1349 if (CallTarget) {
1350 assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
1351 "High 16 bits of call target should be zero.");
1352 Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
1353 EncodedBytes = 16;
1354 // Materialize the jump address:
1355 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVZXi)
1356 .addReg(ScratchReg)
1357 .addImm((CallTarget >> 32) & 0xFFFF)
1358 .addImm(32));
1359 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKXi)
1360 .addReg(ScratchReg)
1361 .addReg(ScratchReg)
1362 .addImm((CallTarget >> 16) & 0xFFFF)
1363 .addImm(16));
1364 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKXi)
1365 .addReg(ScratchReg)
1366 .addReg(ScratchReg)
1367 .addImm(CallTarget & 0xFFFF)
1368 .addImm(0));
1369 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
1370 }
1371 // Emit padding.
1372 unsigned NumBytes = Opers.getNumPatchBytes();
1373 assert(NumBytes >= EncodedBytes &&
1374 "Patchpoint can't request size less than the length of a call.");
1375 assert((NumBytes - EncodedBytes) % 4 == 0 &&
1376 "Invalid number of NOP bytes requested!");
1377 for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
1378 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1379}
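// Hedged example: a patchpoint with a non-zero call target therefore occupies
// EncodedBytes == 16 bytes of real code plus nop padding, roughly
//
//   movz x16, #<target[47:32]>, lsl #32
//   movk x16, #<target[31:16]>, lsl #16
//   movk x16, #<target[15:0]>
//   blr  x16
//   nop                         ; repeated until NumBytes is reached
//
// (x16 stands in for whichever scratch register the patchpoint provides).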
1380
1381void AArch64AsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1382 const MachineInstr &MI) {
1383 StatepointOpers SOpers(&MI);
1384 if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
1385 assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1386 for (unsigned i = 0; i < PatchBytes; i += 4)
1387 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1388 } else {
1389 // Lower call target and choose correct opcode
1390 const MachineOperand &CallTarget = SOpers.getCallTarget();
1391 MCOperand CallTargetMCOp;
1392 unsigned CallOpcode;
1393 switch (CallTarget.getType()) {
1394 case MachineOperand::MO_GlobalAddress:
1395 case MachineOperand::MO_ExternalSymbol:
1396 MCInstLowering.lowerOperand(CallTarget, CallTargetMCOp);
1397 CallOpcode = AArch64::BL;
1398 break;
1399 case MachineOperand::MO_Immediate:
1400 CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
1401 CallOpcode = AArch64::BL;
1402 break;
1403 case MachineOperand::MO_Register:
1404 CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
1405 CallOpcode = AArch64::BLR;
1406 break;
1407 default:
1408 llvm_unreachable("Unsupported operand type in statepoint call target");
1409 break;
1410 }
1411
1412 EmitToStreamer(OutStreamer,
1413 MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
1414 }
1415
1416 auto &Ctx = OutStreamer.getContext();
1417 MCSymbol *MILabel = Ctx.createTempSymbol();
1418 OutStreamer.emitLabel(MILabel);
1419 SM.recordStatepoint(*MILabel, MI);
1420}
1421
1422void AArch64AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI) {
1423 // FAULTING_LOAD_OP <def>, <faulting type>, <MBB handler>,
1424 //                  <opcode>, <operands>
1425
1426 Register DefRegister = FaultingMI.getOperand(0).getReg();
1427 FaultMaps::FaultKind FK =
1428 static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
1429 MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
1430 unsigned Opcode = FaultingMI.getOperand(3).getImm();
1431 unsigned OperandsBeginIdx = 4;
1432
1433 auto &Ctx = OutStreamer->getContext();
1434 MCSymbol *FaultingLabel = Ctx.createTempSymbol();
1435 OutStreamer->emitLabel(FaultingLabel);
1436
1437 assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
1438 FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);
1439
1440 MCInst MI;
1441 MI.setOpcode(Opcode);
1442
1443 if (DefRegister != (Register)0)
1444 MI.addOperand(MCOperand::createReg(DefRegister));
1445
1446 for (const MachineOperand &MO :
1447 llvm::drop_begin(FaultingMI.operands(), OperandsBeginIdx)) {
1448 MCOperand Dest;
1449 lowerOperand(MO, Dest);
1450 MI.addOperand(Dest);
1451 }
1452
1453 OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
1454 OutStreamer->emitInstruction(MI, getSubtargetInfo());
1455}
1456
1457void AArch64AsmPrinter::emitFMov0(const MachineInstr &MI) {
1458 Register DestReg = MI.getOperand(0).getReg();
1459 if (STI->hasZeroCycleZeroingFP() && !STI->hasZeroCycleZeroingFPWorkaround() &&
1460 STI->isNeonAvailable()) {
1461 // Convert H/S register to corresponding D register
1462 if (AArch64::H0 <= DestReg && DestReg <= AArch64::H31)
1463 DestReg = AArch64::D0 + (DestReg - AArch64::H0);
1464 else if (AArch64::S0 <= DestReg && DestReg <= AArch64::S31)
1465 DestReg = AArch64::D0 + (DestReg - AArch64::S0);
1466 else
1467 assert(AArch64::D0 <= DestReg && DestReg <= AArch64::D31);
1468
1469 MCInst MOVI;
1470 MOVI.setOpcode(AArch64::MOVID);
1471 MOVI.addOperand(MCOperand::createReg(DestReg));
1472 MOVI.addOperand(MCOperand::createImm(0));
1473 EmitToStreamer(*OutStreamer, MOVI);
1474 } else {
1475 MCInst FMov;
1476 switch (MI.getOpcode()) {
1477 default: llvm_unreachable("Unexpected opcode");
1478 case AArch64::FMOVH0:
1479 FMov.setOpcode(STI->hasFullFP16() ? AArch64::FMOVWHr : AArch64::FMOVWSr);
1480 if (!STI->hasFullFP16())
1481 DestReg = (AArch64::S0 + (DestReg - AArch64::H0));
1482 FMov.addOperand(MCOperand::createReg(DestReg));
1483 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1484 break;
1485 case AArch64::FMOVS0:
1486 FMov.setOpcode(AArch64::FMOVWSr);
1487 FMov.addOperand(MCOperand::createReg(DestReg));
1488 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1489 break;
1490 case AArch64::FMOVD0:
1491 FMov.setOpcode(AArch64::FMOVXDr);
1492 FMov.addOperand(MCOperand::createReg(DestReg));
1493 FMov.addOperand(MCOperand::createReg(AArch64::XZR));
1494 break;
1495 }
1496 EmitToStreamer(*OutStreamer, FMov);
1497 }
1498}
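// For example (illustrative): FMOVD0 of d3 becomes "movi d3, #0" on subtargets
// with zero-cycle FP zeroing and NEON available, and "fmov d3, xzr" otherwise.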
1499
1500// Simple pseudo-instructions have their lowering (with expansion to real
1501// instructions) auto-generated.
1502#include "AArch64GenMCPseudoLowering.inc"
1503
1504void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
1505 AArch64_MC::verifyInstructionPredicates(MI->getOpcode(), STI->getFeatureBits());
1506
1507 // Do any auto-generated pseudo lowerings.
1508 if (emitPseudoExpansionLowering(*OutStreamer, MI))
1509 return;
1510
1511 if (MI->getOpcode() == AArch64::ADRP) {
1512 for (auto &Opd : MI->operands()) {
1513 if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
1514 "swift_async_extendedFramePointerFlags") {
1515 ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
1516 }
1517 }
1518 }
1519
1520 if (AArch64FI->getLOHRelated().count(MI)) {
1521 // Generate a label for LOH related instruction
1522 MCSymbol *LOHLabel = createTempSymbol("loh");
1523 // Associate the instruction with the label
1524 LOHInstToLabel[MI] = LOHLabel;
1525 OutStreamer->emitLabel(LOHLabel);
1526 }
1527
1528 AArch64TargetStreamer *TS =
1529 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
1530 // Do any manual lowerings.
1531 switch (MI->getOpcode()) {
1532 default:
1533 break;
1534 case AArch64::HINT: {
1535 // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
1536 // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
1537 // non-empty. If MI is the initial BTI, place the
1538 // __patchable_function_entries label after BTI.
1539 if (CurrentPatchableFunctionEntrySym &&
1540 CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
1541 MI == &MF->front().front()) {
1542 int64_t Imm = MI->getOperand(0).getImm();
1543 if ((Imm & 32) && (Imm & 6)) {
1544 MCInst Inst;
1545 MCInstLowering.Lower(MI, Inst);
1546 EmitToStreamer(*OutStreamer, Inst);
1547 CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
1548 OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
1549 return;
1550 }
1551 }
1552 break;
1553 }
1554 case AArch64::MOVMCSym: {
1555 Register DestReg = MI->getOperand(0).getReg();
1556 const MachineOperand &MO_Sym = MI->getOperand(1);
1557 MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
1558 MCOperand Hi_MCSym, Lo_MCSym;
1559
1560 Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
1561 Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);
1562
1563 MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
1564 MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);
1565
1566 MCInst MovZ;
1567 MovZ.setOpcode(AArch64::MOVZXi);
1568 MovZ.addOperand(MCOperand::createReg(DestReg));
1569 MovZ.addOperand(Hi_MCSym);
1570 MovZ.addOperand(MCOperand::createImm(16));
1571 EmitToStreamer(*OutStreamer, MovZ);
1572
1573 MCInst MovK;
1574 MovK.setOpcode(AArch64::MOVKXi);
1575 MovK.addOperand(MCOperand::createReg(DestReg));
1576 MovK.addOperand(MCOperand::createReg(DestReg));
1577 MovK.addOperand(Lo_MCSym);
1578 MovK.addOperand(MCOperand::createImm(0));
1579 EmitToStreamer(*OutStreamer, MovK);
1580 return;
1581 }
1582 case AArch64::MOVIv2d_ns:
1583 // It is generally beneficial to rewrite "fmov s0, wzr" to "movi d0, #0",
1584 // as movi is more efficient across all cores. Newer cores can eliminate
1585 // fmovs early and there is no difference with movi, but this is not true
1586 // for all implementations.
1587 //
1588 // The floating-point version doesn't quite work in rare cases on older
1589 // CPUs, so on those targets we lower this instruction to movi.16b instead.
1590 if (STI->hasZeroCycleZeroingFPWorkaround() &&
1591 MI->getOperand(1).getImm() == 0) {
1592 MCInst TmpInst;
1593 TmpInst.setOpcode(AArch64::MOVIv16b_ns);
1594 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
1595 TmpInst.addOperand(MCOperand::createImm(MI->getOperand(1).getImm()));
1596 EmitToStreamer(*OutStreamer, TmpInst);
1597 return;
1598 }
1599 break;
1600
1601 case AArch64::DBG_VALUE:
1602 case AArch64::DBG_VALUE_LIST:
1603 if (isVerbose() && OutStreamer->hasRawTextSupport()) {
1604 SmallString<128> TmpStr;
1605 raw_svector_ostream OS(TmpStr);
1606 PrintDebugValueComment(MI, OS);
1607 OutStreamer->emitRawText(StringRef(OS.str()));
1608 }
1609 return;
1610
1611 case AArch64::EMITBKEY: {
1612 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
1613 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
1614 ExceptionHandlingType != ExceptionHandling::ARM)
1615 return;
1616
1617 if (getFunctionCFISectionType(*MF) == CFISection::None)
1618 return;
1619
1620 OutStreamer->emitCFIBKeyFrame();
1621 return;
1622 }
1623
1624 case AArch64::EMITMTETAGGED: {
1625 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
1626 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
1627 ExceptionHandlingType != ExceptionHandling::ARM)
1628 return;
1629
1630 if (getFunctionCFISectionType(*MF) != CFISection::None)
1631 OutStreamer->emitCFIMTETaggedFrame();
1632 return;
1633 }
1634
1635 // Tail calls use pseudo instructions so they have the proper code-gen
1636 // attributes (isCall, isReturn, etc.). We lower them to the real
1637 // instruction here.
1638 case AArch64::TCRETURNri:
1639 case AArch64::TCRETURNrix16x17:
1640 case AArch64::TCRETURNrix17:
1641 case AArch64::TCRETURNrinotx16:
1642 case AArch64::TCRETURNriALL: {
1643 MCInst TmpInst;
1644 TmpInst.setOpcode(AArch64::BR);
1645 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
1646 EmitToStreamer(*OutStreamer, TmpInst);
1647 return;
1648 }
1649 case AArch64::TCRETURNdi: {
1650 MCOperand Dest;
1651 MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
1652 MCInst TmpInst;
1653 TmpInst.setOpcode(AArch64::B);
1654 TmpInst.addOperand(Dest);
1655 EmitToStreamer(*OutStreamer, TmpInst);
1656 return;
1657 }
1658 case AArch64::SpeculationBarrierISBDSBEndBB: {
1659 // Print DSB SYS + ISB
1660 MCInst TmpInstDSB;
1661 TmpInstDSB.setOpcode(AArch64::DSB);
1662 TmpInstDSB.addOperand(MCOperand::createImm(0xf));
1663 EmitToStreamer(*OutStreamer, TmpInstDSB);
1664 MCInst TmpInstISB;
1665 TmpInstISB.setOpcode(AArch64::ISB);
1666 TmpInstISB.addOperand(MCOperand::createImm(0xf));
1667 EmitToStreamer(*OutStreamer, TmpInstISB);
1668 return;
1669 }
1670 case AArch64::SpeculationBarrierSBEndBB: {
1671 // Print SB
1672 MCInst TmpInstSB;
1673 TmpInstSB.setOpcode(AArch64::SB);
1674 EmitToStreamer(*OutStreamer, TmpInstSB);
1675 return;
1676 }
1677 case AArch64::TLSDESC_CALLSEQ: {
1678 /// lower this to:
1679 /// adrp x0, :tlsdesc:var
1680 /// ldr x1, [x0, #:tlsdesc_lo12:var]
1681 /// add x0, x0, #:tlsdesc_lo12:var
1682 /// .tlsdesccall var
1683 /// blr x1
1684 /// (TPIDR_EL0 offset now in x0)
1685 const MachineOperand &MO_Sym = MI->getOperand(0);
1686 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
1687 MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
1688 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
1689 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
1690 MCInstLowering.lowerOperand(MO_Sym, Sym);
1691 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
1692 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
1693
1694 MCInst Adrp;
1695 Adrp.setOpcode(AArch64::ADRP);
1696 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
1697 Adrp.addOperand(SymTLSDesc);
1698 EmitToStreamer(*OutStreamer, Adrp);
1699
1700 MCInst Ldr;
1701 if (STI->isTargetILP32()) {
1702 Ldr.setOpcode(AArch64::LDRWui);
1703 Ldr.addOperand(MCOperand::createReg(AArch64::W1));
1704 } else {
1705 Ldr.setOpcode(AArch64::LDRXui);
1706 Ldr.addOperand(MCOperand::createReg(AArch64::X1));
1707 }
1708 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
1709 Ldr.addOperand(SymTLSDescLo12);
1710    Ldr.addOperand(MCOperand::createImm(0));
1711    EmitToStreamer(*OutStreamer, Ldr);
1712
1713 MCInst Add;
1714 if (STI->isTargetILP32()) {
1715 Add.setOpcode(AArch64::ADDWri);
1716 Add.addOperand(MCOperand::createReg(AArch64::W0));
1717 Add.addOperand(MCOperand::createReg(AArch64::W0));
1718 } else {
1719 Add.setOpcode(AArch64::ADDXri);
1720 Add.addOperand(MCOperand::createReg(AArch64::X0));
1721 Add.addOperand(MCOperand::createReg(AArch64::X0));
1722 }
1723 Add.addOperand(SymTLSDescLo12);
1724    Add.addOperand(MCOperand::createImm(AArch64_AM::getShiftValue(0)));
1725    EmitToStreamer(*OutStreamer, Add);
1726
1727    // Emit a relocation-annotation. This expands to no code, but requests
1728    // that the following instruction get an R_AARCH64_TLSDESC_CALL relocation.
1729 MCInst TLSDescCall;
1730 TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
1731 TLSDescCall.addOperand(Sym);
1732 EmitToStreamer(*OutStreamer, TLSDescCall);
1733
1734 MCInst Blr;
1735 Blr.setOpcode(AArch64::BLR);
1736 Blr.addOperand(MCOperand::createReg(AArch64::X1));
1737 EmitToStreamer(*OutStreamer, Blr);
1738
1739 return;
1740 }
1741
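  // Compressed jump-table dispatch: the table holds 4-, 2- or 1-byte offsets
  // from the table base, and LowerJumpTableDest expands the pseudo into the
  // address computation for the indirect branch.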
1742 case AArch64::JumpTableDest32:
1743 case AArch64::JumpTableDest16:
1744 case AArch64::JumpTableDest8:
1745 LowerJumpTableDest(*OutStreamer, *MI);
1746 return;
1747
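  // Materialize floating-point +0.0: emitFMov0 picks either an FMOV from the
  // zero register or a vector MOVI of zero, depending on the subtarget.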
1748 case AArch64::FMOVH0:
1749 case AArch64::FMOVS0:
1750 case AArch64::FMOVD0:
1751 emitFMov0(*MI);
1752 return;
1753
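  // FEAT_MOPS memcpy/memmove/memset pseudos: LowerMOPS expands each into the
  // prologue/main/epilogue instruction triple (e.g. CPYFP/CPYFM/CPYFE).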
1754 case AArch64::MOPSMemoryCopyPseudo:
1755 case AArch64::MOPSMemoryMovePseudo:
1756 case AArch64::MOPSMemorySetPseudo:
1757 case AArch64::MOPSMemorySetTaggingPseudo:
1758 LowerMOPS(*OutStreamer, *MI);
1759 return;
1760
1761 case TargetOpcode::STACKMAP:
1762 return LowerSTACKMAP(*OutStreamer, SM, *MI);
1763
1764 case TargetOpcode::PATCHPOINT:
1765 return LowerPATCHPOINT(*OutStreamer, SM, *MI);
1766
1767 case TargetOpcode::STATEPOINT:
1768 return LowerSTATEPOINT(*OutStreamer, SM, *MI);
1769
1770 case TargetOpcode::FAULTING_OP:
1771 return LowerFAULTING_OP(*MI);
1772
1773 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
1774 LowerPATCHABLE_FUNCTION_ENTER(*MI);
1775 return;
1776
1777 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
1778 LowerPATCHABLE_FUNCTION_EXIT(*MI);
1779 return;
1780
1781 case TargetOpcode::PATCHABLE_TAIL_CALL:
1782 LowerPATCHABLE_TAIL_CALL(*MI);
1783 return;
1784 case TargetOpcode::PATCHABLE_EVENT_CALL:
1785 return LowerPATCHABLE_EVENT_CALL(*MI, false);
1786 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
1787 return LowerPATCHABLE_EVENT_CALL(*MI, true);
1788
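  // KCFI_CHECK expands inline to a type-hash check of the indirect-call
  // target; the HWASAN_CHECK_MEMACCESS* pseudos call outlined tag-check
  // routines that are emitted on demand in emitHwasanMemaccessSymbols.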
1789 case AArch64::KCFI_CHECK:
1790 LowerKCFI_CHECK(*MI);
1791 return;
1792
1793 case AArch64::HWASAN_CHECK_MEMACCESS:
1794 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
1795 case AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW:
1796 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW:
1797 LowerHWASAN_CHECK_MEMACCESS(*MI);
1798 return;
1799
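  // The SEH_* pseudos carry Windows ARM64 unwind information; each one is
  // forwarded to the target streamer, which prints the matching .seh_*
  // directive (offsets are negated for the pre-indexed "_X" forms).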
1800 case AArch64::SEH_StackAlloc:
1801 TS->emitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
1802 return;
1803
1804 case AArch64::SEH_SaveFPLR:
1805 TS->emitARM64WinCFISaveFPLR(MI->getOperand(0).getImm());
1806 return;
1807
1808 case AArch64::SEH_SaveFPLR_X:
1809 assert(MI->getOperand(0).getImm() < 0 &&
1810 "Pre increment SEH opcode must have a negative offset");
1811 TS->emitARM64WinCFISaveFPLRX(-MI->getOperand(0).getImm());
1812 return;
1813
1814 case AArch64::SEH_SaveReg:
1815 TS->emitARM64WinCFISaveReg(MI->getOperand(0).getImm(),
1816 MI->getOperand(1).getImm());
1817 return;
1818
1819 case AArch64::SEH_SaveReg_X:
1820 assert(MI->getOperand(1).getImm() < 0 &&
1821 "Pre increment SEH opcode must have a negative offset");
1822 TS->emitARM64WinCFISaveRegX(MI->getOperand(0).getImm(),
1823 -MI->getOperand(1).getImm());
1824 return;
1825
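  // A register pair whose second register is LR is emitted as .seh_save_lrpair;
  // the unwind encoding only allows x19, x21, ..., x27 as the first register.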
1826 case AArch64::SEH_SaveRegP:
1827 if (MI->getOperand(1).getImm() == 30 && MI->getOperand(0).getImm() >= 19 &&
1828 MI->getOperand(0).getImm() <= 28) {
1829 assert((MI->getOperand(0).getImm() - 19) % 2 == 0 &&
1830 "Register paired with LR must be odd");
1831 TS->emitARM64WinCFISaveLRPair(MI->getOperand(0).getImm(),
1832 MI->getOperand(2).getImm());
1833 return;
1834 }
1835 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
1836 "Non-consecutive registers not allowed for save_regp");
1837 TS->emitARM64WinCFISaveRegP(MI->getOperand(0).getImm(),
1838 MI->getOperand(2).getImm());
1839 return;
1840
1841 case AArch64::SEH_SaveRegP_X:
1842 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
1843 "Non-consecutive registers not allowed for save_regp_x");
1844 assert(MI->getOperand(2).getImm() < 0 &&
1845 "Pre increment SEH opcode must have a negative offset");
1846 TS->emitARM64WinCFISaveRegPX(MI->getOperand(0).getImm(),
1847 -MI->getOperand(2).getImm());
1848 return;
1849
1850 case AArch64::SEH_SaveFReg:
1851 TS->emitARM64WinCFISaveFReg(MI->getOperand(0).getImm(),
1852 MI->getOperand(1).getImm());
1853 return;
1854
1855 case AArch64::SEH_SaveFReg_X:
1856 assert(MI->getOperand(1).getImm() < 0 &&
1857 "Pre increment SEH opcode must have a negative offset");
1858 TS->emitARM64WinCFISaveFRegX(MI->getOperand(0).getImm(),
1859 -MI->getOperand(1).getImm());
1860 return;
1861
1862 case AArch64::SEH_SaveFRegP:
1863 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
1864 "Non-consecutive registers not allowed for save_regp");
1865 TS->emitARM64WinCFISaveFRegP(MI->getOperand(0).getImm(),
1866 MI->getOperand(2).getImm());
1867 return;
1868
1869 case AArch64::SEH_SaveFRegP_X:
1870 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
1871 "Non-consecutive registers not allowed for save_regp_x");
1872 assert(MI->getOperand(2).getImm() < 0 &&
1873 "Pre increment SEH opcode must have a negative offset");
1874 TS->emitARM64WinCFISaveFRegPX(MI->getOperand(0).getImm(),
1875 -MI->getOperand(2).getImm());
1876 return;
1877
1878 case AArch64::SEH_SetFP:
1879    TS->emitARM64WinCFISetFP();
1880    return;
1881
1882 case AArch64::SEH_AddFP:
1883 TS->emitARM64WinCFIAddFP(MI->getOperand(0).getImm());
1884 return;
1885
1886 case AArch64::SEH_Nop:
1887 TS->emitARM64WinCFINop();
1888 return;
1889
1890 case AArch64::SEH_PrologEnd:
1891    TS->emitARM64WinCFIPrologEnd();
1892    return;
1893
1894 case AArch64::SEH_EpilogStart:
1895    TS->emitARM64WinCFIEpilogStart();
1896    return;
1897
1898 case AArch64::SEH_EpilogEnd:
1899    TS->emitARM64WinCFIEpilogEnd();
1900    return;
1901
1902 case AArch64::SEH_PACSignLR:
1903    TS->emitARM64WinCFIPACSignLR();
1904    return;
1905
1906 case AArch64::SEH_SaveAnyRegQP:
1907 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
1908 "Non-consecutive registers not allowed for save_any_reg");
1909 assert(MI->getOperand(2).getImm() >= 0 &&
1910 "SaveAnyRegQP SEH opcode offset must be non-negative");
1911 assert(MI->getOperand(2).getImm() <= 1008 &&
1912 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
1913 TS->emitARM64WinCFISaveAnyRegQP(MI->getOperand(0).getImm(),
1914 MI->getOperand(2).getImm());
1915 return;
1916
1917 case AArch64::SEH_SaveAnyRegQPX:
1918 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
1919 "Non-consecutive registers not allowed for save_any_reg");
1920 assert(MI->getOperand(2).getImm() < 0 &&
1921 "SaveAnyRegQPX SEH opcode offset must be negative");
1922 assert(MI->getOperand(2).getImm() >= -1008 &&
1923 "SaveAnyRegQPX SEH opcode offset must fit into 6 bits");
1924 TS->emitARM64WinCFISaveAnyRegQPX(MI->getOperand(0).getImm(),
1925 -MI->getOperand(2).getImm());
1926 return;
1927 }
1928
1929 // Finally, do the automated lowerings for everything else.
1930 MCInst TmpInst;
1931 MCInstLowering.Lower(MI, TmpInst);
1932 EmitToStreamer(*OutStreamer, TmpInst);
1933}
1934
1935void AArch64AsmPrinter::emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
1936 MCSymbol *LazyPointer) {
1937 // _ifunc:
1938 // adrp x16, lazy_pointer@GOTPAGE
1939 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
1940 // ldr x16, [x16]
1941 // br x16
1942
1943 {
1944 MCInst Adrp;
1945 Adrp.setOpcode(AArch64::ADRP);
1946 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
1947 MCOperand SymPage;
1948 MCInstLowering.lowerOperand(
1949        MachineOperand::CreateMCSymbol(LazyPointer,
1950                                       AArch64II::MO_GOT | AArch64II::MO_PAGE),
1951        SymPage);
1952 Adrp.addOperand(SymPage);
1953 OutStreamer->emitInstruction(Adrp, *STI);
1954 }
1955
1956 {
1957 MCInst Ldr;
1958 Ldr.setOpcode(AArch64::LDRXui);
1959 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
1960 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
1961 MCOperand SymPageOff;
1962 MCInstLowering.lowerOperand(
1963        MachineOperand::CreateMCSymbol(LazyPointer,
1964                                       AArch64II::MO_GOT | AArch64II::MO_PAGEOFF),
1965        SymPageOff);
1966 Ldr.addOperand(SymPageOff);
1967      Ldr.addOperand(MCOperand::createImm(0));
1968      OutStreamer->emitInstruction(Ldr, *STI);
1969 }
1970
1971 OutStreamer->emitInstruction(MCInstBuilder(AArch64::LDRXui)
1972 .addReg(AArch64::X16)
1973 .addReg(AArch64::X16)
1974 .addImm(0),
1975 *STI);
1976
1977 OutStreamer->emitInstruction(MCInstBuilder(TM.getTargetTriple().isArm64e()
1978 ? AArch64::BRAAZ
1979 : AArch64::BR)
1980 .addReg(AArch64::X16),
1981 *STI);
1982}
1983
1984void AArch64AsmPrinter::emitMachOIFuncStubHelperBody(Module &M,
1985 const GlobalIFunc &GI,
1986 MCSymbol *LazyPointer) {
1987 // These stub helpers are only ever called once, so here we're optimizing for
1988 // minimum size by using the pre-indexed store variants, which saves a few
1989 // bytes of instructions to bump & restore sp.
1990
1991 // _ifunc.stub_helper:
1992 // stp fp, lr, [sp, #-16]!
1993 // mov fp, sp
1994 // stp x1, x0, [sp, #-16]!
1995 // stp x3, x2, [sp, #-16]!
1996 // stp x5, x4, [sp, #-16]!
1997 // stp x7, x6, [sp, #-16]!
1998 // stp d1, d0, [sp, #-16]!
1999 // stp d3, d2, [sp, #-16]!
2000 // stp d5, d4, [sp, #-16]!
2001 // stp d7, d6, [sp, #-16]!
2002 // bl _resolver
2003 // adrp x16, lazy_pointer@GOTPAGE
2004 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
2005 // str x0, [x16]
2006 // mov x16, x0
2007 // ldp d7, d6, [sp], #16
2008 // ldp d5, d4, [sp], #16
2009 // ldp d3, d2, [sp], #16
2010 // ldp d1, d0, [sp], #16
2011 // ldp x7, x6, [sp], #16
2012 // ldp x5, x4, [sp], #16
2013 // ldp x3, x2, [sp], #16
2014 // ldp x1, x0, [sp], #16
2015 // ldp fp, lr, [sp], #16
2016 // br x16
2017
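  // The pre/post-indexed STP/LDP immediates below are in units of the register
  // size (8 bytes here), so an immediate of +/-2 moves sp by 16 bytes.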
2018 OutStreamer->emitInstruction(MCInstBuilder(AArch64::STPXpre)
2019 .addReg(AArch64::SP)
2020 .addReg(AArch64::FP)
2021 .addReg(AArch64::LR)
2022 .addReg(AArch64::SP)
2023 .addImm(-2),
2024 *STI);
2025
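  // ADDXri fp, sp, #0 is the "mov fp, sp" from the sketch above.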
2026 OutStreamer->emitInstruction(MCInstBuilder(AArch64::ADDXri)
2027 .addReg(AArch64::FP)
2028 .addReg(AArch64::SP)
2029 .addImm(0)
2030 .addImm(0),
2031 *STI);
2032
2033 for (int I = 0; I != 4; ++I)
2034 OutStreamer->emitInstruction(MCInstBuilder(AArch64::STPXpre)
2035 .addReg(AArch64::SP)
2036 .addReg(AArch64::X1 + 2 * I)
2037 .addReg(AArch64::X0 + 2 * I)
2038 .addReg(AArch64::SP)
2039 .addImm(-2),
2040 *STI);
2041
2042 for (int I = 0; I != 4; ++I)
2043 OutStreamer->emitInstruction(MCInstBuilder(AArch64::STPDpre)
2044 .addReg(AArch64::SP)
2045 .addReg(AArch64::D1 + 2 * I)
2046 .addReg(AArch64::D0 + 2 * I)
2047 .addReg(AArch64::SP)
2048 .addImm(-2),
2049 *STI);
2050
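  // Call the ifunc resolver; the resolved function pointer is returned in x0.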
2051 OutStreamer->emitInstruction(
2052 MCInstBuilder(AArch64::BL)
2053          .addExpr(lowerConstant(GI.getResolver())),
2054      *STI);
2055
2056 {
2057 MCInst Adrp;
2058 Adrp.setOpcode(AArch64::ADRP);
2059 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
2060 MCOperand SymPage;
2061 MCInstLowering.lowerOperand(
2062 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
2063                                 AArch64II::MO_GOT | AArch64II::MO_PAGE),
2064        SymPage);
2065 Adrp.addOperand(SymPage);
2066 OutStreamer->emitInstruction(Adrp, *STI);
2067 }
2068
2069 {
2070 MCInst Ldr;
2071 Ldr.setOpcode(AArch64::LDRXui);
2072 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
2073 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
2074 MCOperand SymPageOff;
2075 MCInstLowering.lowerOperand(
2076 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
2077                                 AArch64II::MO_GOT | AArch64II::MO_PAGEOFF),
2078        SymPageOff);
2079 Ldr.addOperand(SymPageOff);
2080      Ldr.addOperand(MCOperand::createImm(0));
2081      OutStreamer->emitInstruction(Ldr, *STI);
2082 }
2083
2084 OutStreamer->emitInstruction(MCInstBuilder(AArch64::STRXui)
2085 .addReg(AArch64::X0)
2086 .addReg(AArch64::X16)
2087 .addImm(0),
2088 *STI);
2089
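  // ADDXri x16, x0, #0 is the "mov x16, x0" step: keep the resolved pointer in
  // x16 so the argument registers (including x0) can be restored below.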
2090 OutStreamer->emitInstruction(MCInstBuilder(AArch64::ADDXri)
2091 .addReg(AArch64::X16)
2092 .addReg(AArch64::X0)
2093 .addImm(0)
2094 .addImm(0),
2095 *STI);
2096
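  // Restore the argument registers in the reverse order they were saved, then
  // fp/lr, and finally branch to the resolved target left in x16.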
2097 for (int I = 3; I != -1; --I)
2098 OutStreamer->emitInstruction(MCInstBuilder(AArch64::LDPDpost)
2099 .addReg(AArch64::SP)
2100 .addReg(AArch64::D1 + 2 * I)
2101 .addReg(AArch64::D0 + 2 * I)
2102 .addReg(AArch64::SP)
2103 .addImm(2),
2104 *STI);
2105
2106 for (int I = 3; I != -1; --I)
2107 OutStreamer->emitInstruction(MCInstBuilder(AArch64::LDPXpost)
2108 .addReg(AArch64::SP)
2109 .addReg(AArch64::X1 + 2 * I)
2110 .addReg(AArch64::X0 + 2 * I)
2111 .addReg(AArch64::SP)
2112 .addImm(2),
2113 *STI);
2114
2115 OutStreamer->emitInstruction(MCInstBuilder(AArch64::LDPXpost)
2116 .addReg(AArch64::SP)
2117 .addReg(AArch64::FP)
2118 .addReg(AArch64::LR)
2119 .addReg(AArch64::SP)
2120 .addImm(2),
2121 *STI);
2122
2123 OutStreamer->emitInstruction(MCInstBuilder(TM.getTargetTriple().isArm64e()
2124 ? AArch64::BRAAZ
2125 : AArch64::BR)
2126 .addReg(AArch64::X16),
2127 *STI);
2128}
2129
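// Lower GlobalValue constants through GetGlobalValueSymbol so target-specific
// symbol handling applies; all other constants use the generic AsmPrinter path.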
2130const MCExpr *AArch64AsmPrinter::lowerConstant(const Constant *CV) {
2131 if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
2132 return MCSymbolRefExpr::create(MCInstLowering.GetGlobalValueSymbol(GV, 0),
2133 OutContext);
2134 }
2135
2136 return AsmPrinter::lowerConstant(CV);
2137}
2138
2139// Force static initialization.
2140extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmPrinter() {
2141  RegisterAsmPrinter<AArch64AsmPrinter> X(getTheAArch64leTarget());
2142  RegisterAsmPrinter<AArch64AsmPrinter> Y(getTheAArch64beTarget());
2143  RegisterAsmPrinter<AArch64AsmPrinter> Z(getTheARM64Target());
2144  RegisterAsmPrinter<AArch64AsmPrinter> W(getTheARM64_32Target());
2145  RegisterAsmPrinter<AArch64AsmPrinter> V(getTheAArch64_32Target());
2146}