AArch64AsmPrinter.cpp
1//===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains a printer that converts from our internal representation
10// of machine-dependent LLVM code to the AArch64 assembly language.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AArch64.h"
15#include "AArch64MCInstLower.h"
17#include "AArch64RegisterInfo.h"
18#include "AArch64Subtarget.h"
27#include "llvm/ADT/DenseMap.h"
28#include "llvm/ADT/ScopeExit.h"
31#include "llvm/ADT/StringRef.h"
32#include "llvm/ADT/Twine.h"
46#include "llvm/IR/DataLayout.h"
48#include "llvm/IR/Module.h"
49#include "llvm/MC/MCAsmInfo.h"
50#include "llvm/MC/MCContext.h"
51#include "llvm/MC/MCInst.h"
55#include "llvm/MC/MCStreamer.h"
56#include "llvm/MC/MCSymbol.h"
65#include <cassert>
66#include <cstdint>
67#include <map>
68#include <memory>
69
70using namespace llvm;
71
74 "aarch64-ptrauth-auth-checks", cl::Hidden,
75 cl::values(clEnumValN(Unchecked, "none", "don't test for failure"),
76 clEnumValN(Poison, "poison", "poison on failure"),
77 clEnumValN(Trap, "trap", "trap on failure")),
78 cl::desc("Check pointer authentication auth/resign failures"),
80
82 "aarch64-win-import-call-optimization", cl::Hidden,
83 cl::desc("Enable import call optimization for AArch64 Windows"),
84 cl::init(false));
85
86#define DEBUG_TYPE "asm-printer"
87
88namespace {
89
90class AArch64AsmPrinter : public AsmPrinter {
91 AArch64MCInstLower MCInstLowering;
92 FaultMaps FM;
93 const AArch64Subtarget *STI;
94 bool ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = false;
95#ifndef NDEBUG
96 unsigned InstsEmitted;
97#endif
98 DenseMap<MCSection *, std::vector<std::pair<MCSymbol *, MCSymbol *>>>
99 SectionToImportedFunctionCalls;
100
101public:
102 AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
103 : AsmPrinter(TM, std::move(Streamer)), MCInstLowering(OutContext, *this),
104 FM(*this) {}
105
106 StringRef getPassName() const override { return "AArch64 Assembly Printer"; }
107
108 /// Wrapper for MCInstLowering.lowerOperand() for the
109 /// tblgen'erated pseudo lowering.
110 bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
111 return MCInstLowering.lowerOperand(MO, MCOp);
112 }
113
114 const MCExpr *lowerConstantPtrAuth(const ConstantPtrAuth &CPA) override;
115
116 const MCExpr *lowerBlockAddressConstant(const BlockAddress &BA) override;
117
118 void emitStartOfAsmFile(Module &M) override;
119 void emitJumpTableInfo() override;
120 std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
121 codeview::JumpTableEntrySize>
122 getCodeViewJumpTableInfo(int JTI, const MachineInstr *BranchInstr,
123 const MCSymbol *BranchLabel) const override;
124
125 void emitFunctionEntryLabel() override;
126
127 void emitXXStructor(const DataLayout &DL, const Constant *CV) override;
128
129 void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);
130
131 void LowerHardenedBRJumpTable(const MachineInstr &MI);
132
133 void LowerMOPS(MCStreamer &OutStreamer, const MachineInstr &MI);
134
135 void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
136 const MachineInstr &MI);
137 void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
138 const MachineInstr &MI);
139 void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
140 const MachineInstr &MI);
141 void LowerFAULTING_OP(const MachineInstr &MI);
142
143 void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
144 void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
145 void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);
146 void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, bool Typed);
147
148 typedef std::tuple<unsigned, bool, uint32_t, bool, uint64_t>
149 HwasanMemaccessTuple;
150 std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
151 void LowerKCFI_CHECK(const MachineInstr &MI);
152 void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
153 void emitHwasanMemaccessSymbols(Module &M);
154
155 void emitSled(const MachineInstr &MI, SledKind Kind);
156
157 // Emit the sequence for BRA/BLRA (authenticate + branch/call).
158 void emitPtrauthBranch(const MachineInstr *MI);
159
160 void emitPtrauthCheckAuthenticatedValue(Register TestedReg,
161 Register ScratchReg,
162 AArch64PACKey::ID Key,
163 AArch64PAuth::AuthCheckMethod Method,
164 bool ShouldTrap,
165 const MCSymbol *OnFailure);
166
167 // Check authenticated LR before tail calling.
168 void emitPtrauthTailCallHardening(const MachineInstr *TC);
169
170 // Emit the sequence for AUT or AUTPAC.
171 void emitPtrauthAuthResign(const MachineInstr *MI);
172
173 // Emit the sequence to compute the discriminator.
174 //
175 // ScratchReg should be x16/x17.
176 //
177 // The returned register is either unmodified AddrDisc or x16/x17.
178 //
179 // If the expanded pseudo is allowed to clobber AddrDisc register, setting
180 // MayUseAddrAsScratch may save one MOV instruction, provided the address
181 // is already in x16/x17 (i.e. return x16/x17 which is the *modified* AddrDisc
182 // register at the same time):
183 //
184 // mov x17, x16
185 // movk x17, #1234, lsl #48
186 // ; x16 is not used anymore
187 //
188 // can be replaced by
189 //
190 // movk x16, #1234, lsl #48
191 Register emitPtrauthDiscriminator(uint16_t Disc, Register AddrDisc,
192 Register ScratchReg,
193 bool MayUseAddrAsScratch = false);
194
195 // Emit the sequence for LOADauthptrstatic
196 void LowerLOADauthptrstatic(const MachineInstr &MI);
197
198 // Emit the sequence for LOADgotPAC/MOVaddrPAC (either GOT adrp-ldr or
199 // adrp-add followed by PAC sign)
200 void LowerMOVaddrPAC(const MachineInstr &MI);
201
202 // Emit the sequence for LOADgotAUTH (load signed pointer from signed ELF GOT
203 // and authenticate it with, if FPAC bit is not set, check+trap sequence after
204 // authenticating)
205 void LowerLOADgotAUTH(const MachineInstr &MI);
206
207 /// tblgen'erated driver function for lowering simple MI->MC
208 /// pseudo instructions.
209 bool lowerPseudoInstExpansion(const MachineInstr *MI, MCInst &Inst);
210
211 void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
212 void EmitToStreamer(const MCInst &Inst) {
213 EmitToStreamer(*OutStreamer, Inst);
214 }
215
216 void emitInstruction(const MachineInstr *MI) override;
217
218 void emitFunctionHeaderComment() override;
219
220 void getAnalysisUsage(AnalysisUsage &AU) const override {
221 AsmPrinter::getAnalysisUsage(AU);
222 AU.setPreservesAll();
223 }
224
225 bool runOnMachineFunction(MachineFunction &MF) override {
226 AArch64FI = MF.getInfo<AArch64FunctionInfo>();
227 STI = &MF.getSubtarget<AArch64Subtarget>();
228
229 SetupMachineFunction(MF);
230
231 if (STI->isTargetCOFF()) {
232 bool Local = MF.getFunction().hasLocalLinkage();
233 COFF::SymbolStorageClass Scl =
234 Local ? COFF::IMAGE_SYM_CLASS_STATIC : COFF::IMAGE_SYM_CLASS_EXTERNAL;
235 int Type =
236 COFF::IMAGE_SYM_DTYPE_FUNCTION << COFF::SCT_COMPLEX_TYPE_SHIFT;
237
238 OutStreamer->beginCOFFSymbolDef(CurrentFnSym);
239 OutStreamer->emitCOFFSymbolStorageClass(Scl);
240 OutStreamer->emitCOFFSymbolType(Type);
241 OutStreamer->endCOFFSymbolDef();
242 }
243
244 // Emit the rest of the function body.
245 emitFunctionBody();
246
247 // Emit the XRay table for this function.
248 emitXRayTable();
249
250 // We didn't modify anything.
251 return false;
252 }
253
254 const MCExpr *lowerConstant(const Constant *CV) override;
255
256private:
257 void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
258 bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
259 bool printAsmRegInClass(const MachineOperand &MO,
260 const TargetRegisterClass *RC, unsigned AltName,
261 raw_ostream &O);
262
263 bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
264 const char *ExtraCode, raw_ostream &O) override;
265 bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
266 const char *ExtraCode, raw_ostream &O) override;
267
268 void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
269
270 void emitFunctionBodyEnd() override;
271 void emitGlobalAlias(const Module &M, const GlobalAlias &GA) override;
272
273 MCSymbol *GetCPISymbol(unsigned CPID) const override;
274 void emitEndOfAsmFile(Module &M) override;
275
276 AArch64FunctionInfo *AArch64FI = nullptr;
277
278 /// Emit the LOHs contained in AArch64FI.
279 void emitLOHs();
280
281 void emitMovXReg(Register Dest, Register Src);
282 void emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift);
283 void emitMOVK(Register Dest, uint64_t Imm, unsigned Shift);
284
285 /// Emit instruction to set float register to zero.
286 void emitFMov0(const MachineInstr &MI);
287
288 using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;
289
290 MInstToMCSymbol LOHInstToLabel;
291
292 bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const override {
293 return ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags;
294 }
295
296 const MCSubtargetInfo *getIFuncMCSubtargetInfo() const override {
297 assert(STI);
298 return STI;
299 }
300 void emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
301 MCSymbol *LazyPointer) override;
302 void emitMachOIFuncStubHelperBody(Module &M, const GlobalIFunc &GI,
303 MCSymbol *LazyPointer) override;
304
305 /// Checks if this instruction is part of a sequence that is eligible for import
306 /// call optimization and, if so, records it to be emitted in the import call
307 /// section.
308 void recordIfImportCall(const MachineInstr *BranchInst);
309};
310
311} // end anonymous namespace
312
313void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
314 const Triple &TT = TM.getTargetTriple();
315
316 if (TT.isOSBinFormatCOFF()) {
317 // Emit an absolute @feat.00 symbol
318 MCSymbol *S = MMI->getContext().getOrCreateSymbol(StringRef("@feat.00"));
319 OutStreamer->beginCOFFSymbolDef(S);
320 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_STATIC);
321 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_NULL);
322 OutStreamer->endCOFFSymbolDef();
323 int64_t Feat00Value = 0;
324
325 if (M.getModuleFlag("cfguard")) {
326 // Object is CFG-aware.
327 Feat00Value |= COFF::Feat00Flags::GuardCF;
328 }
329
330 if (M.getModuleFlag("ehcontguard")) {
331 // Object also has EHCont.
332 Feat00Value |= COFF::Feat00Flags::GuardEHCont;
333 }
334
335 if (M.getModuleFlag("ms-kernel")) {
336 // Object is compiled with /kernel.
337 Feat00Value |= COFF::Feat00Flags::Kernel;
338 }
339
340 OutStreamer->emitSymbolAttribute(S, MCSA_Global);
341 OutStreamer->emitAssignment(
342 S, MCConstantExpr::create(Feat00Value, MMI->getContext()));
343 }
344
345 if (!TT.isOSBinFormatELF())
346 return;
347
348 // Assemble feature flags that may require creation of a note section.
349 unsigned Flags = 0;
350 if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
351 M.getModuleFlag("branch-target-enforcement")))
352 if (!BTE->isZero())
353 Flags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
354
355 if (const auto *GCS = mdconst::extract_or_null<ConstantInt>(
356 M.getModuleFlag("guarded-control-stack")))
357 if (!GCS->isZero())
358 Flags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_GCS;
359
360 if (const auto *Sign = mdconst::extract_or_null<ConstantInt>(
361 M.getModuleFlag("sign-return-address")))
362 if (!Sign->isZero())
363 Flags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_PAC;
364
365 uint64_t PAuthABIPlatform = -1;
366 if (const auto *PAP = mdconst::extract_or_null<ConstantInt>(
367 M.getModuleFlag("aarch64-elf-pauthabi-platform")))
368 PAuthABIPlatform = PAP->getZExtValue();
369 uint64_t PAuthABIVersion = -1;
370 if (const auto *PAV = mdconst::extract_or_null<ConstantInt>(
371 M.getModuleFlag("aarch64-elf-pauthabi-version")))
372 PAuthABIVersion = PAV->getZExtValue();
373
374 // Emit a .note.gnu.property section with the flags.
375 auto *TS =
376 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
377 TS->emitNoteSection(Flags, PAuthABIPlatform, PAuthABIVersion);
378}
379
380void AArch64AsmPrinter::emitFunctionHeaderComment() {
381 const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
382 std::optional<std::string> OutlinerString = FI->getOutliningStyle();
383 if (OutlinerString != std::nullopt)
384 OutStreamer->getCommentOS() << ' ' << OutlinerString;
385}
386
387void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI)
388{
389 const Function &F = MF->getFunction();
390 if (F.hasFnAttribute("patchable-function-entry")) {
391 unsigned Num;
392 if (F.getFnAttribute("patchable-function-entry")
393 .getValueAsString()
394 .getAsInteger(10, Num))
395 return;
396 emitNops(Num);
397 return;
398 }
399
400 emitSled(MI, SledKind::FUNCTION_ENTER);
401}
402
403void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI) {
404 emitSled(MI, SledKind::FUNCTION_EXIT);
405}
406
407void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI) {
408 emitSled(MI, SledKind::TAIL_CALL);
409}
410
411void AArch64AsmPrinter::emitSled(const MachineInstr &MI, SledKind Kind) {
412 static const int8_t NoopsInSledCount = 7;
413 // We want to emit the following pattern:
414 //
415 // .Lxray_sled_N:
416 // ALIGN
417 // B #32
418 // ; 7 NOP instructions (28 bytes)
419 // .tmpN
420 //
421 // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
422 // over the full 32 bytes (8 instructions) with the following pattern:
423 //
424 // STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
425 // LDR W17, #12 ; W17 := function ID
426 // LDR X16,#12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
427 // BLR X16 ; call the tracing trampoline
428 // ;DATA: 32 bits of function ID
429 // ;DATA: lower 32 bits of the address of the trampoline
430 // ;DATA: higher 32 bits of the address of the trampoline
431 // LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
432 //
433 OutStreamer->emitCodeAlignment(Align(4), &getSubtargetInfo());
434 auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
435 OutStreamer->emitLabel(CurSled);
436 auto Target = OutContext.createTempSymbol();
437
438 // Emit "B #32" instruction, which jumps over the next 28 bytes.
439 // The operand has to be the number of 4-byte instructions to jump over,
440 // including the current instruction.
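 // Hence addImm(8) below: 8 instructions x 4 bytes = 32 bytes, i.e. the
 // branch itself plus the 7 NOPs, landing on the .tmpN label.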
441 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));
442
443 for (int8_t I = 0; I < NoopsInSledCount; I++)
444 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
445
446 OutStreamer->emitLabel(Target);
447 recordSled(CurSled, MI, Kind, 2);
448}
449
450// Emit the following code for Intrinsic::{xray_customevent,xray_typedevent}
451// (built-in functions __xray_customevent/__xray_typedevent).
452//
453// .Lxray_event_sled_N:
454// b 1f
455// save x0 and x1 (and also x2 for TYPED_EVENT_CALL)
456// set up x0 and x1 (and also x2 for TYPED_EVENT_CALL)
457// bl __xray_CustomEvent or __xray_TypedEvent
458// restore x0 and x1 (and also x2 for TYPED_EVENT_CALL)
459// 1:
460//
461// There are 6 instructions for EVENT_CALL and 9 for TYPED_EVENT_CALL.
462//
463// Then record a sled of kind CUSTOM_EVENT or TYPED_EVENT.
464// After patching, b .+N will become a nop.
465void AArch64AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
466 bool Typed) {
467 auto &O = *OutStreamer;
468 MCSymbol *CurSled = OutContext.createTempSymbol("xray_sled_", true);
469 O.emitLabel(CurSled);
470 bool MachO = TM.getTargetTriple().isOSBinFormatMachO();
471 auto *Sym = MCSymbolRefExpr::create(
472 OutContext.getOrCreateSymbol(
473 Twine(MachO ? "_" : "") +
474 (Typed ? "__xray_TypedEvent" : "__xray_CustomEvent")),
475 OutContext);
476 if (Typed) {
477 O.AddComment("Begin XRay typed event");
478 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(9));
479 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
480 .addReg(AArch64::SP)
481 .addReg(AArch64::X0)
482 .addReg(AArch64::X1)
483 .addReg(AArch64::SP)
484 .addImm(-4));
485 EmitToStreamer(O, MCInstBuilder(AArch64::STRXui)
486 .addReg(AArch64::X2)
487 .addReg(AArch64::SP)
488 .addImm(2));
489 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
490 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
491 emitMovXReg(AArch64::X2, MI.getOperand(2).getReg());
492 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
493 EmitToStreamer(O, MCInstBuilder(AArch64::LDRXui)
494 .addReg(AArch64::X2)
495 .addReg(AArch64::SP)
496 .addImm(2));
497 O.AddComment("End XRay typed event");
498 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
499 .addReg(AArch64::SP)
500 .addReg(AArch64::X0)
501 .addReg(AArch64::X1)
502 .addReg(AArch64::SP)
503 .addImm(4));
504
505 recordSled(CurSled, MI, SledKind::TYPED_EVENT, 2);
506 } else {
507 O.AddComment("Begin XRay custom event");
508 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(6));
509 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
510 .addReg(AArch64::SP)
511 .addReg(AArch64::X0)
512 .addReg(AArch64::X1)
513 .addReg(AArch64::SP)
514 .addImm(-2));
515 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
516 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
517 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
518 O.AddComment("End XRay custom event");
519 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
520 .addReg(AArch64::SP)
521 .addReg(AArch64::X0)
522 .addReg(AArch64::X1)
523 .addReg(AArch64::SP)
524 .addImm(2));
525
526 recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 2);
527 }
528}
529
530void AArch64AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
531 Register AddrReg = MI.getOperand(0).getReg();
532 assert(std::next(MI.getIterator())->isCall() &&
533 "KCFI_CHECK not followed by a call instruction");
534 assert(std::next(MI.getIterator())->getOperand(0).getReg() == AddrReg &&
535 "KCFI_CHECK call target doesn't match call operand");
536
537 // Default to using the intra-procedure-call temporary registers for
538 // comparing the hashes.
539 unsigned ScratchRegs[] = {AArch64::W16, AArch64::W17};
540 if (AddrReg == AArch64::XZR) {
541 // Checking XZR makes no sense. Instead of emitting a load, zero
542 // ScratchRegs[0] and use it for the ESR AddrIndex below.
543 AddrReg = getXRegFromWReg(ScratchRegs[0]);
544 emitMovXReg(AddrReg, AArch64::XZR);
545 } else {
546 // If one of the scratch registers is used for the call target (e.g.
547 // with AArch64::TCRETURNriBTI), we can clobber another caller-saved
548 // temporary register instead (in this case, AArch64::W9) as the check
549 // is immediately followed by the call instruction.
550 for (auto &Reg : ScratchRegs) {
551 if (Reg == getWRegFromXReg(AddrReg)) {
552 Reg = AArch64::W9;
553 break;
554 }
555 }
556 assert(ScratchRegs[0] != AddrReg && ScratchRegs[1] != AddrReg &&
557 "Invalid scratch registers for KCFI_CHECK");
558
559 // Adjust the offset for patchable-function-prefix. This assumes that
560 // patchable-function-prefix is the same for all functions.
561 int64_t PrefixNops = 0;
562 (void)MI.getMF()
563 ->getFunction()
564 .getFnAttribute("patchable-function-prefix")
565 .getValueAsString()
566 .getAsInteger(10, PrefixNops);
567
568 // Load the target function type hash.
569 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDURWi)
570 .addReg(ScratchRegs[0])
571 .addReg(AddrReg)
572 .addImm(-(PrefixNops * 4 + 4)));
573 }
574
575 // Load the expected type hash.
576 const int64_t Type = MI.getOperand(1).getImm();
577 emitMOVK(ScratchRegs[1], Type & 0xFFFF, 0);
578 emitMOVK(ScratchRegs[1], (Type >> 16) & 0xFFFF, 16);
579
580 // Compare the hashes and trap if there's a mismatch.
581 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSWrs)
582 .addReg(AArch64::WZR)
583 .addReg(ScratchRegs[0])
584 .addReg(ScratchRegs[1])
585 .addImm(0));
586
587 MCSymbol *Pass = OutContext.createTempSymbol();
588 EmitToStreamer(*OutStreamer,
589 MCInstBuilder(AArch64::Bcc)
590 .addImm(AArch64CC::EQ)
591 .addExpr(MCSymbolRefExpr::create(Pass, OutContext)));
592
593 // The base ESR is 0x8000 and the register information is encoded in bits
594 // 0-9 as follows:
595 // - 0-4: n, where the register Xn contains the target address
596 // - 5-9: m, where the register Wm contains the expected type hash
597 // Where n, m are in [0, 30].
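 // Worked example (hypothetical operands): with the target address in X1
 // (n = 1) and the expected hash in W17 (m = 17), the BRK immediate below
 // would be 0x8000 | (17 << 5) | 1 = 0x8221.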
598 unsigned TypeIndex = ScratchRegs[1] - AArch64::W0;
599 unsigned AddrIndex;
600 switch (AddrReg) {
601 default:
602 AddrIndex = AddrReg - AArch64::X0;
603 break;
604 case AArch64::FP:
605 AddrIndex = 29;
606 break;
607 case AArch64::LR:
608 AddrIndex = 30;
609 break;
610 }
611
612 assert(AddrIndex < 31 && TypeIndex < 31);
613
614 unsigned ESR = 0x8000 | ((TypeIndex & 31) << 5) | (AddrIndex & 31);
615 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BRK).addImm(ESR));
616 OutStreamer->emitLabel(Pass);
617}
618
619void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
620 Register Reg = MI.getOperand(0).getReg();
621
622 // The HWASan pass won't emit a CHECK_MEMACCESS intrinsic with a pointer
623 // statically known to be zero. However, conceivably, the HWASan pass may
624 // encounter a "cannot currently statically prove to be null" pointer (and is
625 // therefore unable to omit the intrinsic) that later optimization passes
626 // convert into a statically known-null pointer.
627 if (Reg == AArch64::XZR)
628 return;
629
630 bool IsShort =
631 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES) ||
632 (MI.getOpcode() ==
633 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
634 uint32_t AccessInfo = MI.getOperand(1).getImm();
635 bool IsFixedShadow =
636 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW) ||
637 (MI.getOpcode() ==
638 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
639 uint64_t FixedShadowOffset = IsFixedShadow ? MI.getOperand(2).getImm() : 0;
640
641 MCSymbol *&Sym = HwasanMemaccessSymbols[HwasanMemaccessTuple(
642 Reg, IsShort, AccessInfo, IsFixedShadow, FixedShadowOffset)];
643 if (!Sym) {
644 // FIXME: Make this work on non-ELF.
645 if (!TM.getTargetTriple().isOSBinFormatELF())
646 report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");
647
648 std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
649 utostr(AccessInfo);
650 if (IsFixedShadow)
651 SymName += "_fixed_" + utostr(FixedShadowOffset);
652 if (IsShort)
653 SymName += "_short_v2";
654 Sym = OutContext.getOrCreateSymbol(SymName);
655 }
656
657 EmitToStreamer(*OutStreamer,
658 MCInstBuilder(AArch64::BL)
659 .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
660}
661
662void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
663 if (HwasanMemaccessSymbols.empty())
664 return;
665
666 const Triple &TT = TM.getTargetTriple();
667 assert(TT.isOSBinFormatELF());
668 std::unique_ptr<MCSubtargetInfo> STI(
669 TM.getTarget().createMCSubtargetInfo(TT.str(), "", ""));
670 assert(STI && "Unable to create subtarget info");
671 this->STI = static_cast<const AArch64Subtarget *>(&*STI);
672
673 MCSymbol *HwasanTagMismatchV1Sym =
674 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");
675 MCSymbol *HwasanTagMismatchV2Sym =
676 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");
677
678 const MCSymbolRefExpr *HwasanTagMismatchV1Ref =
679 MCSymbolRefExpr::create(HwasanTagMismatchV1Sym, OutContext);
680 const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
681 MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);
682
683 for (auto &P : HwasanMemaccessSymbols) {
684 unsigned Reg = std::get<0>(P.first);
685 bool IsShort = std::get<1>(P.first);
686 uint32_t AccessInfo = std::get<2>(P.first);
687 bool IsFixedShadow = std::get<3>(P.first);
688 uint64_t FixedShadowOffset = std::get<4>(P.first);
689 const MCSymbolRefExpr *HwasanTagMismatchRef =
690 IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
691 MCSymbol *Sym = P.second;
692
693 bool HasMatchAllTag =
694 (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
695 uint8_t MatchAllTag =
696 (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
697 unsigned Size =
698 1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
699 bool CompileKernel =
700 (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;
701
702 OutStreamer->switchSection(OutContext.getELFSection(
703 ".text.hot", ELF::SHT_PROGBITS,
704 ELF::SHF_EXECINSTR | ELF::SHF_ALLOC | ELF::SHF_GROUP, 0, Sym->getName(),
705 /*IsComdat=*/true));
706
707 OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
708 OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
709 OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
710 OutStreamer->emitLabel(Sym);
711
712 EmitToStreamer(MCInstBuilder(AArch64::SBFMXri)
713 .addReg(AArch64::X16)
714 .addReg(Reg)
715 .addImm(4)
716 .addImm(55));
717
718 if (IsFixedShadow) {
719 // AArch64 makes it difficult to embed large constants in the code.
720 // Fortuitously, kShadowBaseAlignment == 32, so we use the 32-bit
721 // left-shift option in the MOV instruction. Combined with the 16-bit
722 // immediate, this is enough to represent any offset up to 2**48.
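 // Sketch (hypothetical offset): FixedShadowOffset = 0x10'0000'0000 is
 // materialized below by a single MOVZ of #0x10 with a 32-bit left shift.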
723 emitMOVZ(AArch64::X17, FixedShadowOffset >> 32, 32);
724 EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
725 .addReg(AArch64::W16)
726 .addReg(AArch64::X17)
727 .addReg(AArch64::X16)
728 .addImm(0)
729 .addImm(0));
730 } else {
731 EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
732 .addReg(AArch64::W16)
733 .addReg(IsShort ? AArch64::X20 : AArch64::X9)
734 .addReg(AArch64::X16)
735 .addImm(0)
736 .addImm(0));
737 }
738
739 EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
740 .addReg(AArch64::XZR)
741 .addReg(AArch64::X16)
742 .addReg(Reg)
743 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)));
744 MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
745 EmitToStreamer(MCInstBuilder(AArch64::Bcc)
746 .addImm(AArch64CC::NE)
747 .addExpr(MCSymbolRefExpr::create(
748 HandleMismatchOrPartialSym, OutContext)));
749 MCSymbol *ReturnSym = OutContext.createTempSymbol();
750 OutStreamer->emitLabel(ReturnSym);
751 EmitToStreamer(MCInstBuilder(AArch64::RET).addReg(AArch64::LR));
752 OutStreamer->emitLabel(HandleMismatchOrPartialSym);
753
754 if (HasMatchAllTag) {
755 EmitToStreamer(MCInstBuilder(AArch64::UBFMXri)
756 .addReg(AArch64::X17)
757 .addReg(Reg)
758 .addImm(56)
759 .addImm(63));
760 EmitToStreamer(MCInstBuilder(AArch64::SUBSXri)
761 .addReg(AArch64::XZR)
762 .addReg(AArch64::X17)
763 .addImm(MatchAllTag)
764 .addImm(0));
765 EmitToStreamer(
766 MCInstBuilder(AArch64::Bcc)
767 .addImm(AArch64CC::EQ)
768 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
769 }
770
771 if (IsShort) {
772 EmitToStreamer(MCInstBuilder(AArch64::SUBSWri)
773 .addReg(AArch64::WZR)
774 .addReg(AArch64::W16)
775 .addImm(15)
776 .addImm(0));
777 MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
778 EmitToStreamer(
779 MCInstBuilder(AArch64::Bcc)
780 .addImm(AArch64CC::HI)
781 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));
782
783 EmitToStreamer(MCInstBuilder(AArch64::ANDXri)
784 .addReg(AArch64::X17)
785 .addReg(Reg)
786 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
787 if (Size != 1)
788 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
789 .addReg(AArch64::X17)
790 .addReg(AArch64::X17)
791 .addImm(Size - 1)
792 .addImm(0));
793 EmitToStreamer(MCInstBuilder(AArch64::SUBSWrs)
794 .addReg(AArch64::WZR)
795 .addReg(AArch64::W16)
796 .addReg(AArch64::W17)
797 .addImm(0));
798 EmitToStreamer(
799 MCInstBuilder(AArch64::Bcc)
800 .addImm(AArch64CC::LS)
801 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));
802
803 EmitToStreamer(MCInstBuilder(AArch64::ORRXri)
804 .addReg(AArch64::X16)
805 .addReg(Reg)
806 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
807 EmitToStreamer(MCInstBuilder(AArch64::LDRBBui)
808 .addReg(AArch64::W16)
809 .addReg(AArch64::X16)
810 .addImm(0));
811 EmitToStreamer(
812 MCInstBuilder(AArch64::SUBSXrs)
813 .addReg(AArch64::XZR)
814 .addReg(AArch64::X16)
815 .addReg(Reg)
816 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)));
817 EmitToStreamer(
818 MCInstBuilder(AArch64::Bcc)
819 .addImm(AArch64CC::EQ)
820 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
821
822 OutStreamer->emitLabel(HandleMismatchSym);
823 }
824
825 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
826 .addReg(AArch64::SP)
827 .addReg(AArch64::X0)
828 .addReg(AArch64::X1)
829 .addReg(AArch64::SP)
830 .addImm(-32));
831 EmitToStreamer(MCInstBuilder(AArch64::STPXi)
832 .addReg(AArch64::FP)
833 .addReg(AArch64::LR)
834 .addReg(AArch64::SP)
835 .addImm(29));
836
837 if (Reg != AArch64::X0)
838 emitMovXReg(AArch64::X0, Reg);
839 emitMOVZ(AArch64::X1, AccessInfo & HWASanAccessInfo::RuntimeMask, 0);
840
841 if (CompileKernel) {
842 // The Linux kernel's dynamic loader doesn't support GOT relative
843 // relocations, but it doesn't support late binding either, so just call
844 // the function directly.
845 EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef));
846 } else {
847 // Intentionally load the GOT entry and branch to it, rather than possibly
848 // late binding the function, which may clobber the registers before we
849 // have a chance to save them.
850 EmitToStreamer(
851 MCInstBuilder(AArch64::ADRP)
852 .addReg(AArch64::X16)
853 .addExpr(AArch64MCExpr::create(
854 HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_PAGE,
855 OutContext)));
856 EmitToStreamer(
857 MCInstBuilder(AArch64::LDRXui)
858 .addReg(AArch64::X16)
859 .addReg(AArch64::X16)
860 .addExpr(AArch64MCExpr::create(
861 HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_LO12,
862 OutContext)));
863 EmitToStreamer(MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
864 }
865 }
866 this->STI = nullptr;
867}
868
869static void emitAuthenticatedPointer(MCStreamer &OutStreamer,
870 MCSymbol *StubLabel,
871 const MCExpr *StubAuthPtrRef) {
872 // sym$auth_ptr$key$disc:
873 OutStreamer.emitLabel(StubLabel);
874 OutStreamer.emitValue(StubAuthPtrRef, /*size=*/8);
875}
876
877void AArch64AsmPrinter::emitEndOfAsmFile(Module &M) {
878 emitHwasanMemaccessSymbols(M);
879
880 const Triple &TT = TM.getTargetTriple();
881 if (TT.isOSBinFormatMachO()) {
882 // Output authenticated pointers as indirect symbols, if we have any.
883 MachineModuleInfoMachO &MMIMacho =
884 MMI->getObjFileInfo<MachineModuleInfoMachO>();
885
886 auto Stubs = MMIMacho.getAuthGVStubList();
887
888 if (!Stubs.empty()) {
889 // Switch to the "__auth_ptr" section.
890 OutStreamer->switchSection(
891 OutContext.getMachOSection("__DATA", "__auth_ptr", MachO::S_REGULAR,
892 SectionKind::getMetadata()));
893 emitAlignment(Align(8));
894
895 for (const auto &Stub : Stubs)
896 emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
897
898 OutStreamer->addBlankLine();
899 }
900
901 // Funny Darwin hack: This flag tells the linker that no global symbols
902 // contain code that falls through to other global symbols (e.g. the obvious
903 // implementation of multiple entry points). If this doesn't occur, the
904 // linker can safely perform dead code stripping. Since LLVM never
905 // generates code that does this, it is always safe to set.
906 OutStreamer->emitAssemblerFlag(MCAF_SubsectionsViaSymbols);
907 }
908
909 if (TT.isOSBinFormatELF()) {
910 // Output authenticated pointers as indirect symbols, if we have any.
911 MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
912
913 auto Stubs = MMIELF.getAuthGVStubList();
914
915 if (!Stubs.empty()) {
916 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
917 OutStreamer->switchSection(TLOF.getDataSection());
918 emitAlignment(Align(8));
919
920 for (const auto &Stub : Stubs)
921 emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
922
923 OutStreamer->addBlankLine();
924 }
925
926 // With signed ELF GOT enabled, the linker looks at the symbol type to
927 // choose between keys IA (for STT_FUNC) and DA (for other types). Symbols
928 // for functions not defined in the module have STT_NOTYPE type by default.
929 // This makes the linker emit a signing schema with the DA key (instead of IA)
930 // for the corresponding R_AARCH64_AUTH_GLOB_DAT dynamic reloc. To avoid that, force
931 // all function symbols used in the module to have STT_FUNC type. See
932 // https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#default-signing-schema
933 const auto *PtrAuthELFGOTFlag = mdconst::extract_or_null<ConstantInt>(
934 M.getModuleFlag("ptrauth-elf-got"));
935 if (PtrAuthELFGOTFlag && PtrAuthELFGOTFlag->getZExtValue() == 1)
936 for (const GlobalValue &GV : M.global_values())
937 if (!GV.use_empty() && isa<Function>(GV) &&
938 !GV.getName().starts_with("llvm."))
939 OutStreamer->emitSymbolAttribute(getSymbol(&GV),
940 MCSA_ELF_TypeFunction);
941 }
942
943 // Emit stack and fault map information.
944 FM.serializeToFaultMapSection();
945
946 // If import call optimization is enabled, emit the appropriate section.
947 // We do this whether or not we recorded any import calls.
948 if (EnableImportCallOptimization && TT.isOSBinFormatCOFF()) {
949 OutStreamer->switchSection(getObjFileLowering().getImportCallSection());
950
951 // Section always starts with some magic.
952 constexpr char ImpCallMagic[12] = "Imp_Call_V1";
953 OutStreamer->emitBytes(StringRef{ImpCallMagic, sizeof(ImpCallMagic)});
954
955 // Layout of this section is:
956 // Per section that contains calls to imported functions:
957 // uint32_t SectionSize: Size in bytes for information in this section.
958 // uint32_t Section Number
959 // Per call to imported function in section:
960 // uint32_t Kind: the kind of imported function.
961 // uint32_t BranchOffset: the offset of the branch instruction in its
962 // parent section.
963 // uint32_t TargetSymbolId: the symbol id of the called function.
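 // Worked example (hypothetical counts): a section with two calls to imported
 // functions is recorded as SectionSize = 4 * (2 + 3 * 2) = 32 bytes.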
964 for (auto &[Section, CallsToImportedFuncs] :
965 SectionToImportedFunctionCalls) {
966 unsigned SectionSize =
967 sizeof(uint32_t) * (2 + 3 * CallsToImportedFuncs.size());
968 OutStreamer->emitInt32(SectionSize);
969 OutStreamer->emitCOFFSecNumber(Section->getBeginSymbol());
970 for (auto &[CallsiteSymbol, CalledSymbol] : CallsToImportedFuncs) {
971 // Kind is always IMAGE_REL_ARM64_DYNAMIC_IMPORT_CALL (0x13).
972 OutStreamer->emitInt32(0x13);
973 OutStreamer->emitCOFFSecOffset(CallsiteSymbol);
974 OutStreamer->emitCOFFSymbolIndex(CalledSymbol);
975 }
976 }
977 }
978}
979
980void AArch64AsmPrinter::emitLOHs() {
981 SmallVector<MCSymbol *, 3> MCArgs;
982
983 for (const auto &D : AArch64FI->getLOHContainer()) {
984 for (const MachineInstr *MI : D.getArgs()) {
985 MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
986 assert(LabelIt != LOHInstToLabel.end() &&
987 "Label hasn't been inserted for LOH related instruction");
988 MCArgs.push_back(LabelIt->second);
989 }
990 OutStreamer->emitLOHDirective(D.getKind(), MCArgs);
991 MCArgs.clear();
992 }
993}
994
995void AArch64AsmPrinter::emitFunctionBodyEnd() {
996 if (!AArch64FI->getLOHRelated().empty())
997 emitLOHs();
998}
999
1000/// GetCPISymbol - Return the symbol for the specified constant pool entry.
1001MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
1002 // Darwin uses a linker-private symbol name for constant-pools (to
1003 // avoid addends on the relocation?), ELF has no such concept and
1004 // uses a normal private symbol.
1005 if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
1006 return OutContext.getOrCreateSymbol(
1007 Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
1008 Twine(getFunctionNumber()) + "_" + Twine(CPID));
1009
1010 return AsmPrinter::GetCPISymbol(CPID);
1011}
1012
1013void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
1014 raw_ostream &O) {
1015 const MachineOperand &MO = MI->getOperand(OpNum);
1016 switch (MO.getType()) {
1017 default:
1018 llvm_unreachable("<unknown operand type>");
1019 case MachineOperand::MO_Register: {
1020 Register Reg = MO.getReg();
1021 assert(Reg.isPhysical());
1022 assert(!MO.getSubReg() && "Subregs should be eliminated!");
1023 O << AArch64InstPrinter::getRegisterName(Reg);
1024 break;
1025 }
1026 case MachineOperand::MO_Immediate: {
1027 O << MO.getImm();
1028 break;
1029 }
1030 case MachineOperand::MO_GlobalAddress: {
1031 PrintSymbolOperand(MO, O);
1032 break;
1033 }
1034 case MachineOperand::MO_BlockAddress: {
1035 MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
1036 Sym->print(O, MAI);
1037 break;
1038 }
1039 }
1040}
1041
1042bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
1043 raw_ostream &O) {
1044 Register Reg = MO.getReg();
1045 switch (Mode) {
1046 default:
1047 return true; // Unknown mode.
1048 case 'w':
1049 Reg = getWRegFromXReg(Reg);
1050 break;
1051 case 'x':
1052 Reg = getXRegFromWReg(Reg);
1053 break;
1054 case 't':
1055 Reg = getXRegFromXRegTuple(Reg);
1056 break;
1057 }
1058
1059 O << AArch64InstPrinter::getRegisterName(Reg);
1060 return false;
1061}
1062
1063// Prints the register in MO using class RC using the offset in the
1064// new register class. This should not be used for cross class
1065// printing.
1066bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
1067 const TargetRegisterClass *RC,
1068 unsigned AltName, raw_ostream &O) {
1069 assert(MO.isReg() && "Should only get here with a register!");
1070 const TargetRegisterInfo *RI = STI->getRegisterInfo();
1071 Register Reg = MO.getReg();
1072 unsigned RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
1073 if (!RI->regsOverlap(RegToPrint, Reg))
1074 return true;
1075 O << AArch64InstPrinter::getRegisterName(RegToPrint, AltName);
1076 return false;
1077}
1078
1079bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
1080 const char *ExtraCode, raw_ostream &O) {
1081 const MachineOperand &MO = MI->getOperand(OpNum);
1082
1083 // First try the generic code, which knows about modifiers like 'c' and 'n'.
1084 if (!AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O))
1085 return false;
1086
1087 // Does this asm operand have a single letter operand modifier?
1088 if (ExtraCode && ExtraCode[0]) {
1089 if (ExtraCode[1] != 0)
1090 return true; // Unknown modifier.
1091
1092 switch (ExtraCode[0]) {
1093 default:
1094 return true; // Unknown modifier.
1095 case 'w': // Print W register
1096 case 'x': // Print X register
1097 if (MO.isReg())
1098 return printAsmMRegister(MO, ExtraCode[0], O);
1099 if (MO.isImm() && MO.getImm() == 0) {
1100 unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
1101 O << AArch64InstPrinter::getRegisterName(Reg);
1102 return false;
1103 }
1104 printOperand(MI, OpNum, O);
1105 return false;
1106 case 'b': // Print B register.
1107 case 'h': // Print H register.
1108 case 's': // Print S register.
1109 case 'd': // Print D register.
1110 case 'q': // Print Q register.
1111 case 'z': // Print Z register.
1112 if (MO.isReg()) {
1113 const TargetRegisterClass *RC;
1114 switch (ExtraCode[0]) {
1115 case 'b':
1116 RC = &AArch64::FPR8RegClass;
1117 break;
1118 case 'h':
1119 RC = &AArch64::FPR16RegClass;
1120 break;
1121 case 's':
1122 RC = &AArch64::FPR32RegClass;
1123 break;
1124 case 'd':
1125 RC = &AArch64::FPR64RegClass;
1126 break;
1127 case 'q':
1128 RC = &AArch64::FPR128RegClass;
1129 break;
1130 case 'z':
1131 RC = &AArch64::ZPRRegClass;
1132 break;
1133 default:
1134 return true;
1135 }
1136 return printAsmRegInClass(MO, RC, AArch64::NoRegAltName, O);
1137 }
1138 printOperand(MI, OpNum, O);
1139 return false;
1140 }
1141 }
1142
1143 // According to ARM, we should emit x and v registers unless we have a
1144 // modifier.
1145 if (MO.isReg()) {
1146 Register Reg = MO.getReg();
1147
1148 // If this is a w or x register, print an x register.
1149 if (AArch64::GPR32allRegClass.contains(Reg) ||
1150 AArch64::GPR64allRegClass.contains(Reg))
1151 return printAsmMRegister(MO, 'x', O);
1152
1153 // If this is an x register tuple, print an x register.
1154 if (AArch64::GPR64x8ClassRegClass.contains(Reg))
1155 return printAsmMRegister(MO, 't', O);
1156
1157 unsigned AltName = AArch64::NoRegAltName;
1158 const TargetRegisterClass *RegClass;
1159 if (AArch64::ZPRRegClass.contains(Reg)) {
1160 RegClass = &AArch64::ZPRRegClass;
1161 } else if (AArch64::PPRRegClass.contains(Reg)) {
1162 RegClass = &AArch64::PPRRegClass;
1163 } else if (AArch64::PNRRegClass.contains(Reg)) {
1164 RegClass = &AArch64::PNRRegClass;
1165 } else {
1166 RegClass = &AArch64::FPR128RegClass;
1167 AltName = AArch64::vreg;
1168 }
1169
1170 // If this is a b, h, s, d, or q register, print it as a v register.
1171 return printAsmRegInClass(MO, RegClass, AltName, O);
1172 }
1173
1174 printOperand(MI, OpNum, O);
1175 return false;
1176}
1177
1178bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
1179 unsigned OpNum,
1180 const char *ExtraCode,
1181 raw_ostream &O) {
1182 if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
1183 return true; // Unknown modifier.
1184
1185 const MachineOperand &MO = MI->getOperand(OpNum);
1186 assert(MO.isReg() && "unexpected inline asm memory operand");
1187 O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
1188 return false;
1189}
1190
1191void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
1192 raw_ostream &OS) {
1193 unsigned NOps = MI->getNumOperands();
1194 assert(NOps == 4);
1195 OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
1196 // cast away const; DIetc do not take const operands for some reason.
1197 OS << MI->getDebugVariable()->getName();
1198 OS << " <- ";
1199 // Frame address. Currently handles register +- offset only.
1200 assert(MI->isIndirectDebugValue());
1201 OS << '[';
1202 for (unsigned I = 0, E = std::distance(MI->debug_operands().begin(),
1203 MI->debug_operands().end());
1204 I < E; ++I) {
1205 if (I != 0)
1206 OS << ", ";
1207 printOperand(MI, I, OS);
1208 }
1209 OS << ']';
1210 OS << "+";
1211 printOperand(MI, NOps - 2, OS);
1212}
1213
1214void AArch64AsmPrinter::emitJumpTableInfo() {
1215 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
1216 if (!MJTI) return;
1217
1218 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
1219 if (JT.empty()) return;
1220
1221 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
1222 MCSection *ReadOnlySec = TLOF.getSectionForJumpTable(MF->getFunction(), TM);
1223 OutStreamer->switchSection(ReadOnlySec);
1224
1225 auto AFI = MF->getInfo<AArch64FunctionInfo>();
1226 for (unsigned JTI = 0, e = JT.size(); JTI != e; ++JTI) {
1227 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
1228
1229 // If this jump table was deleted, ignore it.
1230 if (JTBBs.empty()) continue;
1231
1232 unsigned Size = AFI->getJumpTableEntrySize(JTI);
1233 emitAlignment(Align(Size));
1234 OutStreamer->emitLabel(GetJTISymbol(JTI));
1235
1236 const MCSymbol *BaseSym = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1237 const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);
1238
1239 for (auto *JTBB : JTBBs) {
1240 const MCExpr *Value =
1241 MCSymbolRefExpr::create(JTBB->getSymbol(), OutContext);
1242
1243 // Each entry is:
1244 // .byte/.hword (LBB - Lbase)>>2
1245 // or plain:
1246 // .word LBB - Lbase
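 // Example (hypothetical layout): with 1-byte entries, a block starting
 // 24 bytes past Lbase is emitted as (24 >> 2) = 6.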
1247 Value = MCBinaryExpr::createSub(Value, Base, OutContext);
1248 if (Size != 4)
1249 Value = MCBinaryExpr::createLShr(
1250 Value, MCConstantExpr::create(2, OutContext), OutContext);
1251
1252 OutStreamer->emitValue(Value, Size);
1253 }
1254 }
1255}
1256
1257std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
1258 codeview::JumpTableEntrySize>
1259AArch64AsmPrinter::getCodeViewJumpTableInfo(int JTI,
1260 const MachineInstr *BranchInstr,
1261 const MCSymbol *BranchLabel) const {
1262 const auto AFI = MF->getInfo<AArch64FunctionInfo>();
1263 const auto Base = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1264 codeview::JumpTableEntrySize EntrySize;
1265 switch (AFI->getJumpTableEntrySize(JTI)) {
1266 case 1:
1267 EntrySize = codeview::JumpTableEntrySize::UInt8ShiftLeft;
1268 break;
1269 case 2:
1270 EntrySize = codeview::JumpTableEntrySize::UInt16ShiftLeft;
1271 break;
1272 case 4:
1273 EntrySize = codeview::JumpTableEntrySize::Int32;
1274 break;
1275 default:
1276 llvm_unreachable("Unexpected jump table entry size");
1277 }
1278 return std::make_tuple(Base, 0, BranchLabel, EntrySize);
1279}
1280
1281void AArch64AsmPrinter::emitFunctionEntryLabel() {
1282 if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
1283 MF->getFunction().getCallingConv() ==
1284 CallingConv::AArch64_SVE_VectorCall ||
1285 MF->getInfo<AArch64FunctionInfo>()->isSVECC()) {
1286 auto *TS =
1287 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
1288 TS->emitDirectiveVariantPCS(CurrentFnSym);
1289 }
1290
1291 AsmPrinter::emitFunctionEntryLabel();
1292
1293 if (TM.getTargetTriple().isWindowsArm64EC() &&
1294 !MF->getFunction().hasLocalLinkage()) {
1295 // For ARM64EC targets, a function definition's name is mangled differently
1296 // from the normal symbol, emit required aliases here.
1297 auto emitFunctionAlias = [&](MCSymbol *Src, MCSymbol *Dst) {
1298 OutStreamer->emitSymbolAttribute(Src, MCSA_WeakAntiDep);
1299 OutStreamer->emitAssignment(
1300 Src, MCSymbolRefExpr::create(Dst, MCSymbolRefExpr::VK_WEAKREF,
1301 MMI->getContext()));
1302 };
1303
1304 auto getSymbolFromMetadata = [&](StringRef Name) {
1305 MCSymbol *Sym = nullptr;
1306 if (MDNode *Node = MF->getFunction().getMetadata(Name)) {
1307 StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
1308 Sym = MMI->getContext().getOrCreateSymbol(NameStr);
1309 }
1310 return Sym;
1311 };
1312
1313 if (MCSymbol *UnmangledSym =
1314 getSymbolFromMetadata("arm64ec_unmangled_name")) {
1315 MCSymbol *ECMangledSym = getSymbolFromMetadata("arm64ec_ecmangled_name");
1316
1317 if (ECMangledSym) {
1318 // An external function, emit the alias from the unmangled symbol to
1319 // mangled symbol name and the alias from the mangled symbol to guest
1320 // exit thunk.
1321 emitFunctionAlias(UnmangledSym, ECMangledSym);
1322 emitFunctionAlias(ECMangledSym, CurrentFnSym);
1323 } else {
1324 // A function implementation, emit the alias from the unmangled symbol
1325 // to mangled symbol name.
1326 emitFunctionAlias(UnmangledSym, CurrentFnSym);
1327 }
1328 }
1329 }
1330}
1331
1332void AArch64AsmPrinter::emitXXStructor(const DataLayout &DL,
1333 const Constant *CV) {
1334 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(CV))
1335 if (CPA->hasAddressDiscriminator() &&
1336 !CPA->hasSpecialAddressDiscriminator(
1339 "unexpected address discrimination value for ctors/dtors entry, only "
1340 "'ptr inttoptr (i64 1 to ptr)' is allowed");
1341 // If we have signed pointers in xxstructors list, they'll be lowered to @AUTH
1342 // MCExpr's via AArch64AsmPrinter::lowerConstantPtrAuth. It does not look at
1343 // actual address discrimination value and only checks
1344 // hasAddressDiscriminator(), so it's OK to leave special address
1345 // discrimination value here.
1346 AsmPrinter::emitXXStructor(DL, CV);
1347}
1348
1349void AArch64AsmPrinter::emitGlobalAlias(const Module &M,
1350 const GlobalAlias &GA) {
1351 if (auto F = dyn_cast_or_null<Function>(GA.getAliasee())) {
1352 // Global aliases must point to a definition, but unmangled patchable
1353 // symbols are special and need to point to an undefined symbol with "EXP+"
1354 // prefix. Such an undefined symbol is resolved by the linker by creating
1355 // an x86 thunk that jumps back to the actual EC target.
1356 if (MDNode *Node = F->getMetadata("arm64ec_exp_name")) {
1357 StringRef ExpStr = cast<MDString>(Node->getOperand(0))->getString();
1358 MCSymbol *ExpSym = MMI->getContext().getOrCreateSymbol(ExpStr);
1359 MCSymbol *Sym = MMI->getContext().getOrCreateSymbol(GA.getName());
1360
1361 OutStreamer->beginCOFFSymbolDef(ExpSym);
1362 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1363 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1364 << COFF::SCT_COMPLEX_TYPE_SHIFT);
1365 OutStreamer->endCOFFSymbolDef();
1366
1367 OutStreamer->beginCOFFSymbolDef(Sym);
1368 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1369 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1370 << COFF::SCT_COMPLEX_TYPE_SHIFT);
1371 OutStreamer->endCOFFSymbolDef();
1372 OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
1373 OutStreamer->emitAssignment(
1374 Sym, MCSymbolRefExpr::create(ExpSym, MCSymbolRefExpr::VK_WEAKREF,
1375 MMI->getContext()));
1376 return;
1377 }
1378 }
1379 AsmPrinter::emitGlobalAlias(M, GA);
1380}
1381
1382/// Small jump tables contain an unsigned byte or half, representing the offset
1383/// from the lowest-addressed possible destination to the desired basic
1384/// block. Since all instructions are 4-byte aligned, this is further compressed
1385/// by counting in instructions rather than bytes (i.e. divided by 4). So, to
1386/// materialize the correct destination we need:
1387///
1388/// adr xDest, .LBB0_0
1389/// ldrb wScratch, [xTable, xEntry] (with "lsl #1" for ldrh).
1390/// add xDest, xDest, xScratch (with "lsl #2" for smaller entries)
1391void AArch64AsmPrinter::LowerJumpTableDest(llvm::MCStreamer &OutStreamer,
1392 const llvm::MachineInstr &MI) {
1393 Register DestReg = MI.getOperand(0).getReg();
1394 Register ScratchReg = MI.getOperand(1).getReg();
1395 Register ScratchRegW =
1396 STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
1397 Register TableReg = MI.getOperand(2).getReg();
1398 Register EntryReg = MI.getOperand(3).getReg();
1399 int JTIdx = MI.getOperand(4).getIndex();
1400 int Size = AArch64FI->getJumpTableEntrySize(JTIdx);
1401
1402 // This has to be first because the compression pass based its reachability
1403 // calculations on the start of the JumpTableDest instruction.
1404 auto Label =
1405 MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);
1406
1407 // If we don't already have a symbol to use as the base, use the ADR
1408 // instruction itself.
1409 if (!Label) {
1410 Label = MF->getContext().createTempSymbol();
1411 AArch64FI->setJumpTableEntryInfo(JTIdx, Size, Label);
1412 OutStreamer.emitLabel(Label);
1413 }
1414
1415 auto LabelExpr = MCSymbolRefExpr::create(Label, MF->getContext());
1416 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
1417 .addReg(DestReg)
1418 .addExpr(LabelExpr));
1419
1420 // Load the number of instruction-steps to offset from the label.
1421 unsigned LdrOpcode;
1422 switch (Size) {
1423 case 1: LdrOpcode = AArch64::LDRBBroX; break;
1424 case 2: LdrOpcode = AArch64::LDRHHroX; break;
1425 case 4: LdrOpcode = AArch64::LDRSWroX; break;
1426 default:
1427 llvm_unreachable("Unknown jump table size");
1428 }
1429
1430 EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
1431 .addReg(Size == 4 ? ScratchReg : ScratchRegW)
1432 .addReg(TableReg)
1433 .addReg(EntryReg)
1434 .addImm(0)
1435 .addImm(Size == 1 ? 0 : 1));
1436
1437 // Add to the already materialized base label address, multiplying by 4 if
1438 // compressed.
1439 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1440 .addReg(DestReg)
1441 .addReg(DestReg)
1442 .addReg(ScratchReg)
1443 .addImm(Size == 4 ? 0 : 2));
1444}
1445
1446void AArch64AsmPrinter::LowerHardenedBRJumpTable(const MachineInstr &MI) {
1447 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
1448 assert(MJTI && "Can't lower jump-table dispatch without JTI");
1449
1450 const std::vector<MachineJumpTableEntry> &JTs = MJTI->getJumpTables();
1451 assert(!JTs.empty() && "Invalid JT index for jump-table dispatch");
1452
1453 // Emit:
1454 // mov x17, #<size of table> ; depending on table size, with MOVKs
1455 // cmp x16, x17 ; or #imm if table size fits in 12-bit
1456 // csel x16, x16, xzr, ls ; check for index overflow
1457 //
1458 // adrp x17, Ltable@PAGE ; materialize table address
1459 // add x17, Ltable@PAGEOFF
1460 // ldrsw x16, [x17, x16, lsl #2] ; load table entry
1461 //
1462 // Lanchor:
1463 // adr x17, Lanchor ; compute target address
1464 // add x16, x17, x16
1465 // br x16 ; branch to target
1466
1467 MachineOperand JTOp = MI.getOperand(0);
1468
1469 unsigned JTI = JTOp.getIndex();
1470 assert(!AArch64FI->getJumpTableEntryPCRelSymbol(JTI) &&
1471 "unsupported compressed jump table");
1472
1473 const uint64_t NumTableEntries = JTs[JTI].MBBs.size();
1474
1475 // cmp only supports a 12-bit immediate. If we need more, materialize the
1476 // immediate, using x17 as a scratch register.
1477 uint64_t MaxTableEntry = NumTableEntries - 1;
1478 if (isUInt<12>(MaxTableEntry)) {
1479 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXri)
1480 .addReg(AArch64::XZR)
1481 .addReg(AArch64::X16)
1482 .addImm(MaxTableEntry)
1483 .addImm(0));
1484 } else {
1485 emitMOVZ(AArch64::X17, static_cast<uint16_t>(MaxTableEntry), 0);
1486 // It's sad that we have to manually materialize instructions, but we can't
1487 // trivially reuse the main pseudo expansion logic.
1488 // A MOVK sequence is easy enough to generate and handles the general case.
1489 for (int Offset = 16; Offset < 64; Offset += 16) {
1490 if ((MaxTableEntry >> Offset) == 0)
1491 break;
1492 emitMOVK(AArch64::X17, static_cast<uint16_t>(MaxTableEntry >> Offset),
1493 Offset);
1494 }
1495 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXrs)
1496 .addReg(AArch64::XZR)
1497 .addReg(AArch64::X16)
1498 .addReg(AArch64::X17)
1499 .addImm(0));
1500 }
1501
1502 // This picks entry #0 on failure.
1503 // We might want to trap instead.
1504 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::CSELXr)
1505 .addReg(AArch64::X16)
1506 .addReg(AArch64::X16)
1507 .addReg(AArch64::XZR)
1508 .addImm(AArch64CC::LS));
1509
1510 // Prepare the @PAGE/@PAGEOFF low/high operands.
1511 MachineOperand JTMOHi(JTOp), JTMOLo(JTOp);
1512 MCOperand JTMCHi, JTMCLo;
1513
1514 JTMOHi.setTargetFlags(AArch64II::MO_PAGE);
1515 JTMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
1516
1517 MCInstLowering.lowerOperand(JTMOHi, JTMCHi);
1518 MCInstLowering.lowerOperand(JTMOLo, JTMCLo);
1519
1520 EmitToStreamer(
1521 *OutStreamer,
1522 MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(JTMCHi));
1523
1524 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXri)
1525 .addReg(AArch64::X17)
1526 .addReg(AArch64::X17)
1527 .addOperand(JTMCLo)
1528 .addImm(0));
1529
1530 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRSWroX)
1531 .addReg(AArch64::X16)
1532 .addReg(AArch64::X17)
1533 .addReg(AArch64::X16)
1534 .addImm(0)
1535 .addImm(1));
1536
1537 MCSymbol *AdrLabel = MF->getContext().createTempSymbol();
1538 const auto *AdrLabelE = MCSymbolRefExpr::create(AdrLabel, MF->getContext());
1539 AArch64FI->setJumpTableEntryInfo(JTI, 4, AdrLabel);
1540
1541 OutStreamer->emitLabel(AdrLabel);
1542 EmitToStreamer(
1543 *OutStreamer,
1544 MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addExpr(AdrLabelE));
1545
1546 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1547 .addReg(AArch64::X16)
1548 .addReg(AArch64::X17)
1549 .addReg(AArch64::X16)
1550 .addImm(0));
1551
1552 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
1553}
1554
1555void AArch64AsmPrinter::LowerMOPS(llvm::MCStreamer &OutStreamer,
1556 const llvm::MachineInstr &MI) {
1557 unsigned Opcode = MI.getOpcode();
1558 assert(STI->hasMOPS());
1559 assert(STI->hasMTE() || Opcode != AArch64::MOPSMemorySetTaggingPseudo);
1560
1561 const auto Ops = [Opcode]() -> std::array<unsigned, 3> {
1562 if (Opcode == AArch64::MOPSMemoryCopyPseudo)
1563 return {AArch64::CPYFP, AArch64::CPYFM, AArch64::CPYFE};
1564 if (Opcode == AArch64::MOPSMemoryMovePseudo)
1565 return {AArch64::CPYP, AArch64::CPYM, AArch64::CPYE};
1566 if (Opcode == AArch64::MOPSMemorySetPseudo)
1567 return {AArch64::SETP, AArch64::SETM, AArch64::SETE};
1568 if (Opcode == AArch64::MOPSMemorySetTaggingPseudo)
1569 return {AArch64::SETGP, AArch64::SETGM, AArch64::MOPSSETGE};
1570 llvm_unreachable("Unhandled memory operation pseudo");
1571 }();
1572 const bool IsSet = Opcode == AArch64::MOPSMemorySetPseudo ||
1573 Opcode == AArch64::MOPSMemorySetTaggingPseudo;
1574
1575 for (auto Op : Ops) {
1576 int i = 0;
1577 auto MCIB = MCInstBuilder(Op);
1578 // Destination registers
1579 MCIB.addReg(MI.getOperand(i++).getReg());
1580 MCIB.addReg(MI.getOperand(i++).getReg());
1581 if (!IsSet)
1582 MCIB.addReg(MI.getOperand(i++).getReg());
1583 // Input registers
1584 MCIB.addReg(MI.getOperand(i++).getReg());
1585 MCIB.addReg(MI.getOperand(i++).getReg());
1586 MCIB.addReg(MI.getOperand(i++).getReg());
1587
1588 EmitToStreamer(OutStreamer, MCIB);
1589 }
1590}
1591
1592void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
1593 const MachineInstr &MI) {
1594 unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();
1595
1596 auto &Ctx = OutStreamer.getContext();
1597 MCSymbol *MILabel = Ctx.createTempSymbol();
1598 OutStreamer.emitLabel(MILabel);
1599
1600 SM.recordStackMap(*MILabel, MI);
1601 assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1602
1603 // Scan ahead to trim the shadow.
1604 const MachineBasicBlock &MBB = *MI.getParent();
1605 MachineBasicBlock::const_iterator MII(MI);
1606 ++MII;
1607 while (NumNOPBytes > 0) {
1608 if (MII == MBB.end() || MII->isCall() ||
1609 MII->getOpcode() == AArch64::DBG_VALUE ||
1610 MII->getOpcode() == TargetOpcode::PATCHPOINT ||
1611 MII->getOpcode() == TargetOpcode::STACKMAP)
1612 break;
1613 ++MII;
1614 NumNOPBytes -= 4;
1615 }
1616
1617 // Emit nops.
1618 for (unsigned i = 0; i < NumNOPBytes; i += 4)
1619 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1620}
1621
1622// Lower a patchpoint of the form:
1623// [<def>], <id>, <numBytes>, <target>, <numArgs>
1624void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1625 const MachineInstr &MI) {
1626 auto &Ctx = OutStreamer.getContext();
1627 MCSymbol *MILabel = Ctx.createTempSymbol();
1628 OutStreamer.emitLabel(MILabel);
1629 SM.recordPatchPoint(*MILabel, MI);
1630
1631 PatchPointOpers Opers(&MI);
1632
1633 int64_t CallTarget = Opers.getCallTarget().getImm();
1634 unsigned EncodedBytes = 0;
1635 if (CallTarget) {
1636 assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
1637 "High 16 bits of call target should be zero.");
1638 Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
1639 EncodedBytes = 16;
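 // The fixed-size sequence below is MOVZ + MOVK + MOVK + BLR, i.e.
 // 4 instructions x 4 bytes = 16 bytes, matching EncodedBytes.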
1640 // Materialize the jump address:
1641 emitMOVZ(ScratchReg, (CallTarget >> 32) & 0xFFFF, 32);
1642 emitMOVK(ScratchReg, (CallTarget >> 16) & 0xFFFF, 16);
1643 emitMOVK(ScratchReg, CallTarget & 0xFFFF, 0);
1644 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
1645 }
1646 // Emit padding.
1647 unsigned NumBytes = Opers.getNumPatchBytes();
1648 assert(NumBytes >= EncodedBytes &&
1649 "Patchpoint can't request size less than the length of a call.");
1650 assert((NumBytes - EncodedBytes) % 4 == 0 &&
1651 "Invalid number of NOP bytes requested!");
1652 for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
1653 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1654}
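// EncodedBytes is 16 above because the materialized call is exactly four
// 4-byte instructions. For example (a sketch, with a hypothetical scratch
// register x8 and call target 0x0000123456789abc):
//   movz x8, #0x1234, lsl #32
//   movk x8, #0x5678, lsl #16
//   movk x8, #0x9abc
//   blr  x8
// The remaining patch bytes, if any, are then filled with nops.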
1655
1656void AArch64AsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1657 const MachineInstr &MI) {
1658 StatepointOpers SOpers(&MI);
1659 if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
1660 assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1661 for (unsigned i = 0; i < PatchBytes; i += 4)
1662 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1663 } else {
1664 // Lower call target and choose correct opcode
1665 const MachineOperand &CallTarget = SOpers.getCallTarget();
1666 MCOperand CallTargetMCOp;
1667 unsigned CallOpcode;
1668 switch (CallTarget.getType()) {
1669 case MachineOperand::MO_GlobalAddress:
1670 case MachineOperand::MO_ExternalSymbol:
1671 MCInstLowering.lowerOperand(CallTarget, CallTargetMCOp);
1672 CallOpcode = AArch64::BL;
1673 break;
1674 case MachineOperand::MO_Immediate:
1675 CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
1676 CallOpcode = AArch64::BL;
1677 break;
1678 case MachineOperand::MO_Register:
1679 CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
1680 CallOpcode = AArch64::BLR;
1681 break;
1682 default:
1683 llvm_unreachable("Unsupported operand type in statepoint call target");
1684 break;
1685 }
1686
1687 EmitToStreamer(OutStreamer,
1688 MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
1689 }
1690
1691 auto &Ctx = OutStreamer.getContext();
1692 MCSymbol *MILabel = Ctx.createTempSymbol();
1693 OutStreamer.emitLabel(MILabel);
1694 SM.recordStatepoint(*MILabel, MI);
1695}
1696
1697void AArch64AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI) {
1698 // FAULTING_LOAD_OP <def>, <faulting type>, <MBB handler>,
1699 // <opcode>, <operands>
1700
1701 Register DefRegister = FaultingMI.getOperand(0).getReg();
1702 FaultMaps::FaultKind FK =
1703 static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
1704 MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
1705 unsigned Opcode = FaultingMI.getOperand(3).getImm();
1706 unsigned OperandsBeginIdx = 4;
1707
1708 auto &Ctx = OutStreamer->getContext();
1709 MCSymbol *FaultingLabel = Ctx.createTempSymbol();
1710 OutStreamer->emitLabel(FaultingLabel);
1711
1712 assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
1713 FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);
1714
1715 MCInst MI;
1716 MI.setOpcode(Opcode);
1717
1718 if (DefRegister != (Register)0)
1719 MI.addOperand(MCOperand::createReg(DefRegister));
1720
1721 for (const MachineOperand &MO :
1722 llvm::drop_begin(FaultingMI.operands(), OperandsBeginIdx)) {
1723 MCOperand Dest;
1724 lowerOperand(MO, Dest);
1725 MI.addOperand(Dest);
1726 }
1727
1728 OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
1729 EmitToStreamer(MI);
1730}
1731
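/// Emit a plain 64-bit register-to-register move. On AArch64, "mov Xd, Xn" is
/// an alias of "orr Xd, xzr, Xn, lsl #0", which is the form built below.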
1732void AArch64AsmPrinter::emitMovXReg(Register Dest, Register Src) {
1733 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ORRXrs)
1734 .addReg(Dest)
1735 .addReg(AArch64::XZR)
1736 .addReg(Src)
1737 .addImm(0));
1738}
1739
1740void AArch64AsmPrinter::emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift) {
1741 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1742 EmitToStreamer(*OutStreamer,
1743 MCInstBuilder(Is64Bit ? AArch64::MOVZXi : AArch64::MOVZWi)
1744 .addReg(Dest)
1745 .addImm(Imm)
1746 .addImm(Shift));
1747}
1748
1749void AArch64AsmPrinter::emitMOVK(Register Dest, uint64_t Imm, unsigned Shift) {
1750 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1751 EmitToStreamer(*OutStreamer,
1752 MCInstBuilder(Is64Bit ? AArch64::MOVKXi : AArch64::MOVKWi)
1753 .addReg(Dest)
1754 .addReg(Dest)
1755 .addImm(Imm)
1756 .addImm(Shift));
1757}
1758
1759void AArch64AsmPrinter::emitFMov0(const MachineInstr &MI) {
1760 Register DestReg = MI.getOperand(0).getReg();
1761 if (STI->hasZeroCycleZeroingFP() && !STI->hasZeroCycleZeroingFPWorkaround() &&
1762 STI->isNeonAvailable()) {
1763 // Convert H/S register to corresponding D register
1764 if (AArch64::H0 <= DestReg && DestReg <= AArch64::H31)
1765 DestReg = AArch64::D0 + (DestReg - AArch64::H0);
1766 else if (AArch64::S0 <= DestReg && DestReg <= AArch64::S31)
1767 DestReg = AArch64::D0 + (DestReg - AArch64::S0);
1768 else
1769 assert(AArch64::D0 <= DestReg && DestReg <= AArch64::D31);
1770
1771 MCInst MOVI;
1772 MOVI.setOpcode(AArch64::MOVID);
1773 MOVI.addOperand(MCOperand::createReg(DestReg));
1774 MOVI.addOperand(MCOperand::createImm(0));
1775 EmitToStreamer(*OutStreamer, MOVI);
1776 } else {
1777 MCInst FMov;
1778 switch (MI.getOpcode()) {
1779 default: llvm_unreachable("Unexpected opcode");
1780 case AArch64::FMOVH0:
1781 FMov.setOpcode(STI->hasFullFP16() ? AArch64::FMOVWHr : AArch64::FMOVWSr);
1782 if (!STI->hasFullFP16())
1783 DestReg = (AArch64::S0 + (DestReg - AArch64::H0));
1784 FMov.addOperand(MCOperand::createReg(DestReg));
1785 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1786 break;
1787 case AArch64::FMOVS0:
1788 FMov.setOpcode(AArch64::FMOVWSr);
1789 FMov.addOperand(MCOperand::createReg(DestReg));
1790 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1791 break;
1792 case AArch64::FMOVD0:
1793 FMov.setOpcode(AArch64::FMOVXDr);
1794 FMov.addOperand(MCOperand::createReg(DestReg));
1795 FMov.addOperand(MCOperand::createReg(AArch64::XZR));
1796 break;
1797 }
1798 EmitToStreamer(*OutStreamer, FMov);
1799 }
1800}
1801
1802Register AArch64AsmPrinter::emitPtrauthDiscriminator(uint16_t Disc,
1803 Register AddrDisc,
1804 Register ScratchReg,
1805 bool MayUseAddrAsScratch) {
1806 assert(ScratchReg == AArch64::X16 || ScratchReg == AArch64::X17);
1807 // So far we've used NoRegister in pseudos. Now we need real encodings.
1808 if (AddrDisc == AArch64::NoRegister)
1809 AddrDisc = AArch64::XZR;
1810
1811 // If there is no constant discriminator, there's no blend involved:
1812 // just use the address discriminator register as-is (XZR or not).
1813 if (!Disc)
1814 return AddrDisc;
1815
1816 // If there's only a constant discriminator, MOV it into the scratch register.
1817 if (AddrDisc == AArch64::XZR) {
1818 emitMOVZ(ScratchReg, Disc, 0);
1819 return ScratchReg;
1820 }
1821
1822 // If there are both, emit a blend into the scratch register.
1823
1824 // Check if we can save one MOV instruction.
1825 assert(MayUseAddrAsScratch || ScratchReg != AddrDisc);
1826 bool AddrDiscIsSafe = AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17;
1827 if (MayUseAddrAsScratch && AddrDiscIsSafe)
1828 ScratchReg = AddrDisc;
1829 else
1830 emitMovXReg(ScratchReg, AddrDisc);
1831
1832 emitMOVK(ScratchReg, Disc, 48);
1833 return ScratchReg;
1834}
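// For example (a sketch; x8 stands for an arbitrary address discriminator):
// blending the constant discriminator 0x1234 with x8 yields
//   mov  x17, x8
//   movk x17, #0x1234, lsl #48
// i.e. the constant ends up in the top 16 bits of the blended value.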
1835
1836/// Emits a code sequence to check an authenticated pointer value.
1837///
1838/// If OnFailure argument is passed, jump there on check failure instead
1839/// of proceeding to the next instruction (only if ShouldTrap is false).
1840void AArch64AsmPrinter::emitPtrauthCheckAuthenticatedValue(
1841 Register TestedReg, Register ScratchReg, AArch64PACKey::ID Key,
1842 AArch64PAuth::AuthCheckMethod Method, bool ShouldTrap,
1843 const MCSymbol *OnFailure) {
1844 // Insert a sequence to check if authentication of TestedReg succeeded,
1845 // such as:
1846 //
1847 // - checked and clearing:
1848 // ; x16 is TestedReg, x17 is ScratchReg
1849 // mov x17, x16
1850 // xpaci x17
1851 // cmp x16, x17
1852 // b.eq Lsuccess
1853 // mov x16, x17
1854 // b Lend
1855 // Lsuccess:
1856 // ; skipped if authentication failed
1857 // Lend:
1858 // ...
1859 //
1860 // - checked and trapping:
1861 // mov x17, x16
1862 // xpaci x17
1863 // cmp x16, x17
1864 // b.eq Lsuccess
1865 // brk #<0xc470 + aut key>
1866 // Lsuccess:
1867 // ...
1868 //
1869 // See the documentation on AuthCheckMethod enumeration constants for
1870 // the specific code sequences that can be used to perform the check.
1871 using AArch64PAuth::AuthCheckMethod;
1872
1873 if (Method == AuthCheckMethod::None)
1874 return;
1875 if (Method == AuthCheckMethod::DummyLoad) {
1876 EmitToStreamer(MCInstBuilder(AArch64::LDRWui)
1877 .addReg(getWRegFromXReg(ScratchReg))
1878 .addReg(TestedReg)
1879 .addImm(0));
1880 assert(ShouldTrap && !OnFailure && "DummyLoad always traps on error");
1881 return;
1882 }
1883
1884 MCSymbol *SuccessSym = createTempSymbol("auth_success_");
1885 if (Method == AuthCheckMethod::XPAC || Method == AuthCheckMethod::XPACHint) {
1886 // mov Xscratch, Xtested
1887 emitMovXReg(ScratchReg, TestedReg);
1888
1889 if (Method == AuthCheckMethod::XPAC) {
1890 // xpac(i|d) Xscratch
1891 unsigned XPACOpc = getXPACOpcodeForKey(Key);
1892 EmitToStreamer(
1893 MCInstBuilder(XPACOpc).addReg(ScratchReg).addReg(ScratchReg));
1894 } else {
1895 // xpaclri
1896
1897 // Note that this method applies XPAC to TestedReg instead of ScratchReg.
1898 assert(TestedReg == AArch64::LR &&
1899 "XPACHint mode is only compatible with checking the LR register");
1900 assert((Key == AArch64PACKey::IA || Key == AArch64PACKey::IB) &&
1901 "XPACHint mode is only compatible with I-keys");
1902 EmitToStreamer(MCInstBuilder(AArch64::XPACLRI));
1903 }
1904
1905 // cmp Xtested, Xscratch
1906 EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
1907 .addReg(AArch64::XZR)
1908 .addReg(TestedReg)
1909 .addReg(ScratchReg)
1910 .addImm(0));
1911
1912 // b.eq Lsuccess
1913 EmitToStreamer(
1914 MCInstBuilder(AArch64::Bcc)
1915 .addImm(AArch64CC::EQ)
1916 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
1917 } else if (Method == AuthCheckMethod::HighBitsNoTBI) {
1918 // eor Xscratch, Xtested, Xtested, lsl #1
1919 EmitToStreamer(MCInstBuilder(AArch64::EORXrs)
1920 .addReg(ScratchReg)
1921 .addReg(TestedReg)
1922 .addReg(TestedReg)
1923 .addImm(1));
1924 // tbz Xscratch, #62, Lsuccess
1925 EmitToStreamer(
1926 MCInstBuilder(AArch64::TBZX)
1927 .addReg(ScratchReg)
1928 .addImm(62)
1929 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
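    // The arithmetic behind this check: bit 62 of the EOR result equals
    // bit 62 XOR bit 61 of the tested value. Without TBI, those two bits are
    // equal for a successfully authenticated pointer, so TBZ on bit 62 falls
    // through to Lsuccess; the idea is that a corrupted PAC field makes them
    // differ.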
1930 } else {
1931 llvm_unreachable("Unsupported check method");
1932 }
1933
1934 if (ShouldTrap) {
1935 assert(!OnFailure && "Cannot specify OnFailure with ShouldTrap");
1936 // Trapping sequences do a 'brk'.
1937 // brk #<0xc470 + aut key>
1938 EmitToStreamer(MCInstBuilder(AArch64::BRK).addImm(0xc470 | Key));
1939 } else {
1940 // Non-trapping checked sequences return the stripped result in TestedReg,
1941 // skipping over success-only code (such as re-signing the pointer) if
1942 // there is one.
1943 // Note that this can introduce an authentication oracle (such as based on
1944 // the high bits of the re-signed value).
1945
1946 // FIXME: The XPAC method can be optimized by applying XPAC to TestedReg
1947 // instead of ScratchReg, thus eliminating one `mov` instruction.
1948 // Both XPAC and XPACHint can be further optimized by not using a
1949 // conditional branch jumping over an unconditional one.
1950
1951 switch (Method) {
1952 case AuthCheckMethod::XPACHint:
1953 // LR is already XPAC-ed at this point.
1954 break;
1955 case AuthCheckMethod::XPAC:
1956 // mov Xtested, Xscratch
1957 emitMovXReg(TestedReg, ScratchReg);
1958 break;
1959 default:
1960 // If Xtested was not XPAC-ed so far, emit XPAC here.
1961 // xpac(i|d) Xtested
1962 unsigned XPACOpc = getXPACOpcodeForKey(Key);
1963 EmitToStreamer(
1964 MCInstBuilder(XPACOpc).addReg(TestedReg).addReg(TestedReg));
1965 }
1966
1967 if (OnFailure) {
1968 // b Lend
1969 EmitToStreamer(
1970 MCInstBuilder(AArch64::B)
1971 .addExpr(MCSymbolRefExpr::create(OnFailure, OutContext)));
1972 }
1973 }
1974
1975 // If the auth check succeeds, we can continue.
1976 // Lsuccess:
1977 OutStreamer->emitLabel(SuccessSym);
1978}
1979
1980// With Pointer Authentication, it may be necessary to explicitly check the
1981// authenticated value in LR before performing a tail call.
1982// Otherwise, the callee may re-sign the invalid return address,
1983// introducing a signing oracle.
1984void AArch64AsmPrinter::emitPtrauthTailCallHardening(const MachineInstr *TC) {
1985 if (!AArch64FI->shouldSignReturnAddress(*MF))
1986 return;
1987
1988 auto LRCheckMethod = STI->getAuthenticatedLRCheckMethod(*MF);
1989 if (LRCheckMethod == AArch64PAuth::AuthCheckMethod::None)
1990 return;
1991
1992 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
1993 Register ScratchReg =
1994 TC->readsRegister(AArch64::X16, TRI) ? AArch64::X17 : AArch64::X16;
1995 assert(!TC->readsRegister(ScratchReg, TRI) &&
1996 "Neither x16 nor x17 is available as a scratch register");
1997 AArch64PACKey::ID Key =
1998 AArch64FI->shouldSignWithBKey() ? AArch64PACKey::IB : AArch64PACKey::IA;
1999 emitPtrauthCheckAuthenticatedValue(
2000 AArch64::LR, ScratchReg, Key, LRCheckMethod,
2001 /*ShouldTrap=*/true, /*OnFailure=*/nullptr);
2002}
2003
2004void AArch64AsmPrinter::emitPtrauthAuthResign(const MachineInstr *MI) {
2005 const bool IsAUTPAC = MI->getOpcode() == AArch64::AUTPAC;
2006
2007 // We expand AUT/AUTPAC into a sequence of the form
2008 //
2009 // ; authenticate x16
2010 // ; check pointer in x16
2011 // Lsuccess:
2012 // ; sign x16 (if AUTPAC)
2013 // Lend: ; if not trapping on failure
2014 //
2015 // with the checking sequence chosen depending on whether/how we should check
2016 // the pointer and whether we should trap on failure.
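  // As a sketch, a checked and trapping AUTPAC (auth with IA, re-sign with IB,
  // discriminators already blended into x17) would emit roughly:
  //   autia x16, x17           ; authenticate
  //   mov   x17, x16           ; copy and strip for the check
  //   xpaci x17
  //   cmp   x16, x17
  //   b.eq  Lauth_success_0
  //   brk   #0xc470            ; 0xc470 | key on failure
  // Lauth_success_0:
  //   ...                      ; recompute the PAC discriminator into x17
  //   pacib x16, x17           ; re-sign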
2017
2018 // By default, auth/resign sequences check for auth failures.
2019 bool ShouldCheck = true;
2020 // In the checked sequence, we only trap if explicitly requested.
2021 bool ShouldTrap = MF->getFunction().hasFnAttribute("ptrauth-auth-traps");
2022
2023 // On an FPAC CPU, you get traps whether you want them or not: there's
2024 // no point in emitting checks or traps.
2025 if (STI->hasFPAC())
2026 ShouldCheck = ShouldTrap = false;
2027
2028 // However, command-line flags can override this, for experimentation.
2029 switch (PtrauthAuthChecks) {
2030 case PtrauthCheckMode::Default:
2031 break;
2032 case PtrauthCheckMode::Unchecked:
2033 ShouldCheck = ShouldTrap = false;
2034 break;
2035 case PtrauthCheckMode::Poison:
2036 ShouldCheck = true;
2037 ShouldTrap = false;
2038 break;
2039 case PtrauthCheckMode::Trap:
2040 ShouldCheck = ShouldTrap = true;
2041 break;
2042 }
2043
2044 auto AUTKey = (AArch64PACKey::ID)MI->getOperand(0).getImm();
2045 uint64_t AUTDisc = MI->getOperand(1).getImm();
2046 unsigned AUTAddrDisc = MI->getOperand(2).getReg();
2047
2048 // Compute aut discriminator into x17
2049 assert(isUInt<16>(AUTDisc));
2050 Register AUTDiscReg =
2051 emitPtrauthDiscriminator(AUTDisc, AUTAddrDisc, AArch64::X17);
2052 bool AUTZero = AUTDiscReg == AArch64::XZR;
2053 unsigned AUTOpc = getAUTOpcodeForKey(AUTKey, AUTZero);
2054
2055 // autiza x16 ; if AUTZero
2056 // autia x16, x17 ; if !AUTZero
2057 MCInst AUTInst;
2058 AUTInst.setOpcode(AUTOpc);
2059 AUTInst.addOperand(MCOperand::createReg(AArch64::X16));
2060 AUTInst.addOperand(MCOperand::createReg(AArch64::X16));
2061 if (!AUTZero)
2062 AUTInst.addOperand(MCOperand::createReg(AUTDiscReg));
2063 EmitToStreamer(*OutStreamer, AUTInst);
2064
2065 // Unchecked or checked-but-non-trapping AUT is just an "AUT": we're done.
2066 if (!IsAUTPAC && (!ShouldCheck || !ShouldTrap))
2067 return;
2068
2069 MCSymbol *EndSym = nullptr;
2070
2071 if (ShouldCheck) {
2072 if (IsAUTPAC && !ShouldTrap)
2073 EndSym = createTempSymbol("resign_end_");
2074
2075 emitPtrauthCheckAuthenticatedValue(AArch64::X16, AArch64::X17, AUTKey,
2076 AArch64PAuth::AuthCheckMethod::XPAC,
2077 ShouldTrap, EndSym);
2078 }
2079
2080 // We already emitted unchecked and checked-but-non-trapping AUTs.
2081 // That left us with trapping AUTs, and AUTPACs.
2082 // Trapping AUTs don't need PAC: we're done.
2083 if (!IsAUTPAC)
2084 return;
2085
2086 auto PACKey = (AArch64PACKey::ID)MI->getOperand(3).getImm();
2087 uint64_t PACDisc = MI->getOperand(4).getImm();
2088 unsigned PACAddrDisc = MI->getOperand(5).getReg();
2089
2090 // Compute pac discriminator into x17
2091 assert(isUInt<16>(PACDisc));
2092 Register PACDiscReg =
2093 emitPtrauthDiscriminator(PACDisc, PACAddrDisc, AArch64::X17);
2094 bool PACZero = PACDiscReg == AArch64::XZR;
2095 unsigned PACOpc = getPACOpcodeForKey(PACKey, PACZero);
2096
2097 // pacizb x16 ; if PACZero
2098 // pacib x16, x17 ; if !PACZero
2099 MCInst PACInst;
2100 PACInst.setOpcode(PACOpc);
2101 PACInst.addOperand(MCOperand::createReg(AArch64::X16));
2102 PACInst.addOperand(MCOperand::createReg(AArch64::X16));
2103 if (!PACZero)
2104 PACInst.addOperand(MCOperand::createReg(PACDiscReg));
2105 EmitToStreamer(*OutStreamer, PACInst);
2106
2107 // Lend:
2108 if (EndSym)
2109 OutStreamer->emitLabel(EndSym);
2110}
2111
2112void AArch64AsmPrinter::emitPtrauthBranch(const MachineInstr *MI) {
2113 bool IsCall = MI->getOpcode() == AArch64::BLRA;
2114 unsigned BrTarget = MI->getOperand(0).getReg();
2115
2116 auto Key = (AArch64PACKey::ID)MI->getOperand(1).getImm();
2117 assert((Key == AArch64PACKey::IA || Key == AArch64PACKey::IB) &&
2118 "Invalid auth call key");
2119
2120 uint64_t Disc = MI->getOperand(2).getImm();
2121 assert(isUInt<16>(Disc));
2122
2123 unsigned AddrDisc = MI->getOperand(3).getReg();
2124
2125 // Make sure AddrDisc is solely used to compute the discriminator.
2126 // While hardly meaningful, it is still possible to describe an authentication
2127 // of a pointer against its own value (instead of storage address) with
2128 // intrinsics, so use report_fatal_error instead of assert.
2129 if (BrTarget == AddrDisc)
2130 report_fatal_error("Branch target is signed with its own value");
2131
2132 // When printing the BLRA pseudo instruction, x16 and x17 are implicit-def'ed
2133 // by the MI and AddrDisc is not used as any other input, so try to save one
2134 // MOV by setting MayUseAddrAsScratch.
2135 // Unlike BLRA, the BRA pseudo is used to perform a computed goto and is thus
2136 // not declared as clobbering x16/x17.
2137 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17,
2138 /*MayUseAddrAsScratch=*/IsCall);
2139 bool IsZeroDisc = DiscReg == AArch64::XZR;
2140
2141 unsigned Opc;
2142 if (IsCall) {
2143 if (Key == AArch64PACKey::IA)
2144 Opc = IsZeroDisc ? AArch64::BLRAAZ : AArch64::BLRAA;
2145 else
2146 Opc = IsZeroDisc ? AArch64::BLRABZ : AArch64::BLRAB;
2147 } else {
2148 if (Key == AArch64PACKey::IA)
2149 Opc = IsZeroDisc ? AArch64::BRAAZ : AArch64::BRAA;
2150 else
2151 Opc = IsZeroDisc ? AArch64::BRABZ : AArch64::BRAB;
2152 }
2153
2154 MCInst BRInst;
2155 BRInst.setOpcode(Opc);
2156 BRInst.addOperand(MCOperand::createReg(BrTarget));
2157 if (!IsZeroDisc)
2158 BRInst.addOperand(MCOperand::createReg(DiscReg));
2159 EmitToStreamer(*OutStreamer, BRInst);
2160}
2161
2162const MCExpr *
2163AArch64AsmPrinter::lowerConstantPtrAuth(const ConstantPtrAuth &CPA) {
2164 MCContext &Ctx = OutContext;
2165
2166 // Figure out the base symbol and the addend, if any.
2167 APInt Offset(64, 0);
2168 const Value *BaseGV = CPA.getPointer()->stripAndAccumulateConstantOffsets(
2169 getDataLayout(), Offset, /*AllowNonInbounds=*/true);
2170
2171 auto *BaseGVB = dyn_cast<GlobalValue>(BaseGV);
2172
2173 // If we can't understand the referenced ConstantExpr, there's nothing
2174 // else we can do: emit an error.
2175 if (!BaseGVB) {
2176 BaseGV->getContext().emitError(
2177 "cannot resolve target base/addend of ptrauth constant");
2178 return nullptr;
2179 }
2180
2181 // If there is an addend, turn that into the appropriate MCExpr.
2182 const MCExpr *Sym = MCSymbolRefExpr::create(getSymbol(BaseGVB), Ctx);
2183 if (Offset.sgt(0))
2184 Sym = MCBinaryExpr::createAdd(
2185 Sym, MCConstantExpr::create(Offset.getSExtValue(), Ctx), Ctx);
2186 else if (Offset.slt(0))
2187 Sym = MCBinaryExpr::createSub(
2188 Sym, MCConstantExpr::create((-Offset).getSExtValue(), Ctx), Ctx);
2189
2190 uint64_t KeyID = CPA.getKey()->getZExtValue();
2191 // We later rely on valid KeyID value in AArch64PACKeyIDToString call from
2192 // AArch64AuthMCExpr::printImpl, so fail fast.
2193 if (KeyID > AArch64PACKey::LAST)
2194 report_fatal_error("AArch64 PAC Key ID '" + Twine(KeyID) +
2195 "' out of range [0, " +
2196 Twine((unsigned)AArch64PACKey::LAST) + "]");
2197
2198 uint64_t Disc = CPA.getDiscriminator()->getZExtValue();
2199 if (!isUInt<16>(Disc))
2200 report_fatal_error("AArch64 PAC Discriminator '" + Twine(Disc) +
2201 "' out of range [0, 0xFFFF]");
2202
2203 // Finally build the complete @AUTH expr.
2204 return AArch64AuthMCExpr::create(Sym, Disc, AArch64PACKey::ID(KeyID),
2205 CPA.hasAddressDiscriminator(), Ctx);
2206}
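// The expression built above is what ultimately appears in the emitted
// assembly; roughly (a sketch with arbitrary symbol and values):
//   .quad g@AUTH(ia,1234)       // no address diversity
//   .quad g@AUTH(da,42,addr)    // address-diversified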
2207
2208void AArch64AsmPrinter::LowerLOADauthptrstatic(const MachineInstr &MI) {
2209 unsigned DstReg = MI.getOperand(0).getReg();
2210 const MachineOperand &GAOp = MI.getOperand(1);
2211 const uint64_t KeyC = MI.getOperand(2).getImm();
2212 assert(KeyC <= AArch64PACKey::LAST &&
2213 "key is out of range [0, AArch64PACKey::LAST]");
2214 const auto Key = (AArch64PACKey::ID)KeyC;
2215 const uint64_t Disc = MI.getOperand(3).getImm();
2216 assert(isUInt<16>(Disc) &&
2217 "constant discriminator is out of range [0, 0xffff]");
2218
2219 // Emit instruction sequence like the following:
2220 // ADRP x16, symbol$auth_ptr$key$disc
2221 // LDR x16, [x16, :lo12:symbol$auth_ptr$key$disc]
2222 //
2223 // Where the $auth_ptr$ symbol is the stub slot containing the signed pointer
2224 // to symbol.
2225 MCSymbol *AuthPtrStubSym;
2226 if (TM.getTargetTriple().isOSBinFormatELF()) {
2227 const auto &TLOF =
2228 static_cast<const AArch64_ELFTargetObjectFile &>(getObjFileLowering());
2229
2230 assert(GAOp.getOffset() == 0 &&
2231 "non-zero offset for $auth_ptr$ stub slots is not supported");
2232 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2233 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2234 } else {
2235 assert(TM.getTargetTriple().isOSBinFormatMachO() &&
2236 "LOADauthptrstatic is implemented only for MachO/ELF");
2237
2238 const auto &TLOF = static_cast<const AArch64_MachoTargetObjectFile &>(
2239 getObjFileLowering());
2240
2241 assert(GAOp.getOffset() == 0 &&
2242 "non-zero offset for $auth_ptr$ stub slots is not supported");
2243 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2244 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2245 }
2246
2247 MachineOperand StubMOHi =
2248 MachineOperand::CreateMCSymbol(AuthPtrStubSym, AArch64II::MO_PAGE);
2249 MachineOperand StubMOLo = MachineOperand::CreateMCSymbol(
2250 AuthPtrStubSym, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2251 MCOperand StubMCHi, StubMCLo;
2252
2253 MCInstLowering.lowerOperand(StubMOHi, StubMCHi);
2254 MCInstLowering.lowerOperand(StubMOLo, StubMCLo);
2255
2256 EmitToStreamer(
2257 *OutStreamer,
2258 MCInstBuilder(AArch64::ADRP).addReg(DstReg).addOperand(StubMCHi));
2259
2260 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRXui)
2261 .addReg(DstReg)
2262 .addReg(DstReg)
2263 .addOperand(StubMCLo));
2264}
2265
2266void AArch64AsmPrinter::LowerMOVaddrPAC(const MachineInstr &MI) {
2267 const bool IsGOTLoad = MI.getOpcode() == AArch64::LOADgotPAC;
2268 const bool IsELFSignedGOT = MI.getParent()
2269 ->getParent()
2270 ->getInfo<AArch64FunctionInfo>()
2271 ->hasELFSignedGOT();
2272 MachineOperand GAOp = MI.getOperand(0);
2273 const uint64_t KeyC = MI.getOperand(1).getImm();
2274 assert(KeyC <= AArch64PACKey::LAST &&
2275 "key is out of range [0, AArch64PACKey::LAST]");
2276 const auto Key = (AArch64PACKey::ID)KeyC;
2277 const unsigned AddrDisc = MI.getOperand(2).getReg();
2278 const uint64_t Disc = MI.getOperand(3).getImm();
2279 assert(isUInt<16>(Disc) &&
2280 "constant discriminator is out of range [0, 0xffff]");
2281
2282 const int64_t Offset = GAOp.getOffset();
2283 GAOp.setOffset(0);
2284
2285 // Emit:
2286 // target materialization:
2287 // - via GOT:
2288 // - unsigned GOT:
2289 // adrp x16, :got:target
2290 // ldr x16, [x16, :got_lo12:target]
2291 // add offset to x16 if offset != 0
2292 // - ELF signed GOT:
2293 // adrp x17, :got:target
2294 // add x17, x17, :got_auth_lo12:target
2295 // ldr x16, [x17]
2296 // aut{i|d}a x16, x17
2297 // check+trap sequence (if no FPAC)
2298 // add offset to x16 if offset != 0
2299 //
2300 // - direct:
2301 // adrp x16, target
2302 // add x16, x16, :lo12:target
2303 // add offset to x16 if offset != 0
2304 //
2305 // add offset to x16:
2306 // - abs(offset) fits 24 bits:
2307 // add/sub x16, x16, #<offset>[, #lsl 12] (up to 2 instructions)
2308 // - abs(offset) does not fit 24 bits:
2309 // - offset < 0:
2310 // movn+movk sequence filling x17 register with the offset (up to 4
2311 // instructions)
2312 // add x16, x16, x17
2313 // - offset > 0:
2314 // movz+movk sequence filling x17 register with the offset (up to 4
2315 // instructions)
2316 // add x16, x16, x17
2317 //
2318 // signing:
2319 // - 0 discriminator:
2320 // paciza x16
2321 // - Non-0 discriminator, no address discriminator:
2322 // mov x17, #Disc
2323 // pacia x16, x17
2324 // - address discriminator (with potentially folded immediate discriminator):
2325 // pacia x16, xAddrDisc
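  // For example (a sketch): MOVaddrPAC of a global g with key IA, constant
  // discriminator 1234 and no address discriminator expands to
  //   adrp  x16, g
  //   add   x16, x16, :lo12:g
  //   mov   x17, #1234
  //   pacia x16, x17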
2326
2327 MachineOperand GAMOHi(GAOp), GAMOLo(GAOp);
2328 MCOperand GAMCHi, GAMCLo;
2329
2330 GAMOHi.setTargetFlags(AArch64II::MO_PAGE);
2331 GAMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2332 if (IsGOTLoad) {
2333 GAMOHi.addTargetFlag(AArch64II::MO_GOT);
2334 GAMOLo.addTargetFlag(AArch64II::MO_GOT);
2335 }
2336
2337 MCInstLowering.lowerOperand(GAMOHi, GAMCHi);
2338 MCInstLowering.lowerOperand(GAMOLo, GAMCLo);
2339
2340 EmitToStreamer(
2341 MCInstBuilder(AArch64::ADRP)
2342 .addReg(IsGOTLoad && IsELFSignedGOT ? AArch64::X17 : AArch64::X16)
2343 .addOperand(GAMCHi));
2344
2345 if (IsGOTLoad) {
2346 if (IsELFSignedGOT) {
2347 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2348 .addReg(AArch64::X17)
2349 .addReg(AArch64::X17)
2350 .addOperand(GAMCLo)
2351 .addImm(0));
2352
2353 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2354 .addReg(AArch64::X16)
2355 .addReg(AArch64::X17)
2356 .addImm(0));
2357
2358 assert(GAOp.isGlobal());
2359 assert(GAOp.getGlobal()->getValueType() != nullptr);
2360 unsigned AuthOpcode = GAOp.getGlobal()->getValueType()->isFunctionTy()
2361 ? AArch64::AUTIA
2362 : AArch64::AUTDA;
2363
2364 EmitToStreamer(MCInstBuilder(AuthOpcode)
2365 .addReg(AArch64::X16)
2366 .addReg(AArch64::X16)
2367 .addReg(AArch64::X17));
2368
2369 if (!STI->hasFPAC()) {
2370 auto AuthKey = (AuthOpcode == AArch64::AUTIA ? AArch64PACKey::IA
2371 : AArch64PACKey::DA);
2372
2373 emitPtrauthCheckAuthenticatedValue(AArch64::X16, AArch64::X17, AuthKey,
2374 AArch64PAuth::AuthCheckMethod::XPAC,
2375 /*ShouldTrap=*/true,
2376 /*OnFailure=*/nullptr);
2377 }
2378 } else {
2379 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2380 .addReg(AArch64::X16)
2381 .addReg(AArch64::X16)
2382 .addOperand(GAMCLo));
2383 }
2384 } else {
2385 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2386 .addReg(AArch64::X16)
2387 .addReg(AArch64::X16)
2388 .addOperand(GAMCLo)
2389 .addImm(0));
2390 }
2391
2392 if (Offset != 0) {
2393 const uint64_t AbsOffset = (Offset > 0 ? Offset : -((uint64_t)Offset));
2394 const bool IsNeg = Offset < 0;
2395 if (isUInt<24>(AbsOffset)) {
2396 for (int BitPos = 0; BitPos != 24 && (AbsOffset >> BitPos);
2397 BitPos += 12) {
2398 EmitToStreamer(
2399 MCInstBuilder(IsNeg ? AArch64::SUBXri : AArch64::ADDXri)
2400 .addReg(AArch64::X16)
2401 .addReg(AArch64::X16)
2402 .addImm((AbsOffset >> BitPos) & 0xfff)
2403 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, BitPos)));
2404 }
2405 } else {
2406 const uint64_t UOffset = Offset;
2407 EmitToStreamer(MCInstBuilder(IsNeg ? AArch64::MOVNXi : AArch64::MOVZXi)
2408 .addReg(AArch64::X17)
2409 .addImm((IsNeg ? ~UOffset : UOffset) & 0xffff)
2410 .addImm(/*shift=*/0));
2411 auto NeedMovk = [IsNeg, UOffset](int BitPos) -> bool {
2412 assert(BitPos == 16 || BitPos == 32 || BitPos == 48);
2413 uint64_t Shifted = UOffset >> BitPos;
2414 if (!IsNeg)
2415 return Shifted != 0;
2416 for (int I = 0; I != 64 - BitPos; I += 16)
2417 if (((Shifted >> I) & 0xffff) != 0xffff)
2418 return true;
2419 return false;
2420 };
2421 for (int BitPos = 16; BitPos != 64 && NeedMovk(BitPos); BitPos += 16)
2422 emitMOVK(AArch64::X17, (UOffset >> BitPos) & 0xffff, BitPos);
2423
2424 EmitToStreamer(MCInstBuilder(AArch64::ADDXrs)
2425 .addReg(AArch64::X16)
2426 .addReg(AArch64::X16)
2427 .addReg(AArch64::X17)
2428 .addImm(/*shift=*/0));
2429 }
2430 }
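  // Example of the offset paths above (a sketch): Offset = 0x12345 fits in
  // 24 bits and emits
  //   add x16, x16, #0x345
  //   add x16, x16, #0x12, lsl #12
  // while a wider offset is built in x17 with movz/movk and added with
  //   add x16, x16, x17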
2431
2432 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17);
2433
2434 auto MIB = MCInstBuilder(getPACOpcodeForKey(Key, DiscReg == AArch64::XZR))
2435 .addReg(AArch64::X16)
2436 .addReg(AArch64::X16);
2437 if (DiscReg != AArch64::XZR)
2438 MIB.addReg(DiscReg);
2439 EmitToStreamer(MIB);
2440}
2441
2442void AArch64AsmPrinter::LowerLOADgotAUTH(const MachineInstr &MI) {
2443 Register DstReg = MI.getOperand(0).getReg();
2444 Register AuthResultReg = STI->hasFPAC() ? DstReg : AArch64::X16;
2445 const MachineOperand &GAMO = MI.getOperand(1);
2446 assert(GAMO.getOffset() == 0);
2447
2448 if (MI.getMF()->getTarget().getCodeModel() == CodeModel::Tiny) {
2449 MCOperand GAMC;
2450 MCInstLowering.lowerOperand(GAMO, GAMC);
2451 EmitToStreamer(
2452 MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addOperand(GAMC));
2453 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2454 .addReg(AuthResultReg)
2455 .addReg(AArch64::X17)
2456 .addImm(0));
2457 } else {
2458 MachineOperand GAHiOp(GAMO);
2459 MachineOperand GALoOp(GAMO);
2460 GAHiOp.addTargetFlag(AArch64II::MO_PAGE);
2461 GALoOp.addTargetFlag(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2462
2463 MCOperand GAMCHi, GAMCLo;
2464 MCInstLowering.lowerOperand(GAHiOp, GAMCHi);
2465 MCInstLowering.lowerOperand(GALoOp, GAMCLo);
2466
2467 EmitToStreamer(
2468 MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(GAMCHi));
2469
2470 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2471 .addReg(AArch64::X17)
2472 .addReg(AArch64::X17)
2473 .addOperand(GAMCLo)
2474 .addImm(0));
2475
2476 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2477 .addReg(AuthResultReg)
2478 .addReg(AArch64::X17)
2479 .addImm(0));
2480 }
2481
2482 assert(GAMO.isGlobal());
2483 MCSymbol *UndefWeakSym;
2484 if (GAMO.getGlobal()->hasExternalWeakLinkage()) {
2485 UndefWeakSym = createTempSymbol("undef_weak");
2486 EmitToStreamer(
2487 MCInstBuilder(AArch64::CBZX)
2488 .addReg(AuthResultReg)
2489 .addExpr(MCSymbolRefExpr::create(UndefWeakSym, OutContext)));
2490 }
2491
2492 assert(GAMO.getGlobal()->getValueType() != nullptr);
2493 unsigned AuthOpcode = GAMO.getGlobal()->getValueType()->isFunctionTy()
2494 ? AArch64::AUTIA
2495 : AArch64::AUTDA;
2496 EmitToStreamer(MCInstBuilder(AuthOpcode)
2497 .addReg(AuthResultReg)
2498 .addReg(AuthResultReg)
2499 .addReg(AArch64::X17));
2500
2501 if (GAMO.getGlobal()->hasExternalWeakLinkage())
2502 OutStreamer->emitLabel(UndefWeakSym);
2503
2504 if (!STI->hasFPAC()) {
2505 auto AuthKey =
2506 (AuthOpcode == AArch64::AUTIA ? AArch64PACKey::IA : AArch64PACKey::DA);
2507
2508 emitPtrauthCheckAuthenticatedValue(AuthResultReg, AArch64::X17, AuthKey,
2509 AArch64PAuth::AuthCheckMethod::XPAC,
2510 /*ShouldTrap=*/true,
2511 /*OnFailure=*/nullptr);
2512
2513 emitMovXReg(DstReg, AuthResultReg);
2514 }
2515}
2516
2517const MCExpr *
2518AArch64AsmPrinter::lowerBlockAddressConstant(const BlockAddress &BA) {
2519 const MCExpr *BAE = AsmPrinter::lowerBlockAddressConstant(BA);
2520 const Function &Fn = *BA.getFunction();
2521
2522 if (std::optional<uint16_t> BADisc =
2523 STI->getPtrAuthBlockAddressDiscriminatorIfEnabled(Fn))
2524 return AArch64AuthMCExpr::create(BAE, *BADisc, AArch64PACKey::IA,
2525 /*HasAddressDiversity=*/false, OutContext);
2526
2527 return BAE;
2528}
2529
2530// Simple pseudo-instructions have their lowering (with expansion to real
2531// instructions) auto-generated.
2532#include "AArch64GenMCPseudoLowering.inc"
2533
2534void AArch64AsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
2535 S.emitInstruction(Inst, *STI);
2536#ifndef NDEBUG
2537 ++InstsEmitted;
2538#endif
2539}
2540
2541void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
2542 AArch64_MC::verifyInstructionPredicates(MI->getOpcode(), STI->getFeatureBits());
2543
2544#ifndef NDEBUG
2545 InstsEmitted = 0;
2546 auto CheckMISize = make_scope_exit([&]() {
2547 assert(STI->getInstrInfo()->getInstSizeInBytes(*MI) >= InstsEmitted * 4);
2548 });
2549#endif
2550
2551 // Do any auto-generated pseudo lowerings.
2552 if (MCInst OutInst; lowerPseudoInstExpansion(MI, OutInst)) {
2553 EmitToStreamer(*OutStreamer, OutInst);
2554 return;
2555 }
2556
2557 if (MI->getOpcode() == AArch64::ADRP) {
2558 for (auto &Opd : MI->operands()) {
2559 if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
2560 "swift_async_extendedFramePointerFlags") {
2561 ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
2562 }
2563 }
2564 }
2565
2566 if (AArch64FI->getLOHRelated().count(MI)) {
2567 // Generate a label for LOH related instruction
2568 MCSymbol *LOHLabel = createTempSymbol("loh");
2569 // Associate the instruction with the label
2570 LOHInstToLabel[MI] = LOHLabel;
2571 OutStreamer->emitLabel(LOHLabel);
2572 }
2573
2574 AArch64TargetStreamer *TS =
2575 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
2576 // Do any manual lowerings.
2577 switch (MI->getOpcode()) {
2578 default:
2580 "Unhandled tail call instruction");
2581 break;
2582 case AArch64::HINT: {
2583 // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
2584 // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
2585 // non-empty. If MI is the initial BTI, place the
2586 // __patchable_function_entries label after BTI.
2587 if (CurrentPatchableFunctionEntrySym &&
2588 CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
2589 MI == &MF->front().front()) {
2590 int64_t Imm = MI->getOperand(0).getImm();
2591 if ((Imm & 32) && (Imm & 6)) {
2592 MCInst Inst;
2593 MCInstLowering.Lower(MI, Inst);
2594 EmitToStreamer(*OutStreamer, Inst);
2595 CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
2596 OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
2597 return;
2598 }
2599 }
2600 break;
2601 }
2602 case AArch64::MOVMCSym: {
2603 Register DestReg = MI->getOperand(0).getReg();
2604 const MachineOperand &MO_Sym = MI->getOperand(1);
2605 MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
2606 MCOperand Hi_MCSym, Lo_MCSym;
2607
2608 Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
2609 Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);
2610
2611 MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
2612 MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);
2613
2614 MCInst MovZ;
2615 MovZ.setOpcode(AArch64::MOVZXi);
2616 MovZ.addOperand(MCOperand::createReg(DestReg));
2617 MovZ.addOperand(Hi_MCSym);
2618 MovZ.addOperand(MCOperand::createImm(16));
2619 EmitToStreamer(*OutStreamer, MovZ);
2620
2621 MCInst MovK;
2622 MovK.setOpcode(AArch64::MOVKXi);
2623 MovK.addOperand(MCOperand::createReg(DestReg));
2624 MovK.addOperand(MCOperand::createReg(DestReg));
2625 MovK.addOperand(Lo_MCSym);
2626 MovK.addOperand(MCOperand::createImm(0));
2627 EmitToStreamer(*OutStreamer, MovK);
2628 return;
2629 }
2630 case AArch64::MOVIv2d_ns:
2631 // It is generally beneficial to rewrite "fmov s0, wzr" to "movi d0, #0",
2632 // as movi is more efficient across all cores. Newer cores can eliminate
2633 // fmovs early and there is no difference with movi, but this is not true
2634 // for all implementations.
2635 //
2636 // The floating-point version doesn't quite work in rare cases on older
2637 // CPUs, so on those targets we lower this instruction to movi.16b instead.
2638 if (STI->hasZeroCycleZeroingFPWorkaround() &&
2639 MI->getOperand(1).getImm() == 0) {
2640 MCInst TmpInst;
2641 TmpInst.setOpcode(AArch64::MOVIv16b_ns);
2642 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
2643 TmpInst.addOperand(MCOperand::createImm(MI->getOperand(1).getImm()));
2644 EmitToStreamer(*OutStreamer, TmpInst);
2645 return;
2646 }
2647 break;
2648
2649 case AArch64::DBG_VALUE:
2650 case AArch64::DBG_VALUE_LIST:
2651 if (isVerbose() && OutStreamer->hasRawTextSupport()) {
2652 SmallString<128> TmpStr;
2653 raw_svector_ostream OS(TmpStr);
2654 PrintDebugValueComment(MI, OS);
2655 OutStreamer->emitRawText(StringRef(OS.str()));
2656 }
2657 return;
2658
2659 case AArch64::EMITBKEY: {
2660 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
2661 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
2662 ExceptionHandlingType != ExceptionHandling::ARM)
2663 return;
2664
2665 if (getFunctionCFISectionType(*MF) == CFISection::None)
2666 return;
2667
2668 OutStreamer->emitCFIBKeyFrame();
2669 return;
2670 }
2671
2672 case AArch64::EMITMTETAGGED: {
2673 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
2674 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
2675 ExceptionHandlingType != ExceptionHandling::ARM)
2676 return;
2677
2678 if (getFunctionCFISectionType(*MF) != CFISection::None)
2679 OutStreamer->emitCFIMTETaggedFrame();
2680 return;
2681 }
2682
2683 case AArch64::AUT:
2684 case AArch64::AUTPAC:
2685 emitPtrauthAuthResign(MI);
2686 return;
2687
2688 case AArch64::LOADauthptrstatic:
2689 LowerLOADauthptrstatic(*MI);
2690 return;
2691
2692 case AArch64::LOADgotPAC:
2693 case AArch64::MOVaddrPAC:
2694 LowerMOVaddrPAC(*MI);
2695 return;
2696
2697 case AArch64::LOADgotAUTH:
2698 LowerLOADgotAUTH(*MI);
2699 return;
2700
2701 case AArch64::BRA:
2702 case AArch64::BLRA:
2703 emitPtrauthBranch(MI);
2704 return;
2705
2706 // Tail calls use pseudo instructions so they have the proper code-gen
2707 // attributes (isCall, isReturn, etc.). We lower them to the real
2708 // instruction here.
2709 case AArch64::AUTH_TCRETURN:
2710 case AArch64::AUTH_TCRETURN_BTI: {
2711 Register Callee = MI->getOperand(0).getReg();
2712 const uint64_t Key = MI->getOperand(2).getImm();
2713 assert((Key == AArch64PACKey::IA || Key == AArch64PACKey::IB) &&
2714 "Invalid auth key for tail-call return");
2715
2716 const uint64_t Disc = MI->getOperand(3).getImm();
2717 assert(isUInt<16>(Disc) && "Integer discriminator is too wide");
2718
2719 Register AddrDisc = MI->getOperand(4).getReg();
2720
2721 Register ScratchReg = Callee == AArch64::X16 ? AArch64::X17 : AArch64::X16;
2722
2723 emitPtrauthTailCallHardening(MI);
2724
2725 // See the comments in emitPtrauthBranch.
2726 if (Callee == AddrDisc)
2727 report_fatal_error("Call target is signed with its own value");
2728 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, ScratchReg,
2729 /*MayUseAddrAsScratch=*/true);
2730
2731 const bool IsZero = DiscReg == AArch64::XZR;
2732 const unsigned Opcodes[2][2] = {{AArch64::BRAA, AArch64::BRAAZ},
2733 {AArch64::BRAB, AArch64::BRABZ}};
2734
2735 MCInst TmpInst;
2736 TmpInst.setOpcode(Opcodes[Key][IsZero]);
2737 TmpInst.addOperand(MCOperand::createReg(Callee));
2738 if (!IsZero)
2739 TmpInst.addOperand(MCOperand::createReg(DiscReg));
2740 EmitToStreamer(*OutStreamer, TmpInst);
2741 return;
2742 }
2743
2744 case AArch64::TCRETURNri:
2745 case AArch64::TCRETURNrix16x17:
2746 case AArch64::TCRETURNrix17:
2747 case AArch64::TCRETURNrinotx16:
2748 case AArch64::TCRETURNriALL: {
2749 emitPtrauthTailCallHardening(MI);
2750
2751 recordIfImportCall(MI);
2752 MCInst TmpInst;
2753 TmpInst.setOpcode(AArch64::BR);
2754 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
2755 EmitToStreamer(*OutStreamer, TmpInst);
2756 return;
2757 }
2758 case AArch64::TCRETURNdi: {
2759 emitPtrauthTailCallHardening(MI);
2760
2761 MCOperand Dest;
2762 MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
2763 recordIfImportCall(MI);
2764 MCInst TmpInst;
2765 TmpInst.setOpcode(AArch64::B);
2766 TmpInst.addOperand(Dest);
2767 EmitToStreamer(*OutStreamer, TmpInst);
2768 return;
2769 }
2770 case AArch64::SpeculationBarrierISBDSBEndBB: {
2771 // Print DSB SYS + ISB
2772 MCInst TmpInstDSB;
2773 TmpInstDSB.setOpcode(AArch64::DSB);
2774 TmpInstDSB.addOperand(MCOperand::createImm(0xf));
2775 EmitToStreamer(*OutStreamer, TmpInstDSB);
2776 MCInst TmpInstISB;
2777 TmpInstISB.setOpcode(AArch64::ISB);
2778 TmpInstISB.addOperand(MCOperand::createImm(0xf));
2779 EmitToStreamer(*OutStreamer, TmpInstISB);
2780 return;
2781 }
2782 case AArch64::SpeculationBarrierSBEndBB: {
2783 // Print SB
2784 MCInst TmpInstSB;
2785 TmpInstSB.setOpcode(AArch64::SB);
2786 EmitToStreamer(*OutStreamer, TmpInstSB);
2787 return;
2788 }
2789 case AArch64::TLSDESC_AUTH_CALLSEQ: {
2790 /// lower this to:
2791 /// adrp x0, :tlsdesc_auth:var
2792 /// ldr x16, [x0, #:tlsdesc_auth_lo12:var]
2793 /// add x0, x0, #:tlsdesc_auth_lo12:var
2794 /// blraa x16, x0
2795 /// (TPIDR_EL0 offset now in x0)
2796 const MachineOperand &MO_Sym = MI->getOperand(0);
2797 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
2798 MCOperand SymTLSDescLo12, SymTLSDesc;
2799 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
2800 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
2801 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
2802 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
2803
2804 MCInst Adrp;
2805 Adrp.setOpcode(AArch64::ADRP);
2806 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
2807 Adrp.addOperand(SymTLSDesc);
2808 EmitToStreamer(*OutStreamer, Adrp);
2809
2810 MCInst Ldr;
2811 Ldr.setOpcode(AArch64::LDRXui);
2812 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
2813 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
2814 Ldr.addOperand(SymTLSDescLo12);
2815 Ldr.addOperand(MCOperand::createImm(0));
2816 EmitToStreamer(*OutStreamer, Ldr);
2817
2818 MCInst Add;
2819 Add.setOpcode(AArch64::ADDXri);
2820 Add.addOperand(MCOperand::createReg(AArch64::X0));
2821 Add.addOperand(MCOperand::createReg(AArch64::X0));
2822 Add.addOperand(SymTLSDescLo12);
2823 Add.addOperand(MCOperand::createImm(0));
2824 EmitToStreamer(*OutStreamer, Add);
2825
2826 // Authenticated TLSDESC accesses are not relaxed.
2827 // Thus, do not emit .tlsdesccall for AUTH TLSDESC.
2828
2829 MCInst Blraa;
2830 Blraa.setOpcode(AArch64::BLRAA);
2831 Blraa.addOperand(MCOperand::createReg(AArch64::X16));
2832 Blraa.addOperand(MCOperand::createReg(AArch64::X0));
2833 EmitToStreamer(*OutStreamer, Blraa);
2834
2835 return;
2836 }
2837 case AArch64::TLSDESC_CALLSEQ: {
2838 /// lower this to:
2839 /// adrp x0, :tlsdesc:var
2840 /// ldr x1, [x0, #:tlsdesc_lo12:var]
2841 /// add x0, x0, #:tlsdesc_lo12:var
2842 /// .tlsdesccall var
2843 /// blr x1
2844 /// (TPIDR_EL0 offset now in x0)
2845 const MachineOperand &MO_Sym = MI->getOperand(0);
2846 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
2847 MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
2848 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
2849 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
2850 MCInstLowering.lowerOperand(MO_Sym, Sym);
2851 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
2852 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
2853
2854 MCInst Adrp;
2855 Adrp.setOpcode(AArch64::ADRP);
2856 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
2857 Adrp.addOperand(SymTLSDesc);
2858 EmitToStreamer(*OutStreamer, Adrp);
2859
2860 MCInst Ldr;
2861 if (STI->isTargetILP32()) {
2862 Ldr.setOpcode(AArch64::LDRWui);
2863 Ldr.addOperand(MCOperand::createReg(AArch64::W1));
2864 } else {
2865 Ldr.setOpcode(AArch64::LDRXui);
2866 Ldr.addOperand(MCOperand::createReg(AArch64::X1));
2867 }
2868 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
2869 Ldr.addOperand(SymTLSDescLo12);
2870 Ldr.addOperand(MCOperand::createImm(0));
2871 EmitToStreamer(*OutStreamer, Ldr);
2872
2873 MCInst Add;
2874 if (STI->isTargetILP32()) {
2875 Add.setOpcode(AArch64::ADDWri);
2876 Add.addOperand(MCOperand::createReg(AArch64::W0));
2877 Add.addOperand(MCOperand::createReg(AArch64::W0));
2878 } else {
2879 Add.setOpcode(AArch64::ADDXri);
2880 Add.addOperand(MCOperand::createReg(AArch64::X0));
2881 Add.addOperand(MCOperand::createReg(AArch64::X0));
2882 }
2883 Add.addOperand(SymTLSDescLo12);
2884 Add.addOperand(MCOperand::createImm(0));
2885 EmitToStreamer(*OutStreamer, Add);
2886
2887 // Emit a relocation-annotation. This expands to no code, but requests
2888 // that the following instruction get an R_AARCH64_TLSDESC_CALL relocation.
2889 MCInst TLSDescCall;
2890 TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
2891 TLSDescCall.addOperand(Sym);
2892 EmitToStreamer(*OutStreamer, TLSDescCall);
2893#ifndef NDEBUG
2894 --InstsEmitted; // no code emitted
2895#endif
2896
2897 MCInst Blr;
2898 Blr.setOpcode(AArch64::BLR);
2899 Blr.addOperand(MCOperand::createReg(AArch64::X1));
2900 EmitToStreamer(*OutStreamer, Blr);
2901
2902 return;
2903 }
2904
2905 case AArch64::JumpTableDest32:
2906 case AArch64::JumpTableDest16:
2907 case AArch64::JumpTableDest8:
2908 LowerJumpTableDest(*OutStreamer, *MI);
2909 return;
2910
2911 case AArch64::BR_JumpTable:
2912 LowerHardenedBRJumpTable(*MI);
2913 return;
2914
2915 case AArch64::FMOVH0:
2916 case AArch64::FMOVS0:
2917 case AArch64::FMOVD0:
2918 emitFMov0(*MI);
2919 return;
2920
2921 case AArch64::MOPSMemoryCopyPseudo:
2922 case AArch64::MOPSMemoryMovePseudo:
2923 case AArch64::MOPSMemorySetPseudo:
2924 case AArch64::MOPSMemorySetTaggingPseudo:
2925 LowerMOPS(*OutStreamer, *MI);
2926 return;
2927
2928 case TargetOpcode::STACKMAP:
2929 return LowerSTACKMAP(*OutStreamer, SM, *MI);
2930
2931 case TargetOpcode::PATCHPOINT:
2932 return LowerPATCHPOINT(*OutStreamer, SM, *MI);
2933
2934 case TargetOpcode::STATEPOINT:
2935 return LowerSTATEPOINT(*OutStreamer, SM, *MI);
2936
2937 case TargetOpcode::FAULTING_OP:
2938 return LowerFAULTING_OP(*MI);
2939
2940 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
2941 LowerPATCHABLE_FUNCTION_ENTER(*MI);
2942 return;
2943
2944 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
2945 LowerPATCHABLE_FUNCTION_EXIT(*MI);
2946 return;
2947
2948 case TargetOpcode::PATCHABLE_TAIL_CALL:
2949 LowerPATCHABLE_TAIL_CALL(*MI);
2950 return;
2951 case TargetOpcode::PATCHABLE_EVENT_CALL:
2952 return LowerPATCHABLE_EVENT_CALL(*MI, false);
2953 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
2954 return LowerPATCHABLE_EVENT_CALL(*MI, true);
2955
2956 case AArch64::KCFI_CHECK:
2957 LowerKCFI_CHECK(*MI);
2958 return;
2959
2960 case AArch64::HWASAN_CHECK_MEMACCESS:
2961 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
2962 case AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW:
2963 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW:
2964 LowerHWASAN_CHECK_MEMACCESS(*MI);
2965 return;
2966
2967 case AArch64::SEH_StackAlloc:
2968 TS->emitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
2969 return;
2970
2971 case AArch64::SEH_SaveFPLR:
2972 TS->emitARM64WinCFISaveFPLR(MI->getOperand(0).getImm());
2973 return;
2974
2975 case AArch64::SEH_SaveFPLR_X:
2976 assert(MI->getOperand(0).getImm() < 0 &&
2977 "Pre increment SEH opcode must have a negative offset");
2978 TS->emitARM64WinCFISaveFPLRX(-MI->getOperand(0).getImm());
2979 return;
2980
2981 case AArch64::SEH_SaveReg:
2982 TS->emitARM64WinCFISaveReg(MI->getOperand(0).getImm(),
2983 MI->getOperand(1).getImm());
2984 return;
2985
2986 case AArch64::SEH_SaveReg_X:
2987 assert(MI->getOperand(1).getImm() < 0 &&
2988 "Pre increment SEH opcode must have a negative offset");
2989 TS->emitARM64WinCFISaveRegX(MI->getOperand(0).getImm(),
2990 -MI->getOperand(1).getImm());
2991 return;
2992
2993 case AArch64::SEH_SaveRegP:
2994 if (MI->getOperand(1).getImm() == 30 && MI->getOperand(0).getImm() >= 19 &&
2995 MI->getOperand(0).getImm() <= 28) {
2996 assert((MI->getOperand(0).getImm() - 19) % 2 == 0 &&
2997 "Register paired with LR must be odd");
2998 TS->emitARM64WinCFISaveLRPair(MI->getOperand(0).getImm(),
2999 MI->getOperand(2).getImm());
3000 return;
3001 }
3002 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3003 "Non-consecutive registers not allowed for save_regp");
3004 TS->emitARM64WinCFISaveRegP(MI->getOperand(0).getImm(),
3005 MI->getOperand(2).getImm());
3006 return;
3007
3008 case AArch64::SEH_SaveRegP_X:
3009 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3010 "Non-consecutive registers not allowed for save_regp_x");
3011 assert(MI->getOperand(2).getImm() < 0 &&
3012 "Pre increment SEH opcode must have a negative offset");
3013 TS->emitARM64WinCFISaveRegPX(MI->getOperand(0).getImm(),
3014 -MI->getOperand(2).getImm());
3015 return;
3016
3017 case AArch64::SEH_SaveFReg:
3018 TS->emitARM64WinCFISaveFReg(MI->getOperand(0).getImm(),
3019 MI->getOperand(1).getImm());
3020 return;
3021
3022 case AArch64::SEH_SaveFReg_X:
3023 assert(MI->getOperand(1).getImm() < 0 &&
3024 "Pre increment SEH opcode must have a negative offset");
3025 TS->emitARM64WinCFISaveFRegX(MI->getOperand(0).getImm(),
3026 -MI->getOperand(1).getImm());
3027 return;
3028
3029 case AArch64::SEH_SaveFRegP:
3030 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3031 "Non-consecutive registers not allowed for save_regp");
3032 TS->emitARM64WinCFISaveFRegP(MI->getOperand(0).getImm(),
3033 MI->getOperand(2).getImm());
3034 return;
3035
3036 case AArch64::SEH_SaveFRegP_X:
3037 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3038 "Non-consecutive registers not allowed for save_regp_x");
3039 assert(MI->getOperand(2).getImm() < 0 &&
3040 "Pre increment SEH opcode must have a negative offset");
3041 TS->emitARM64WinCFISaveFRegPX(MI->getOperand(0).getImm(),
3042 -MI->getOperand(2).getImm());
3043 return;
3044
3045 case AArch64::SEH_SetFP:
3046 TS->emitARM64WinCFISetFP();
3047 return;
3048
3049 case AArch64::SEH_AddFP:
3050 TS->emitARM64WinCFIAddFP(MI->getOperand(0).getImm());
3051 return;
3052
3053 case AArch64::SEH_Nop:
3054 TS->emitARM64WinCFINop();
3055 return;
3056
3057 case AArch64::SEH_PrologEnd:
3058 TS->emitARM64WinCFIPrologEnd();
3059 return;
3060
3061 case AArch64::SEH_EpilogStart:
3062 TS->emitARM64WinCFIEpilogStart();
3063 return;
3064
3065 case AArch64::SEH_EpilogEnd:
3066 TS->emitARM64WinCFIEpilogEnd();
3067 return;
3068
3069 case AArch64::SEH_PACSignLR:
3070 TS->emitARM64WinCFIPACSignLR();
3071 return;
3072
3073 case AArch64::SEH_SaveAnyRegQP:
3074 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3075 "Non-consecutive registers not allowed for save_any_reg");
3076 assert(MI->getOperand(2).getImm() >= 0 &&
3077 "SaveAnyRegQP SEH opcode offset must be non-negative");
3078 assert(MI->getOperand(2).getImm() <= 1008 &&
3079 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3080 TS->emitARM64WinCFISaveAnyRegQP(MI->getOperand(0).getImm(),
3081 MI->getOperand(2).getImm());
3082 return;
3083
3084 case AArch64::SEH_SaveAnyRegQPX:
3085 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3086 "Non-consecutive registers not allowed for save_any_reg");
3087 assert(MI->getOperand(2).getImm() < 0 &&
3088 "SaveAnyRegQPX SEH opcode offset must be negative");
3089 assert(MI->getOperand(2).getImm() >= -1008 &&
3090 "SaveAnyRegQPX SEH opcode offset must fit into 6 bits");
3091 TS->emitARM64WinCFISaveAnyRegQPX(MI->getOperand(0).getImm(),
3092 -MI->getOperand(2).getImm());
3093 return;
3094
3095 case AArch64::BLR:
3096 case AArch64::BR:
3097 recordIfImportCall(MI);
3098 MCInst TmpInst;
3099 MCInstLowering.Lower(MI, TmpInst);
3100 EmitToStreamer(*OutStreamer, TmpInst);
3101 return;
3102 }
3103
3104 // Finally, do the automated lowerings for everything else.
3105 MCInst TmpInst;
3106 MCInstLowering.Lower(MI, TmpInst);
3107 EmitToStreamer(*OutStreamer, TmpInst);
3108}
3109
3110void AArch64AsmPrinter::recordIfImportCall(
3111 const MachineInstr *BranchInst) {
3112 if (!EnableImportCallOptimization ||
3113 !TM.getTargetTriple().isOSBinFormatCOFF())
3114 return;
3115
3116 auto [GV, OpFlags] = BranchInst->getMF()->tryGetCalledGlobal(BranchInst);
3117 if (GV && GV->hasDLLImportStorageClass()) {
3118 auto *CallSiteSymbol = MMI->getContext().createNamedTempSymbol("impcall");
3119 OutStreamer->emitLabel(CallSiteSymbol);
3120
3121 auto *CalledSymbol = MCInstLowering.GetGlobalValueSymbol(GV, OpFlags);
3122 SectionToImportedFunctionCalls[OutStreamer->getCurrentSectionOnly()]
3123 .push_back({CallSiteSymbol, CalledSymbol});
3124 }
3125}
3126
3127void AArch64AsmPrinter::emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
3128 MCSymbol *LazyPointer) {
3129 // _ifunc:
3130 // adrp x16, lazy_pointer@GOTPAGE
3131 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3132 // ldr x16, [x16]
3133 // br x16
3134
3135 {
3136 MCInst Adrp;
3137 Adrp.setOpcode(AArch64::ADRP);
3138 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3139 MCOperand SymPage;
3140 MCInstLowering.lowerOperand(
3141 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3142 AArch64II::MO_GOT | AArch64II::MO_PAGE),
3143 SymPage);
3144 Adrp.addOperand(SymPage);
3145 EmitToStreamer(Adrp);
3146 }
3147
3148 {
3149 MCInst Ldr;
3150 Ldr.setOpcode(AArch64::LDRXui);
3151 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3152 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3153 MCOperand SymPageOff;
3154 MCInstLowering.lowerOperand(
3155 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3156 AArch64II::MO_GOT | AArch64II::MO_PAGEOFF | AArch64II::MO_NC),
3157 SymPageOff);
3158 Ldr.addOperand(SymPageOff);
3159 Ldr.addOperand(MCOperand::createImm(0));
3160 EmitToStreamer(Ldr);
3161 }
3162
3163 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
3164 .addReg(AArch64::X16)
3165 .addReg(AArch64::X16)
3166 .addImm(0));
3167
3168 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3169 : AArch64::BR)
3170 .addReg(AArch64::X16));
3171}
3172
3173void AArch64AsmPrinter::emitMachOIFuncStubHelperBody(Module &M,
3174 const GlobalIFunc &GI,
3175 MCSymbol *LazyPointer) {
3176 // These stub helpers are only ever called once, so here we're optimizing for
3177 // minimum size by using the pre-indexed store variants, which saves a few
3178 // bytes of instructions to bump & restore sp.
3179
3180 // _ifunc.stub_helper:
3181 // stp fp, lr, [sp, #-16]!
3182 // mov fp, sp
3183 // stp x1, x0, [sp, #-16]!
3184 // stp x3, x2, [sp, #-16]!
3185 // stp x5, x4, [sp, #-16]!
3186 // stp x7, x6, [sp, #-16]!
3187 // stp d1, d0, [sp, #-16]!
3188 // stp d3, d2, [sp, #-16]!
3189 // stp d5, d4, [sp, #-16]!
3190 // stp d7, d6, [sp, #-16]!
3191 // bl _resolver
3192 // adrp x16, lazy_pointer@GOTPAGE
3193 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3194 // str x0, [x16]
3195 // mov x16, x0
3196 // ldp d7, d6, [sp], #16
3197 // ldp d5, d4, [sp], #16
3198 // ldp d3, d2, [sp], #16
3199 // ldp d1, d0, [sp], #16
3200 // ldp x7, x6, [sp], #16
3201 // ldp x5, x4, [sp], #16
3202 // ldp x3, x2, [sp], #16
3203 // ldp x1, x0, [sp], #16
3204 // ldp fp, lr, [sp], #16
3205 // br x16
3206
3207 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3208 .addReg(AArch64::SP)
3209 .addReg(AArch64::FP)
3210 .addReg(AArch64::LR)
3211 .addReg(AArch64::SP)
3212 .addImm(-2));
3213
3214 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3215 .addReg(AArch64::FP)
3216 .addReg(AArch64::SP)
3217 .addImm(0)
3218 .addImm(0));
3219
3220 for (int I = 0; I != 4; ++I)
3221 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3222 .addReg(AArch64::SP)
3223 .addReg(AArch64::X1 + 2 * I)
3224 .addReg(AArch64::X0 + 2 * I)
3225 .addReg(AArch64::SP)
3226 .addImm(-2));
3227
3228 for (int I = 0; I != 4; ++I)
3229 EmitToStreamer(MCInstBuilder(AArch64::STPDpre)
3230 .addReg(AArch64::SP)
3231 .addReg(AArch64::D1 + 2 * I)
3232 .addReg(AArch64::D0 + 2 * I)
3233 .addReg(AArch64::SP)
3234 .addImm(-2));
3235
3236 EmitToStreamer(
3237 MCInstBuilder(AArch64::BL)
3238 .addOperand(MCOperand::createExpr(lowerConstant(GI.getResolver()))));
3239
3240 {
3241 MCInst Adrp;
3242 Adrp.setOpcode(AArch64::ADRP);
3243 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3244 MCOperand SymPage;
3245 MCInstLowering.lowerOperand(
3246 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3247 AArch64II::MO_GOT | AArch64II::MO_PAGE),
3248 SymPage);
3249 Adrp.addOperand(SymPage);
3250 EmitToStreamer(Adrp);
3251 }
3252
3253 {
3254 MCInst Ldr;
3255 Ldr.setOpcode(AArch64::LDRXui);
3256 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3257 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3258 MCOperand SymPageOff;
3259 MCInstLowering.lowerOperand(
3260 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3261 AArch64II::MO_GOT | AArch64II::MO_PAGEOFF | AArch64II::MO_NC),
3262 SymPageOff);
3263 Ldr.addOperand(SymPageOff);
3264 Ldr.addOperand(MCOperand::createImm(0));
3265 EmitToStreamer(Ldr);
3266 }
3267
3268 EmitToStreamer(MCInstBuilder(AArch64::STRXui)
3269 .addReg(AArch64::X0)
3270 .addReg(AArch64::X16)
3271 .addImm(0));
3272
3273 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3274 .addReg(AArch64::X16)
3275 .addReg(AArch64::X0)
3276 .addImm(0)
3277 .addImm(0));
3278
3279 for (int I = 3; I != -1; --I)
3280 EmitToStreamer(MCInstBuilder(AArch64::LDPDpost)
3281 .addReg(AArch64::SP)
3282 .addReg(AArch64::D1 + 2 * I)
3283 .addReg(AArch64::D0 + 2 * I)
3284 .addReg(AArch64::SP)
3285 .addImm(2));
3286
3287 for (int I = 3; I != -1; --I)
3288 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3289 .addReg(AArch64::SP)
3290 .addReg(AArch64::X1 + 2 * I)
3291 .addReg(AArch64::X0 + 2 * I)
3292 .addReg(AArch64::SP)
3293 .addImm(2));
3294
3295 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3296 .addReg(AArch64::SP)
3297 .addReg(AArch64::FP)
3298 .addReg(AArch64::LR)
3299 .addReg(AArch64::SP)
3300 .addImm(2));
3301
3302 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3303 : AArch64::BR)
3304 .addReg(AArch64::X16));
3305}
3306
3307const MCExpr *AArch64AsmPrinter::lowerConstant(const Constant *CV) {
3308 if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
3309 return MCSymbolRefExpr::create(MCInstLowering.GetGlobalValueSymbol(GV, 0),
3310 OutContext);
3311 }
3312
3313 return AsmPrinter::lowerConstant(CV);
3314}
3315
3316// Force static initialization.
3323}
virtual void emitARM64WinCFISaveLRPair(unsigned Reg, int Offset)
This implementation is used for AArch64 ELF targets (Linux in particular).
AArch64_MachoTargetObjectFile - This TLOF implementation is used for Darwin.
Class for arbitrary precision integers.
Definition: APInt.h:78
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
This class is intended to be used as a driving class for all asm writers.
Definition: AsmPrinter.h:86
virtual void emitInstruction(const MachineInstr *)
Targets should implement this to emit instructions.
Definition: AsmPrinter.h:561
void EmitToStreamer(MCStreamer &S, const MCInst &Inst)
Definition: AsmPrinter.cpp:428
virtual const MCExpr * lowerConstantPtrAuth(const ConstantPtrAuth &CPA)
Definition: AsmPrinter.h:582
void emitXRayTable()
Emit a table with all XRay instrumentation points.
virtual void emitGlobalAlias(const Module &M, const GlobalAlias &GA)
virtual MCSymbol * GetCPISymbol(unsigned CPID) const
Return the symbol for the specified constant pool entry.
virtual void emitMachOIFuncStubHelperBody(Module &M, const GlobalIFunc &GI, MCSymbol *LazyPointer)
Definition: AsmPrinter.h:613
virtual void emitJumpTableInfo()
Print assembly representations of the jump tables used by the current function to the current output ...
virtual void SetupMachineFunction(MachineFunction &MF)
This should be called when a new MachineFunction is being processed from runOnMachineFunction.
void emitFunctionBody()
This method emits the body and trailer for a function.
virtual void emitStartOfAsmFile(Module &)
This virtual method can be overridden by targets that want to emit something at the start of their fi...
Definition: AsmPrinter.h:537
virtual void emitEndOfAsmFile(Module &)
This virtual method can be overridden by targets that want to emit something at the end of their file...
Definition: AsmPrinter.h:541
virtual void emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI, MCSymbol *LazyPointer)
Definition: AsmPrinter.h:607
void getAnalysisUsage(AnalysisUsage &AU) const override
Record analysis usage.
Definition: AsmPrinter.cpp:450
virtual bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const
Definition: AsmPrinter.h:916
virtual const MCSubtargetInfo * getIFuncMCSubtargetInfo() const
getSubtargetInfo() cannot be used where this is needed because we don't have a MachineFunction when w...
Definition: AsmPrinter.h:603
bool runOnMachineFunction(MachineFunction &MF) override
Emit the specified function out to the OutStreamer.
Definition: AsmPrinter.h:387
virtual const MCExpr * lowerConstant(const Constant *CV)
Lower the specified LLVM Constant to an MCExpr.
virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo, const char *ExtraCode, raw_ostream &OS)
Print the specified operand of MI, an INLINEASM instruction, using the specified assembler variant as...
virtual void emitXXStructor(const DataLayout &DL, const Constant *CV)
Targets can override this to change how global constants that are part of a C++ static/global constru...
Definition: AsmPrinter.h:578
virtual void emitFunctionBodyEnd()
Targets can override this to emit stuff after the last basic block in the function.
Definition: AsmPrinter.h:549
virtual void emitFunctionEntryLabel()
EmitFunctionEntryLabel - Emit the label that is the entrypoint for the function.
virtual std::tuple< const MCSymbol *, uint64_t, const MCSymbol *, codeview::JumpTableEntrySize > getCodeViewJumpTableInfo(int JTI, const MachineInstr *BranchInstr, const MCSymbol *BranchLabel) const
Gets information required to create a CodeView debug symbol for a jump table.
virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, const char *ExtraCode, raw_ostream &OS)
Print the specified operand of MI, an INLINEASM instruction, using the specified assembler variant.
virtual const MCExpr * lowerBlockAddressConstant(const BlockAddress &BA)
Lower the specified BlockAddress to an MCExpr.
The address of a basic block.
Definition: Constants.h:893
Function * getFunction() const
Definition: Constants.h:923
Conditional or Unconditional Branch instruction.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:157
A signed pointer, in the ptrauth sense.
Definition: Constants.h:1021
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition: Constants.h:1048
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition: Constants.h:1051
bool hasAddressDiscriminator() const
Whether there is any non-null address discriminator.
Definition: Constants.h:1066
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition: Constants.h:1054
This is an important base class in LLVM.
Definition: Constant.h:42
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
const Constant * getAliasee() const
Definition: GlobalAlias.h:86
const Constant * getResolver() const
Definition: GlobalIFunc.h:72
bool hasLocalLinkage() const
Definition: GlobalValue.h:528
bool hasExternalWeakLinkage() const
Definition: GlobalValue.h:529
Type * getValueType() const
Definition: GlobalValue.h:296
void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
static const MCBinaryExpr * createLShr(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition: MCExpr.h:617
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition: MCExpr.h:537
static const MCBinaryExpr * createSub(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition: MCExpr.h:622
static const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition: MCExpr.cpp:222
Context object for machine code objects.
Definition: MCContext.h:83
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:34
MCInstBuilder & addReg(MCRegister Reg)
Add a new register operand.
Definition: MCInstBuilder.h:37
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:185
void addOperand(const MCOperand Op)
Definition: MCInst.h:211
void setOpcode(unsigned Op)
Definition: MCInst.h:198
MCSection * getDataSection() const
Instances of this class represent operands of the MCInst class.
Definition: MCInst.h:37
static MCOperand createExpr(const MCExpr *Val)
Definition: MCInst.h:163
static MCOperand createReg(MCRegister Reg)
Definition: MCInst.h:135
static MCOperand createImm(int64_t Val)
Definition: MCInst.h:142
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Definition: MCSection.h:36
Streaming machine code generation interface.
Definition: MCStreamer.h:213
virtual void emitCFIBKeyFrame()
Definition: MCStreamer.cpp:248
virtual void beginCOFFSymbolDef(const MCSymbol *Symbol)
Start emitting COFF symbol definition.
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
virtual void emitCOFFSymbolType(int Type)
Emit the type of the symbol.
virtual bool hasRawTextSupport() const
Return true if this asm streamer supports emitting unformatted text to the .s file with EmitRawText.
Definition: MCStreamer.h:347
virtual void endCOFFSymbolDef()
Marks the end of the symbol definition.
MCContext & getContext() const
Definition: MCStreamer.h:300
virtual void AddComment(const Twine &T, bool EOL=true)
Add a textual comment.
Definition: MCStreamer.h:366
virtual void emitCFIMTETaggedFrame()
Definition: MCStreamer.cpp:255
void emitValue(const MCExpr *Value, unsigned Size, SMLoc Loc=SMLoc())
Definition: MCStreamer.cpp:179
virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc=SMLoc())
Emit a label for Symbol into the current section.
Definition: MCStreamer.cpp:420
MCTargetStreamer * getTargetStreamer()
Definition: MCStreamer.h:309
MCSection * getCurrentSectionOnly() const
Definition: MCStreamer.h:400
void emitRawText(const Twine &String)
If this file is backed by a assembly streamer, this dumps the specified string in the output ....
virtual void emitCOFFSymbolStorageClass(int StorageClass)
Emit the storage class of the symbol.
Generic base class for all target subtargets.
Represent a reference to a symbol from inside an expression.
Definition: MCExpr.h:192
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
Definition: MCExpr.h:398
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
StringRef getName() const
getName - Get the symbol name.
Definition: MCSymbol.h:205
Metadata node.
Definition: Metadata.h:1069
MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Representation of each machine instruction.
Definition: MachineInstr.h:69
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
iterator_range< mop_iterator > operands()
Definition: MachineInstr.h:691
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:585
const std::vector< MachineJumpTableEntry > & getJumpTables() const
MachineModuleInfoELF - This is a MachineModuleInfoImpl implementation for ELF targets.
MachineModuleInfoMachO - This is a MachineModuleInfoImpl implementation for MachO targets.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
static MachineOperand CreateMCSymbol(MCSymbol *Sym, unsigned TargetFlags=0)
const GlobalValue * getGlobal() const
static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
const BlockAddress * getBlockAddress() const
void setOffset(int64_t Offset)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
@ MO_Immediate
Immediate operand.
@ MO_GlobalAddress
Address of a global value.
@ MO_BlockAddress
Address of a basic block.
@ MO_Register
Register operand.
@ MO_ExternalSymbol
Name of external global symbol.
int64_t getOffset() const
Return the offset from the symbol in this operand.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
virtual StringRef getPassName() const
getPassName - Return a nice clean name for a pass.
Definition: Pass.cpp:81
MI-level patchpoint operands.
Definition: StackMaps.h:76
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static SectionKind getMetadata()
Definition: SectionKind.h:188
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
MI-level stackmap operands.
Definition: StackMaps.h:35
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given stackmap should emit.
Definition: StackMaps.h:50
void recordStatepoint(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a statepoint instruction.
Definition: StackMaps.cpp:562
void recordPatchPoint(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a patchpoint instruction.
Definition: StackMaps.cpp:541
void recordStackMap(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a stackmap instruction.
Definition: StackMaps.cpp:531
MI-level Statepoint operands.
Definition: StackMaps.h:158
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:144
virtual MCSection * getSectionForJumpTable(const Function &F, const TargetMachine &TM) const
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool regsOverlap(Register RegA, Register RegB) const
Returns true if the two registers are equal or alias each other.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition: Type.h:255
LLVM Value Representation.
Definition: Value.h:74
const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr) const
Accumulate the constant offset this value has compared to a base pointer.
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
A raw_ostream that writes to an SmallVector or SmallString.
Definition: raw_ostream.h:691
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ MO_NC
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow.
@ MO_G1
MO_G1 - A symbol operand with this flag (granule 1) represents the bits 16-31 of a 64-bit address,...
@ MO_S
MO_S - Indicates that the bits of the symbol operand represented by MO_G0 etc are signed.
@ MO_PAGEOFF
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page.
@ MO_GOT
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
@ MO_G0
MO_G0 - A symbol operand with this flag (granule 0) represents the bits 0-15 of a 64-bit address,...
@ MO_PAGE
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
@ MO_TLS
MO_TLS - Indicates that the operand being accessed is some kind of thread-local symbol.
AuthCheckMethod
Variants of check performed on an authenticated pointer.
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
Key
PAL metadata keys.
@ SectionSize
Definition: COFF.h:60
SymbolStorageClass
Storage class tells where and what the symbol represents.
Definition: COFF.h:217
@ IMAGE_SYM_CLASS_EXTERNAL
External symbol.
Definition: COFF.h:223
@ IMAGE_SYM_CLASS_STATIC
Static.
Definition: COFF.h:224
@ IMAGE_SYM_DTYPE_NULL
No complex type; simple scalar variable.
Definition: COFF.h:273
@ IMAGE_SYM_DTYPE_FUNCTION
A function that returns a base type.
Definition: COFF.h:275
@ SCT_COMPLEX_TYPE_SHIFT
Type is formed as (base + (derived << SCT_COMPLEX_TYPE_SHIFT))
Definition: COFF.h:279
@ AArch64_VectorCall
Used between AArch64 Advanced SIMD functions.
Definition: CallingConv.h:221
@ AArch64_SVE_VectorCall
Used between AArch64 SVE functions.
Definition: CallingConv.h:224
@ SHT_PROGBITS
Definition: ELF.h:1098
@ GNU_PROPERTY_AARCH64_FEATURE_1_BTI
Definition: ELF.h:1801
@ GNU_PROPERTY_AARCH64_FEATURE_1_PAC
Definition: ELF.h:1802
@ GNU_PROPERTY_AARCH64_FEATURE_1_GCS
Definition: ELF.h:1803
@ SHF_ALLOC
Definition: ELF.h:1196
@ SHF_GROUP
Definition: ELF.h:1218
@ SHF_EXECINSTR
Definition: ELF.h:1199
@ S_REGULAR
S_REGULAR - Regular section.
Definition: MachO.h:127
Reg
All possible values of the reg field in the ModR/M byte.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:711
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
constexpr double e
Definition: MathExtras.h:47
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:480
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
Definition: ScopeExit.h:59
static unsigned getXPACOpcodeForKey(AArch64PACKey::ID K)
Return XPAC opcode to be used for a ptrauth strip using the given key.
ExceptionHandling
Target & getTheAArch64beTarget()
Target & getTheAArch64leTarget()
Target & getTheAArch64_32Target()
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
Target & getTheARM64_32Target()
@ MCAF_SubsectionsViaSymbols
.subsections_via_symbols (MachO)
Definition: MCDirectives.h:55
static MCRegister getXRegFromWReg(MCRegister Reg)
@ Add
Sum of integers.
Target & getTheARM64Target()
static MCRegister getXRegFromXRegTuple(MCRegister RegTuple)
static unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return PAC opcode to be used for a ptrauth sign using the given key, or its PAC*Z variant that doesn'...
static MCRegister getWRegFromXReg(MCRegister Reg)
static unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return AUT opcode to be used for a ptrauth auth using the given key, or its AUT*Z variant that doesn'...
@ MCSA_Weak
.weak
Definition: MCDirectives.h:45
@ MCSA_Global
.type _foo, @gnu_unique_object
Definition: MCDirectives.h:30
@ MCSA_WeakAntiDep
.weak_anti_dep (COFF)
Definition: MCDirectives.h:49
@ MCSA_ELF_TypeFunction
.type _foo, STT_FUNC # aka @function
Definition: MCDirectives.h:23
@ MCSA_Hidden
.hidden (ELF)
Definition: MCDirectives.h:33
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
RegisterAsmPrinter - Helper template for registering a target specific assembly printer,...