LLVM 23.0.0git
AArch64AsmPrinter.cpp
Go to the documentation of this file.
1//===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains a printer that converts from our internal representation
10// of machine-dependent LLVM code to the AArch64 assembly language.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AArch64.h"
15#include "AArch64MCInstLower.h"
17#include "AArch64RegisterInfo.h"
18#include "AArch64Subtarget.h"
27#include "llvm/ADT/DenseMap.h"
28#include "llvm/ADT/ScopeExit.h"
31#include "llvm/ADT/Statistic.h"
32#include "llvm/ADT/StringRef.h"
33#include "llvm/ADT/Twine.h"
47#include "llvm/IR/DataLayout.h"
49#include "llvm/IR/Mangler.h"
50#include "llvm/IR/Module.h"
51#include "llvm/MC/MCAsmInfo.h"
52#include "llvm/MC/MCContext.h"
53#include "llvm/MC/MCExpr.h"
54#include "llvm/MC/MCInst.h"
58#include "llvm/MC/MCStreamer.h"
59#include "llvm/MC/MCSymbol.h"
60#include "llvm/MC/MCValue.h"
70#include <cassert>
71#include <cstdint>
72#include <map>
73#include <memory>
74
75using namespace llvm;
76
// Debug type used by LLVM_DEBUG and STATISTIC output for this pass.
77#define DEBUG_TYPE "AArch64AsmPrinter"
78
79// Doesn't count FPR128 ZCZ instructions which are handled
80// by TableGen pattern matching
81STATISTIC(NumZCZeroingInstrsFPR,
82 "Number of zero-cycle FPR zeroing instructions expanded from "
83 "canonical pseudo instructions");
84
87 "aarch64-ptrauth-auth-checks", cl::Hidden,
88 cl::values(clEnumValN(Unchecked, "none", "don't test for failure"),
89 clEnumValN(Poison, "poison", "poison on failure"),
90 clEnumValN(Trap, "trap", "trap on failure")),
91 cl::desc("Check pointer authentication auth/resign failures"),
93
94namespace {

// AsmPrinter subclass that lowers AArch64 MachineInstrs to MCInsts and emits
// the final assembly/object stream. Also expands target-specific pseudos
// (ptrauth, HWASan checks, KCFI, XRay sleds, jump tables, ...).
96class AArch64AsmPrinter : public AsmPrinter {
97 AArch64MCInstLower MCInstLowering;
98 FaultMaps FM;
// Subtarget of the function currently being printed; set in
// runOnMachineFunction (and temporarily swapped by
// emitHwasanMemaccessSymbols when emitting outlined checkers).
99 const AArch64Subtarget *STI;
100 bool ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = false;
101#ifndef NDEBUG
// Debug-only counter of instructions emitted.
102 unsigned InstsEmitted;
103#endif
// Set from the "import-call-optimization" module flag on COFF targets
// (see emitStartOfAsmFile).
104 bool EnableImportCallOptimization = false;
106 SectionToImportedFunctionCalls;
107 unsigned PAuthIFuncNextUniqueID = 1;
108
109public:
110 static char ID;
111
112 AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
113 : AsmPrinter(TM, std::move(Streamer), ID),
114 MCInstLowering(OutContext, *this), FM(*this) {}
115
116 StringRef getPassName() const override { return "AArch64 Assembly Printer"; }
117
118 /// Wrapper for MCInstLowering.lowerOperand() for the
119 /// tblgen'erated pseudo lowering.
120 bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
121 return MCInstLowering.lowerOperand(MO, MCOp);
122 }
123
124 const MCExpr *lowerConstantPtrAuth(const ConstantPtrAuth &CPA) override;
125
126 const MCExpr *lowerBlockAddressConstant(const BlockAddress &BA) override;
127
128 void emitStartOfAsmFile(Module &M) override;
129 void emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
130 ArrayRef<unsigned> JumpTableIndices) override;
131 std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
133 getCodeViewJumpTableInfo(int JTI, const MachineInstr *BranchInstr,
134 const MCSymbol *BranchLabel) const override;
135
136 void emitFunctionEntryLabel() override;
137
138 void emitXXStructor(const DataLayout &DL, const Constant *CV) override;
139
140 void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);
141
142 void LowerHardenedBRJumpTable(const MachineInstr &MI);
143
144 void LowerMOPS(MCStreamer &OutStreamer, const MachineInstr &MI);
145
146 void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
147 const MachineInstr &MI);
148 void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
149 const MachineInstr &MI);
150 void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
151 const MachineInstr &MI);
152 void LowerFAULTING_OP(const MachineInstr &MI);
153
154 void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
155 void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
156 void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);
157 void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, bool Typed);
158
159 typedef std::tuple<unsigned, bool, uint32_t, bool, uint64_t>
160 HwasanMemaccessTuple;
// Maps (reg, is-short, access-info, is-fixed-shadow, shadow-offset) to the
// outlined checker symbol; bodies are emitted by emitHwasanMemaccessSymbols.
161 std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
162 void LowerKCFI_CHECK(const MachineInstr &MI);
163 void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
164 void emitHwasanMemaccessSymbols(Module &M);
165
166 void emitSled(const MachineInstr &MI, SledKind Kind);
167
168 // Returns whether Reg may be used to store sensitive temporary values when
169 // expanding PtrAuth pseudos. Some OSes may take extra care to protect a
170 // small subset of GPRs on context switches - use these registers then.
171 //
172 // If there are no preferred registers, returns true for any Reg.
173 bool isPtrauthRegSafe(Register Reg) const {
174 if (STI->isX16X17Safer())
175 return Reg == AArch64::X16 || Reg == AArch64::X17;
176
177 return true;
178 }
179
180 // Emit the sequence for BRA/BLRA (authenticate + branch/call).
181 void emitPtrauthBranch(const MachineInstr *MI);
182
183 void emitPtrauthCheckAuthenticatedValue(Register TestedReg,
184 Register ScratchReg,
187 const MCSymbol *OnFailure = nullptr);
188
189 // Check authenticated LR before tail calling.
190 void emitPtrauthTailCallHardening(const MachineInstr *TC);
191
192 struct PtrAuthSchema {
193 PtrAuthSchema(AArch64PACKey::ID Key, uint64_t IntDisc,
194 const MachineOperand &AddrDiscOp);
195
197 uint64_t IntDisc;
198 Register AddrDisc;
199 bool AddrDiscIsKilled;
200 };
201
202 // Emit the sequence for AUT or AUTPAC. Addend if AUTRELLOADPAC
203 void emitPtrauthAuthResign(Register Pointer, Register Scratch,
204 PtrAuthSchema AuthSchema,
205 std::optional<PtrAuthSchema> SignSchema,
206 std::optional<uint64_t> Addend, Value *DS);
207
208 // Emit R_AARCH64_PATCHINST, the deactivation symbol relocation. Returns true
209 // if no instruction should be emitted because the deactivation symbol is
210 // defined in the current module so this function emitted a NOP instead.
211 bool emitDeactivationSymbolRelocation(Value *DS);
212
213 // Emit the sequence for PAC.
214 void emitPtrauthSign(const MachineInstr *MI);
215
216 // Emit the sequence to compute the discriminator.
217 //
218 // The Scratch register passed to this function must be safe, as returned by
219 // isPtrauthRegSafe(ScratchReg).
220 //
221 // The returned register is either ScratchReg, AddrDisc, or XZR. Furthermore,
222 // it is guaranteed to be safe (or XZR), with the only exception of
223 // passing-through an *unmodified* unsafe AddrDisc register.
224 //
225 // If the expanded pseudo is allowed to clobber AddrDisc register, setting
226 // MayClobberAddrDisc may save one MOV instruction, provided
227 // isPtrauthRegSafe(AddrDisc) is true:
228 //
229 // mov x17, x16
230 // movk x17, #1234, lsl #48
231 // ; x16 is not used anymore
232 //
233 // can be replaced by
234 //
235 // movk x16, #1234, lsl #48
236 Register emitPtrauthDiscriminator(uint64_t Disc, Register AddrDisc,
237 Register ScratchReg,
238 bool MayClobberAddrDisc = false);
239
240 // Emit the sequence for LOADauthptrstatic
241 void LowerLOADauthptrstatic(const MachineInstr &MI);
242
243 // Emit the sequence for LOADgotPAC/MOVaddrPAC (either GOT adrp-ldr or
244 // adrp-add followed by PAC sign)
245 void LowerMOVaddrPAC(const MachineInstr &MI);
246
247 // Emit the sequence for LOADgotAUTH (load signed pointer from signed ELF GOT
248 // and authenticate it with, if FPAC bit is not set, check+trap sequence after
249 // authenticating)
250 void LowerLOADgotAUTH(const MachineInstr &MI);
251
252 void emitAddImm(MCRegister Val, int64_t Addend, MCRegister Tmp);
253 void emitAddress(MCRegister Reg, const MCExpr *Expr, MCRegister Tmp,
254 bool DSOLocal, const MCSubtargetInfo &STI);
255
256 const MCExpr *emitPAuthRelocationAsIRelative(
257 const MCExpr *Target, uint64_t Disc, AArch64PACKey::ID KeyID,
258 bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr);
259
260 /// tblgen'erated driver function for lowering simple MI->MC
261 /// pseudo instructions.
262 bool lowerPseudoInstExpansion(const MachineInstr *MI, MCInst &Inst);
263
264 // Emit Build Attributes
265 void emitAttributes(unsigned Flags, uint64_t PAuthABIPlatform,
266 uint64_t PAuthABIVersion, AArch64TargetStreamer *TS);
267
268 // Emit expansion of Compare-and-branch pseudo instructions
269 void emitCBPseudoExpansion(const MachineInstr *MI);
270
271 void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
272 void EmitToStreamer(const MCInst &Inst) {
273 EmitToStreamer(*OutStreamer, Inst);
274 }
275
276 void emitInstruction(const MachineInstr *MI) override;
277
278 void emitFunctionHeaderComment() override;
279
280 void getAnalysisUsage(AnalysisUsage &AU) const override {
282 AU.setPreservesAll();
283 }
284
// Main per-function driver: caches per-function info, emits the COFF symbol
// definition when targeting COFF, then the function body and XRay table.
285 bool runOnMachineFunction(MachineFunction &MF) override {
286 if (auto *PSIW = getAnalysisIfAvailable<ProfileSummaryInfoWrapperPass>())
287 PSI = &PSIW->getPSI();
288 if (auto *SDPIW =
289 getAnalysisIfAvailable<StaticDataProfileInfoWrapperPass>())
290 SDPI = &SDPIW->getStaticDataProfileInfo();
291
292 AArch64FI = MF.getInfo<AArch64FunctionInfo>();
293 STI = &MF.getSubtarget<AArch64Subtarget>();
294
295 SetupMachineFunction(MF);
296
297 if (STI->isTargetCOFF()) {
298 bool Local = MF.getFunction().hasLocalLinkage();
301 int Type =
303
304 OutStreamer->beginCOFFSymbolDef(CurrentFnSym);
305 OutStreamer->emitCOFFSymbolStorageClass(Scl);
306 OutStreamer->emitCOFFSymbolType(Type);
307 OutStreamer->endCOFFSymbolDef();
308 }
309
310 // Emit the rest of the function body.
311 emitFunctionBody();
312
313 // Emit the XRay table for this function.
314 emitXRayTable();
315
316 // We didn't modify anything.
317 return false;
318 }
319
320 const MCExpr *lowerConstant(const Constant *CV,
321 const Constant *BaseCV = nullptr,
322 uint64_t Offset = 0) override;
323
324private:
325 void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
326 bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
327 bool printAsmRegInClass(const MachineOperand &MO,
328 const TargetRegisterClass *RC, unsigned AltName,
329 raw_ostream &O);
330
331 bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
332 const char *ExtraCode, raw_ostream &O) override;
333 bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
334 const char *ExtraCode, raw_ostream &O) override;
335
336 void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
337
338 void emitFunctionBodyEnd() override;
339 void emitGlobalAlias(const Module &M, const GlobalAlias &GA) override;
340
341 MCSymbol *GetCPISymbol(unsigned CPID) const override;
342 void emitEndOfAsmFile(Module &M) override;
343
344 AArch64FunctionInfo *AArch64FI = nullptr;
345
346 /// Emit the LOHs contained in AArch64FI.
347 void emitLOHs();
348
349 void emitMovXReg(Register Dest, Register Src);
350 void emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift);
351 void emitMOVK(Register Dest, uint64_t Imm, unsigned Shift);
352
353 void emitAUT(AArch64PACKey::ID Key, Register Pointer, Register Disc);
354 void emitPAC(AArch64PACKey::ID Key, Register Pointer, Register Disc);
355 void emitBLRA(bool IsCall, AArch64PACKey::ID Key, Register Target,
356 Register Disc);
357
358 /// Emit instruction to set float register to zero.
359 void emitFMov0(const MachineInstr &MI);
360 void emitFMov0AsFMov(const MachineInstr &MI, Register DestReg);
361
362 using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;
363
364 MInstToMCSymbol LOHInstToLabel;
365
366 bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const override {
367 return ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags;
368 }
369
370 const MCSubtargetInfo *getIFuncMCSubtargetInfo() const override {
371 assert(STI);
372 return STI;
373 }
374 void emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
375 MCSymbol *LazyPointer) override;
376 void emitMachOIFuncStubHelperBody(Module &M, const GlobalIFunc &GI,
377 MCSymbol *LazyPointer) override;
378
379 /// Checks if this instruction is part of a sequence that is eligible for
380 /// import call optimization and, if so, records it to be emitted in the
381 /// import call section.
382 void recordIfImportCall(const MachineInstr *BranchInst);
383};
384
385} // end anonymous namespace
386
// Module prologue: emits COFF feature data and, on ELF, AArch64 build
// attributes plus a .note.gnu.property section derived from module flags.
387void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
388 const Triple &TT = TM.getTargetTriple();
389
390 if (TT.isOSBinFormatCOFF()) {
391 emitCOFFFeatureSymbol(M);
392 emitCOFFReplaceableFunctionData(M);
393
394 if (M.getModuleFlag("import-call-optimization"))
395 EnableImportCallOptimization = true;
396 }
397
// Everything below is ELF-only.
398 if (!TT.isOSBinFormatELF())
399 return;
400
401 // For emitting build attributes and .note.gnu.property section
402 auto *TS =
403 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer())  ;
404 // Assemble feature flags that may require creation of build attributes and a
405 // note section.
406 unsigned BAFlags = 0;
407 unsigned GNUFlags = 0;
408 if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
409 M.getModuleFlag("branch-target-enforcement"))) {
410 if (!BTE->isZero()) {
411 BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_BTI_Flag;
413 }
414 }
415
416 if (const auto *GCS = mdconst::extract_or_null<ConstantInt>(
417 M.getModuleFlag("guarded-control-stack"))) {
418 if (!GCS->isZero()) {
419 BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_GCS_Flag;
421 }
422 }
423
424 if (const auto *Sign = mdconst::extract_or_null<ConstantInt>(
425 M.getModuleFlag("sign-return-address"))) {
426 if (!Sign->isZero()) {
427 BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_PAC_Flag;
429 }
430 }
431
// uint64_t(-1) is the "flag absent" sentinel; emitAttributes maps it to 0.
432 uint64_t PAuthABIPlatform = -1;
433 if (const auto *PAP = mdconst::extract_or_null<ConstantInt>(
434 M.getModuleFlag("aarch64-elf-pauthabi-platform"))) {
435 PAuthABIPlatform = PAP->getZExtValue();
436 }
437
438 uint64_t PAuthABIVersion = -1;
439 if (const auto *PAV = mdconst::extract_or_null<ConstantInt>(
440 M.getModuleFlag("aarch64-elf-pauthabi-version"))) {
441 PAuthABIVersion = PAV->getZExtValue();
442 }
443
444 // Emit AArch64 Build Attributes
445 emitAttributes(BAFlags, PAuthABIPlatform, PAuthABIVersion, TS);
446 // Emit a .note.gnu.property section with the flags.
447 TS->emitNoteSection(GNUFlags, PAuthABIPlatform, PAuthABIVersion);
448}
449
450void AArch64AsmPrinter::emitFunctionHeaderComment() {
451 const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
452 std::optional<std::string> OutlinerString = FI->getOutliningStyle();
453 if (OutlinerString != std::nullopt)
454 OutStreamer->getCommentOS() << ' ' << OutlinerString;
455}
456
457void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI)
458{
459 const Function &F = MF->getFunction();
460 if (F.hasFnAttribute("patchable-function-entry")) {
461 unsigned Num;
462 if (F.getFnAttribute("patchable-function-entry")
463 .getValueAsString()
464 .getAsInteger(10, Num))
465 return;
466 emitNops(Num);
467 return;
468 }
469
470 emitSled(MI, SledKind::FUNCTION_ENTER);
471}
472
// Expands PATCHABLE_FUNCTION_EXIT into an XRay FUNCTION_EXIT sled.
473void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI) {
474 emitSled(MI, SledKind::FUNCTION_EXIT);
475}
476
// Expands PATCHABLE_TAIL_CALL into an XRay TAIL_CALL sled.
477void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI) {
478 emitSled(MI, SledKind::TAIL_CALL);
479}
480
481void AArch64AsmPrinter::emitSled(const MachineInstr &MI, SledKind Kind) {
482 static const int8_t NoopsInSledCount = 7;
483 // We want to emit the following pattern:
484 //
485 // .Lxray_sled_N:
486 // ALIGN
487 // B #32
488 // ; 7 NOP instructions (28 bytes)
489 // .tmpN
490 //
491 // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
492 // over the full 32 bytes (8 instructions) with the following pattern:
493 //
494 // STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
495 // LDR W17, #12 ; W17 := function ID
496 // LDR X16,#12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
497 // BLR X16 ; call the tracing trampoline
498 // ;DATA: 32 bits of function ID
499 // ;DATA: lower 32 bits of the address of the trampoline
500 // ;DATA: higher 32 bits of the address of the trampoline
501 // LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
502 //
503 OutStreamer->emitCodeAlignment(Align(4), &getSubtargetInfo());
504 auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
505 OutStreamer->emitLabel(CurSled);
506 auto Target = OutContext.createTempSymbol();
507
508 // Emit "B #32" instruction, which jumps over the next 28 bytes.
509 // The operand has to be the number of 4-byte instructions to jump over,
510 // including the current instruction.
511 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));
512
513 for (int8_t I = 0; I < NoopsInSledCount; I++)
514 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::NOP));
515
516 OutStreamer->emitLabel(Target);
517 recordSled(CurSled, MI, Kind, 2);
518}
519
// Emits AArch64 ELF build-attribute subsections: a required PAuthABI
// subsection when a platform/version is present, and an optional
// feature-and-bits subsection when any of BTI/PAC/GCS flags are set.
520void AArch64AsmPrinter::emitAttributes(unsigned Flags,
521 uint64_t PAuthABIPlatform,
522 uint64_t PAuthABIVersion,
523 AArch64TargetStreamer *TS) {
524
// uint64_t(-1) is the "absent" sentinel used by emitStartOfAsmFile; map it
// to 0 before emitting.
525 PAuthABIPlatform = (uint64_t(-1) == PAuthABIPlatform) ? 0 : PAuthABIPlatform;
526 PAuthABIVersion = (uint64_t(-1) == PAuthABIVersion) ? 0 : PAuthABIVersion;
527
528 if (PAuthABIPlatform || PAuthABIVersion) {
532 AArch64BuildAttributes::SubsectionOptional::REQUIRED,
533 AArch64BuildAttributes::SubsectionType::ULEB128);
537 PAuthABIPlatform, "");
541 "");
542 }
543
544 unsigned BTIValue =
546 unsigned PACValue =
548 unsigned GCSValue =
550
551 if (BTIValue || PACValue || GCSValue) {
555 AArch64BuildAttributes::SubsectionOptional::OPTIONAL,
556 AArch64BuildAttributes::SubsectionType::ULEB128);
566 }
567}
568
569// Emit the following code for Intrinsic::{xray_customevent,xray_typedevent}
570// (built-in functions __xray_customevent/__xray_typedevent).
571//
572// .Lxray_event_sled_N:
573// b 1f
574// save x0 and x1 (and also x2 for TYPED_EVENT_CALL)
575// set up x0 and x1 (and also x2 for TYPED_EVENT_CALL)
576// bl __xray_CustomEvent or __xray_TypedEvent
577// restore x0 and x1 (and also x2 for TYPED_EVENT_CALL)
578// 1:
579//
580// There are 6 instructions for EVENT_CALL and 9 for TYPED_EVENT_CALL.
581//
582// Then record a sled of kind CUSTOM_EVENT or TYPED_EVENT.
583// After patching, b .+N will become a nop.
// Expands PATCHABLE_(TYPED_)EVENT_CALL: emits an XRay event sled that (when
// patched) saves the argument registers, marshals the event operands into
// X0/X1 (and X2 for typed events), calls __xray_CustomEvent or
// __xray_TypedEvent, and restores the saved registers. See the block comment
// above for the overall layout.
584void AArch64AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
585 bool Typed) {
586 auto &O = *OutStreamer;
587 MCSymbol *CurSled = OutContext.createTempSymbol("xray_sled_", true);
588 O.emitLabel(CurSled);
// Mach-O symbols carry a leading underscore.
589 bool MachO = TM.getTargetTriple().isOSBinFormatMachO();
590 auto *Sym = MCSymbolRefExpr::create(
591 OutContext.getOrCreateSymbol(
592 Twine(MachO ? "_" : "") +
593 (Typed ? "__xray_TypedEvent" : "__xray_CustomEvent")),
594 OutContext);
595 if (Typed) {
// Typed event: 9-instruction sled. "B #9" skips the whole body until
// runtime patching turns it into a NOP.
596 O.AddComment("Begin XRay typed event");
597 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(9));
// Save X0/X1 (pre-indexed push) and X2 (to the spare slot) around the call.
598 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
599 .addReg(AArch64::SP)
600 .addReg(AArch64::X0)
601 .addReg(AArch64::X1)
602 .addReg(AArch64::SP)
603 .addImm(-4));
604 EmitToStreamer(O, MCInstBuilder(AArch64::STRXui)
605 .addReg(AArch64::X2)
606 .addReg(AArch64::SP)
607 .addImm(2));
608 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
609 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
610 emitMovXReg(AArch64::X2, MI.getOperand(2).getReg());
611 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
// Restore X2 first, then X0/X1 with the post-indexed pop.
612 EmitToStreamer(O, MCInstBuilder(AArch64::LDRXui)
613 .addReg(AArch64::X2)
614 .addReg(AArch64::SP)
615 .addImm(2));
616 O.AddComment("End XRay typed event");
617 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
618 .addReg(AArch64::SP)
619 .addReg(AArch64::X0)
620 .addReg(AArch64::X1)
621 .addReg(AArch64::SP)
622 .addImm(4));
623
624 recordSled(CurSled, MI, SledKind::TYPED_EVENT, 2);
625 } else {
// Custom event: 6-instruction sled, only X0/X1 are live across the call.
626 O.AddComment("Begin XRay custom event");
627 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(6));
628 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
629 .addReg(AArch64::SP)
630 .addReg(AArch64::X0)
631 .addReg(AArch64::X1)
632 .addReg(AArch64::SP)
633 .addImm(-2));
634 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
635 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
636 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
637 O.AddComment("End XRay custom event");
638 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
639 .addReg(AArch64::SP)
640 .addReg(AArch64::X0)
641 .addReg(AArch64::X1)
642 .addReg(AArch64::SP)
643 .addImm(2));
644
645 recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 2);
646 }
647}
648
// Expands KCFI_CHECK: loads the type hash stored just before the call
// target's entry point, compares it against the expected hash from the
// instruction, and emits a BRK with an encoded ESR on mismatch. The check
// immediately precedes the indirect call it guards.
649void AArch64AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
650 Register AddrReg = MI.getOperand(0).getReg();
651 assert(std::next(MI.getIterator())->isCall() &&
652 "KCFI_CHECK not followed by a call instruction");
653 assert(std::next(MI.getIterator())->getOperand(0).getReg() == AddrReg &&
654 "KCFI_CHECK call target doesn't match call operand");
655
656 // Default to using the intra-procedure-call temporary registers for
657 // comparing the hashes.
658 unsigned ScratchRegs[] = {AArch64::W16, AArch64::W17};
659 if (AddrReg == AArch64::XZR) {
660 // Checking XZR makes no sense. Instead of emitting a load, zero
661 // ScratchRegs[0] and use it for the ESR AddrIndex below.
662 AddrReg = getXRegFromWReg(ScratchRegs[0]);
663 emitMovXReg(AddrReg, AArch64::XZR);
664 } else {
665 // If one of the scratch registers is used for the call target (e.g.
666 // with AArch64::TCRETURNriBTI), we can clobber another caller-saved
667 // temporary register instead (in this case, AArch64::W9) as the check
668 // is immediately followed by the call instruction.
669 for (auto &Reg : ScratchRegs) {
670 if (Reg == getWRegFromXReg(AddrReg)) {
671 Reg = AArch64::W9;
672 break;
673 }
674 }
675 assert(ScratchRegs[0] != AddrReg && ScratchRegs[1] != AddrReg &&
676 "Invalid scratch registers for KCFI_CHECK");
677
678 // Adjust the offset for patchable-function-prefix. This assumes that
679 // patchable-function-prefix is the same for all functions.
680 int64_t PrefixNops = 0;
681 (void)MI.getMF()
682 ->getFunction()
683 .getFnAttribute("patchable-function-prefix")
684 .getValueAsString()
685 .getAsInteger(10, PrefixNops);
686
687 // Load the target function type hash.
688 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDURWi)
689 .addReg(ScratchRegs[0])
690 .addReg(AddrReg)
691 .addImm(-(PrefixNops * 4 + 4)));
692 }
693
694 // Load the expected type hash.
// Two MOVKs materialize the 32-bit hash halfword by halfword.
695 const int64_t Type = MI.getOperand(1).getImm();
696 emitMOVK(ScratchRegs[1], Type & 0xFFFF, 0);
697 emitMOVK(ScratchRegs[1], (Type >> 16) & 0xFFFF, 16);
698
699 // Compare the hashes and trap if there's a mismatch.
700 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSWrs)
701 .addReg(AArch64::WZR)
702 .addReg(ScratchRegs[0])
703 .addReg(ScratchRegs[1])
704 .addImm(0));
705
706 MCSymbol *Pass = OutContext.createTempSymbol();
707 EmitToStreamer(*OutStreamer,
708 MCInstBuilder(AArch64::Bcc)
709 .addImm(AArch64CC::EQ)
710 .addExpr(MCSymbolRefExpr::create(Pass, OutContext)));
711
712 // The base ESR is 0x8000 and the register information is encoded in bits
713 // 0-9 as follows:
714 // - 0-4: n, where the register Xn contains the target address
715 // - 5-9: m, where the register Wm contains the expected type hash
716 // Where n, m are in [0, 30].
717 unsigned TypeIndex = ScratchRegs[1] - AArch64::W0;
718 unsigned AddrIndex;
719 switch (AddrReg) {
720 default:
721 AddrIndex = AddrReg - AArch64::X0;
722 break;
723 case AArch64::FP:
724 AddrIndex = 29;
725 break;
726 case AArch64::LR:
727 AddrIndex = 30;
728 break;
729 }
730
731 assert(AddrIndex < 31 && TypeIndex < 31);
732
733 unsigned ESR = 0x8000 | ((TypeIndex & 31) << 5) | (AddrIndex & 31);
734 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BRK).addImm(ESR));
735 OutStreamer->emitLabel(Pass);
736}
737
738void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
739 Register Reg = MI.getOperand(0).getReg();
740
741 // The HWASan pass won't emit a CHECK_MEMACCESS intrinsic with a pointer
742 // statically known to be zero. However, conceivably, the HWASan pass may
743 // encounter a "cannot currently statically prove to be null" pointer (and is
744 // therefore unable to omit the intrinsic) that later optimization passes
745 // convert into a statically known-null pointer.
746 if (Reg == AArch64::XZR)
747 return;
748
749 bool IsShort =
750 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES) ||
751 (MI.getOpcode() ==
752 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
753 uint32_t AccessInfo = MI.getOperand(1).getImm();
754 bool IsFixedShadow =
755 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW) ||
756 (MI.getOpcode() ==
757 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
758 uint64_t FixedShadowOffset = IsFixedShadow ? MI.getOperand(2).getImm() : 0;
759
760 MCSymbol *&Sym = HwasanMemaccessSymbols[HwasanMemaccessTuple(
761 Reg, IsShort, AccessInfo, IsFixedShadow, FixedShadowOffset)];
762 if (!Sym) {
763 // FIXME: Make this work on non-ELF.
764 if (!TM.getTargetTriple().isOSBinFormatELF())
765 report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");
766
767 std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
768 utostr(AccessInfo);
769 if (IsFixedShadow)
770 SymName += "_fixed_" + utostr(FixedShadowOffset);
771 if (IsShort)
772 SymName += "_short_v2";
773 Sym = OutContext.getOrCreateSymbol(SymName);
774 }
775
776 EmitToStreamer(*OutStreamer,
777 MCInstBuilder(AArch64::BL)
778 .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
779}
780
// Emits the bodies of the outlined HWASan checkers requested by
// LowerHWASAN_CHECK_MEMACCESS: for each unique tuple, a weak hidden function
// that loads the shadow tag, compares it with the pointer tag, handles the
// match-all and short-granule cases, and tail-calls __hwasan_tag_mismatch(_v2)
// on failure.
781void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
782 if (HwasanMemaccessSymbols.empty())
783 return;
784
785 const Triple &TT = TM.getTargetTriple();
786 assert(TT.isOSBinFormatELF());
787 // AArch64Subtarget is huge, so heap allocate it so we don't run out of stack
788 // space.
789 auto STI = std::make_unique<AArch64Subtarget>(
790 TT, TM.getTargetCPU(), TM.getTargetCPU(), TM.getTargetFeatureString(), TM,
791 true);
792 this->STI = STI.get();
793
794 MCSymbol *HwasanTagMismatchV1Sym =
795 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");
796 MCSymbol *HwasanTagMismatchV2Sym =
797 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");
798
799 const MCSymbolRefExpr *HwasanTagMismatchV1Ref =
800 MCSymbolRefExpr::create(HwasanTagMismatchV1Sym, OutContext);
801 const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
802 MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);
803
804 for (auto &P : HwasanMemaccessSymbols) {
805 unsigned Reg = std::get<0>(P.first);
806 bool IsShort = std::get<1>(P.first);
807 uint32_t AccessInfo = std::get<2>(P.first);
808 bool IsFixedShadow = std::get<3>(P.first);
809 uint64_t FixedShadowOffset = std::get<4>(P.first);
810 const MCSymbolRefExpr *HwasanTagMismatchRef =
811 IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
812 MCSymbol *Sym = P.second;
813
// Unpack the packed AccessInfo fields.
814 bool HasMatchAllTag =
815 (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
816 uint8_t MatchAllTag =
817 (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
818 unsigned Size =
819 1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
820 bool CompileKernel =
821 (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;
822
823 OutStreamer->switchSection(OutContext.getELFSection(
824 ".text.hot", ELF::SHT_PROGBITS,
826 /*IsComdat=*/true));
827
828 OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
829 OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
830 OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
831 OutStreamer->emitLabel(Sym);
832
// X16 := shadow index derived from the checked pointer (signed bitfield
// extract of bits [55:4] — presumably address >> shadow scale with tag bits
// dropped; see the HWASan design doc).
833 EmitToStreamer(MCInstBuilder(AArch64::SBFMXri)
834 .addReg(AArch64::X16)
835 .addReg(Reg)
836 .addImm(4)
837 .addImm(55));
838
839 if (IsFixedShadow) {
840 // Aarch64 makes it difficult to embed large constants in the code.
841 // Fortuitously, kShadowBaseAlignment == 32, so we use the 32-bit
842 // left-shift option in the MOV instruction. Combined with the 16-bit
843 // immediate, this is enough to represent any offset up to 2**48.
844 emitMOVZ(AArch64::X17, FixedShadowOffset >> 32, 32);
845 EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
846 .addReg(AArch64::W16)
847 .addReg(AArch64::X17)
848 .addReg(AArch64::X16)
849 .addImm(0)
850 .addImm(0));
851 } else {
852 EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
853 .addReg(AArch64::W16)
854 .addReg(IsShort ? AArch64::X20 : AArch64::X9)
855 .addReg(AArch64::X16)
856 .addImm(0)
857 .addImm(0));
858 }
859
// Compare the loaded shadow tag against the pointer's tag; fall through to
// RET when they match.
860 EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
861 .addReg(AArch64::XZR)
862 .addReg(AArch64::X16)
863 .addReg(Reg)
865 MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
866 EmitToStreamer(MCInstBuilder(AArch64::Bcc)
867 .addImm(AArch64CC::NE)
869 HandleMismatchOrPartialSym, OutContext)));
870 MCSymbol *ReturnSym = OutContext.createTempSymbol();
871 OutStreamer->emitLabel(ReturnSym);
872 EmitToStreamer(MCInstBuilder(AArch64::RET).addReg(AArch64::LR));
873 OutStreamer->emitLabel(HandleMismatchOrPartialSym);
874
875 if (HasMatchAllTag) {
876 EmitToStreamer(MCInstBuilder(AArch64::UBFMXri)
877 .addReg(AArch64::X17)
878 .addReg(Reg)
879 .addImm(56)
880 .addImm(63));
881 EmitToStreamer(MCInstBuilder(AArch64::SUBSXri)
882 .addReg(AArch64::XZR)
883 .addReg(AArch64::X17)
884 .addImm(MatchAllTag)
885 .addImm(0));
886 EmitToStreamer(
887 MCInstBuilder(AArch64::Bcc)
888 .addImm(AArch64CC::EQ)
889 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
890 }
891
892 if (IsShort) {
// Short-granule handling: shadow values 1..15 encode a partially
// addressable granule; re-check the access bounds before reporting.
893 EmitToStreamer(MCInstBuilder(AArch64::SUBSWri)
894 .addReg(AArch64::WZR)
895 .addReg(AArch64::W16)
896 .addImm(15)
897 .addImm(0));
898 MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
899 EmitToStreamer(
900 MCInstBuilder(AArch64::Bcc)
901 .addImm(AArch64CC::HI)
902 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));
903
904 EmitToStreamer(MCInstBuilder(AArch64::ANDXri)
905 .addReg(AArch64::X17)
906 .addReg(Reg)
907 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
908 if (Size != 1)
909 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
910 .addReg(AArch64::X17)
911 .addReg(AArch64::X17)
912 .addImm(Size - 1)
913 .addImm(0));
914 EmitToStreamer(MCInstBuilder(AArch64::SUBSWrs)
915 .addReg(AArch64::WZR)
916 .addReg(AArch64::W16)
917 .addReg(AArch64::W17)
918 .addImm(0));
919 EmitToStreamer(
920 MCInstBuilder(AArch64::Bcc)
921 .addImm(AArch64CC::LS)
922 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));
923
// In-bounds short granule: the real tag lives in the granule's last byte.
924 EmitToStreamer(MCInstBuilder(AArch64::ORRXri)
925 .addReg(AArch64::X16)
926 .addReg(Reg)
927 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
928 EmitToStreamer(MCInstBuilder(AArch64::LDRBBui)
929 .addReg(AArch64::W16)
930 .addReg(AArch64::X16)
931 .addImm(0));
932 EmitToStreamer(
933 MCInstBuilder(AArch64::SUBSXrs)
934 .addReg(AArch64::XZR)
935 .addReg(AArch64::X16)
936 .addReg(Reg)
938 EmitToStreamer(
939 MCInstBuilder(AArch64::Bcc)
940 .addImm(AArch64CC::EQ)
941 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
942
943 OutStreamer->emitLabel(HandleMismatchSym);
944 }
945
// Mismatch confirmed: spill X0/X1 and FP/LR, marshal arguments, and hand
// off to the runtime reporting routine.
946 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
947 .addReg(AArch64::SP)
948 .addReg(AArch64::X0)
949 .addReg(AArch64::X1)
950 .addReg(AArch64::SP)
951 .addImm(-32));
952 EmitToStreamer(MCInstBuilder(AArch64::STPXi)
953 .addReg(AArch64::FP)
954 .addReg(AArch64::LR)
955 .addReg(AArch64::SP)
956 .addImm(29));
957
958 if (Reg != AArch64::X0)
959 emitMovXReg(AArch64::X0, Reg);
960 emitMOVZ(AArch64::X1, AccessInfo & HWASanAccessInfo::RuntimeMask, 0);
961
962 if (CompileKernel) {
963 // The Linux kernel's dynamic loader doesn't support GOT relative
964 // relocations, but it doesn't support late binding either, so just call
965 // the function directly.
966 EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef));
967 } else {
968 // Intentionally load the GOT entry and branch to it, rather than possibly
969 // late binding the function, which may clobber the registers before we
970 // have a chance to save them.
971 EmitToStreamer(MCInstBuilder(AArch64::ADRP)
972 .addReg(AArch64::X16)
973 .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
975 OutContext)));
976 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
977 .addReg(AArch64::X16)
978 .addReg(AArch64::X16)
979 .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
981 OutContext)));
982 EmitToStreamer(MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
983 }
984 }
// Drop the temporary subtarget installed at the top of this function.
985 this->STI = nullptr;
986}
987
988static void emitAuthenticatedPointer(MCStreamer &OutStreamer,
989 MCSymbol *StubLabel,
990 const MCExpr *StubAuthPtrRef) {
991 // sym$auth_ptr$key$disc:
992 OutStreamer.emitLabel(StubLabel);
993 OutStreamer.emitValue(StubAuthPtrRef, /*size=*/8);
994}
995
/// Module-level epilogue. Emits, in order: HWASan memaccess helper symbols,
/// authenticated-pointer stubs (Mach-O or ELF), the signed-GOT STT_FUNC
/// attribute fixups, and the COFF import-call-optimization section.
void AArch64AsmPrinter::emitEndOfAsmFile(Module &M) {
  emitHwasanMemaccessSymbols(M);

  const Triple &TT = TM.getTargetTriple();
  if (TT.isOSBinFormatMachO()) {
    // Output authenticated pointers as indirect symbols, if we have any.
    MachineModuleInfoMachO &MMIMacho =
        MMI->getObjFileInfo<MachineModuleInfoMachO>();

    auto Stubs = MMIMacho.getAuthGVStubList();

    if (!Stubs.empty()) {
      // Switch to the "__auth_ptr" section.
      // NOTE(review): the tail of this getMachOSection() call (section kind
      // argument and closing parens) appears to have been dropped by
      // extraction — verify against upstream.
      OutStreamer->switchSection(
          OutContext.getMachOSection("__DATA", "__auth_ptr", MachO::S_REGULAR,
      emitAlignment(Align(8));

      // One label + 8-byte signed value per stub.
      for (const auto &Stub : Stubs)
        emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);

      OutStreamer->addBlankLine();
    }

    // Funny Darwin hack: This flag tells the linker that no global symbols
    // contain code that falls through to other global symbols (e.g. the obvious
    // implementation of multiple entry points). If this doesn't occur, the
    // linker can safely perform dead code stripping. Since LLVM never
    // generates code that does this, it is always safe to set.
    OutStreamer->emitSubsectionsViaSymbols();
  }

  if (TT.isOSBinFormatELF()) {
    // Output authenticated pointers as indirect symbols, if we have any.
    MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();

    auto Stubs = MMIELF.getAuthGVStubList();

    if (!Stubs.empty()) {
      const TargetLoweringObjectFile &TLOF = getObjFileLowering();
      OutStreamer->switchSection(TLOF.getDataSection());
      emitAlignment(Align(8));

      for (const auto &Stub : Stubs)
        emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);

      OutStreamer->addBlankLine();
    }

    // With signed ELF GOT enabled, the linker looks at the symbol type to
    // choose between keys IA (for STT_FUNC) and DA (for other types). Symbols
    // for functions not defined in the module have STT_NOTYPE type by default.
    // This makes linker to emit signing schema with DA key (instead of IA) for
    // corresponding R_AARCH64_AUTH_GLOB_DAT dynamic reloc. To avoid that, force
    // all function symbols used in the module to have STT_FUNC type. See
    // https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#default-signing-schema
    const auto *PtrAuthELFGOTFlag = mdconst::extract_or_null<ConstantInt>(
        M.getModuleFlag("ptrauth-elf-got"));
    if (PtrAuthELFGOTFlag && PtrAuthELFGOTFlag->getZExtValue() == 1)
      for (const GlobalValue &GV : M.global_values())
        if (!GV.use_empty() && isa<Function>(GV) &&
            !GV.getName().starts_with("llvm."))
          // NOTE(review): the attribute argument (presumably
          // MCSA_ELF_TypeFunction) appears to have been dropped by
          // extraction — verify against upstream.
          OutStreamer->emitSymbolAttribute(getSymbol(&GV),
  }

  // Emit stack and fault map information.
  // NOTE(review): the emitting call (likely emitStackMaps()) appears to have
  // been dropped by extraction — verify against upstream.

  // If import call optimization is enabled, emit the appropriate section.
  // We do this whether or not we recorded any import calls.
  if (EnableImportCallOptimization && TT.isOSBinFormatCOFF()) {
    OutStreamer->switchSection(getObjFileLowering().getImportCallSection());

    // Section always starts with some magic.
    constexpr char ImpCallMagic[12] = "Imp_Call_V1";
    OutStreamer->emitBytes(StringRef{ImpCallMagic, sizeof(ImpCallMagic)});

    // Layout of this section is:
    // Per section that contains calls to imported functions:
    //  uint32_t SectionSize: Size in bytes for information in this section.
    //  uint32_t Section Number
    //  Per call to imported function in section:
    //    uint32_t Kind: the kind of imported function.
    //    uint32_t BranchOffset: the offset of the branch instruction in its
    //                            parent section.
    //    uint32_t TargetSymbolId: the symbol id of the called function.
    for (auto &[Section, CallsToImportedFuncs] :
         SectionToImportedFunctionCalls) {
      // Two header words plus three words per recorded call.
      unsigned SectionSize =
          sizeof(uint32_t) * (2 + 3 * CallsToImportedFuncs.size());
      OutStreamer->emitInt32(SectionSize);
      OutStreamer->emitCOFFSecNumber(Section->getBeginSymbol());
      for (auto &[CallsiteSymbol, CalledSymbol] : CallsToImportedFuncs) {
        // Kind is always IMAGE_REL_ARM64_DYNAMIC_IMPORT_CALL (0x13).
        OutStreamer->emitInt32(0x13);
        OutStreamer->emitCOFFSecOffset(CallsiteSymbol);
        OutStreamer->emitCOFFSymbolIndex(CalledSymbol);
      }
    }
  }
}
1098
/// Emit all collected Linker Optimization Hint (LOH) directives for the
/// current function, translating each recorded MachineInstr to the MCSymbol
/// label that was emitted for it.
void AArch64AsmPrinter::emitLOHs() {
  // NOTE(review): the declaration of MCArgs (the per-directive label buffer,
  // likely a SmallVector<MCSymbol *, ...>) appears to have been dropped by
  // extraction — verify against upstream.

  for (const auto &D : AArch64FI->getLOHContainer()) {
    for (const MachineInstr *MI : D.getArgs()) {
      MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
      // Labels are inserted when instructions are emitted; a miss means the
      // LOH refers to an instruction we never printed.
      assert(LabelIt != LOHInstToLabel.end() &&
             "Label hasn't been inserted for LOH related instruction");
      MCArgs.push_back(LabelIt->second);
    }
    OutStreamer->emitLOHDirective(D.getKind(), MCArgs);
    // Reuse the buffer for the next directive.
    MCArgs.clear();
  }
}
1113
1114void AArch64AsmPrinter::emitFunctionBodyEnd() {
1115 if (!AArch64FI->getLOHRelated().empty())
1116 emitLOHs();
1117}
1118
1119/// GetCPISymbol - Return the symbol for the specified constant pool entry.
1120MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
1121 // Darwin uses a linker-private symbol name for constant-pools (to
1122 // avoid addends on the relocation?), ELF has no such concept and
1123 // uses a normal private symbol.
1124 if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
1125 return OutContext.getOrCreateSymbol(
1126 Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
1127 Twine(getFunctionNumber()) + "_" + Twine(CPID));
1128
1129 return AsmPrinter::GetCPISymbol(CPID);
1130}
1131
/// Print a single machine operand for inline-asm / debug-comment output.
/// Handles register, immediate, symbol, and block-address operands.
void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
                                     raw_ostream &O) {
  const MachineOperand &MO = MI->getOperand(OpNum);
  switch (MO.getType()) {
  default:
    llvm_unreachable("<unknown operand type>");
    // NOTE(review): the case labels of this switch (MO_Register, MO_Immediate,
    // MO_GlobalAddress/ExternalSymbol, MO_BlockAddress) and the register print
    // statement appear to have been dropped by extraction — verify against
    // upstream.
    Register Reg = MO.getReg();
    assert(!MO.getSubReg() && "Subregs should be eliminated!");
    break;
  }
    O << MO.getImm();
    break;
  }
    PrintSymbolOperand(MO, O);
    break;
  }
    MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
    Sym->print(O, MAI);
    break;
  }
  }
}
1160
/// Print a GPR operand for inline asm using the given single-letter mode:
/// 'w' (32-bit), 'x' (64-bit), or 't' (x8 register tuple). Returns true on
/// an unknown mode (AsmPrinter convention: true == error).
bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
                                          raw_ostream &O) {
  Register Reg = MO.getReg();
  switch (Mode) {
  default:
    return true; // Unknown mode.
  case 'w':
    // NOTE(review): the register-conversion statements in each case (e.g.
    // mapping to the W/X/X8-tuple view of Reg) and the final print of the
    // register name appear to have been dropped by extraction — verify
    // against upstream.
    break;
  case 'x':
    break;
  case 't':
    break;
  }

  return false;
}
1181
1182// Prints the register in MO using class RC using the offset in the
1183// new register class. This should not be used for cross class
1184// printing.
1185bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
1186 const TargetRegisterClass *RC,
1187 unsigned AltName, raw_ostream &O) {
1188 assert(MO.isReg() && "Should only get here with a register!");
1189 const TargetRegisterInfo *RI = STI->getRegisterInfo();
1190 Register Reg = MO.getReg();
1191 MCRegister RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
1192 if (!RI->regsOverlap(RegToPrint, Reg))
1193 return true;
1194 O << AArch64InstPrinter::getRegisterName(RegToPrint, AltName);
1195 return false;
1196}
1197
/// Lower an inline-asm operand, honoring AArch64 operand modifiers
/// ('w', 'x', 'b', 'h', 's', 'd', 'q', 'z'). Returns true on error
/// (unknown modifier), false on success.
bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
                                        const char *ExtraCode, raw_ostream &O) {
  const MachineOperand &MO = MI->getOperand(OpNum);

  // First try the generic code, which knows about modifiers like 'c' and 'n'.
  if (!AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O))
    return false;

  // Does this asm operand have a single letter operand modifier?
  if (ExtraCode && ExtraCode[0]) {
    if (ExtraCode[1] != 0)
      return true; // Unknown modifier.

    switch (ExtraCode[0]) {
    default:
      return true; // Unknown modifier.
    case 'w': // Print W register
    case 'x': // Print X register
      if (MO.isReg())
        return printAsmMRegister(MO, ExtraCode[0], O);
      if (MO.isImm() && MO.getImm() == 0) {
        // Constant zero maps onto the zero register of the requested width.
        unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
        // NOTE(review): the statement printing the zero register's name
        // appears to have been dropped by extraction — verify against
        // upstream.
        return false;
      }
      printOperand(MI, OpNum, O);
      return false;
    case 'b': // Print B register.
    case 'h': // Print H register.
    case 's': // Print S register.
    case 'd': // Print D register.
    case 'q': // Print Q register.
    case 'z': // Print Z register.
      if (MO.isReg()) {
        // Pick the FP/SVE register class matching the requested width.
        const TargetRegisterClass *RC;
        switch (ExtraCode[0]) {
        case 'b':
          RC = &AArch64::FPR8RegClass;
          break;
        case 'h':
          RC = &AArch64::FPR16RegClass;
          break;
        case 's':
          RC = &AArch64::FPR32RegClass;
          break;
        case 'd':
          RC = &AArch64::FPR64RegClass;
          break;
        case 'q':
          RC = &AArch64::FPR128RegClass;
          break;
        case 'z':
          RC = &AArch64::ZPRRegClass;
          break;
        default:
          return true;
        }
        return printAsmRegInClass(MO, RC, AArch64::NoRegAltName, O);
      }
      printOperand(MI, OpNum, O);
      return false;
    }
  }

  // According to ARM, we should emit x and v registers unless we have a
  // modifier.
  if (MO.isReg()) {
    Register Reg = MO.getReg();

    // If this is a w or x register, print an x register.
    if (AArch64::GPR32allRegClass.contains(Reg) ||
        AArch64::GPR64allRegClass.contains(Reg))
      return printAsmMRegister(MO, 'x', O);

    // If this is an x register tuple, print an x register.
    if (AArch64::GPR64x8ClassRegClass.contains(Reg))
      return printAsmMRegister(MO, 't', O);

    unsigned AltName = AArch64::NoRegAltName;
    const TargetRegisterClass *RegClass;
    if (AArch64::ZPRRegClass.contains(Reg)) {
      RegClass = &AArch64::ZPRRegClass;
    } else if (AArch64::PPRRegClass.contains(Reg)) {
      RegClass = &AArch64::PPRRegClass;
    } else if (AArch64::PNRRegClass.contains(Reg)) {
      RegClass = &AArch64::PNRRegClass;
    } else {
      RegClass = &AArch64::FPR128RegClass;
      AltName = AArch64::vreg;
    }

    // If this is a b, h, s, d, or q register, print it as a v register.
    return printAsmRegInClass(MO, RegClass, AltName, O);
  }

  printOperand(MI, OpNum, O);
  return false;
}
1296
1297bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
1298 unsigned OpNum,
1299 const char *ExtraCode,
1300 raw_ostream &O) {
1301 if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
1302 return true; // Unknown modifier.
1303
1304 const MachineOperand &MO = MI->getOperand(OpNum);
1305 assert(MO.isReg() && "unexpected inline asm memory operand");
1306 O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
1307 return false;
1308}
1309
1310void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
1311 raw_ostream &OS) {
1312 unsigned NOps = MI->getNumOperands();
1313 assert(NOps == 4);
1314 OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
1315 // cast away const; DIetc do not take const operands for some reason.
1316 OS << MI->getDebugVariable()->getName();
1317 OS << " <- ";
1318 // Frame address. Currently handles register +- offset only.
1319 assert(MI->isIndirectDebugValue());
1320 OS << '[';
1321 for (unsigned I = 0, E = llvm::size(MI->debug_operands()); I < E; ++I) {
1322 if (I != 0)
1323 OS << ", ";
1324 printOperand(MI, I, OS);
1325 }
1326 OS << ']';
1327 OS << "+";
1328 printOperand(MI, NOps - 2, OS);
1329}
1330
/// Emit the jump-table data for the given indices: each entry is the
/// (possibly >>2 compressed) byte/halfword/word distance from the table's
/// PC-relative base symbol to the target basic block.
void AArch64AsmPrinter::emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
                                          ArrayRef<unsigned> JumpTableIndices) {
  // Fast return if there is nothing to emit to avoid creating empty sections.
  if (JumpTableIndices.empty())
    return;
  const TargetLoweringObjectFile &TLOF = getObjFileLowering();
  const auto &F = MF->getFunction();
  // NOTE(review): the declaration of JT (the jump-table entry vector obtained
  // from MJTI) appears to have been dropped by extraction — verify against
  // upstream.

  MCSection *ReadOnlySec = nullptr;
  if (TM.Options.EnableStaticDataPartitioning) {
    // Partitioned data: pick the section based on the first table's hotness.
    ReadOnlySec =
        TLOF.getSectionForJumpTable(F, TM, &JT[JumpTableIndices.front()]);
  } else {
    ReadOnlySec = TLOF.getSectionForJumpTable(F, TM);
  }
  OutStreamer->switchSection(ReadOnlySec);

  auto AFI = MF->getInfo<AArch64FunctionInfo>();
  for (unsigned JTI : JumpTableIndices) {
    const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;

    // If this jump table was deleted, ignore it.
    if (JTBBs.empty()) continue;

    unsigned Size = AFI->getJumpTableEntrySize(JTI);
    emitAlignment(Align(Size));
    OutStreamer->emitLabel(GetJTISymbol(JTI));

    const MCSymbol *BaseSym = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
    const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);

    for (auto *JTBB : JTBBs) {
      const MCExpr *Value =
          MCSymbolRefExpr::create(JTBB->getSymbol(), OutContext);

      // Each entry is:
      //     .byte/.hword (LBB - Lbase)>>2
      // or plain:
      //     .word LBB - Lbase
      Value = MCBinaryExpr::createSub(Value, Base, OutContext);
      if (Size != 4)
        // NOTE(review): the head of this expression (likely
        // `Value = MCBinaryExpr::createLShr(`) appears to have been dropped
        // by extraction — verify against upstream.
        Value, MCConstantExpr::create(2, OutContext), OutContext);

      OutStreamer->emitValue(Value, Size);
    }
  }
}
1380
// Describe a jump table for CodeView debug info: base symbol, base offset,
// branch label, and the CodeView entry-size/shift encoding matching the
// table's entry width.
// NOTE(review): the tail of this return type (likely
// `codeview::JumpTableEntrySize>`) appears to have been dropped by
// extraction — verify against upstream.
std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
AArch64AsmPrinter::getCodeViewJumpTableInfo(int JTI,
                                            const MachineInstr *BranchInstr,
                                            const MCSymbol *BranchLabel) const {
  const auto AFI = MF->getInfo<AArch64FunctionInfo>();
  const auto Base = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
  // NOTE(review): the declaration of EntrySize appears to have been dropped
  // by extraction — verify against upstream.
  switch (AFI->getJumpTableEntrySize(JTI)) {
  case 1:
    // Byte entries hold (target - base) >> 2.
    EntrySize = codeview::JumpTableEntrySize::UInt8ShiftLeft;
    break;
  case 2:
    EntrySize = codeview::JumpTableEntrySize::UInt16ShiftLeft;
    break;
  case 4:
    // Word entries hold the plain signed distance.
    EntrySize = codeview::JumpTableEntrySize::Int32;
    break;
  default:
    llvm_unreachable("Unexpected jump table entry size");
  }
  return std::make_tuple(Base, 0, BranchLabel, EntrySize);
}
1404
/// Emit the function's entry label, plus AArch64 specifics: the ELF
/// .variant_pcs directive for vector-call conventions, and ARM64EC
/// mangled-name aliases.
void AArch64AsmPrinter::emitFunctionEntryLabel() {
  const Triple &TT = TM.getTargetTriple();
  if (TT.isOSBinFormatELF() &&
      (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
       MF->getFunction().getCallingConv() ==
           CallingConv::AArch64_SVE_VectorCall ||
       MF->getInfo<AArch64FunctionInfo>()->isSVECC())) {
    auto *TS =
        static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
    // Mark the symbol as following the vector PCS variant.
    TS->emitDirectiveVariantPCS(CurrentFnSym);
  }

  // NOTE(review): the call emitting the label itself (likely
  // `AsmPrinter::emitFunctionEntryLabel();`) appears to have been dropped by
  // extraction — verify against upstream.

  if (TT.isWindowsArm64EC() && !MF->getFunction().hasLocalLinkage()) {
    // For ARM64EC targets, a function definition's name is mangled differently
    // from the normal symbol, emit required aliases here.
    auto emitFunctionAlias = [&](MCSymbol *Src, MCSymbol *Dst) {
      OutStreamer->emitSymbolAttribute(Src, MCSA_WeakAntiDep);
      OutStreamer->emitAssignment(
          Src, MCSymbolRefExpr::create(Dst, MMI->getContext()));
    };

    // Fetch a symbol name recorded as single-operand string metadata.
    auto getSymbolFromMetadata = [&](StringRef Name) {
      MCSymbol *Sym = nullptr;
      if (MDNode *Node = MF->getFunction().getMetadata(Name)) {
        StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
        Sym = MMI->getContext().getOrCreateSymbol(NameStr);
      }
      return Sym;
    };

    SmallVector<MDNode *> UnmangledNames;
    MF->getFunction().getMetadata("arm64ec_unmangled_name", UnmangledNames);
    for (MDNode *Node : UnmangledNames) {
      StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
      MCSymbol *UnmangledSym = MMI->getContext().getOrCreateSymbol(NameStr);
      // Alias each unmangled name to its EC-mangled counterpart.
      if (std::optional<std::string> MangledName =
              getArm64ECMangledFunctionName(UnmangledSym->getName())) {
        MCSymbol *ECMangledSym =
            MMI->getContext().getOrCreateSymbol(*MangledName);
        emitFunctionAlias(UnmangledSym, ECMangledSym);
      }
    }
    if (MCSymbol *ECMangledSym =
            getSymbolFromMetadata("arm64ec_ecmangled_name"))
      emitFunctionAlias(ECMangledSym, CurrentFnSym);
  }
}
1454
/// Emit one ctors/dtors table entry, rejecting signed pointers whose address
/// discriminator is anything other than the special ctors/dtors value.
void AArch64AsmPrinter::emitXXStructor(const DataLayout &DL,
                                       const Constant *CV) {
  if (const auto *CPA = dyn_cast<ConstantPtrAuth>(CV))
    if (CPA->hasAddressDiscriminator() &&
        !CPA->hasSpecialAddressDiscriminator(
        // NOTE(review): the discriminator-constant argument and the enclosing
        // error call (likely `report_fatal_error(`) appear to have been
        // dropped by extraction — verify against upstream.
        "unexpected address discrimination value for ctors/dtors entry, only "
        "'ptr inttoptr (i64 1 to ptr)' is allowed");
  // If we have signed pointers in xxstructors list, they'll be lowered to @AUTH
  // MCExpr's via AArch64AsmPrinter::lowerConstantPtrAuth. It does not look at
  // actual address discrimination value and only checks
  // hasAddressDiscriminator(), so it's OK to leave special address
  // discrimination value here.
  // NOTE(review): the delegating call (likely
  // `AsmPrinter::emitXXStructor(DL, CV);`) appears to have been dropped by
  // extraction — verify against upstream.
}
1471
/// Emit a global alias; for ARM64EC patchable functions this emits weak
/// "EXP+"-style COFF symbol definitions instead of a normal alias.
void AArch64AsmPrinter::emitGlobalAlias(const Module &M,
                                        const GlobalAlias &GA) {
  if (auto F = dyn_cast_or_null<Function>(GA.getAliasee())) {
    // Global aliases must point to a definition, but unmangled patchable
    // symbols are special and need to point to an undefined symbol with "EXP+"
    // prefix. Such undefined symbol is resolved by the linker by creating
    // x86 thunk that jumps back to the actual EC target.
    if (MDNode *Node = F->getMetadata("arm64ec_exp_name")) {
      StringRef ExpStr = cast<MDString>(Node->getOperand(0))->getString();
      MCSymbol *ExpSym = MMI->getContext().getOrCreateSymbol(ExpStr);
      MCSymbol *Sym = MMI->getContext().getOrCreateSymbol(GA.getName());

      // Declare the EXP+ symbol as an external function.
      OutStreamer->beginCOFFSymbolDef(ExpSym);
      OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
      // NOTE(review): the tail of this expression (likely
      // `<< COFF::SCT_COMPLEX_TYPE_SHIFT);`) appears to have been dropped by
      // extraction — verify against upstream.
      OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
      OutStreamer->endCOFFSymbolDef();

      // Declare the alias itself, weak, assigned to the EXP+ symbol.
      OutStreamer->beginCOFFSymbolDef(Sym);
      OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
      // NOTE(review): same dropped tail as above — verify against upstream.
      OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
      OutStreamer->endCOFFSymbolDef();
      OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
      OutStreamer->emitAssignment(
          Sym, MCSymbolRefExpr::create(ExpSym, MMI->getContext()));
      return;
    }
  }
  // NOTE(review): the fallback delegating call (likely
  // `AsmPrinter::emitGlobalAlias(M, GA);`) appears to have been dropped by
  // extraction — verify against upstream.
}
1503
/// Small jump tables contain an unsigned byte or half, representing the offset
/// from the lowest-addressed possible destination to the desired basic
/// block. Since all instructions are 4-byte aligned, this is further compressed
/// by counting in instructions rather than bytes (i.e. divided by 4). So, to
/// materialize the correct destination we need:
///
///             adr xDest, .LBB0_0
///             ldrb wScratch, [xTable, xEntry]   (with "lsl #1" for ldrh).
///             add xDest, xDest, xScratch        (with "lsl #2" for smaller
///                                                entries)
void AArch64AsmPrinter::LowerJumpTableDest(llvm::MCStreamer &OutStreamer,
                                           const llvm::MachineInstr &MI) {
  // Operands: dest, scratch, table base, entry index, jump-table index.
  Register DestReg = MI.getOperand(0).getReg();
  Register ScratchReg = MI.getOperand(1).getReg();
  Register ScratchRegW =
      STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
  Register TableReg = MI.getOperand(2).getReg();
  Register EntryReg = MI.getOperand(3).getReg();
  int JTIdx = MI.getOperand(4).getIndex();
  int Size = AArch64FI->getJumpTableEntrySize(JTIdx);

  // This has to be first because the compression pass based its reachability
  // calculations on the start of the JumpTableDest instruction.
  auto Label =
      MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);

  // If we don't already have a symbol to use as the base, use the ADR
  // instruction itself.
  if (!Label) {
    // NOTE(review): the creation of Label (likely
    // `Label = MF->getContext().createTempSymbol();`) appears to have been
    // dropped by extraction — verify against upstream.
    AArch64FI->setJumpTableEntryInfo(JTIdx, Size, Label);
    OutStreamer.emitLabel(Label);
  }

  auto LabelExpr = MCSymbolRefExpr::create(Label, MF->getContext());
  EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
                                  .addReg(DestReg)
                                  .addExpr(LabelExpr));

  // Load the number of instruction-steps to offset from the label.
  unsigned LdrOpcode;
  switch (Size) {
  case 1: LdrOpcode = AArch64::LDRBBroX; break;
  case 2: LdrOpcode = AArch64::LDRHHroX; break;
  case 4: LdrOpcode = AArch64::LDRSWroX; break;
  default:
    llvm_unreachable("Unknown jump table size");
  }

  // Word-sized tables load into the full X register; smaller entries load
  // zero-extended into the W sub-register, scaled by the entry size.
  EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
                                  .addReg(Size == 4 ? ScratchReg : ScratchRegW)
                                  .addReg(TableReg)
                                  .addReg(EntryReg)
                                  .addImm(0)
                                  .addImm(Size == 1 ? 0 : 1));

  // Add to the already materialized base label address, multiplying by 4 if
  // compressed.
  EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
                                  .addReg(DestReg)
                                  .addReg(DestReg)
                                  .addReg(ScratchReg)
                                  .addImm(Size == 4 ? 0 : 2));
}
1567
/// Lower the hardened jump-table dispatch pseudo: a bounds-checked,
/// fixed-register (x16 index / x17 scratch) sequence that clamps
/// out-of-range indices before the indirect branch, for use with pointer
/// authentication / jump-table hardening.
void AArch64AsmPrinter::LowerHardenedBRJumpTable(const MachineInstr &MI) {
  const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
  assert(MJTI && "Can't lower jump-table dispatch without JTI");

  const std::vector<MachineJumpTableEntry> &JTs = MJTI->getJumpTables();
  assert(!JTs.empty() && "Invalid JT index for jump-table dispatch");

  // Emit:
  //     mov x17, #<size of table>     ; depending on table size, with MOVKs
  //     cmp x16, x17                  ; or #imm if table size fits in 12-bit
  //     csel x16, x16, xzr, ls        ; check for index overflow
  //
  //     adrp x17, Ltable@PAGE         ; materialize table address
  //     add x17, Ltable@PAGEOFF
  //     ldrsw x16, [x17, x16, lsl #2] ; load table entry
  //
  //   Lanchor:
  //     adr x17, Lanchor              ; compute target address
  //     add x16, x17, x16
  //     br x16                        ; branch to target

  MachineOperand JTOp = MI.getOperand(0);

  unsigned JTI = JTOp.getIndex();
  assert(!AArch64FI->getJumpTableEntryPCRelSymbol(JTI) &&
         "unsupported compressed jump table");

  const uint64_t NumTableEntries = JTs[JTI].MBBs.size();

  // cmp only supports a 12-bit immediate. If we need more, materialize the
  // immediate, using x17 as a scratch register.
  uint64_t MaxTableEntry = NumTableEntries - 1;
  if (isUInt<12>(MaxTableEntry)) {
    // cmp x16, #MaxTableEntry (SUBS with XZR destination).
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXri)
                                     .addReg(AArch64::XZR)
                                     .addReg(AArch64::X16)
                                     .addImm(MaxTableEntry)
                                     .addImm(0));
  } else {
    emitMOVZ(AArch64::X17, static_cast<uint16_t>(MaxTableEntry), 0);
    // It's sad that we have to manually materialize instructions, but we can't
    // trivially reuse the main pseudo expansion logic.
    // A MOVK sequence is easy enough to generate and handles the general case.
    for (int Offset = 16; Offset < 64; Offset += 16) {
      if ((MaxTableEntry >> Offset) == 0)
        break;
      emitMOVK(AArch64::X17, static_cast<uint16_t>(MaxTableEntry >> Offset),
               Offset);
    }
    // cmp x16, x17.
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXrs)
                                     .addReg(AArch64::XZR)
                                     .addReg(AArch64::X16)
                                     .addReg(AArch64::X17)
                                     .addImm(0));
  }

  // This picks entry #0 on failure.
  // We might want to trap instead.
  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::CSELXr)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::XZR)
                                   .addImm(AArch64CC::LS));

  // Prepare the @PAGE/@PAGEOFF low/high operands.
  MachineOperand JTMOHi(JTOp), JTMOLo(JTOp);
  MCOperand JTMCHi, JTMCLo;

  JTMOHi.setTargetFlags(AArch64II::MO_PAGE);
  JTMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);

  MCInstLowering.lowerOperand(JTMOHi, JTMCHi);
  MCInstLowering.lowerOperand(JTMOLo, JTMCLo);

  // adrp x17, Ltable@PAGE / add x17, x17, Ltable@PAGEOFF.
  EmitToStreamer(
      *OutStreamer,
      MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(JTMCHi));

  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXri)
                                   .addReg(AArch64::X17)
                                   .addReg(AArch64::X17)
                                   .addOperand(JTMCLo)
                                   .addImm(0));

  // ldrsw x16, [x17, x16, lsl #2] — 4-byte entries, index scaled by 4.
  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRSWroX)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::X17)
                                   .addReg(AArch64::X16)
                                   .addImm(0)
                                   .addImm(1));

  // The ADR below serves as the PC-relative base the table entries are
  // computed against; record it as this table's base symbol.
  MCSymbol *AdrLabel = MF->getContext().createTempSymbol();
  const auto *AdrLabelE = MCSymbolRefExpr::create(AdrLabel, MF->getContext());
  AArch64FI->setJumpTableEntryInfo(JTI, 4, AdrLabel);

  OutStreamer->emitLabel(AdrLabel);
  EmitToStreamer(
      *OutStreamer,
      MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addExpr(AdrLabelE));

  // add x16, x17, x16 then br x16.
  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXrs)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::X17)
                                   .addReg(AArch64::X16)
                                   .addImm(0));

  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
}
1676
1677void AArch64AsmPrinter::LowerMOPS(llvm::MCStreamer &OutStreamer,
1678 const llvm::MachineInstr &MI) {
1679 unsigned Opcode = MI.getOpcode();
1680 assert(STI->hasMOPS());
1681 assert(STI->hasMTE() || Opcode != AArch64::MOPSMemorySetTaggingPseudo);
1682
1683 const auto Ops = [Opcode]() -> std::array<unsigned, 3> {
1684 if (Opcode == AArch64::MOPSMemoryCopyPseudo)
1685 return {AArch64::CPYFP, AArch64::CPYFM, AArch64::CPYFE};
1686 if (Opcode == AArch64::MOPSMemoryMovePseudo)
1687 return {AArch64::CPYP, AArch64::CPYM, AArch64::CPYE};
1688 if (Opcode == AArch64::MOPSMemorySetPseudo)
1689 return {AArch64::SETP, AArch64::SETM, AArch64::SETE};
1690 if (Opcode == AArch64::MOPSMemorySetTaggingPseudo)
1691 return {AArch64::SETGP, AArch64::SETGM, AArch64::MOPSSETGE};
1692 llvm_unreachable("Unhandled memory operation pseudo");
1693 }();
1694 const bool IsSet = Opcode == AArch64::MOPSMemorySetPseudo ||
1695 Opcode == AArch64::MOPSMemorySetTaggingPseudo;
1696
1697 for (auto Op : Ops) {
1698 int i = 0;
1699 auto MCIB = MCInstBuilder(Op);
1700 // Destination registers
1701 MCIB.addReg(MI.getOperand(i++).getReg());
1702 MCIB.addReg(MI.getOperand(i++).getReg());
1703 if (!IsSet)
1704 MCIB.addReg(MI.getOperand(i++).getReg());
1705 // Input registers
1706 MCIB.addReg(MI.getOperand(i++).getReg());
1707 MCIB.addReg(MI.getOperand(i++).getReg());
1708 MCIB.addReg(MI.getOperand(i++).getReg());
1709
1710 EmitToStreamer(OutStreamer, MCIB);
1711 }
1712}
1713
/// Lower a STACKMAP: record the map at a fresh label, then emit the requested
/// NOP shadow, trimmed by any following instructions that already provide
/// the required bytes.
void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
                                      const MachineInstr &MI) {
  unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();

  auto &Ctx = OutStreamer.getContext();
  MCSymbol *MILabel = Ctx.createTempSymbol();
  OutStreamer.emitLabel(MILabel);

  SM.recordStackMap(*MILabel, MI);
  // AArch64 instructions are fixed 4 bytes wide.
  assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");

  // Scan ahead to trim the shadow.
  const MachineBasicBlock &MBB = *MI.getParent();
  // NOTE(review): the declaration of MII (an iterator positioned at MI, likely
  // `MachineBasicBlock::const_iterator MII(MI);`) appears to have been
  // dropped by extraction — verify against upstream.
  ++MII;
  while (NumNOPBytes > 0) {
    // Calls, debug values, and other patch points end the shadow early.
    if (MII == MBB.end() || MII->isCall() ||
        MII->getOpcode() == AArch64::DBG_VALUE ||
        MII->getOpcode() == TargetOpcode::PATCHPOINT ||
        MII->getOpcode() == TargetOpcode::STACKMAP)
      break;
    ++MII;
    NumNOPBytes -= 4;
  }

  // Emit nops.
  for (unsigned i = 0; i < NumNOPBytes; i += 4)
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
}
1743
1744// Lower a patchpoint of the form:
1745// [<def>], <id>, <numBytes>, <target>, <numArgs>
1746void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1747 const MachineInstr &MI) {
1748 auto &Ctx = OutStreamer.getContext();
1749 MCSymbol *MILabel = Ctx.createTempSymbol();
1750 OutStreamer.emitLabel(MILabel);
1751 SM.recordPatchPoint(*MILabel, MI);
1752
1753 PatchPointOpers Opers(&MI);
1754
1755 int64_t CallTarget = Opers.getCallTarget().getImm();
1756 unsigned EncodedBytes = 0;
1757 if (CallTarget) {
1758 assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
1759 "High 16 bits of call target should be zero.");
1760 Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
1761 EncodedBytes = 16;
1762 // Materialize the jump address:
1763 emitMOVZ(ScratchReg, (CallTarget >> 32) & 0xFFFF, 32);
1764 emitMOVK(ScratchReg, (CallTarget >> 16) & 0xFFFF, 16);
1765 emitMOVK(ScratchReg, CallTarget & 0xFFFF, 0);
1766 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
1767 }
1768 // Emit padding.
1769 unsigned NumBytes = Opers.getNumPatchBytes();
1770 assert(NumBytes >= EncodedBytes &&
1771 "Patchpoint can't request size less than the length of a call.");
1772 assert((NumBytes - EncodedBytes) % 4 == 0 &&
1773 "Invalid number of NOP bytes requested!");
1774 for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
1775 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1776}
1777
/// Lower a STATEPOINT: either emit the requested NOP patch area, or emit the
/// actual call (BL for symbol/immediate targets, BLR for register targets),
/// then record the statepoint at a label placed after the call.
void AArch64AsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                                        const MachineInstr &MI) {
  StatepointOpers SOpers(&MI);
  if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
    assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    for (unsigned i = 0; i < PatchBytes; i += 4)
      EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
  } else {
    // Lower call target and choose correct opcode
    const MachineOperand &CallTarget = SOpers.getCallTarget();
    MCOperand CallTargetMCOp;
    unsigned CallOpcode;
    switch (CallTarget.getType()) {
    // NOTE(review): the case labels of this switch (likely MO_GlobalAddress /
    // MO_ExternalSymbol, MO_Immediate, and MO_Register) appear to have been
    // dropped by extraction — verify against upstream.
      MCInstLowering.lowerOperand(CallTarget, CallTargetMCOp);
      CallOpcode = AArch64::BL;
      break;
      CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
      CallOpcode = AArch64::BL;
      break;
      CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
      CallOpcode = AArch64::BLR;
      break;
    default:
      llvm_unreachable("Unsupported operand type in statepoint call target");
      break;
    }

    EmitToStreamer(OutStreamer,
                   MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
  }

  // Record the statepoint at the return address (just after the call).
  auto &Ctx = OutStreamer.getContext();
  MCSymbol *MILabel = Ctx.createTempSymbol();
  OutStreamer.emitLabel(MILabel);
  SM.recordStatepoint(*MILabel, MI);
}
1818
/// Lower a FAULTING_OP pseudo: record the faulting location and its handler
/// in the fault map, then emit the wrapped real instruction.
void AArch64AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI) {
  // FAULTING_LOAD_OP <def>, <faltinf type>, <MBB handler>,
  //                  <opcode>, <operands>

  Register DefRegister = FaultingMI.getOperand(0).getReg();
  // NOTE(review): the head of this statement (likely
  // `FaultMaps::FaultKind FK =`) appears to have been dropped by
  // extraction — verify against upstream.
      static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
  MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
  unsigned Opcode = FaultingMI.getOperand(3).getImm();
  unsigned OperandsBeginIdx = 4;

  auto &Ctx = OutStreamer->getContext();
  MCSymbol *FaultingLabel = Ctx.createTempSymbol();
  OutStreamer->emitLabel(FaultingLabel);

  assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
  FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);

  // Rebuild the real instruction from the pseudo's trailing operands.
  MCInst MI;
  MI.setOpcode(Opcode);

  // A zero def register means the wrapped instruction has no definition.
  if (DefRegister != (Register)0)
    MI.addOperand(MCOperand::createReg(DefRegister));

  for (const MachineOperand &MO :
       llvm::drop_begin(FaultingMI.operands(), OperandsBeginIdx)) {
    MCOperand Dest;
    lowerOperand(MO, Dest);
    MI.addOperand(Dest);
  }

  OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
  EmitToStreamer(MI);
}
1853
1854void AArch64AsmPrinter::emitMovXReg(Register Dest, Register Src) {
1855 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ORRXrs)
1856 .addReg(Dest)
1857 .addReg(AArch64::XZR)
1858 .addReg(Src)
1859 .addImm(0));
1860}
1861
1862void AArch64AsmPrinter::emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift) {
1863 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1864 EmitToStreamer(*OutStreamer,
1865 MCInstBuilder(Is64Bit ? AArch64::MOVZXi : AArch64::MOVZWi)
1866 .addReg(Dest)
1867 .addImm(Imm)
1868 .addImm(Shift));
1869}
1870
1871void AArch64AsmPrinter::emitMOVK(Register Dest, uint64_t Imm, unsigned Shift) {
1872 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1873 EmitToStreamer(*OutStreamer,
1874 MCInstBuilder(Is64Bit ? AArch64::MOVKXi : AArch64::MOVKWi)
1875 .addReg(Dest)
1876 .addReg(Dest)
1877 .addImm(Imm)
1878 .addImm(Shift));
1879}
1880
1881void AArch64AsmPrinter::emitAUT(AArch64PACKey::ID Key, Register Pointer,
1882 Register Disc) {
1883 bool IsZeroDisc = Disc == AArch64::XZR;
1884 unsigned Opcode = getAUTOpcodeForKey(Key, IsZeroDisc);
1885
1886 // autiza x16 ; if IsZeroDisc
1887 // autia x16, x17 ; if !IsZeroDisc
1888 MCInst AUTInst;
1889 AUTInst.setOpcode(Opcode);
1890 AUTInst.addOperand(MCOperand::createReg(Pointer));
1891 AUTInst.addOperand(MCOperand::createReg(Pointer));
1892 if (!IsZeroDisc)
1893 AUTInst.addOperand(MCOperand::createReg(Disc));
1894
1895 EmitToStreamer(AUTInst);
1896}
1897
1898void AArch64AsmPrinter::emitPAC(AArch64PACKey::ID Key, Register Pointer,
1899 Register Disc) {
1900 bool IsZeroDisc = Disc == AArch64::XZR;
1901 unsigned Opcode = getPACOpcodeForKey(Key, IsZeroDisc);
1902
1903 // paciza x16 ; if IsZeroDisc
1904 // pacia x16, x17 ; if !IsZeroDisc
1905 MCInst PACInst;
1906 PACInst.setOpcode(Opcode);
1907 PACInst.addOperand(MCOperand::createReg(Pointer));
1908 PACInst.addOperand(MCOperand::createReg(Pointer));
1909 if (!IsZeroDisc)
1910 PACInst.addOperand(MCOperand::createReg(Disc));
1911
1912 EmitToStreamer(PACInst);
1913}
1914
1915void AArch64AsmPrinter::emitBLRA(bool IsCall, AArch64PACKey::ID Key,
1916 Register Target, Register Disc) {
1917 bool IsZeroDisc = Disc == AArch64::XZR;
1918 unsigned Opcode = getBranchOpcodeForKey(IsCall, Key, IsZeroDisc);
1919
1920 // blraaz x16 ; if IsZeroDisc
1921 // blraa x16, x17 ; if !IsZeroDisc
1922 MCInst Inst;
1923 Inst.setOpcode(Opcode);
1924 Inst.addOperand(MCOperand::createReg(Target));
1925 if (!IsZeroDisc)
1926 Inst.addOperand(MCOperand::createReg(Disc));
1927 EmitToStreamer(Inst);
1928}
1929
1930void AArch64AsmPrinter::emitFMov0(const MachineInstr &MI) {
1931 Register DestReg = MI.getOperand(0).getReg();
1932 if (!STI->hasZeroCycleZeroingFPWorkaround() && STI->isNeonAvailable()) {
1933 if (STI->hasZeroCycleZeroingFPR64()) {
1934 // Convert H/S register to corresponding D register
1935 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
1936 if (AArch64::FPR16RegClass.contains(DestReg))
1937 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
1938 &AArch64::FPR64RegClass);
1939 else if (AArch64::FPR32RegClass.contains(DestReg))
1940 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
1941 &AArch64::FPR64RegClass);
1942 else
1943 assert(AArch64::FPR64RegClass.contains(DestReg));
1944
1945 MCInst MOVI;
1946 MOVI.setOpcode(AArch64::MOVID);
1947 MOVI.addOperand(MCOperand::createReg(DestReg));
1949 EmitToStreamer(*OutStreamer, MOVI);
1950 ++NumZCZeroingInstrsFPR;
1951 } else if (STI->hasZeroCycleZeroingFPR128()) {
1952 // Convert H/S/D register to corresponding Q register
1953 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
1954 if (AArch64::FPR16RegClass.contains(DestReg)) {
1955 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
1956 &AArch64::FPR128RegClass);
1957 } else if (AArch64::FPR32RegClass.contains(DestReg)) {
1958 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
1959 &AArch64::FPR128RegClass);
1960 } else {
1961 assert(AArch64::FPR64RegClass.contains(DestReg));
1962 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::dsub,
1963 &AArch64::FPR128RegClass);
1964 }
1965
1966 MCInst MOVI;
1967 MOVI.setOpcode(AArch64::MOVIv2d_ns);
1968 MOVI.addOperand(MCOperand::createReg(DestReg));
1970 EmitToStreamer(*OutStreamer, MOVI);
1971 ++NumZCZeroingInstrsFPR;
1972 } else {
1973 emitFMov0AsFMov(MI, DestReg);
1974 }
1975 } else {
1976 emitFMov0AsFMov(MI, DestReg);
1977 }
1978}
1979
1980void AArch64AsmPrinter::emitFMov0AsFMov(const MachineInstr &MI,
1981 Register DestReg) {
1982 MCInst FMov;
1983 switch (MI.getOpcode()) {
1984 default:
1985 llvm_unreachable("Unexpected opcode");
1986 case AArch64::FMOVH0:
1987 FMov.setOpcode(STI->hasFullFP16() ? AArch64::FMOVWHr : AArch64::FMOVWSr);
1988 if (!STI->hasFullFP16())
1989 DestReg = (AArch64::S0 + (DestReg - AArch64::H0));
1990 FMov.addOperand(MCOperand::createReg(DestReg));
1991 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1992 break;
1993 case AArch64::FMOVS0:
1994 FMov.setOpcode(AArch64::FMOVWSr);
1995 FMov.addOperand(MCOperand::createReg(DestReg));
1996 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1997 break;
1998 case AArch64::FMOVD0:
1999 FMov.setOpcode(AArch64::FMOVXDr);
2000 FMov.addOperand(MCOperand::createReg(DestReg));
2001 FMov.addOperand(MCOperand::createReg(AArch64::XZR));
2002 break;
2003 }
2004 EmitToStreamer(*OutStreamer, FMov);
2005}
2006
2007Register AArch64AsmPrinter::emitPtrauthDiscriminator(uint64_t Disc,
2008 Register AddrDisc,
2009 Register ScratchReg,
2010 bool MayClobberAddrDisc) {
2011 assert(isPtrauthRegSafe(ScratchReg) &&
2012 "Safe scratch register must be provided by the caller");
2013 assert(isUInt<16>(Disc) && "Constant discriminator is too wide");
2014
2015 // So far we've used NoRegister in pseudos. Now we need real encodings.
2016 if (AddrDisc == AArch64::NoRegister)
2017 AddrDisc = AArch64::XZR;
2018
2019 // If there is no constant discriminator, there's no blend involved:
2020 // just use the address discriminator register as-is (XZR or not).
2021 if (!Disc)
2022 return AddrDisc;
2023
2024 // If there's only a constant discriminator, MOV it into the scratch register.
2025 if (AddrDisc == AArch64::XZR) {
2026 emitMOVZ(ScratchReg, Disc, 0);
2027 return ScratchReg;
2028 }
2029
2030 // If there are both, emit a blend into the scratch register.
2031
2032 // Check if we can save one MOV instruction.
2033 if (MayClobberAddrDisc && isPtrauthRegSafe(AddrDisc)) {
2034 ScratchReg = AddrDisc;
2035 } else {
2036 emitMovXReg(ScratchReg, AddrDisc);
2037 assert(ScratchReg != AddrDisc &&
2038 "Forbidden to clobber AddrDisc, but have to");
2039 }
2040
2041 emitMOVK(ScratchReg, Disc, 48);
2042 return ScratchReg;
2043}
2044
2045/// Emit a code sequence to check an authenticated pointer value.
2046///
2047/// This function emits a sequence of instructions that checks if TestedReg was
2048/// authenticated successfully. On success, execution continues at the next
2049/// instruction after the sequence.
2050///
2051/// The action performed on failure depends on the OnFailure argument:
2052/// * if OnFailure is not nullptr, control is transferred to that label after
2053/// clearing the PAC field
2054/// * otherwise, BRK instruction is emitted to generate an error
void AArch64AsmPrinter::emitPtrauthCheckAuthenticatedValue(
    Register TestedReg, Register ScratchReg, AArch64PACKey::ID Key,
    AArch64PAuth::AuthCheckMethod Method, const MCSymbol *OnFailure) {
  // Insert a sequence to check if authentication of TestedReg succeeded,
  // such as:
  //
  // - checked and clearing:
  //   ; x16 is TestedReg, x17 is ScratchReg
  //   mov x17, x16
  //   xpaci x17
  //   cmp x16, x17
  //   b.eq Lsuccess
  //   mov x16, x17
  //   b Lend
  //   Lsuccess:
  //   ; skipped if authentication failed
  //   Lend:
  //   ...
  //
  // - checked and trapping:
  //   mov x17, x16
  //   xpaci x17
  //   cmp x16, x17
  //   b.eq Lsuccess
  //   brk #<0xc470 + aut key>
  //   Lsuccess:
  //   ...
  //
  // See the documentation on AuthCheckMethod enumeration constants for
  // the specific code sequences that can be used to perform the check.

  if (Method == AuthCheckMethod::None)
    return;
  if (Method == AuthCheckMethod::DummyLoad) {
    // Probe the pointer with a 32-bit load; an invalid address faults.
    EmitToStreamer(MCInstBuilder(AArch64::LDRWui)
                       .addReg(getWRegFromXReg(ScratchReg))
                       .addReg(TestedReg)
                       .addImm(0));
    assert(!OnFailure && "DummyLoad always traps on error");
    return;
  }

  MCSymbol *SuccessSym = createTempSymbol("auth_success_");
  if (Method == AuthCheckMethod::XPAC || Method == AuthCheckMethod::XPACHint) {
    // mov Xscratch, Xtested
    emitMovXReg(ScratchReg, TestedReg);

    if (Method == AuthCheckMethod::XPAC) {
      // xpac(i|d) Xscratch
      unsigned XPACOpc = getXPACOpcodeForKey(Key);
      EmitToStreamer(
          MCInstBuilder(XPACOpc).addReg(ScratchReg).addReg(ScratchReg));
    } else {
      // xpaclri

      // Note that this method applies XPAC to TestedReg instead of ScratchReg.
      assert(TestedReg == AArch64::LR &&
             "XPACHint mode is only compatible with checking the LR register");
      // NOTE(review): the first line of the assert checking that Key is an
      // I-key appears to be missing in this copy of the file — verify
      // against upstream before building.
             "XPACHint mode is only compatible with I-keys");
      EmitToStreamer(MCInstBuilder(AArch64::XPACLRI));
    }

    // cmp Xtested, Xscratch
    EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
                       .addReg(AArch64::XZR)
                       .addReg(TestedReg)
                       .addReg(ScratchReg)
                       .addImm(0));

    // b.eq Lsuccess
    EmitToStreamer(
        MCInstBuilder(AArch64::Bcc)
            .addImm(AArch64CC::EQ)
            .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
  } else if (Method == AuthCheckMethod::HighBitsNoTBI) {
    // eor Xscratch, Xtested, Xtested, lsl #1
    EmitToStreamer(MCInstBuilder(AArch64::EORXrs)
                       .addReg(ScratchReg)
                       .addReg(TestedReg)
                       .addReg(TestedReg)
                       .addImm(1));
    // tbz Xscratch, #62, Lsuccess
    EmitToStreamer(
        MCInstBuilder(AArch64::TBZX)
            .addReg(ScratchReg)
            .addImm(62)
            .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
  } else {
    llvm_unreachable("Unsupported check method");
  }

  if (!OnFailure) {
    // Trapping sequences do a 'brk'.
    // brk #<0xc470 + aut key>
    EmitToStreamer(MCInstBuilder(AArch64::BRK).addImm(0xc470 | Key));
  } else {
    // Non-trapping checked sequences return the stripped result in TestedReg,
    // skipping over success-only code (such as re-signing the pointer) by
    // jumping to OnFailure label.
    // Note that this can introduce an authentication oracle (such as based on
    // the high bits of the re-signed value).

    // FIXME: The XPAC method can be optimized by applying XPAC to TestedReg
    //        instead of ScratchReg, thus eliminating one `mov` instruction.
    //        Both XPAC and XPACHint can be further optimized by not using a
    //        conditional branch jumping over an unconditional one.

    switch (Method) {
    case AuthCheckMethod::XPACHint:
      // LR is already XPAC-ed at this point.
      break;
    case AuthCheckMethod::XPAC:
      // mov Xtested, Xscratch
      emitMovXReg(TestedReg, ScratchReg);
      break;
    default:
      // If Xtested was not XPAC-ed so far, emit XPAC here.
      // xpac(i|d) Xtested
      unsigned XPACOpc = getXPACOpcodeForKey(Key);
      EmitToStreamer(
          MCInstBuilder(XPACOpc).addReg(TestedReg).addReg(TestedReg));
    }

    // b Lend
    const auto *OnFailureExpr = MCSymbolRefExpr::create(OnFailure, OutContext);
    EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(OnFailureExpr));
  }

  // If the auth check succeeds, we can continue.
  // Lsuccess:
  OutStreamer->emitLabel(SuccessSym);
}
2189
2190// With Pointer Authentication, it may be needed to explicitly check the
2191// authenticated value in LR before performing a tail call.
2192// Otherwise, the callee may re-sign the invalid return address,
2193// introducing a signing oracle.
2194void AArch64AsmPrinter::emitPtrauthTailCallHardening(const MachineInstr *TC) {
2195 if (!AArch64FI->shouldSignReturnAddress(*MF))
2196 return;
2197
2198 auto LRCheckMethod = STI->getAuthenticatedLRCheckMethod(*MF);
2199 if (LRCheckMethod == AArch64PAuth::AuthCheckMethod::None)
2200 return;
2201
2202 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
2203 Register ScratchReg =
2204 TC->readsRegister(AArch64::X16, TRI) ? AArch64::X17 : AArch64::X16;
2205 assert(!TC->readsRegister(ScratchReg, TRI) &&
2206 "Neither x16 nor x17 is available as a scratch register");
2209 emitPtrauthCheckAuthenticatedValue(AArch64::LR, ScratchReg, Key,
2210 LRCheckMethod);
2211}
2212
2213bool AArch64AsmPrinter::emitDeactivationSymbolRelocation(Value *DS) {
2214 if (!DS)
2215 return false;
2216
2217 if (isa<GlobalAlias>(DS)) {
2218 // Just emit the nop directly.
2219 EmitToStreamer(MCInstBuilder(AArch64::NOP));
2220 return true;
2221 }
2222 MCSymbol *Dot = OutContext.createTempSymbol();
2223 OutStreamer->emitLabel(Dot);
2224 const MCExpr *DeactDotExpr = MCSymbolRefExpr::create(Dot, OutContext);
2225
2226 const MCExpr *DSExpr = MCSymbolRefExpr::create(
2227 OutContext.getOrCreateSymbol(DS->getName()), OutContext);
2228 OutStreamer->emitRelocDirective(*DeactDotExpr, "R_AARCH64_PATCHINST", DSExpr,
2229 SMLoc());
2230 return false;
2231}
2232
// Capture a signing/authentication schema: the key, the constant
// discriminator, and the address-discriminator register together with its
// kill flag (taken from the pseudo's MachineOperand).
AArch64AsmPrinter::PtrAuthSchema::PtrAuthSchema(
    AArch64PACKey::ID Key, uint64_t IntDisc, const MachineOperand &AddrDiscOp)
    : Key(Key), IntDisc(IntDisc), AddrDisc(AddrDiscOp.getReg()),
      AddrDiscIsKilled(AddrDiscOp.isKill()) {}
2237
void AArch64AsmPrinter::emitPtrauthAuthResign(
    Register Pointer, Register Scratch, PtrAuthSchema AuthSchema,
    std::optional<PtrAuthSchema> SignSchema, std::optional<uint64_t> OptAddend,
    Value *DS) {
  const bool IsResign = SignSchema.has_value();
  const bool HasLoad = OptAddend.has_value();
  // We expand AUT/AUTPAC into a sequence of the form
  //
  //   ; authenticate x16
  //   ; check pointer in x16
  // Lsuccess:
  //   ; sign x16 (if AUTPAC)
  // Lend:   ; if not trapping on failure
  //
  // with the checking sequence chosen depending on whether/how we should check
  // the pointer and whether we should trap on failure.

  // By default, auth/resign sequences check for auth failures.
  bool ShouldCheck = true;
  // In the checked sequence, we only trap if explicitly requested.
  bool ShouldTrap = MF->getFunction().hasFnAttribute("ptrauth-auth-traps");

  // On an FPAC CPU, you get traps whether you want them or not: there's
  // no point in emitting checks or traps.
  if (STI->hasFPAC())
    ShouldCheck = ShouldTrap = false;

  // However, command-line flags can override this, for experimentation.
  // NOTE(review): the `case` labels of this switch (for the Unchecked /
  // Poison / Trap values of PtrauthAuthChecks) are missing in this copy of
  // the file — verify against upstream before building.
  switch (PtrauthAuthChecks) {
    break;
    ShouldCheck = ShouldTrap = false;
    break;
    ShouldCheck = true;
    ShouldTrap = false;
    break;
    ShouldCheck = ShouldTrap = true;
    break;
  }

  // Compute aut discriminator
  Register AUTDiscReg =
      emitPtrauthDiscriminator(AuthSchema.IntDisc, AuthSchema.AddrDisc, Scratch,
                               AuthSchema.AddrDiscIsKilled);

  // A deactivation symbol may replace the AUT with a NOP.
  if (!emitDeactivationSymbolRelocation(DS))
    emitAUT(AuthSchema.Key, Pointer, AUTDiscReg);

  // Unchecked or checked-but-non-trapping AUT is just an "AUT": we're done.
  if (!IsResign && (!ShouldCheck || !ShouldTrap))
    return;

  MCSymbol *EndSym = nullptr;

  if (ShouldCheck) {
    if (IsResign && !ShouldTrap)
      EndSym = createTempSymbol("resign_end_");

    emitPtrauthCheckAuthenticatedValue(Pointer, Scratch, AuthSchema.Key,
                                       AArch64PAuth::AuthCheckMethod::XPAC,
                                       EndSym);
  }

  // We already emitted unchecked and checked-but-non-trapping AUTs.
  // That left us with trapping AUTs, and AUTPA/AUTRELLOADPACs.
  // Trapping AUTs don't need PAC: we're done.
  if (!IsResign)
    return;

  if (HasLoad) {
    int64_t Addend = *OptAddend;
    // incoming rawpointer in X16, X17 is not live at this point.
    // LDSRWpre x17, x16, simm9 ; note: x16+simm9 used later.
    if (isInt<9>(Addend)) {
      EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRSWpre)
                                       .addReg(AArch64::X16)
                                       .addReg(AArch64::X17)
                                       .addReg(AArch64::X16)
                                       .addImm(/*simm9:*/ Addend));
    } else {
      // x16 = x16 + Addend computation has 2 variants
      if (isUInt<24>(Addend)) {
        // variant 1: add x16, x16, Addend >> shift12 ls shift12
        // This can take upto 2 instructions.
        for (int BitPos = 0; BitPos != 24 && (Addend >> BitPos); BitPos += 12) {
          EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXri)
                                           .addReg(AArch64::X16)
                                           .addReg(AArch64::X16)
                                           .addImm((Addend >> BitPos) & 0xfff)
          // NOTE(review): an `.addImm(AArch64_AM::getShifterImm(` continuation
          // line appears to be missing here in this copy of the file.
                                               AArch64_AM::LSL, BitPos)));
        }
      } else {
        // variant 2: accumulate constant in X17 16 bits at a time, and add to
        // X16 This can take 2-5 instructions.
        EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::MOVZXi)
                                         .addReg(AArch64::X17)
                                         .addImm(Addend & 0xffff)
        // NOTE(review): an `.addImm(AArch64_AM::getShifterImm(` continuation
        // line appears to be missing here in this copy of the file.
                                             AArch64_AM::LSL, 0)));

        for (int Offset = 16; Offset < 64; Offset += 16) {
          uint16_t Fragment = static_cast<uint16_t>(Addend >> Offset);
          if (!Fragment)
            continue;
          EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::MOVKXi)
                                           .addReg(AArch64::X17)
                                           .addReg(AArch64::X17)
                                           .addImm(Fragment)
                                           .addImm(/*shift:*/ Offset));
        }
        // addx x16, x16, x17
        EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXrs)
                                         .addReg(AArch64::X16)
                                         .addReg(AArch64::X16)
                                         .addReg(AArch64::X17)
                                         .addImm(0));
      }
      // ldrsw x17,x16(0)
      EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRSWui)
                                       .addReg(AArch64::X17)
                                       .addReg(AArch64::X16)
                                       .addImm(0));
    }
    // addx x16, x16, x17
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXrs)
                                     .addReg(AArch64::X16)
                                     .addReg(AArch64::X16)
                                     .addReg(AArch64::X17)
                                     .addImm(0));

  } /* HasLoad == true */

  // Compute pac discriminator into x17
  Register PACDiscReg = emitPtrauthDiscriminator(SignSchema->IntDisc,
                                                 SignSchema->AddrDisc, Scratch);
  emitPAC(SignSchema->Key, Pointer, PACDiscReg);

  // Lend:
  if (EndSym)
    OutStreamer->emitLabel(EndSym);
}
2383
2384void AArch64AsmPrinter::emitPtrauthSign(const MachineInstr *MI) {
2385 Register Val = MI->getOperand(1).getReg();
2386 auto Key = (AArch64PACKey::ID)MI->getOperand(2).getImm();
2387 uint64_t Disc = MI->getOperand(3).getImm();
2388 Register AddrDisc = MI->getOperand(4).getReg();
2389 bool AddrDiscKilled = MI->getOperand(4).isKill();
2390
2391 // As long as at least one of Val and AddrDisc is in GPR64noip, a scratch
2392 // register is available.
2393 Register ScratchReg = Val == AArch64::X16 ? AArch64::X17 : AArch64::X16;
2394 assert(ScratchReg != AddrDisc &&
2395 "Neither X16 nor X17 is available as a scratch register");
2396
2397 // Compute pac discriminator
2398 Register DiscReg = emitPtrauthDiscriminator(
2399 Disc, AddrDisc, ScratchReg, /*MayClobberAddrDisc=*/AddrDiscKilled);
2400
2401 if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol()))
2402 return;
2403
2404 emitPAC(Key, Val, DiscReg);
2405}
2406
2407void AArch64AsmPrinter::emitPtrauthBranch(const MachineInstr *MI) {
2408 bool IsCall = MI->getOpcode() == AArch64::BLRA;
2409 unsigned BrTarget = MI->getOperand(0).getReg();
2410
2411 auto Key = (AArch64PACKey::ID)MI->getOperand(1).getImm();
2412 uint64_t Disc = MI->getOperand(2).getImm();
2413
2414 unsigned AddrDisc = MI->getOperand(3).getReg();
2415
2416 // Make sure AddrDisc is solely used to compute the discriminator.
2417 // While hardly meaningful, it is still possible to describe an authentication
2418 // of a pointer against its own value (instead of storage address) with
2419 // intrinsics, so use report_fatal_error instead of assert.
2420 if (BrTarget == AddrDisc)
2421 report_fatal_error("Branch target is signed with its own value");
2422
2423 // If we are printing BLRA pseudo, try to save one MOV by making use of the
2424 // fact that x16 and x17 are described as clobbered by the MI instruction and
2425 // AddrDisc is not used as any other input.
2426 //
2427 // Back in the day, emitPtrauthDiscriminator was restricted to only returning
2428 // either x16 or x17, meaning the returned register is always among the
2429 // implicit-def'ed registers of BLRA pseudo. Now this property can be violated
2430 // if isX16X17Safer predicate is false, thus manually check if AddrDisc is
2431 // among x16 and x17 to prevent clobbering unexpected registers.
2432 //
2433 // Unlike BLRA, BRA pseudo is used to perform computed goto, and thus not
2434 // declared as clobbering x16/x17.
2435 //
2436 // FIXME: Make use of `killed` flags and register masks instead.
2437 bool AddrDiscIsImplicitDef =
2438 IsCall && (AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17);
2439 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17,
2440 AddrDiscIsImplicitDef);
2441 emitBLRA(IsCall, Key, BrTarget, DiscReg);
2442}
2443
// Add the (signed) constant Addend to register Reg. When the magnitude fits
// in 24 bits, this emits one or two ADD/SUB immediates; otherwise the
// constant is materialized into Tmp with MOVZ/MOVN + MOVKs and added with a
// register ADD. Emits nothing when Addend is zero.
void AArch64AsmPrinter::emitAddImm(MCRegister Reg, int64_t Addend,
                                   MCRegister Tmp) {
  if (Addend != 0) {
    const uint64_t AbsOffset = (Addend > 0 ? Addend : -((uint64_t)Addend));
    const bool IsNeg = Addend < 0;
    if (isUInt<24>(AbsOffset)) {
      // One ADD/SUB per non-zero 12-bit chunk (shift 0 and shift 12).
      for (int BitPos = 0; BitPos != 24 && (AbsOffset >> BitPos);
           BitPos += 12) {
        EmitToStreamer(
            MCInstBuilder(IsNeg ? AArch64::SUBXri : AArch64::ADDXri)
                .addReg(Reg)
                .addReg(Reg)
                .addImm((AbsOffset >> BitPos) & 0xfff)
                .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, BitPos)));
      }
    } else {
      // Materialize the full 64-bit constant in Tmp. For negative values use
      // MOVN of the complemented low 16 bits so the untouched chunks are all
      // ones.
      const uint64_t UAddend = Addend;
      EmitToStreamer(MCInstBuilder(IsNeg ? AArch64::MOVNXi : AArch64::MOVZXi)
                         .addReg(Tmp)
                         .addImm((IsNeg ? ~UAddend : UAddend) & 0xffff)
                         .addImm(/*shift=*/0));
      // A MOVK at BitPos (and above) is needed unless every remaining 16-bit
      // chunk already matches the MOVZ/MOVN fill (all zeros / all ones).
      auto NeedMovk = [IsNeg, UAddend](int BitPos) -> bool {
        assert(BitPos == 16 || BitPos == 32 || BitPos == 48);
        uint64_t Shifted = UAddend >> BitPos;
        if (!IsNeg)
          return Shifted != 0;
        for (int I = 0; I != 64 - BitPos; I += 16)
          if (((Shifted >> I) & 0xffff) != 0xffff)
            return true;
        return false;
      };
      for (int BitPos = 16; BitPos != 64 && NeedMovk(BitPos); BitPos += 16)
        emitMOVK(Tmp, (UAddend >> BitPos) & 0xffff, BitPos);

      // add Reg, Reg, Tmp
      EmitToStreamer(MCInstBuilder(AArch64::ADDXrs)
                         .addReg(Reg)
                         .addReg(Reg)
                         .addReg(Tmp)
                         .addImm(/*shift=*/0));
    }
  }
}
2486
// Materialize the address of Expr into Reg. For DSO-local symbols this is an
// adrp/add pair; otherwise an adrp/ldr of the symbol's slot followed by
// adding any constant addend (clobbering Tmp if needed).
void AArch64AsmPrinter::emitAddress(MCRegister Reg, const MCExpr *Expr,
                                    MCRegister Tmp, bool DSOLocal,
                                    const MCSubtargetInfo &STI) {
  MCValue Val;
  if (!Expr->evaluateAsRelocatable(Val, nullptr))
    report_fatal_error("emitAddress could not evaluate");
  if (DSOLocal) {
    EmitToStreamer(
        MCInstBuilder(AArch64::ADRP)
            .addReg(Reg)
            // NOTE(review): an `.addExpr(MCSpecifierExpr::create(` continuation
            // line appears to be missing here in this copy of the file.
                            OutStreamer->getContext())));
    EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                       .addReg(Reg)
                       .addReg(Reg)
                       .addExpr(MCSpecifierExpr::create(
                           Expr, AArch64::S_LO12, OutStreamer->getContext()))
                       .addImm(0));
  } else {
    auto *SymRef =
        MCSymbolRefExpr::create(Val.getAddSym(), OutStreamer->getContext());
    EmitToStreamer(
        MCInstBuilder(AArch64::ADRP)
            .addReg(Reg)
            // NOTE(review): an `.addExpr(MCSpecifierExpr::create(` continuation
            // line appears to be missing here in this copy of the file.
                            OutStreamer->getContext())));
    EmitToStreamer(
        MCInstBuilder(AArch64::LDRXui)
            .addReg(Reg)
            .addReg(Reg)
            // NOTE(review): an `.addExpr(MCSpecifierExpr::create(` continuation
            // line appears to be missing here in this copy of the file.
                            OutStreamer->getContext())));
    // Fold any constant offset on top of the loaded address.
    emitAddImm(Reg, Val.getConstant(), Tmp);
  }
}
2522
2524 // IFUNCs are ELF-only.
2525 if (!TT.isOSBinFormatELF())
2526 return false;
2527
2528 // IFUNCs are supported on glibc, bionic, and some but not all of the BSDs.
2529 return TT.isOSGlibc() || TT.isAndroid() || TT.isOSFreeBSD() ||
2530 TT.isOSDragonFly() || TT.isOSNetBSD();
2531}
2532
2533// Emit an ifunc resolver that returns a signed pointer to the specified target,
2534// and return a FUNCINIT reference to the resolver. In the linked binary, this
2535// function becomes the target of an IRELATIVE relocation. This resolver is used
2536// to relocate signed pointers in global variable initializers in special cases
2537// where the standard R_AARCH64_AUTH_ABS64 relocation would not work.
2538//
2539// Example (signed null pointer, not address discriminated):
2540//
2541// .8byte .Lpauth_ifunc0
2542// .pushsection .text.startup,"ax",@progbits
2543// .Lpauth_ifunc0:
2544// mov x0, #0
2545// mov x1, #12345
2546// b __emupac_pacda
2547//
2548// Example (signed null pointer, address discriminated):
2549//
2550// .Ltmp:
2551// .8byte .Lpauth_ifunc0
2552// .pushsection .text.startup,"ax",@progbits
2553// .Lpauth_ifunc0:
2554// mov x0, #0
2555// adrp x1, .Ltmp
2556// add x1, x1, :lo12:.Ltmp
2557// b __emupac_pacda
2558// .popsection
2559//
2560// Example (signed pointer to symbol, not address discriminated):
2561//
2562// .Ltmp:
2563// .8byte .Lpauth_ifunc0
2564// .pushsection .text.startup,"ax",@progbits
2565// .Lpauth_ifunc0:
2566// adrp x0, symbol
2567// add x0, x0, :lo12:symbol
2568// mov x1, #12345
2569// b __emupac_pacda
2570// .popsection
2571//
2572// Example (signed null pointer, not address discriminated, with deactivation
2573// symbol ds):
2574//
2575// .8byte .Lpauth_ifunc0
2576// .pushsection .text.startup,"ax",@progbits
2577// .Lpauth_ifunc0:
2578// mov x0, #0
2579// mov x1, #12345
2580// .reloc ., R_AARCH64_PATCHINST, ds
2581// b __emupac_pacda
2582// ret
2583// .popsection
const MCExpr *AArch64AsmPrinter::emitPAuthRelocationAsIRelative(
    const MCExpr *Target, uint64_t Disc, AArch64PACKey::ID KeyID,
    bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr) {
  const Triple &TT = TM.getTargetTriple();

  // We only emit an IRELATIVE relocation if the target supports IRELATIVE.
  // NOTE(review): the `if (...)` guard line appears to be missing here in
  // this copy of the file — verify against upstream before building.
    return nullptr;

  // For now, only the DA key is supported.
  if (KeyID != AArch64PACKey::DA)
    return nullptr;

  // AArch64Subtarget is huge, so heap allocate it so we don't run out of stack
  // space.
  auto STI = std::make_unique<AArch64Subtarget>(
      TT, TM.getTargetCPU(), TM.getTargetCPU(), TM.getTargetFeatureString(), TM,
      true);
  this->STI = STI.get();

  // Label the location of the signed pointer itself; it is referenced for
  // address diversity below.
  MCSymbol *Place = OutStreamer->getContext().createTempSymbol();
  OutStreamer->emitLabel(Place);
  OutStreamer->pushSection();

  const MCSymbolELF *Group =
      static_cast<MCSectionELF *>(OutStreamer->getCurrentSectionOnly())
          ->getGroup();
  // NOTE(review): the `Flags` computation lines (and the body of this `if`)
  // appear to be missing here in this copy of the file.
  if (Group)
  OutStreamer->switchSection(OutStreamer->getContext().getELFSection(
      ".text.startup", ELF::SHT_PROGBITS, Flags, 0, Group, true,
      Group ? MCSection::NonUniqueID : PAuthIFuncNextUniqueID++, nullptr));

  // Emit the resolver body: x0 = raw pointer value, x1 = discriminator,
  // tail call to the EmuPAC runtime.
  MCSymbol *IRelativeSym =
      OutStreamer->getContext().createLinkerPrivateSymbol("pauth_ifunc");
  OutStreamer->emitLabel(IRelativeSym);
  if (isa<MCConstantExpr>(Target)) {
    OutStreamer->emitInstruction(MCInstBuilder(AArch64::MOVZXi)
                                     .addReg(AArch64::X0)
                                     .addExpr(Target)
                                     .addImm(0),
                                 *STI);
  } else {
    emitAddress(AArch64::X0, Target, AArch64::X16, IsDSOLocal, *STI);
  }
  if (HasAddressDiversity) {
    // Address-diversified: the discriminator is the storage address plus the
    // constant discriminator.
    auto *PlacePlusDisc = MCBinaryExpr::createAdd(
        MCSymbolRefExpr::create(Place, OutStreamer->getContext()),
        MCConstantExpr::create(Disc, OutStreamer->getContext()),
        OutStreamer->getContext());
    emitAddress(AArch64::X1, PlacePlusDisc, AArch64::X16, /*IsDSOLocal=*/true,
                *STI);
  } else {
    if (!isUInt<16>(Disc)) {
      OutContext.reportError(SMLoc(), "AArch64 PAC Discriminator '" +
                                          Twine(Disc) +
                                          "' out of range [0, 0xFFFF]");
    }
    emitMOVZ(AArch64::X1, Disc, 0);
  }

  if (DSExpr) {
    // Mark the PAC call site patchable via the deactivation symbol.
    MCSymbol *PrePACInst = OutStreamer->getContext().createTempSymbol();
    OutStreamer->emitLabel(PrePACInst);

    auto *PrePACInstExpr =
        MCSymbolRefExpr::create(PrePACInst, OutStreamer->getContext());
    OutStreamer->emitRelocDirective(*PrePACInstExpr, "R_AARCH64_PATCHINST",
                                    DSExpr, SMLoc());
  }

  // We don't know the subtarget because this is being emitted for a global
  // initializer. Because the performance of IFUNC resolvers is unimportant, we
  // always call the EmuPAC runtime, which will end up using the PAC instruction
  // if the target supports PAC.
  MCSymbol *EmuPAC =
      OutStreamer->getContext().getOrCreateSymbol("__emupac_pacda");
  const MCSymbolRefExpr *EmuPACRef =
      MCSymbolRefExpr::create(EmuPAC, OutStreamer->getContext());
  OutStreamer->emitInstruction(MCInstBuilder(AArch64::B).addExpr(EmuPACRef),
                               *STI);

  // We need a RET despite the above tail call because the deactivation symbol
  // may replace the tail call with a NOP.
  if (DSExpr)
    OutStreamer->emitInstruction(
        MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI);
  OutStreamer->popSection();

  return MCSymbolRefExpr::create(IRelativeSym, AArch64::S_FUNCINIT,
                                 OutStreamer->getContext());
}
2677
// Lower a ptrauth constant (signed pointer in a global initializer) to an
// MCExpr: either an IRELATIVE-based ifunc resolver reference, or an @AUTH
// expression handled by the static linker.
const MCExpr *
AArch64AsmPrinter::lowerConstantPtrAuth(const ConstantPtrAuth &CPA) {
  MCContext &Ctx = OutContext;

  // Figure out the base symbol and the addend, if any.
  APInt Offset(64, 0);
  const Value *BaseGV = CPA.getPointer()->stripAndAccumulateConstantOffsets(
      getDataLayout(), Offset, /*AllowNonInbounds=*/true);

  auto *BaseGVB = dyn_cast<GlobalValue>(BaseGV);

  const MCExpr *Sym;
  if (BaseGVB) {
    // If there is an addend, turn that into the appropriate MCExpr.
    Sym = MCSymbolRefExpr::create(getSymbol(BaseGVB), Ctx);
    if (Offset.sgt(0))
      // NOTE(review): a `Sym = MCBinaryExpr::createAdd(` line appears to be
      // missing here in this copy of the file.
          Sym, MCConstantExpr::create(Offset.getSExtValue(), Ctx), Ctx);
    else if (Offset.slt(0))
      // NOTE(review): a `Sym = MCBinaryExpr::createSub(` line appears to be
      // missing here in this copy of the file.
          Sym, MCConstantExpr::create((-Offset).getSExtValue(), Ctx), Ctx);
  } else {
    // No base symbol: the pointer is a plain constant.
    Sym = MCConstantExpr::create(Offset.getSExtValue(), Ctx);
  }

  const MCExpr *DSExpr = nullptr;
  if (auto *DS = dyn_cast<GlobalValue>(CPA.getDeactivationSymbol())) {
    if (isa<GlobalAlias>(DS))
      return Sym;
    DSExpr = MCSymbolRefExpr::create(getSymbol(DS), Ctx);
  }

  uint64_t KeyID = CPA.getKey()->getZExtValue();
  // We later rely on valid KeyID value in AArch64PACKeyIDToString call from
  // AArch64AuthMCExpr::printImpl, so fail fast.
  if (KeyID > AArch64PACKey::LAST) {
    CPA.getContext().emitError("AArch64 PAC Key ID '" + Twine(KeyID) +
                               "' out of range [0, " +
                               Twine((unsigned)AArch64PACKey::LAST) + "]");
    KeyID = 0;
  }

  uint64_t Disc = CPA.getDiscriminator()->getZExtValue();

  // Check if we can represent this with an IRELATIVE and emit it if so.
  if (auto *IFuncSym = emitPAuthRelocationAsIRelative(
          Sym, Disc, AArch64PACKey::ID(KeyID), CPA.hasAddressDiscriminator(),
          BaseGVB && BaseGVB->isDSOLocal(), DSExpr))
    return IFuncSym;

  if (!isUInt<16>(Disc)) {
    CPA.getContext().emitError("AArch64 PAC Discriminator '" + Twine(Disc) +
                               "' out of range [0, 0xFFFF]");
    Disc = 0;
  }

  if (DSExpr)
    report_fatal_error("deactivation symbols unsupported in constant "
                       "expressions on this target");

  // Finally build the complete @AUTH expr.
  return AArch64AuthMCExpr::create(Sym, Disc, AArch64PACKey::ID(KeyID),
                                   CPA.hasAddressDiscriminator(), Ctx);
}
2742
void AArch64AsmPrinter::LowerLOADauthptrstatic(const MachineInstr &MI) {
  unsigned DstReg = MI.getOperand(0).getReg();
  const MachineOperand &GAOp = MI.getOperand(1);
  const uint64_t KeyC = MI.getOperand(2).getImm();
  assert(KeyC <= AArch64PACKey::LAST &&
         "key is out of range [0, AArch64PACKey::LAST]");
  const auto Key = (AArch64PACKey::ID)KeyC;
  const uint64_t Disc = MI.getOperand(3).getImm();
  assert(isUInt<16>(Disc) &&
         "constant discriminator is out of range [0, 0xffff]");

  // Emit instruction sequence like the following:
  //   ADRP x16, symbol$auth_ptr$key$disc
  //   LDR x16, [x16, :lo12:symbol$auth_ptr$key$disc]
  //
  // Where the $auth_ptr$ symbol is the stub slot containing the signed pointer
  // to symbol.
  MCSymbol *AuthPtrStubSym;
  if (TM.getTargetTriple().isOSBinFormatELF()) {
    const auto &TLOF =
        static_cast<const AArch64_ELFTargetObjectFile &>(getObjFileLowering());

    assert(GAOp.getOffset() == 0 &&
           "non-zero offset for $auth_ptr$ stub slots is not supported");
    const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
    AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
  } else {
    assert(TM.getTargetTriple().isOSBinFormatMachO() &&
           "LOADauthptrstatic is implemented only for MachO/ELF");

    const auto &TLOF = static_cast<const AArch64_MachoTargetObjectFile &>(
        getObjFileLowering());

    assert(GAOp.getOffset() == 0 &&
           "non-zero offset for $auth_ptr$ stub slots is not supported");
    const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
    AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
  }

  // NOTE(review): the initializer of StubMOHi (presumably a CreateMCSymbol
  // call with the MO_PAGE flag, mirroring StubMOLo below) appears to be
  // missing here in this copy of the file.
  MachineOperand StubMOHi =
  MachineOperand StubMOLo = MachineOperand::CreateMCSymbol(
      AuthPtrStubSym, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
  MCOperand StubMCHi, StubMCLo;

  MCInstLowering.lowerOperand(StubMOHi, StubMCHi);
  MCInstLowering.lowerOperand(StubMOLo, StubMCLo);

  // ADRP of the stub slot's page, then load the signed pointer from it.
  EmitToStreamer(
      *OutStreamer,
      MCInstBuilder(AArch64::ADRP).addReg(DstReg).addOperand(StubMCHi));

  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRXui)
                                   .addReg(DstReg)
                                   .addReg(DstReg)
                                   .addOperand(StubMCLo));
}
2800
/// Lower a MOVaddrPAC or LOADgotPAC pseudo: materialize the address of a
/// global into x16 (directly, or through the GOT for LOADgotPAC), apply an
/// optional constant offset, then sign the result with the requested PAC key
/// and discriminator.  x17 serves as scratch throughout.
void AArch64AsmPrinter::LowerMOVaddrPAC(const MachineInstr &MI) {
  const bool IsGOTLoad = MI.getOpcode() == AArch64::LOADgotPAC;
  const bool IsELFSignedGOT = MI.getParent()
                                  ->getParent()
                                  ->getInfo<AArch64FunctionInfo>()
                                  ->hasELFSignedGOT();
  // Copy the operand so the offset can be stripped off and applied separately.
  MachineOperand GAOp = MI.getOperand(0);
  const uint64_t KeyC = MI.getOperand(1).getImm();
  assert(KeyC <= AArch64PACKey::LAST &&
         "key is out of range [0, AArch64PACKey::LAST]");
  const auto Key = (AArch64PACKey::ID)KeyC;
  const unsigned AddrDisc = MI.getOperand(2).getReg();
  const uint64_t Disc = MI.getOperand(3).getImm();

  // The page/pageoff relocations below must target the bare symbol; the
  // offset is added explicitly after materialization.
  const int64_t Offset = GAOp.getOffset();
  GAOp.setOffset(0);

  // Emit:
  // target materialization:
  // - via GOT:
  //   - unsigned GOT:
  //       adrp x16, :got:target
  //       ldr x16, [x16, :got_lo12:target]
  //       add offset to x16 if offset != 0
  //   - ELF signed GOT:
  //       adrp x17, :got:target
  //       add x17, x17, :got_auth_lo12:target
  //       ldr x16, [x17]
  //       aut{i|d}a x16, x17
  //       check+trap sequence (if no FPAC)
  //       add offset to x16 if offset != 0
  //
  // - direct:
  //     adrp x16, target
  //     add x16, x16, :lo12:target
  //     add offset to x16 if offset != 0
  //
  // add offset to x16:
  // - abs(offset) fits 24 bits:
  //     add/sub x16, x16, #<offset>[, #lsl 12] (up to 2 instructions)
  // - abs(offset) does not fit 24 bits:
  //   - offset < 0:
  //       movn+movk sequence filling x17 register with the offset (up to 4
  //       instructions)
  //       add x16, x16, x17
  //   - offset > 0:
  //       movz+movk sequence filling x17 register with the offset (up to 4
  //       instructions)
  //       add x16, x16, x17
  //
  // signing:
  // - 0 discriminator:
  //     paciza x16
  // - Non-0 discriminator, no address discriminator:
  //     mov x17, #Disc
  //     pacia x16, x17
  // - address discriminator (with potentially folded immediate discriminator):
  //     pacia x16, xAddrDisc

  MachineOperand GAMOHi(GAOp), GAMOLo(GAOp);
  MCOperand GAMCHi, GAMCLo;

  GAMOHi.setTargetFlags(AArch64II::MO_PAGE);
  GAMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
  if (IsGOTLoad) {
    GAMOHi.addTargetFlag(AArch64II::MO_GOT);
    GAMOLo.addTargetFlag(AArch64II::MO_GOT);
  }

  MCInstLowering.lowerOperand(GAMOHi, GAMCHi);
  MCInstLowering.lowerOperand(GAMOLo, GAMCLo);

  // Signed-GOT access computes the slot address in x17 (the load result goes
  // to x16); all other forms build the address directly in x16.
  EmitToStreamer(
      MCInstBuilder(AArch64::ADRP)
          .addReg(IsGOTLoad && IsELFSignedGOT ? AArch64::X17 : AArch64::X16)
          .addOperand(GAMCHi));

  if (IsGOTLoad) {
    if (IsELFSignedGOT) {
      EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                         .addReg(AArch64::X17)
                         .addReg(AArch64::X17)
                         .addOperand(GAMCLo)
                         .addImm(0));

      EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                         .addReg(AArch64::X16)
                         .addReg(AArch64::X17)
                         .addImm(0));

      assert(GAOp.isGlobal());
      assert(GAOp.getGlobal()->getValueType() != nullptr);

      // Signed GOT entries use the IA key for functions and DA for data; the
      // slot address in x17 is the address discriminator.
      bool IsFunctionTy = GAOp.getGlobal()->getValueType()->isFunctionTy();
      auto AuthKey = IsFunctionTy ? AArch64PACKey::IA : AArch64PACKey::DA;
      emitAUT(AuthKey, AArch64::X16, AArch64::X17);

      // Without FPAC the AUT does not fault on failure, so emit an explicit
      // check-and-trap sequence.
      if (!STI->hasFPAC())
        emitPtrauthCheckAuthenticatedValue(AArch64::X16, AArch64::X17, AuthKey,
                                           AArch64PAuth::AuthCheckMethod::XPAC);
    } else {
      EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                         .addReg(AArch64::X16)
                         .addReg(AArch64::X16)
                         .addOperand(GAMCLo));
    }
  } else {
    EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                       .addReg(AArch64::X16)
                       .addReg(AArch64::X16)
                       .addOperand(GAMCLo)
                       .addImm(0));
  }

  // Apply the stripped constant offset, then sign x16 with the combined
  // immediate/address discriminator.
  emitAddImm(AArch64::X16, Offset, AArch64::X17);
  Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17);

  emitPAC(Key, AArch64::X16, DiscReg);
}
2920
/// Lower a LOADgotAUTH pseudo: load a signed pointer from its (auth) GOT slot
/// and authenticate it.  With FPAC the authenticated value lands directly in
/// the destination register; without FPAC it is staged in x16 so an explicit
/// check-and-trap sequence can inspect it first.
void AArch64AsmPrinter::LowerLOADgotAUTH(const MachineInstr &MI) {
  Register DstReg = MI.getOperand(0).getReg();
  Register AuthResultReg = STI->hasFPAC() ? DstReg : AArch64::X16;
  const MachineOperand &GAMO = MI.getOperand(1);
  assert(GAMO.getOffset() == 0);

  if (MI.getMF()->getTarget().getCodeModel() == CodeModel::Tiny) {
    // Tiny code model: the GOT slot is within ADR range of PC.
    MCOperand GAMC;
    MCInstLowering.lowerOperand(GAMO, GAMC);
    EmitToStreamer(
        MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addOperand(GAMC));
    EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                       .addReg(AuthResultReg)
                       .addReg(AArch64::X17)
                       .addImm(0));
  } else {
    // Otherwise form the slot address in x17 with adrp+add, then load.
    MachineOperand GAHiOp(GAMO);
    MachineOperand GALoOp(GAMO);
    GAHiOp.addTargetFlag(AArch64II::MO_PAGE);
    GALoOp.addTargetFlag(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);

    MCOperand GAMCHi, GAMCLo;
    MCInstLowering.lowerOperand(GAHiOp, GAMCHi);
    MCInstLowering.lowerOperand(GALoOp, GAMCLo);

    EmitToStreamer(
        MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(GAMCHi));

    EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                       .addReg(AArch64::X17)
                       .addReg(AArch64::X17)
                       .addOperand(GAMCLo)
                       .addImm(0));

    EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                       .addReg(AuthResultReg)
                       .addReg(AArch64::X17)
                       .addImm(0));
  }

  assert(GAMO.isGlobal());
  // An undefined extern-weak symbol resolves its GOT entry to null; a null
  // value must skip authentication, so branch over the AUT when zero.
  MCSymbol *UndefWeakSym;
  if (GAMO.getGlobal()->hasExternalWeakLinkage()) {
    UndefWeakSym = createTempSymbol("undef_weak");
    EmitToStreamer(
        MCInstBuilder(AArch64::CBZX)
            .addReg(AuthResultReg)
            .addExpr(MCSymbolRefExpr::create(UndefWeakSym, OutContext)));
  }

  assert(GAMO.getGlobal()->getValueType() != nullptr);

  // Auth GOT entries are signed with IA for functions and DA for data, using
  // the slot address (x17) as the address discriminator.
  bool IsFunctionTy = GAMO.getGlobal()->getValueType()->isFunctionTy();
  auto AuthKey = IsFunctionTy ? AArch64PACKey::IA : AArch64PACKey::DA;
  emitAUT(AuthKey, AuthResultReg, AArch64::X17);

  // Same condition as above, so UndefWeakSym is set exactly when used here.
  if (GAMO.getGlobal()->hasExternalWeakLinkage())
    OutStreamer->emitLabel(UndefWeakSym);

  if (!STI->hasFPAC()) {
    emitPtrauthCheckAuthenticatedValue(AuthResultReg, AArch64::X17, AuthKey,
                                       AArch64PAuth::AuthCheckMethod::XPAC);

    // Copy the verified value from the x16 staging register to the real
    // destination.
    emitMovXReg(DstReg, AuthResultReg);
  }
}
2987
2988const MCExpr *
2989AArch64AsmPrinter::lowerBlockAddressConstant(const BlockAddress &BA) {
2990 const MCExpr *BAE = AsmPrinter::lowerBlockAddressConstant(BA);
2991 const Function &Fn = *BA.getFunction();
2992
2993 if (std::optional<uint16_t> BADisc =
2994 STI->getPtrAuthBlockAddressDiscriminatorIfEnabled(Fn))
2995 return AArch64AuthMCExpr::create(BAE, *BADisc, AArch64PACKey::IA,
2996 /*HasAddressDiversity=*/false, OutContext);
2997
2998 return BAE;
2999}
3000
3001void AArch64AsmPrinter::emitCBPseudoExpansion(const MachineInstr *MI) {
3002 bool IsImm = false;
3003 unsigned Width = 0;
3004
3005 switch (MI->getOpcode()) {
3006 default:
3007 llvm_unreachable("This is not a CB pseudo instruction");
3008 case AArch64::CBBAssertExt:
3009 IsImm = false;
3010 Width = 8;
3011 break;
3012 case AArch64::CBHAssertExt:
3013 IsImm = false;
3014 Width = 16;
3015 break;
3016 case AArch64::CBWPrr:
3017 Width = 32;
3018 break;
3019 case AArch64::CBXPrr:
3020 Width = 64;
3021 break;
3022 case AArch64::CBWPri:
3023 IsImm = true;
3024 Width = 32;
3025 break;
3026 case AArch64::CBXPri:
3027 IsImm = true;
3028 Width = 64;
3029 break;
3030 }
3031
3033 static_cast<AArch64CC::CondCode>(MI->getOperand(0).getImm());
3034 bool NeedsRegSwap = false;
3035 bool NeedsImmDec = false;
3036 bool NeedsImmInc = false;
3037
3038#define GET_CB_OPC(IsImm, Width, ImmCond, RegCond) \
3039 (IsImm \
3040 ? (Width == 32 ? AArch64::CB##ImmCond##Wri : AArch64::CB##ImmCond##Xri) \
3041 : (Width == 8 \
3042 ? AArch64::CBB##RegCond##Wrr \
3043 : (Width == 16 ? AArch64::CBH##RegCond##Wrr \
3044 : (Width == 32 ? AArch64::CB##RegCond##Wrr \
3045 : AArch64::CB##RegCond##Xrr))))
3046 unsigned MCOpC;
3047
3048 // Decide if we need to either swap register operands or increment/decrement
3049 // immediate operands
3050 switch (CC) {
3051 default:
3052 llvm_unreachable("Invalid CB condition code");
3053 case AArch64CC::EQ:
3054 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ EQ, /* Reg-Reg */ EQ);
3055 break;
3056 case AArch64CC::NE:
3057 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ NE, /* Reg-Reg */ NE);
3058 break;
3059 case AArch64CC::HS:
3060 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ HI, /* Reg-Reg */ HS);
3061 NeedsImmDec = IsImm;
3062 break;
3063 case AArch64CC::LO:
3064 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LO, /* Reg-Reg */ HI);
3065 NeedsRegSwap = !IsImm;
3066 break;
3067 case AArch64CC::HI:
3068 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ HI, /* Reg-Reg */ HI);
3069 break;
3070 case AArch64CC::LS:
3071 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LO, /* Reg-Reg */ HS);
3072 NeedsRegSwap = !IsImm;
3073 NeedsImmInc = IsImm;
3074 break;
3075 case AArch64CC::GE:
3076 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ GT, /* Reg-Reg */ GE);
3077 NeedsImmDec = IsImm;
3078 break;
3079 case AArch64CC::LT:
3080 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LT, /* Reg-Reg */ GT);
3081 NeedsRegSwap = !IsImm;
3082 break;
3083 case AArch64CC::GT:
3084 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ GT, /* Reg-Reg */ GT);
3085 break;
3086 case AArch64CC::LE:
3087 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LT, /* Reg-Reg */ GE);
3088 NeedsRegSwap = !IsImm;
3089 NeedsImmInc = IsImm;
3090 break;
3091 }
3092#undef GET_CB_OPC
3093
3094 MCInst Inst;
3095 Inst.setOpcode(MCOpC);
3096
3097 MCOperand Lhs, Rhs, Trgt;
3098 lowerOperand(MI->getOperand(1), Lhs);
3099 lowerOperand(MI->getOperand(2), Rhs);
3100 lowerOperand(MI->getOperand(3), Trgt);
3101
3102 // Now swap, increment or decrement
3103 if (NeedsRegSwap) {
3104 assert(Lhs.isReg() && "Expected register operand for CB");
3105 assert(Rhs.isReg() && "Expected register operand for CB");
3106 Inst.addOperand(Rhs);
3107 Inst.addOperand(Lhs);
3108 } else if (NeedsImmDec) {
3109 Rhs.setImm(Rhs.getImm() - 1);
3110 Inst.addOperand(Lhs);
3111 Inst.addOperand(Rhs);
3112 } else if (NeedsImmInc) {
3113 Rhs.setImm(Rhs.getImm() + 1);
3114 Inst.addOperand(Lhs);
3115 Inst.addOperand(Rhs);
3116 } else {
3117 Inst.addOperand(Lhs);
3118 Inst.addOperand(Rhs);
3119 }
3120
3121 assert((!IsImm || (Rhs.getImm() >= 0 && Rhs.getImm() < 64)) &&
3122 "CB immediate operand out-of-bounds");
3123
3124 Inst.addOperand(Trgt);
3125 EmitToStreamer(*OutStreamer, Inst);
3126}
3127
3128// Simple pseudo-instructions have their lowering (with expansion to real
3129// instructions) auto-generated.
3130#include "AArch64GenMCPseudoLowering.inc"
3131
/// Forward a single MCInst to the streamer.  In assert builds this also
/// counts emitted instructions so emitInstruction() can check the expansion
/// against getInstSizeInBytes().
void AArch64AsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
  S.emitInstruction(Inst, *STI);
#ifndef NDEBUG
  // Bookkeeping for the per-pseudo size assertion in emitInstruction().
  ++InstsEmitted;
#endif
}
3138
3139void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
3140 AArch64_MC::verifyInstructionPredicates(MI->getOpcode(), STI->getFeatureBits());
3141
3142#ifndef NDEBUG
3143 InstsEmitted = 0;
3144 llvm::scope_exit CheckMISize([&]() {
3145 assert(STI->getInstrInfo()->getInstSizeInBytes(*MI) >= InstsEmitted * 4);
3146 });
3147#endif
3148
3149 // Do any auto-generated pseudo lowerings.
3150 if (MCInst OutInst; lowerPseudoInstExpansion(MI, OutInst)) {
3151 EmitToStreamer(*OutStreamer, OutInst);
3152 return;
3153 }
3154
3155 if (MI->getOpcode() == AArch64::ADRP) {
3156 for (auto &Opd : MI->operands()) {
3157 if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
3158 "swift_async_extendedFramePointerFlags") {
3159 ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
3160 }
3161 }
3162 }
3163
3164 if (AArch64FI->getLOHRelated().count(MI)) {
3165 // Generate a label for LOH related instruction
3166 MCSymbol *LOHLabel = createTempSymbol("loh");
3167 // Associate the instruction with the label
3168 LOHInstToLabel[MI] = LOHLabel;
3169 OutStreamer->emitLabel(LOHLabel);
3170 }
3171
3172 AArch64TargetStreamer *TS =
3173 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
3174 // Do any manual lowerings.
3175 switch (MI->getOpcode()) {
3176 default:
3178 "Unhandled tail call instruction");
3179 break;
3180 case AArch64::HINT: {
3181 // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
3182 // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
3183 // non-empty. If MI is the initial BTI, place the
3184 // __patchable_function_entries label after BTI.
3185 if (CurrentPatchableFunctionEntrySym &&
3186 CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
3187 MI == &MF->front().front()) {
3188 int64_t Imm = MI->getOperand(0).getImm();
3189 if ((Imm & 32) && (Imm & 6)) {
3190 MCInst Inst;
3191 MCInstLowering.Lower(MI, Inst);
3192 EmitToStreamer(*OutStreamer, Inst);
3193 CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
3194 OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
3195 return;
3196 }
3197 }
3198 break;
3199 }
3200 case AArch64::MOVMCSym: {
3201 Register DestReg = MI->getOperand(0).getReg();
3202 const MachineOperand &MO_Sym = MI->getOperand(1);
3203 MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
3204 MCOperand Hi_MCSym, Lo_MCSym;
3205
3206 Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
3207 Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);
3208
3209 MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
3210 MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);
3211
3212 MCInst MovZ;
3213 MovZ.setOpcode(AArch64::MOVZXi);
3214 MovZ.addOperand(MCOperand::createReg(DestReg));
3215 MovZ.addOperand(Hi_MCSym);
3217 EmitToStreamer(*OutStreamer, MovZ);
3218
3219 MCInst MovK;
3220 MovK.setOpcode(AArch64::MOVKXi);
3221 MovK.addOperand(MCOperand::createReg(DestReg));
3222 MovK.addOperand(MCOperand::createReg(DestReg));
3223 MovK.addOperand(Lo_MCSym);
3225 EmitToStreamer(*OutStreamer, MovK);
3226 return;
3227 }
3228 case AArch64::MOVIv2d_ns:
3229 // It is generally beneficial to rewrite "fmov s0, wzr" to "movi d0, #0".
3230 // as movi is more efficient across all cores. Newer cores can eliminate
3231 // fmovs early and there is no difference with movi, but this not true for
3232 // all implementations.
3233 //
3234 // The floating-point version doesn't quite work in rare cases on older
3235 // CPUs, so on those targets we lower this instruction to movi.16b instead.
3236 if (STI->hasZeroCycleZeroingFPWorkaround() &&
3237 MI->getOperand(1).getImm() == 0) {
3238 MCInst TmpInst;
3239 TmpInst.setOpcode(AArch64::MOVIv16b_ns);
3240 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3241 TmpInst.addOperand(MCOperand::createImm(0));
3242 EmitToStreamer(*OutStreamer, TmpInst);
3243 return;
3244 }
3245 break;
3246
3247 case AArch64::DBG_VALUE:
3248 case AArch64::DBG_VALUE_LIST:
3249 if (isVerbose() && OutStreamer->hasRawTextSupport()) {
3250 SmallString<128> TmpStr;
3251 raw_svector_ostream OS(TmpStr);
3252 PrintDebugValueComment(MI, OS);
3253 OutStreamer->emitRawText(StringRef(OS.str()));
3254 }
3255 return;
3256
3257 case AArch64::EMITBKEY: {
3258 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
3259 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
3260 ExceptionHandlingType != ExceptionHandling::ARM)
3261 return;
3262
3263 if (getFunctionCFISectionType(*MF) == CFISection::None)
3264 return;
3265
3266 OutStreamer->emitCFIBKeyFrame();
3267 return;
3268 }
3269
3270 case AArch64::EMITMTETAGGED: {
3271 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
3272 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
3273 ExceptionHandlingType != ExceptionHandling::ARM)
3274 return;
3275
3276 if (getFunctionCFISectionType(*MF) != CFISection::None)
3277 OutStreamer->emitCFIMTETaggedFrame();
3278 return;
3279 }
3280
3281 case AArch64::AUTx16x17: {
3282 const Register Pointer = AArch64::X16;
3283 const Register Scratch = AArch64::X17;
3284
3285 PtrAuthSchema AuthSchema((AArch64PACKey::ID)MI->getOperand(0).getImm(),
3286 MI->getOperand(1).getImm(), MI->getOperand(2));
3287
3288 emitPtrauthAuthResign(Pointer, Scratch, AuthSchema, std::nullopt,
3289 std::nullopt, MI->getDeactivationSymbol());
3290 return;
3291 }
3292
3293 case AArch64::AUTxMxN: {
3294 const Register Pointer = MI->getOperand(0).getReg();
3295 const Register Scratch = MI->getOperand(1).getReg();
3296
3297 PtrAuthSchema AuthSchema((AArch64PACKey::ID)MI->getOperand(3).getImm(),
3298 MI->getOperand(4).getImm(), MI->getOperand(5));
3299
3300 emitPtrauthAuthResign(Pointer, Scratch, AuthSchema, std::nullopt,
3301 std::nullopt, MI->getDeactivationSymbol());
3302 return;
3303 }
3304
3305 case AArch64::AUTPAC: {
3306 const Register Pointer = AArch64::X16;
3307 const Register Scratch = AArch64::X17;
3308
3309 PtrAuthSchema AuthSchema((AArch64PACKey::ID)MI->getOperand(0).getImm(),
3310 MI->getOperand(1).getImm(), MI->getOperand(2));
3311
3312 PtrAuthSchema SignSchema((AArch64PACKey::ID)MI->getOperand(3).getImm(),
3313 MI->getOperand(4).getImm(), MI->getOperand(5));
3314
3315 emitPtrauthAuthResign(Pointer, Scratch, AuthSchema, SignSchema,
3316 std::nullopt, MI->getDeactivationSymbol());
3317 return;
3318 }
3319
3320 case AArch64::AUTRELLOADPAC: {
3321 const Register Pointer = AArch64::X16;
3322 const Register Scratch = AArch64::X17;
3323
3324 PtrAuthSchema AuthSchema((AArch64PACKey::ID)MI->getOperand(0).getImm(),
3325 MI->getOperand(1).getImm(), MI->getOperand(2));
3326
3327 PtrAuthSchema SignSchema((AArch64PACKey::ID)MI->getOperand(3).getImm(),
3328 MI->getOperand(4).getImm(), MI->getOperand(5));
3329
3330 emitPtrauthAuthResign(Pointer, Scratch, AuthSchema, SignSchema,
3331 MI->getOperand(6).getImm(),
3332 MI->getDeactivationSymbol());
3333
3334 return;
3335 }
3336
3337 case AArch64::PAC:
3338 emitPtrauthSign(MI);
3339 return;
3340
3341 case AArch64::LOADauthptrstatic:
3342 LowerLOADauthptrstatic(*MI);
3343 return;
3344
3345 case AArch64::LOADgotPAC:
3346 case AArch64::MOVaddrPAC:
3347 LowerMOVaddrPAC(*MI);
3348 return;
3349
3350 case AArch64::LOADgotAUTH:
3351 LowerLOADgotAUTH(*MI);
3352 return;
3353
3354 case AArch64::BRA:
3355 case AArch64::BLRA:
3356 emitPtrauthBranch(MI);
3357 return;
3358
3359 // Tail calls use pseudo instructions so they have the proper code-gen
3360 // attributes (isCall, isReturn, etc.). We lower them to the real
3361 // instruction here.
3362 case AArch64::AUTH_TCRETURN:
3363 case AArch64::AUTH_TCRETURN_BTI: {
3364 Register Callee = MI->getOperand(0).getReg();
3365 const auto Key = (AArch64PACKey::ID)MI->getOperand(2).getImm();
3366 const uint64_t Disc = MI->getOperand(3).getImm();
3367
3368 Register AddrDisc = MI->getOperand(4).getReg();
3369
3370 Register ScratchReg = Callee == AArch64::X16 ? AArch64::X17 : AArch64::X16;
3371
3372 emitPtrauthTailCallHardening(MI);
3373
3374 // See the comments in emitPtrauthBranch.
3375 if (Callee == AddrDisc)
3376 report_fatal_error("Call target is signed with its own value");
3377
3378 // After isX16X17Safer predicate was introduced, emitPtrauthDiscriminator is
3379 // no longer restricted to only reusing AddrDisc when it is X16 or X17
3380 // (which are implicit-def'ed by AUTH_TCRETURN pseudos), thus impose this
3381 // restriction manually not to clobber an unexpected register.
3382 bool AddrDiscIsImplicitDef =
3383 AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17;
3384 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, ScratchReg,
3385 AddrDiscIsImplicitDef);
3386 emitBLRA(/*IsCall*/ false, Key, Callee, DiscReg);
3387 return;
3388 }
3389
3390 case AArch64::TCRETURNri:
3391 case AArch64::TCRETURNrix16x17:
3392 case AArch64::TCRETURNrix17:
3393 case AArch64::TCRETURNrinotx16:
3394 case AArch64::TCRETURNriALL: {
3395 emitPtrauthTailCallHardening(MI);
3396
3397 recordIfImportCall(MI);
3398 MCInst TmpInst;
3399 TmpInst.setOpcode(AArch64::BR);
3400 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3401 EmitToStreamer(*OutStreamer, TmpInst);
3402 return;
3403 }
3404 case AArch64::TCRETURNdi: {
3405 emitPtrauthTailCallHardening(MI);
3406
3407 MCOperand Dest;
3408 MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
3409 recordIfImportCall(MI);
3410 MCInst TmpInst;
3411 TmpInst.setOpcode(AArch64::B);
3412 TmpInst.addOperand(Dest);
3413 EmitToStreamer(*OutStreamer, TmpInst);
3414 return;
3415 }
3416 case AArch64::SpeculationBarrierISBDSBEndBB: {
3417 // Print DSB SYS + ISB
3418 MCInst TmpInstDSB;
3419 TmpInstDSB.setOpcode(AArch64::DSB);
3420 TmpInstDSB.addOperand(MCOperand::createImm(0xf));
3421 EmitToStreamer(*OutStreamer, TmpInstDSB);
3422 MCInst TmpInstISB;
3423 TmpInstISB.setOpcode(AArch64::ISB);
3424 TmpInstISB.addOperand(MCOperand::createImm(0xf));
3425 EmitToStreamer(*OutStreamer, TmpInstISB);
3426 return;
3427 }
3428 case AArch64::SpeculationBarrierSBEndBB: {
3429 // Print SB
3430 MCInst TmpInstSB;
3431 TmpInstSB.setOpcode(AArch64::SB);
3432 EmitToStreamer(*OutStreamer, TmpInstSB);
3433 return;
3434 }
3435 case AArch64::TLSDESC_AUTH_CALLSEQ: {
3436 /// lower this to:
3437 /// adrp x0, :tlsdesc_auth:var
3438 /// ldr x16, [x0, #:tlsdesc_auth_lo12:var]
3439 /// add x0, x0, #:tlsdesc_auth_lo12:var
3440 /// blraa x16, x0
3441 /// (TPIDR_EL0 offset now in x0)
3442 const MachineOperand &MO_Sym = MI->getOperand(0);
3443 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3444 MCOperand SymTLSDescLo12, SymTLSDesc;
3445 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3446 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3447 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3448 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3449
3450 MCInst Adrp;
3451 Adrp.setOpcode(AArch64::ADRP);
3452 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3453 Adrp.addOperand(SymTLSDesc);
3454 EmitToStreamer(*OutStreamer, Adrp);
3455
3456 MCInst Ldr;
3457 Ldr.setOpcode(AArch64::LDRXui);
3458 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3459 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3460 Ldr.addOperand(SymTLSDescLo12);
3462 EmitToStreamer(*OutStreamer, Ldr);
3463
3464 MCInst Add;
3465 Add.setOpcode(AArch64::ADDXri);
3466 Add.addOperand(MCOperand::createReg(AArch64::X0));
3467 Add.addOperand(MCOperand::createReg(AArch64::X0));
3468 Add.addOperand(SymTLSDescLo12);
3470 EmitToStreamer(*OutStreamer, Add);
3471
3472 // Authenticated TLSDESC accesses are not relaxed.
3473 // Thus, do not emit .tlsdesccall for AUTH TLSDESC.
3474
3475 MCInst Blraa;
3476 Blraa.setOpcode(AArch64::BLRAA);
3477 Blraa.addOperand(MCOperand::createReg(AArch64::X16));
3478 Blraa.addOperand(MCOperand::createReg(AArch64::X0));
3479 EmitToStreamer(*OutStreamer, Blraa);
3480
3481 return;
3482 }
3483 case AArch64::TLSDESC_CALLSEQ: {
3484 /// lower this to:
3485 /// adrp x0, :tlsdesc:var
3486 /// ldr x1, [x0, #:tlsdesc_lo12:var]
3487 /// add x0, x0, #:tlsdesc_lo12:var
3488 /// .tlsdesccall var
3489 /// blr x1
3490 /// (TPIDR_EL0 offset now in x0)
3491 const MachineOperand &MO_Sym = MI->getOperand(0);
3492 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3493 MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
3494 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3495 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3496 MCInstLowering.lowerOperand(MO_Sym, Sym);
3497 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3498 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3499
3500 MCInst Adrp;
3501 Adrp.setOpcode(AArch64::ADRP);
3502 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3503 Adrp.addOperand(SymTLSDesc);
3504 EmitToStreamer(*OutStreamer, Adrp);
3505
3506 MCInst Ldr;
3507 if (STI->isTargetILP32()) {
3508 Ldr.setOpcode(AArch64::LDRWui);
3509 Ldr.addOperand(MCOperand::createReg(AArch64::W1));
3510 } else {
3511 Ldr.setOpcode(AArch64::LDRXui);
3512 Ldr.addOperand(MCOperand::createReg(AArch64::X1));
3513 }
3514 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3515 Ldr.addOperand(SymTLSDescLo12);
3517 EmitToStreamer(*OutStreamer, Ldr);
3518
3519 MCInst Add;
3520 if (STI->isTargetILP32()) {
3521 Add.setOpcode(AArch64::ADDWri);
3522 Add.addOperand(MCOperand::createReg(AArch64::W0));
3523 Add.addOperand(MCOperand::createReg(AArch64::W0));
3524 } else {
3525 Add.setOpcode(AArch64::ADDXri);
3526 Add.addOperand(MCOperand::createReg(AArch64::X0));
3527 Add.addOperand(MCOperand::createReg(AArch64::X0));
3528 }
3529 Add.addOperand(SymTLSDescLo12);
3531 EmitToStreamer(*OutStreamer, Add);
3532
3533 // Emit a relocation-annotation. This expands to no code, but requests
3534 // the following instruction gets an R_AARCH64_TLSDESC_CALL.
3535 MCInst TLSDescCall;
3536 TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
3537 TLSDescCall.addOperand(Sym);
3538 EmitToStreamer(*OutStreamer, TLSDescCall);
3539#ifndef NDEBUG
3540 --InstsEmitted; // no code emitted
3541#endif
3542
3543 MCInst Blr;
3544 Blr.setOpcode(AArch64::BLR);
3545 Blr.addOperand(MCOperand::createReg(AArch64::X1));
3546 EmitToStreamer(*OutStreamer, Blr);
3547
3548 return;
3549 }
3550
3551 case AArch64::JumpTableDest32:
3552 case AArch64::JumpTableDest16:
3553 case AArch64::JumpTableDest8:
3554 LowerJumpTableDest(*OutStreamer, *MI);
3555 return;
3556
3557 case AArch64::BR_JumpTable:
3558 LowerHardenedBRJumpTable(*MI);
3559 return;
3560
3561 case AArch64::FMOVH0:
3562 case AArch64::FMOVS0:
3563 case AArch64::FMOVD0:
3564 emitFMov0(*MI);
3565 return;
3566
3567 case AArch64::MOPSMemoryCopyPseudo:
3568 case AArch64::MOPSMemoryMovePseudo:
3569 case AArch64::MOPSMemorySetPseudo:
3570 case AArch64::MOPSMemorySetTaggingPseudo:
3571 LowerMOPS(*OutStreamer, *MI);
3572 return;
3573
3574 case TargetOpcode::STACKMAP:
3575 return LowerSTACKMAP(*OutStreamer, SM, *MI);
3576
3577 case TargetOpcode::PATCHPOINT:
3578 return LowerPATCHPOINT(*OutStreamer, SM, *MI);
3579
3580 case TargetOpcode::STATEPOINT:
3581 return LowerSTATEPOINT(*OutStreamer, SM, *MI);
3582
3583 case TargetOpcode::FAULTING_OP:
3584 return LowerFAULTING_OP(*MI);
3585
3586 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
3587 LowerPATCHABLE_FUNCTION_ENTER(*MI);
3588 return;
3589
3590 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
3591 LowerPATCHABLE_FUNCTION_EXIT(*MI);
3592 return;
3593
3594 case TargetOpcode::PATCHABLE_TAIL_CALL:
3595 LowerPATCHABLE_TAIL_CALL(*MI);
3596 return;
3597 case TargetOpcode::PATCHABLE_EVENT_CALL:
3598 return LowerPATCHABLE_EVENT_CALL(*MI, false);
3599 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
3600 return LowerPATCHABLE_EVENT_CALL(*MI, true);
3601
3602 case AArch64::KCFI_CHECK:
3603 LowerKCFI_CHECK(*MI);
3604 return;
3605
3606 case AArch64::HWASAN_CHECK_MEMACCESS:
3607 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
3608 case AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW:
3609 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW:
3610 LowerHWASAN_CHECK_MEMACCESS(*MI);
3611 return;
3612
3613 case AArch64::SEH_StackAlloc:
3614 TS->emitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
3615 return;
3616
3617 case AArch64::SEH_SaveFPLR:
3618 TS->emitARM64WinCFISaveFPLR(MI->getOperand(0).getImm());
3619 return;
3620
3621 case AArch64::SEH_SaveFPLR_X:
3622 assert(MI->getOperand(0).getImm() < 0 &&
3623 "Pre increment SEH opcode must have a negative offset");
3624 TS->emitARM64WinCFISaveFPLRX(-MI->getOperand(0).getImm());
3625 return;
3626
3627 case AArch64::SEH_SaveReg:
3628 TS->emitARM64WinCFISaveReg(MI->getOperand(0).getImm(),
3629 MI->getOperand(1).getImm());
3630 return;
3631
3632 case AArch64::SEH_SaveReg_X:
3633 assert(MI->getOperand(1).getImm() < 0 &&
3634 "Pre increment SEH opcode must have a negative offset");
3635 TS->emitARM64WinCFISaveRegX(MI->getOperand(0).getImm(),
3636 -MI->getOperand(1).getImm());
3637 return;
3638
3639 case AArch64::SEH_SaveRegP:
3640 if (MI->getOperand(1).getImm() == 30 && MI->getOperand(0).getImm() >= 19 &&
3641 MI->getOperand(0).getImm() <= 28) {
3642 assert((MI->getOperand(0).getImm() - 19) % 2 == 0 &&
3643 "Register paired with LR must be odd");
3644 TS->emitARM64WinCFISaveLRPair(MI->getOperand(0).getImm(),
3645 MI->getOperand(2).getImm());
3646 return;
3647 }
3648 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3649 "Non-consecutive registers not allowed for save_regp");
3650 TS->emitARM64WinCFISaveRegP(MI->getOperand(0).getImm(),
3651 MI->getOperand(2).getImm());
3652 return;
3653
3654 case AArch64::SEH_SaveRegP_X:
3655 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3656 "Non-consecutive registers not allowed for save_regp_x");
3657 assert(MI->getOperand(2).getImm() < 0 &&
3658 "Pre increment SEH opcode must have a negative offset");
3659 TS->emitARM64WinCFISaveRegPX(MI->getOperand(0).getImm(),
3660 -MI->getOperand(2).getImm());
3661 return;
3662
3663 case AArch64::SEH_SaveFReg:
3664 TS->emitARM64WinCFISaveFReg(MI->getOperand(0).getImm(),
3665 MI->getOperand(1).getImm());
3666 return;
3667
3668 case AArch64::SEH_SaveFReg_X:
3669 assert(MI->getOperand(1).getImm() < 0 &&
3670 "Pre increment SEH opcode must have a negative offset");
3671 TS->emitARM64WinCFISaveFRegX(MI->getOperand(0).getImm(),
3672 -MI->getOperand(1).getImm());
3673 return;
3674
3675 case AArch64::SEH_SaveFRegP:
3676 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3677 "Non-consecutive registers not allowed for save_regp");
3678 TS->emitARM64WinCFISaveFRegP(MI->getOperand(0).getImm(),
3679 MI->getOperand(2).getImm());
3680 return;
3681
3682 case AArch64::SEH_SaveFRegP_X:
3683 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3684 "Non-consecutive registers not allowed for save_regp_x");
3685 assert(MI->getOperand(2).getImm() < 0 &&
3686 "Pre increment SEH opcode must have a negative offset");
3687 TS->emitARM64WinCFISaveFRegPX(MI->getOperand(0).getImm(),
3688 -MI->getOperand(2).getImm());
3689 return;
3690
3691 case AArch64::SEH_SetFP:
3693 return;
3694
3695 case AArch64::SEH_AddFP:
3696 TS->emitARM64WinCFIAddFP(MI->getOperand(0).getImm());
3697 return;
3698
3699 case AArch64::SEH_Nop:
3700 TS->emitARM64WinCFINop();
3701 return;
3702
3703 case AArch64::SEH_PrologEnd:
3705 return;
3706
3707 case AArch64::SEH_EpilogStart:
3709 return;
3710
3711 case AArch64::SEH_EpilogEnd:
3713 return;
3714
3715 case AArch64::SEH_PACSignLR:
3717 return;
3718
3719 case AArch64::SEH_SaveAnyRegI:
3720 assert(MI->getOperand(1).getImm() <= 1008 &&
3721 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3722 TS->emitARM64WinCFISaveAnyRegI(MI->getOperand(0).getImm(),
3723 MI->getOperand(1).getImm());
3724 return;
3725
3726 case AArch64::SEH_SaveAnyRegIP:
3727 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3728 "Non-consecutive registers not allowed for save_any_reg");
3729 assert(MI->getOperand(2).getImm() <= 1008 &&
3730 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3731 TS->emitARM64WinCFISaveAnyRegIP(MI->getOperand(0).getImm(),
3732 MI->getOperand(2).getImm());
3733 return;
3734
3735 case AArch64::SEH_SaveAnyRegQP:
3736 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3737 "Non-consecutive registers not allowed for save_any_reg");
3738 assert(MI->getOperand(2).getImm() >= 0 &&
3739 "SaveAnyRegQP SEH opcode offset must be non-negative");
3740 assert(MI->getOperand(2).getImm() <= 1008 &&
3741 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3742 TS->emitARM64WinCFISaveAnyRegQP(MI->getOperand(0).getImm(),
3743 MI->getOperand(2).getImm());
3744 return;
3745
3746 case AArch64::SEH_SaveAnyRegQPX:
3747 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3748 "Non-consecutive registers not allowed for save_any_reg");
3749 assert(MI->getOperand(2).getImm() < 0 &&
3750 "SaveAnyRegQPX SEH opcode offset must be negative");
3751 assert(MI->getOperand(2).getImm() >= -1008 &&
3752 "SaveAnyRegQPX SEH opcode offset must fit into 6 bits");
3753 TS->emitARM64WinCFISaveAnyRegQPX(MI->getOperand(0).getImm(),
3754 -MI->getOperand(2).getImm());
3755 return;
3756
3757 case AArch64::SEH_AllocZ:
3758 assert(MI->getOperand(0).getImm() >= 0 &&
3759 "AllocZ SEH opcode offset must be non-negative");
3760 assert(MI->getOperand(0).getImm() <= 255 &&
3761 "AllocZ SEH opcode offset must fit into 8 bits");
3762 TS->emitARM64WinCFIAllocZ(MI->getOperand(0).getImm());
3763 return;
3764
3765 case AArch64::SEH_SaveZReg:
3766 assert(MI->getOperand(1).getImm() >= 0 &&
3767 "SaveZReg SEH opcode offset must be non-negative");
3768 assert(MI->getOperand(1).getImm() <= 255 &&
3769 "SaveZReg SEH opcode offset must fit into 8 bits");
3770 TS->emitARM64WinCFISaveZReg(MI->getOperand(0).getImm(),
3771 MI->getOperand(1).getImm());
3772 return;
3773
3774 case AArch64::SEH_SavePReg:
3775 assert(MI->getOperand(1).getImm() >= 0 &&
3776 "SavePReg SEH opcode offset must be non-negative");
3777 assert(MI->getOperand(1).getImm() <= 255 &&
3778 "SavePReg SEH opcode offset must fit into 8 bits");
3779 TS->emitARM64WinCFISavePReg(MI->getOperand(0).getImm(),
3780 MI->getOperand(1).getImm());
3781 return;
3782
3783 case AArch64::BLR:
3784 case AArch64::BR: {
3785 recordIfImportCall(MI);
3786 MCInst TmpInst;
3787 MCInstLowering.Lower(MI, TmpInst);
3788 EmitToStreamer(*OutStreamer, TmpInst);
3789 return;
3790 }
3791 case AArch64::CBWPri:
3792 case AArch64::CBXPri:
3793 case AArch64::CBBAssertExt:
3794 case AArch64::CBHAssertExt:
3795 case AArch64::CBWPrr:
3796 case AArch64::CBXPrr:
3797 emitCBPseudoExpansion(MI);
3798 return;
3799 }
3800
3801 if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol()))
3802 return;
3803
3804 // Finally, do the automated lowerings for everything else.
3805 MCInst TmpInst;
3806 MCInstLowering.Lower(MI, TmpInst);
3807 EmitToStreamer(*OutStreamer, TmpInst);
3808}
3809
3810void AArch64AsmPrinter::recordIfImportCall(
3811 const llvm::MachineInstr *BranchInst) {
3812 if (!EnableImportCallOptimization)
3813 return;
3814
3815 auto [GV, OpFlags] = BranchInst->getMF()->tryGetCalledGlobal(BranchInst);
3816 if (GV && GV->hasDLLImportStorageClass()) {
3817 auto *CallSiteSymbol = MMI->getContext().createNamedTempSymbol("impcall");
3818 OutStreamer->emitLabel(CallSiteSymbol);
3819
3820 auto *CalledSymbol = MCInstLowering.GetGlobalValueSymbol(GV, OpFlags);
3821 SectionToImportedFunctionCalls[OutStreamer->getCurrentSectionOnly()]
3822 .push_back({CallSiteSymbol, CalledSymbol});
3823 }
3824}
3825
// Emit the body of a Mach-O ifunc stub: load the resolved function pointer
// out of the lazy pointer (reached through the GOT) into x16 and branch to
// it. On arm64e the branch is the authenticated BRAAZ form.
//
// NOTE(review): this listing came through a docs extractor and a few source
// lines are missing (the embedded line numbering jumps below) — consult the
// original source before editing this function.
3826 void AArch64AsmPrinter::emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
3827 MCSymbol *LazyPointer) {
3828 // _ifunc:
3829 // adrp x16, lazy_pointer@GOTPAGE
3830 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3831 // ldr x16, [x16]
3832 // br x16
3833
// adrp x16, lazy_pointer@GOTPAGE
3834 {
3835 MCInst Adrp;
3836 Adrp.setOpcode(AArch64::ADRP);
3837 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3838 MCOperand SymPage;
// NOTE(review): original lines 3840-3841 (the MachineOperand argument being
// lowered here — presumably the lazy-pointer symbol with GOT-page flags) are
// missing from this extraction; verify against the original source.
3839 MCInstLowering.lowerOperand(
3842 SymPage);
3843 Adrp.addOperand(SymPage);
3844 EmitToStreamer(Adrp);
3845 }
3846
// ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3847 {
3848 MCInst Ldr;
3849 Ldr.setOpcode(AArch64::LDRXui);
3850 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3851 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3852 MCOperand SymPageOff;
// NOTE(review): original lines 3854-3855 (the MachineOperand argument) are
// missing from this extraction; verify against the original source.
3853 MCInstLowering.lowerOperand(
3856 SymPageOff);
3857 Ldr.addOperand(SymPageOff);
3859 EmitToStreamer(Ldr);
3860 }
3861
// ldr x16, [x16] — dereference the lazy pointer to obtain the target.
3862 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
3863 .addReg(AArch64::X16)
3864 .addReg(AArch64::X16)
3865 .addImm(0));
3866
// Tail-branch to the target; arm64e uses the authenticated,
// zero-discriminator BRAAZ instead of plain BR.
3867 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3868 : AArch64::BR)
3869 .addReg(AArch64::X16));
3870}
3871
// Emit the body of a Mach-O ifunc stub helper: spill the argument registers
// (x0-x7, d0-d7) and fp/lr, call the resolver, store its result (x0) back
// into the lazy pointer via the GOT, restore all registers, and branch to
// the resolved target in x16. The full assembly sequence is listed below.
//
// NOTE(review): this listing came through a docs extractor and several
// source lines are missing (the embedded line numbering jumps below) —
// consult the original source before editing this function.
3872 void AArch64AsmPrinter::emitMachOIFuncStubHelperBody(Module &M,
3873 const GlobalIFunc &GI,
3874 MCSymbol *LazyPointer) {
3875 // These stub helpers are only ever called once, so here we're optimizing for
3876 // minimum size by using the pre-indexed store variants, which saves a few
3877 // bytes of instructions to bump & restore sp.
3878
3879 // _ifunc.stub_helper:
3880 // stp fp, lr, [sp, #-16]!
3881 // mov fp, sp
3882 // stp x1, x0, [sp, #-16]!
3883 // stp x3, x2, [sp, #-16]!
3884 // stp x5, x4, [sp, #-16]!
3885 // stp x7, x6, [sp, #-16]!
3886 // stp d1, d0, [sp, #-16]!
3887 // stp d3, d2, [sp, #-16]!
3888 // stp d5, d4, [sp, #-16]!
3889 // stp d7, d6, [sp, #-16]!
3890 // bl _resolver
3891 // adrp x16, lazy_pointer@GOTPAGE
3892 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3893 // str x0, [x16]
3894 // mov x16, x0
3895 // ldp d7, d6, [sp], #16
3896 // ldp d5, d4, [sp], #16
3897 // ldp d3, d2, [sp], #16
3898 // ldp d1, d0, [sp], #16
3899 // ldp x7, x6, [sp], #16
3900 // ldp x5, x4, [sp], #16
3901 // ldp x3, x2, [sp], #16
3902 // ldp x1, x0, [sp], #16
3903 // ldp fp, lr, [sp], #16
3904 // br x16
3905
// stp fp, lr, [sp, #-16]! — the imm of -2 corresponds to the -16 byte
// pre-index shown in the listing (pair of 8-byte registers).
3906 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3907 .addReg(AArch64::SP)
3908 .addReg(AArch64::FP)
3909 .addReg(AArch64::LR)
3910 .addReg(AArch64::SP)
3911 .addImm(-2));
3912
// mov fp, sp (encoded as add fp, sp, #0).
3913 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3914 .addReg(AArch64::FP)
3915 .addReg(AArch64::SP)
3916 .addImm(0)
3917 .addImm(0));
3918
// Spill the integer argument registers as pairs: (x1,x0), (x3,x2),
// (x5,x4), (x7,x6).
3919 for (int I = 0; I != 4; ++I)
3920 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3921 .addReg(AArch64::SP)
3922 .addReg(AArch64::X1 + 2 * I)
3923 .addReg(AArch64::X0 + 2 * I)
3924 .addReg(AArch64::SP)
3925 .addImm(-2));
3926
// Spill the FP argument registers as pairs: (d1,d0) ... (d7,d6).
3927 for (int I = 0; I != 4; ++I)
3928 EmitToStreamer(MCInstBuilder(AArch64::STPDpre)
3929 .addReg(AArch64::SP)
3930 .addReg(AArch64::D1 + 2 * I)
3931 .addReg(AArch64::D0 + 2 * I)
3932 .addReg(AArch64::SP)
3933 .addImm(-2));
3934
// bl _resolver
// NOTE(review): original line 3937 (the branch-target expression operand of
// the BL — presumably the ifunc's resolver symbol) is missing from this
// extraction; verify against the original source.
3935 EmitToStreamer(
3936 MCInstBuilder(AArch64::BL)
3938
// adrp x16, lazy_pointer@GOTPAGE
3939 {
3940 MCInst Adrp;
3941 Adrp.setOpcode(AArch64::ADRP);
3942 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3943 MCOperand SymPage;
// The "+ 1" skips the leading mangling character of the symbol name.
// NOTE(review): original line 3946 (the target-flags argument to CreateES)
// is missing from this extraction; verify against the original source.
3944 MCInstLowering.lowerOperand(
3945 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3947 SymPage);
3948 Adrp.addOperand(SymPage);
3949 EmitToStreamer(Adrp);
3950 }
3951
// ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3952 {
3953 MCInst Ldr;
3954 Ldr.setOpcode(AArch64::LDRXui);
3955 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3956 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3957 MCOperand SymPageOff;
// NOTE(review): original lines 3960 and 3963 are missing from this
// extraction; verify against the original source.
3958 MCInstLowering.lowerOperand(
3959 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3961 SymPageOff);
3962 Ldr.addOperand(SymPageOff);
3964 EmitToStreamer(Ldr);
3965 }
3966
// str x0, [x16] — cache the resolver's result in the lazy pointer so the
// stub takes the fast path from now on.
3967 EmitToStreamer(MCInstBuilder(AArch64::STRXui)
3968 .addReg(AArch64::X0)
3969 .addReg(AArch64::X16)
3970 .addImm(0));
3971
// mov x16, x0 (encoded as add x16, x0, #0) — move the resolved target
// into x16 before x0 is restored below.
3972 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3973 .addReg(AArch64::X16)
3974 .addReg(AArch64::X0)
3975 .addImm(0)
3976 .addImm(0));
3977
// Restore the FP argument registers in reverse order of the spills.
3978 for (int I = 3; I != -1; --I)
3979 EmitToStreamer(MCInstBuilder(AArch64::LDPDpost)
3980 .addReg(AArch64::SP)
3981 .addReg(AArch64::D1 + 2 * I)
3982 .addReg(AArch64::D0 + 2 * I)
3983 .addReg(AArch64::SP)
3984 .addImm(2));
3985
// Restore the integer argument registers in reverse order of the spills.
3986 for (int I = 3; I != -1; --I)
3987 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3988 .addReg(AArch64::SP)
3989 .addReg(AArch64::X1 + 2 * I)
3990 .addReg(AArch64::X0 + 2 * I)
3991 .addReg(AArch64::SP)
3992 .addImm(2));
3993
// ldp fp, lr, [sp], #16
3994 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3995 .addReg(AArch64::SP)
3996 .addReg(AArch64::FP)
3997 .addReg(AArch64::LR)
3998 .addReg(AArch64::SP)
3999 .addImm(2));
4000
// Branch to the resolved target; arm64e uses the authenticated BRAAZ.
4001 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
4002 : AArch64::BR)
4003 .addReg(AArch64::X16));
4004}
4005
4006const MCExpr *AArch64AsmPrinter::lowerConstant(const Constant *CV,
4007 const Constant *BaseCV,
4008 uint64_t Offset) {
4009 if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
4010 return MCSymbolRefExpr::create(MCInstLowering.GetGlobalValueSymbol(GV, 0),
4011 OutContext);
4012 }
4013
4014 return AsmPrinter::lowerConstant(CV, BaseCV, Offset);
4015}
4016
// Unique pass identity for the legacy pass manager.
4017 char AArch64AsmPrinter::ID = 0;
4018
// Register the pass: not CFG-only, not an analysis.
4019 INITIALIZE_PASS(AArch64AsmPrinter, "aarch64-asm-printer",
4020 "AArch64 Assembly Printer", false, false)
4021
4022 // Force static initialization.
// NOTE(review): the body of this initializer (original lines 4025-4029,
// presumably the per-target RegisterAsmPrinter calls) is missing from this
// extraction; verify against the original source.
4023 extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
4024 LLVMInitializeAArch64AsmPrinter() {
4030}
static cl::opt< PtrauthCheckMode > PtrauthAuthChecks("aarch64-ptrauth-auth-checks", cl::Hidden, cl::values(clEnumValN(Unchecked, "none", "don't test for failure"), clEnumValN(Poison, "poison", "poison on failure"), clEnumValN(Trap, "trap", "trap on failure")), cl::desc("Check pointer authentication auth/resign failures"), cl::init(Default))
PtrauthCheckMode
@ Unchecked
#define GET_CB_OPC(IsImm, Width, ImmCond, RegCond)
static void emitAuthenticatedPointer(MCStreamer &OutStreamer, MCSymbol *StubLabel, const MCExpr *StubAuthPtrRef)
static bool targetSupportsIRelativeRelocation(const Triple &TT)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_ABI
Definition Compiler.h:213
#define LLVM_EXTERNAL_VISIBILITY
Definition Compiler.h:132
This file defines the DenseMap class.
@ Default
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
print mir2vec MIR2Vec Vocabulary Printer Pass
Definition MIR2Vec.cpp:598
Machine Check Debug Module
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static constexpr unsigned SM(unsigned Version)
#define P(N)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:487
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool printOperand(raw_ostream &OS, const SelectionDAG *G, const SDValue Value)
This file defines the SmallString class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static bool printAsmMRegister(const X86AsmPrinter &P, const MachineOperand &MO, char Mode, raw_ostream &O)
static const AArch64AuthMCExpr * create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key, bool HasAddressDiversity, MCContext &Ctx, SMLoc Loc=SMLoc())
const SetOfInstructions & getLOHRelated() const
unsigned getJumpTableEntrySize(int Idx) const
MCSymbol * getJumpTableEntryPCRelSymbol(int Idx) const
static bool shouldSignReturnAddress(SignReturnAddress Condition, bool IsLRSpilled)
std::optional< std::string > getOutliningStyle() const
const MILOHContainer & getLOHContainer() const
void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym)
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
static bool isTailCallReturnInst(const MachineInstr &MI)
Returns true if MI is one of the TCRETURN* instructions.
AArch64MCInstLower - This class is used to lower an MachineInstr into an MCInst.
MCSymbol * GetGlobalValueSymbol(const GlobalValue *GV, unsigned TargetFlags) const
void Lower(const MachineInstr *MI, MCInst &OutMI) const
bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const
virtual void emitARM64WinCFISaveRegP(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveRegPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegQP(unsigned Reg, int Offset)
virtual void emitAttributesSubsection(StringRef VendorName, AArch64BuildAttributes::SubsectionOptional IsOptional, AArch64BuildAttributes::SubsectionType ParameterType)
Build attributes implementation.
virtual void emitARM64WinCFISavePReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegI(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFRegPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveRegX(unsigned Reg, int Offset)
virtual void emitARM64WinCFIAllocStack(unsigned Size)
virtual void emitARM64WinCFISaveFPLRX(int Offset)
virtual void emitARM64WinCFIAllocZ(int Offset)
virtual void emitDirectiveVariantPCS(MCSymbol *Symbol)
Callback used to implement the .variant_pcs directive.
virtual void emitARM64WinCFIAddFP(unsigned Size)
virtual void emitARM64WinCFISaveFPLR(int Offset)
virtual void emitARM64WinCFISaveFRegP(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegQPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFRegX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveZReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveLRPair(unsigned Reg, int Offset)
virtual void emitAttribute(StringRef VendorName, unsigned Tag, unsigned Value, std::string String)
virtual void emitARM64WinCFISaveAnyRegIP(unsigned Reg, int Offset)
void setPreservesAll()
Set by analyses that do not transform their input at all.
const T & front() const
front - Get the first element.
Definition ArrayRef.h:145
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
This class is intended to be used as a driving class for all asm writers.
Definition AsmPrinter.h:91
virtual void emitGlobalAlias(const Module &M, const GlobalAlias &GA)
virtual MCSymbol * GetCPISymbol(unsigned CPID) const
Return the symbol for the specified constant pool entry.
virtual const MCExpr * lowerConstant(const Constant *CV, const Constant *BaseCV=nullptr, uint64_t Offset=0)
Lower the specified LLVM Constant to an MCExpr.
void getAnalysisUsage(AnalysisUsage &AU) const override
Record analysis usage.
virtual void emitXXStructor(const DataLayout &DL, const Constant *CV)
Targets can override this to change how global constants that are part of a C++ static/global constru...
Definition AsmPrinter.h:650
virtual void emitFunctionEntryLabel()
EmitFunctionEntryLabel - Emit the label that is the entrypoint for the function.
virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, const char *ExtraCode, raw_ostream &OS)
Print the specified operand of MI, an INLINEASM instruction, using the specified assembler variant.
virtual const MCExpr * lowerBlockAddressConstant(const BlockAddress &BA)
Lower the specified BlockAddress to an MCExpr.
Function * getFunction() const
Definition Constants.h:940
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1065
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1068
Constant * getDeactivationSymbol() const
Definition Constants.h:1087
bool hasAddressDiscriminator() const
Whether there is any non-null address discriminator.
Definition Constants.h:1083
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1071
void recordFaultingOp(FaultKind FaultTy, const MCSymbol *FaultingLabel, const MCSymbol *HandlerLabel)
Definition FaultMaps.cpp:28
void serializeToFaultMapSection()
Definition FaultMaps.cpp:45
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:272
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:729
const Constant * getAliasee() const
Definition GlobalAlias.h:87
const Constant * getResolver() const
Definition GlobalIFunc.h:73
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasLocalLinkage() const
bool hasExternalWeakLinkage() const
Type * getValueType() const
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
static const MCBinaryExpr * createLShr(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:423
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:343
static const MCBinaryExpr * createSub(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:428
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition MCExpr.cpp:212
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
MCSectionELF * getELFSection(const Twine &Section, unsigned Type, unsigned Flags)
Definition MCContext.h:553
LLVM_ABI MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
LLVM_ABI MCSymbol * createLinkerPrivateSymbol(const Twine &Name)
Base class for the full range of assembler expressions which are needed for parsing.
Definition MCExpr.h:34
LLVM_ABI bool evaluateAsRelocatable(MCValue &Res, const MCAssembler *Asm) const
Try to evaluate the expression to a relocatable value, i.e.
Definition MCExpr.cpp:450
void addOperand(const MCOperand Op)
Definition MCInst.h:215
void setOpcode(unsigned Op)
Definition MCInst.h:201
MCSection * getDataSection() const
void setImm(int64_t Val)
Definition MCInst.h:89
static MCOperand createExpr(const MCExpr *Val)
Definition MCInst.h:166
int64_t getImm() const
Definition MCInst.h:84
static MCOperand createReg(MCRegister Reg)
Definition MCInst.h:138
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
bool isReg() const
Definition MCInst.h:65
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
static constexpr unsigned NonUniqueID
Definition MCSection.h:521
static const MCSpecifierExpr * create(const MCExpr *Expr, Spec S, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.cpp:743
Streaming machine code generation interface.
Definition MCStreamer.h:221
virtual void emitCFIBKeyFrame()
virtual bool popSection()
Restore the current and previous section from the section stack.
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
virtual void emitRelocDirective(const MCExpr &Offset, StringRef Name, const MCExpr *Expr, SMLoc Loc={})
Record a relocation described by the .reloc directive.
virtual bool hasRawTextSupport() const
Return true if this asm streamer supports emitting unformatted text to the .s file with EmitRawText.
Definition MCStreamer.h:374
MCContext & getContext() const
Definition MCStreamer.h:322
virtual void AddComment(const Twine &T, bool EOL=true)
Add a textual comment.
Definition MCStreamer.h:393
virtual void emitCFIMTETaggedFrame()
void emitValue(const MCExpr *Value, unsigned Size, SMLoc Loc=SMLoc())
virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc=SMLoc())
Emit a label for Symbol into the current section.
MCTargetStreamer * getTargetStreamer()
Definition MCStreamer.h:332
void pushSection()
Save the current and previous section on the section stack.
Definition MCStreamer.h:449
virtual void switchSection(MCSection *Section, uint32_t Subsec=0)
Set the current section where code is being emitted to Section.
MCSection * getCurrentSectionOnly() const
Definition MCStreamer.h:427
void emitRawText(const Twine &String)
If this file is backed by a assembly streamer, this dumps the specified string in the output ....
const FeatureBitset & getFeatureBits() const
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:214
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
LLVM_ABI void print(raw_ostream &OS, const MCAsmInfo *MAI) const
print - Print the value to the stream OS.
Definition MCSymbol.cpp:59
StringRef getName() const
getName - Get the symbol name.
Definition MCSymbol.h:188
const MCSymbol * getAddSym() const
Definition MCValue.h:49
int64_t getConstant() const
Definition MCValue.h:44
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
CalledGlobalInfo tryGetCalledGlobal(const MachineInstr *MI) const
Tries to get the global and target flags for a call site, if the instruction is a call to a global.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MCContext & getContext() const
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineBasicBlock & front() const
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
mop_range operands()
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const MachineOperand & getOperand(unsigned i) const
const std::vector< MachineJumpTableEntry > & getJumpTables() const
unsigned getSubReg() const
static MachineOperand CreateMCSymbol(MCSymbol *Sym, unsigned TargetFlags=0)
const GlobalValue * getGlobal() const
static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
const BlockAddress * getBlockAddress() const
void setOffset(int64_t Offset)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
@ MO_Immediate
Immediate operand.
@ MO_GlobalAddress
Address of a global value.
@ MO_BlockAddress
Address of a basic block.
@ MO_Register
Register operand.
@ MO_ExternalSymbol
Name of external global symbol.
int64_t getOffset() const
Return the offset from the symbol in this operand.
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
static SectionKind getMetadata()
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void push_back(const T &Elt)
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:137
virtual MCSection * getSectionForJumpTable(const Function &F, const TargetMachine &TM) const
Primary interface to the complete machine description for the target machine.
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
bool regsOverlap(Register RegA, Register RegB) const
Returns true if the two registers are equal or alias each other.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition Type.h:258
LLVM Value Representation.
Definition Value.h:75
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
StringRef getVendorName(unsigned const Vendor)
@ MO_NC
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow.
@ MO_G1
MO_G1 - A symbol operand with this flag (granule 1) represents the bits 16-31 of a 64-bit address,...
@ MO_S
MO_S - Indicates that the bits of the symbol operand represented by MO_G0 etc are signed.
@ MO_PAGEOFF
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page.
@ MO_GOT
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
@ MO_G0
MO_G0 - A symbol operand with this flag (granule 0) represents the bits 0-15 of a 64-bit address,...
@ MO_PAGE
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
@ MO_TLS
MO_TLS - Indicates that the operand being accessed is some kind of thread-local symbol.
AuthCheckMethod
Variants of check performed on an authenticated pointer.
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ SectionSize
Definition COFF.h:61
SymbolStorageClass
Storage class tells where and what the symbol represents.
Definition COFF.h:218
@ IMAGE_SYM_CLASS_EXTERNAL
External symbol.
Definition COFF.h:224
@ IMAGE_SYM_CLASS_STATIC
Static.
Definition COFF.h:225
@ IMAGE_SYM_DTYPE_FUNCTION
A function that returns a base type.
Definition COFF.h:276
@ SCT_COMPLEX_TYPE_SHIFT
Type is formed as (base + (derived << SCT_COMPLEX_TYPE_SHIFT))
Definition COFF.h:280
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ SHF_ALLOC
Definition ELF.h:1250
@ SHF_GROUP
Definition ELF.h:1272
@ SHF_EXECINSTR
Definition ELF.h:1253
@ GNU_PROPERTY_AARCH64_FEATURE_1_BTI
Definition ELF.h:1861
@ GNU_PROPERTY_AARCH64_FEATURE_1_PAC
Definition ELF.h:1862
@ GNU_PROPERTY_AARCH64_FEATURE_1_GCS
Definition ELF.h:1863
@ SHT_PROGBITS
Definition ELF.h:1149
@ S_REGULAR
S_REGULAR - Regular section.
Definition MachO.h:127
void emitInstruction(MCObjectStreamer &, const MCInst &Inst, const MCSubtargetInfo &STI)
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:683
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
bool empty() const
Definition BasicBlock.h:101
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI std::optional< std::string > getArm64ECMangledFunctionName(StringRef Name)
Returns the ARM64EC mangled function name unless the input is already mangled.
Definition Mangler.cpp:292
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1669
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
scope_exit(Callable) -> scope_exit< Callable >
static unsigned getXPACOpcodeForKey(AArch64PACKey::ID K)
Return XPAC opcode to be used for a ptrauth strip using the given key.
ExceptionHandling
Definition CodeGen.h:53
Target & getTheAArch64beTarget()
std::string utostr(uint64_t X, bool isNeg=false)
static unsigned getBranchOpcodeForKey(bool IsCall, AArch64PACKey::ID K, bool Zero)
Return B(L)RA opcode to be used for an authenticated branch or call using the given key,...
Target & getTheAArch64leTarget()
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
Target & getTheAArch64_32Target()
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
Target & getTheARM64_32Target()
static MCRegister getXRegFromWReg(MCRegister Reg)
@ Add
Sum of integers.
Target & getTheARM64Target()
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static MCRegister getXRegFromXRegTuple(MCRegister RegTuple)
static unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return PAC opcode to be used for a ptrauth sign using the given key, or its PAC*Z variant that doesn'...
static MCRegister getWRegFromXReg(MCRegister Reg)
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1917
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
static unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return AUT opcode to be used for a ptrauth auth using the given key, or its AUT*Z variant that doesn'...
@ MCSA_Weak
.weak
@ MCSA_WeakAntiDep
.weak_anti_dep (COFF)
@ MCSA_ELF_TypeFunction
.type _foo, STT_FUNC # aka @function
@ MCSA_Hidden
.hidden (ELF)
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:870
#define EQ(a, b)
Definition regexec.c:65
RegisterAsmPrinter - Helper template for registering a target specific assembly printer,...