LLVM 23.0.0git
AArch64AsmPrinter.cpp
Go to the documentation of this file.
1//===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains a printer that converts from our internal representation
10// of machine-dependent LLVM code to the AArch64 assembly language.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AArch64.h"
15#include "AArch64MCInstLower.h"
17#include "AArch64RegisterInfo.h"
18#include "AArch64Subtarget.h"
27#include "llvm/ADT/DenseMap.h"
28#include "llvm/ADT/ScopeExit.h"
31#include "llvm/ADT/Statistic.h"
32#include "llvm/ADT/StringRef.h"
33#include "llvm/ADT/Twine.h"
47#include "llvm/IR/DataLayout.h"
49#include "llvm/IR/Mangler.h"
50#include "llvm/IR/Module.h"
51#include "llvm/MC/MCAsmInfo.h"
52#include "llvm/MC/MCContext.h"
53#include "llvm/MC/MCExpr.h"
54#include "llvm/MC/MCInst.h"
58#include "llvm/MC/MCStreamer.h"
59#include "llvm/MC/MCSymbol.h"
60#include "llvm/MC/MCValue.h"
70#include <cassert>
71#include <cstdint>
72#include <map>
73#include <memory>
74
using namespace llvm;

// Debug type used by LLVM_DEBUG and STATISTIC in this file.
#define DEBUG_TYPE "AArch64AsmPrinter"

// Doesn't count FPR128 ZCZ instructions which are handled
// by TableGen pattern matching
STATISTIC(NumZCZeroingInstrsFPR,
          "Number of zero-cycle FPR zeroing instructions expanded from "
          "canonical pseudo instructions");
84
87 "aarch64-ptrauth-auth-checks", cl::Hidden,
88 cl::values(clEnumValN(Unchecked, "none", "don't test for failure"),
89 clEnumValN(Poison, "poison", "poison on failure"),
90 clEnumValN(Trap, "trap", "trap on failure")),
91 cl::desc("Check pointer authentication auth/resign failures"),
93
94namespace {
95
/// AsmPrinter subclass that lowers AArch64 machine code to MC and emits it,
/// including target-specific pseudo expansions (PtrAuth, HWASan, KCFI, XRay,
/// stackmaps/patchpoints, jump tables, build attributes, ...).
class AArch64AsmPrinter : public AsmPrinter {
  // Lowers MachineInstrs/MachineOperands to their MC equivalents.
  AArch64MCInstLower MCInstLowering;
  // Fault-map table builder (see LowerFAULTING_OP).
  FaultMaps FM;
  // Subtarget of the function currently being emitted; temporarily repointed
  // at a heap-allocated subtarget in emitHwasanMemaccessSymbols.
  const AArch64Subtarget *STI;
  bool ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = false;
#ifndef NDEBUG
  // Debug-only instruction counter; presumably used to check pseudo-expansion
  // size bounds — usage not visible in this view.
  unsigned InstsEmitted;
#endif
  // Set in emitStartOfAsmFile when a COFF module carries the
  // "import-call-optimization" module flag.
  bool EnableImportCallOptimization = false;
  // NOTE(review): the type of this member is not visible in this view of the
  // file (declaration truncated by extraction).
  SectionToImportedFunctionCalls;
  // Presumably used to create unique PAuth ifunc-related symbols; usage not
  // visible in this view.
  unsigned PAuthIFuncNextUniqueID = 1;

public:
  static char ID;

  AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
      : AsmPrinter(TM, std::move(Streamer), ID),
        MCInstLowering(OutContext, *this), FM(*this) {}

  StringRef getPassName() const override { return "AArch64 Assembly Printer"; }

  /// Wrapper for MCInstLowering.lowerOperand() for the
  /// tblgen'erated pseudo lowering.
  bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
    return MCInstLowering.lowerOperand(MO, MCOp);
  }

  const MCExpr *lowerConstantPtrAuth(const ConstantPtrAuth &CPA) override;

  const MCExpr *lowerBlockAddressConstant(const BlockAddress &BA) override;

  void emitStartOfAsmFile(Module &M) override;
  void emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
                         ArrayRef<unsigned> JumpTableIndices) override;
  // NOTE(review): part of the return type of getCodeViewJumpTableInfo is
  // truncated in this view of the file.
  std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
  getCodeViewJumpTableInfo(int JTI, const MachineInstr *BranchInstr,
                           const MCSymbol *BranchLabel) const override;

  void emitFunctionEntryLabel() override;

  void emitXXStructor(const DataLayout &DL, const Constant *CV) override;

  void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);

  void LowerHardenedBRJumpTable(const MachineInstr &MI);

  void LowerMOPS(MCStreamer &OutStreamer, const MachineInstr &MI);

  void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
                     const MachineInstr &MI);
  void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                       const MachineInstr &MI);
  void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                       const MachineInstr &MI);
  void LowerFAULTING_OP(const MachineInstr &MI);

  // XRay pseudo lowerings (see emitSled for the sled layout).
  void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
  void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
  void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);
  void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, bool Typed);

  // Key identifying one outlined HWASan check routine:
  // (register, short-granules?, access-info, fixed-shadow?, shadow-offset).
  typedef std::tuple<unsigned, bool, uint32_t, bool, uint64_t>
      HwasanMemaccessTuple;
  std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
  void LowerKCFI_CHECK(const MachineInstr &MI);
  void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
  void emitHwasanMemaccessSymbols(Module &M);

  void emitSled(const MachineInstr &MI, SledKind Kind);

  // Returns whether Reg may be used to store sensitive temporary values when
  // expanding PtrAuth pseudos. Some OSes may take extra care to protect a
  // small subset of GPRs on context switches - use these registers then.
  //
  // If there are no preferred registers, returns true for any Reg.
  bool isPtrauthRegSafe(Register Reg) const {
    if (STI->isX16X17Safer())
      return Reg == AArch64::X16 || Reg == AArch64::X17;

    return true;
  }

  // Emit the sequence for BRA/BLRA (authenticate + branch/call).
  void emitPtrauthBranch(const MachineInstr *MI);

  // NOTE(review): some middle parameters of this declaration are truncated in
  // this view of the file.
  void emitPtrauthCheckAuthenticatedValue(Register TestedReg,
                                          Register ScratchReg,
                                          const MCSymbol *OnFailure = nullptr);

  // Check authenticated LR before tail calling.
  void emitPtrauthTailCallHardening(const MachineInstr *TC);

  // Describes one pointer-authentication signing/authentication schema.
  struct PtrAuthSchema {
    PtrAuthSchema(AArch64PACKey::ID Key, uint64_t IntDisc,
                  const MachineOperand &AddrDiscOp);

    // NOTE(review): one member declaration (preceding IntDisc) is not visible
    // in this view of the file.
    uint64_t IntDisc;      // Constant (integer) discriminator component.
    Register AddrDisc;     // Address-discriminator register.
    bool AddrDiscIsKilled; // Whether the pseudo kills AddrDisc.
  };

  // Emit the sequence for AUT or AUTPAC.
  void emitPtrauthAuthResign(Register Pointer, Register Scratch,
                             PtrAuthSchema AuthSchema,
                             std::optional<PtrAuthSchema> SignSchema,
                             Value *DS);

  // Emit R_AARCH64_PATCHINST, the deactivation symbol relocation. Returns true
  // if no instruction should be emitted because the deactivation symbol is
  // defined in the current module so this function emitted a NOP instead.
  bool emitDeactivationSymbolRelocation(Value *DS);

  // Emit the sequence for PAC.
  void emitPtrauthSign(const MachineInstr *MI);

  // Emit the sequence to compute the discriminator.
  //
  // The Scratch register passed to this function must be safe, as returned by
  // isPtrauthRegSafe(ScratchReg).
  //
  // The returned register is either ScratchReg, AddrDisc, or XZR. Furthermore,
  // it is guaranteed to be safe (or XZR), with the only exception of
  // passing-through an *unmodified* unsafe AddrDisc register.
  //
  // If the expanded pseudo is allowed to clobber AddrDisc register, setting
  // MayClobberAddrDisc may save one MOV instruction, provided
  // isPtrauthRegSafe(AddrDisc) is true:
  //
  //   mov x17, x16
  //   movk x17, #1234, lsl #48
  //   ; x16 is not used anymore
  //
  // can be replaced by
  //
  //   movk x16, #1234, lsl #48
  Register emitPtrauthDiscriminator(uint64_t Disc, Register AddrDisc,
                                    Register ScratchReg,
                                    bool MayClobberAddrDisc = false);

  // Emit the sequence for LOADauthptrstatic
  void LowerLOADauthptrstatic(const MachineInstr &MI);

  // Emit the sequence for LOADgotPAC/MOVaddrPAC (either GOT adrp-ldr or
  // adrp-add followed by PAC sign)
  void LowerMOVaddrPAC(const MachineInstr &MI);

  // Emit the sequence for LOADgotAUTH (load signed pointer from signed ELF GOT
  // and authenticate it with, if FPAC bit is not set, check+trap sequence after
  // authenticating)
  void LowerLOADgotAUTH(const MachineInstr &MI);

  void emitAddImm(MCRegister Val, int64_t Addend, MCRegister Tmp);
  void emitAddress(MCRegister Reg, const MCExpr *Expr, MCRegister Tmp,
                   bool DSOLocal, const MCSubtargetInfo &STI);

  const MCExpr *emitPAuthRelocationAsIRelative(
      const MCExpr *Target, uint64_t Disc, AArch64PACKey::ID KeyID,
      bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr);

  /// tblgen'erated driver function for lowering simple MI->MC
  /// pseudo instructions.
  bool lowerPseudoInstExpansion(const MachineInstr *MI, MCInst &Inst);

  // Emit Build Attributes
  void emitAttributes(unsigned Flags, uint64_t PAuthABIPlatform,
                      uint64_t PAuthABIVersion, AArch64TargetStreamer *TS);

  // Emit expansion of Compare-and-branch pseudo instructions
  void emitCBPseudoExpansion(const MachineInstr *MI);

  void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
  // Convenience overload targeting the printer's own output streamer.
  void EmitToStreamer(const MCInst &Inst) {
    EmitToStreamer(*OutStreamer, Inst);
  }

  void emitInstruction(const MachineInstr *MI) override;

  void emitFunctionHeaderComment() override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // NOTE(review): one statement of this method is truncated in this view of
    // the file.
    AU.setPreservesAll();
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Pick up optional profile-summary / static-data-profile analyses.
    if (auto *PSIW = getAnalysisIfAvailable<ProfileSummaryInfoWrapperPass>())
      PSI = &PSIW->getPSI();
    if (auto *SDPIW =
            getAnalysisIfAvailable<StaticDataProfileInfoWrapperPass>())
      SDPI = &SDPIW->getStaticDataProfileInfo();

    AArch64FI = MF.getInfo<AArch64FunctionInfo>();
    STI = &MF.getSubtarget<AArch64Subtarget>();

    SetupMachineFunction(MF);

    if (STI->isTargetCOFF()) {
      // Emit the COFF symbol definition for the current function.
      bool Local = MF.getFunction().hasLocalLinkage();
      // NOTE(review): the initializers of the storage class (Scl) and Type
      // are truncated in this view of the file.
      int Type =

      OutStreamer->beginCOFFSymbolDef(CurrentFnSym);
      OutStreamer->emitCOFFSymbolStorageClass(Scl);
      OutStreamer->emitCOFFSymbolType(Type);
      OutStreamer->endCOFFSymbolDef();
    }

    // Emit the rest of the function body.
    emitFunctionBody();

    // Emit the XRay table for this function.
    emitXRayTable();

    // We didn't modify anything.
    return false;
  }

  const MCExpr *lowerConstant(const Constant *CV,
                              const Constant *BaseCV = nullptr,
                              uint64_t Offset = 0) override;

private:
  void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
  bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
  bool printAsmRegInClass(const MachineOperand &MO,
                          const TargetRegisterClass *RC, unsigned AltName,
                          raw_ostream &O);

  bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
                       const char *ExtraCode, raw_ostream &O) override;
  bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
                             const char *ExtraCode, raw_ostream &O) override;

  void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);

  void emitFunctionBodyEnd() override;
  void emitGlobalAlias(const Module &M, const GlobalAlias &GA) override;

  MCSymbol *GetCPISymbol(unsigned CPID) const override;
  void emitEndOfAsmFile(Module &M) override;

  // Per-function info for the function currently being emitted.
  AArch64FunctionInfo *AArch64FI = nullptr;

  /// Emit the LOHs contained in AArch64FI.
  void emitLOHs();

  // Small helpers for emitting single MOV/MOVZ/MOVK instructions.
  void emitMovXReg(Register Dest, Register Src);
  void emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift);
  void emitMOVK(Register Dest, uint64_t Imm, unsigned Shift);

  // Helpers for single pointer-authentication instructions.
  void emitAUT(AArch64PACKey::ID Key, Register Pointer, Register Disc);
  void emitPAC(AArch64PACKey::ID Key, Register Pointer, Register Disc);
  void emitBLRA(bool IsCall, AArch64PACKey::ID Key, Register Target,
                Register Disc);

  /// Emit instruction to set float register to zero.
  void emitFMov0(const MachineInstr &MI);
  void emitFMov0AsFMov(const MachineInstr &MI, Register DestReg);

  using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;

  // Labels attached to instructions participating in linker optimization
  // hints (LOHs).
  MInstToMCSymbol LOHInstToLabel;

  bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const override {
    return ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags;
  }

  const MCSubtargetInfo *getIFuncMCSubtargetInfo() const override {
    assert(STI);
    return STI;
  }
  void emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
                              MCSymbol *LazyPointer) override;
  void emitMachOIFuncStubHelperBody(Module &M, const GlobalIFunc &GI,
                                    MCSymbol *LazyPointer) override;

  /// Checks if this instruction is part of a sequence that is eligible for
  /// import call optimization and, if so, records it to be emitted in the
  /// import call section.
  void recordIfImportCall(const MachineInstr *BranchInst);
};
384
385} // end anonymous namespace
386
// Emit per-module prologue data: COFF feature symbols, and — on ELF — build
// attributes and the .note.gnu.property section derived from module flags.
void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
  const Triple &TT = TM.getTargetTriple();

  if (TT.isOSBinFormatCOFF()) {
    emitCOFFFeatureSymbol(M);
    emitCOFFReplaceableFunctionData(M);

    // Opt in to import call optimization when the module requests it.
    if (M.getModuleFlag("import-call-optimization"))
      EnableImportCallOptimization = true;
  }

  // Everything below applies to ELF only.
  if (!TT.isOSBinFormatELF())
    return;

  // For emitting build attributes and .note.gnu.property section
  auto *TS =
      static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
  // Assemble feature flags that may require creation of build attributes and a
  // note section.
  unsigned BAFlags = 0;
  unsigned GNUFlags = 0;
  if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("branch-target-enforcement"))) {
    if (!BTE->isZero()) {
      BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_BTI_Flag;
      // NOTE(review): the matching GNUFlags update is not visible in this
      // view of the file.
    }
  }

  if (const auto *GCS = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("guarded-control-stack"))) {
    if (!GCS->isZero()) {
      BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_GCS_Flag;
    }
  }

  if (const auto *Sign = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("sign-return-address"))) {
    if (!Sign->isZero()) {
      BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_PAC_Flag;
    }
  }

  // -1 means "module flag absent"; emitAttributes maps -1 back to 0.
  uint64_t PAuthABIPlatform = -1;
  if (const auto *PAP = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("aarch64-elf-pauthabi-platform"))) {
    PAuthABIPlatform = PAP->getZExtValue();
  }

  uint64_t PAuthABIVersion = -1;
  if (const auto *PAV = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("aarch64-elf-pauthabi-version"))) {
    PAuthABIVersion = PAV->getZExtValue();
  }

  // Emit AArch64 Build Attributes
  emitAttributes(BAFlags, PAuthABIPlatform, PAuthABIVersion, TS);
  // Emit a .note.gnu.property section with the flags.
  TS->emitNoteSection(GNUFlags, PAuthABIPlatform, PAuthABIVersion);
}
449
450void AArch64AsmPrinter::emitFunctionHeaderComment() {
451 const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
452 std::optional<std::string> OutlinerString = FI->getOutliningStyle();
453 if (OutlinerString != std::nullopt)
454 OutStreamer->getCommentOS() << ' ' << OutlinerString;
455}
456
457void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI)
458{
459 const Function &F = MF->getFunction();
460 if (F.hasFnAttribute("patchable-function-entry")) {
461 unsigned Num;
462 if (F.getFnAttribute("patchable-function-entry")
463 .getValueAsString()
464 .getAsInteger(10, Num))
465 return;
466 emitNops(Num);
467 return;
468 }
469
470 emitSled(MI, SledKind::FUNCTION_ENTER);
471}
472
// Lower PATCHABLE_FUNCTION_EXIT by emitting an XRay function-exit sled.
void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI) {
  emitSled(MI, SledKind::FUNCTION_EXIT);
}
476
// Lower PATCHABLE_TAIL_CALL by emitting an XRay tail-call sled.
void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI) {
  emitSled(MI, SledKind::TAIL_CALL);
}
480
481void AArch64AsmPrinter::emitSled(const MachineInstr &MI, SledKind Kind) {
482 static const int8_t NoopsInSledCount = 7;
483 // We want to emit the following pattern:
484 //
485 // .Lxray_sled_N:
486 // ALIGN
487 // B #32
488 // ; 7 NOP instructions (28 bytes)
489 // .tmpN
490 //
491 // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
492 // over the full 32 bytes (8 instructions) with the following pattern:
493 //
494 // STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
495 // LDR W17, #12 ; W17 := function ID
496 // LDR X16,#12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
497 // BLR X16 ; call the tracing trampoline
498 // ;DATA: 32 bits of function ID
499 // ;DATA: lower 32 bits of the address of the trampoline
500 // ;DATA: higher 32 bits of the address of the trampoline
501 // LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
502 //
503 OutStreamer->emitCodeAlignment(Align(4), &getSubtargetInfo());
504 auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
505 OutStreamer->emitLabel(CurSled);
506 auto Target = OutContext.createTempSymbol();
507
508 // Emit "B #32" instruction, which jumps over the next 28 bytes.
509 // The operand has to be the number of 4-byte instructions to jump over,
510 // including the current instruction.
511 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));
512
513 for (int8_t I = 0; I < NoopsInSledCount; I++)
514 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::NOP));
515
516 OutStreamer->emitLabel(Target);
517 recordSled(CurSled, MI, Kind, 2);
518}
519
// Emit AArch64 build-attribute subsections derived from module flags.
// PAuthABIPlatform/PAuthABIVersion arrive as -1 when the corresponding module
// flag was absent (see emitStartOfAsmFile).
void AArch64AsmPrinter::emitAttributes(unsigned Flags,
                                       uint64_t PAuthABIPlatform,
                                       uint64_t PAuthABIVersion,
                                       AArch64TargetStreamer *TS) {

  // Map "absent" (-1) to 0 before testing/emitting.
  PAuthABIPlatform = (uint64_t(-1) == PAuthABIPlatform) ? 0 : PAuthABIPlatform;
  PAuthABIVersion = (uint64_t(-1) == PAuthABIVersion) ? 0 : PAuthABIVersion;

  // Emit the (REQUIRED) PAuth ABI subsection when either value is nonzero.
  if (PAuthABIPlatform || PAuthABIVersion) {
    // NOTE(review): several emission statements in this region are truncated
    // in this view of the file.
        AArch64BuildAttributes::SubsectionOptional::REQUIRED,
        AArch64BuildAttributes::SubsectionType::ULEB128);
                      PAuthABIPlatform, "");
       "");
  }

  // NOTE(review): the initializers of BTIValue/PACValue/GCSValue are
  // truncated in this view of the file.
  unsigned BTIValue =
  unsigned PACValue =
  unsigned GCSValue =

  // Emit the (OPTIONAL) feature-and-bits subsection when any feature is set.
  if (BTIValue || PACValue || GCSValue) {
    // NOTE(review): several emission statements in this region are truncated
    // in this view of the file.
        AArch64BuildAttributes::SubsectionOptional::OPTIONAL,
        AArch64BuildAttributes::SubsectionType::ULEB128);
  }
}
568
569// Emit the following code for Intrinsic::{xray_customevent,xray_typedevent}
570// (built-in functions __xray_customevent/__xray_typedevent).
571//
572// .Lxray_event_sled_N:
573// b 1f
574// save x0 and x1 (and also x2 for TYPED_EVENT_CALL)
575// set up x0 and x1 (and also x2 for TYPED_EVENT_CALL)
576// bl __xray_CustomEvent or __xray_TypedEvent
577// restore x0 and x1 (and also x2 for TYPED_EVENT_CALL)
578// 1:
579//
580// There are 6 instructions for EVENT_CALL and 9 for TYPED_EVENT_CALL.
581//
582// Then record a sled of kind CUSTOM_EVENT or TYPED_EVENT.
583// After patching, b .+N will become a nop.
584void AArch64AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
585 bool Typed) {
586 auto &O = *OutStreamer;
587 MCSymbol *CurSled = OutContext.createTempSymbol("xray_sled_", true);
588 O.emitLabel(CurSled);
589 bool MachO = TM.getTargetTriple().isOSBinFormatMachO();
590 auto *Sym = MCSymbolRefExpr::create(
591 OutContext.getOrCreateSymbol(
592 Twine(MachO ? "_" : "") +
593 (Typed ? "__xray_TypedEvent" : "__xray_CustomEvent")),
594 OutContext);
595 if (Typed) {
596 O.AddComment("Begin XRay typed event");
597 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(9));
598 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
599 .addReg(AArch64::SP)
600 .addReg(AArch64::X0)
601 .addReg(AArch64::X1)
602 .addReg(AArch64::SP)
603 .addImm(-4));
604 EmitToStreamer(O, MCInstBuilder(AArch64::STRXui)
605 .addReg(AArch64::X2)
606 .addReg(AArch64::SP)
607 .addImm(2));
608 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
609 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
610 emitMovXReg(AArch64::X2, MI.getOperand(2).getReg());
611 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
612 EmitToStreamer(O, MCInstBuilder(AArch64::LDRXui)
613 .addReg(AArch64::X2)
614 .addReg(AArch64::SP)
615 .addImm(2));
616 O.AddComment("End XRay typed event");
617 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
618 .addReg(AArch64::SP)
619 .addReg(AArch64::X0)
620 .addReg(AArch64::X1)
621 .addReg(AArch64::SP)
622 .addImm(4));
623
624 recordSled(CurSled, MI, SledKind::TYPED_EVENT, 2);
625 } else {
626 O.AddComment("Begin XRay custom event");
627 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(6));
628 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
629 .addReg(AArch64::SP)
630 .addReg(AArch64::X0)
631 .addReg(AArch64::X1)
632 .addReg(AArch64::SP)
633 .addImm(-2));
634 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
635 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
636 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
637 O.AddComment("End XRay custom event");
638 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
639 .addReg(AArch64::SP)
640 .addReg(AArch64::X0)
641 .addReg(AArch64::X1)
642 .addReg(AArch64::SP)
643 .addImm(2));
644
645 recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 2);
646 }
647}
648
649void AArch64AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
650 Register AddrReg = MI.getOperand(0).getReg();
651 assert(std::next(MI.getIterator())->isCall() &&
652 "KCFI_CHECK not followed by a call instruction");
653 assert(std::next(MI.getIterator())->getOperand(0).getReg() == AddrReg &&
654 "KCFI_CHECK call target doesn't match call operand");
655
656 // Default to using the intra-procedure-call temporary registers for
657 // comparing the hashes.
658 unsigned ScratchRegs[] = {AArch64::W16, AArch64::W17};
659 if (AddrReg == AArch64::XZR) {
660 // Checking XZR makes no sense. Instead of emitting a load, zero
661 // ScratchRegs[0] and use it for the ESR AddrIndex below.
662 AddrReg = getXRegFromWReg(ScratchRegs[0]);
663 emitMovXReg(AddrReg, AArch64::XZR);
664 } else {
665 // If one of the scratch registers is used for the call target (e.g.
666 // with AArch64::TCRETURNriBTI), we can clobber another caller-saved
667 // temporary register instead (in this case, AArch64::W9) as the check
668 // is immediately followed by the call instruction.
669 for (auto &Reg : ScratchRegs) {
670 if (Reg == getWRegFromXReg(AddrReg)) {
671 Reg = AArch64::W9;
672 break;
673 }
674 }
675 assert(ScratchRegs[0] != AddrReg && ScratchRegs[1] != AddrReg &&
676 "Invalid scratch registers for KCFI_CHECK");
677
678 // Adjust the offset for patchable-function-prefix. This assumes that
679 // patchable-function-prefix is the same for all functions.
680 int64_t PrefixNops = 0;
681 (void)MI.getMF()
682 ->getFunction()
683 .getFnAttribute("patchable-function-prefix")
684 .getValueAsString()
685 .getAsInteger(10, PrefixNops);
686
687 // Load the target function type hash.
688 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDURWi)
689 .addReg(ScratchRegs[0])
690 .addReg(AddrReg)
691 .addImm(-(PrefixNops * 4 + 4)));
692 }
693
694 // Load the expected type hash.
695 const int64_t Type = MI.getOperand(1).getImm();
696 emitMOVK(ScratchRegs[1], Type & 0xFFFF, 0);
697 emitMOVK(ScratchRegs[1], (Type >> 16) & 0xFFFF, 16);
698
699 // Compare the hashes and trap if there's a mismatch.
700 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSWrs)
701 .addReg(AArch64::WZR)
702 .addReg(ScratchRegs[0])
703 .addReg(ScratchRegs[1])
704 .addImm(0));
705
706 MCSymbol *Pass = OutContext.createTempSymbol();
707 EmitToStreamer(*OutStreamer,
708 MCInstBuilder(AArch64::Bcc)
709 .addImm(AArch64CC::EQ)
710 .addExpr(MCSymbolRefExpr::create(Pass, OutContext)));
711
712 // The base ESR is 0x8000 and the register information is encoded in bits
713 // 0-9 as follows:
714 // - 0-4: n, where the register Xn contains the target address
715 // - 5-9: m, where the register Wm contains the expected type hash
716 // Where n, m are in [0, 30].
717 unsigned TypeIndex = ScratchRegs[1] - AArch64::W0;
718 unsigned AddrIndex;
719 switch (AddrReg) {
720 default:
721 AddrIndex = AddrReg - AArch64::X0;
722 break;
723 case AArch64::FP:
724 AddrIndex = 29;
725 break;
726 case AArch64::LR:
727 AddrIndex = 30;
728 break;
729 }
730
731 assert(AddrIndex < 31 && TypeIndex < 31);
732
733 unsigned ESR = 0x8000 | ((TypeIndex & 31) << 5) | (AddrIndex & 31);
734 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BRK).addImm(ESR));
735 OutStreamer->emitLabel(Pass);
736}
737
738void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
739 Register Reg = MI.getOperand(0).getReg();
740
741 // The HWASan pass won't emit a CHECK_MEMACCESS intrinsic with a pointer
742 // statically known to be zero. However, conceivably, the HWASan pass may
743 // encounter a "cannot currently statically prove to be null" pointer (and is
744 // therefore unable to omit the intrinsic) that later optimization passes
745 // convert into a statically known-null pointer.
746 if (Reg == AArch64::XZR)
747 return;
748
749 bool IsShort =
750 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES) ||
751 (MI.getOpcode() ==
752 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
753 uint32_t AccessInfo = MI.getOperand(1).getImm();
754 bool IsFixedShadow =
755 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW) ||
756 (MI.getOpcode() ==
757 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
758 uint64_t FixedShadowOffset = IsFixedShadow ? MI.getOperand(2).getImm() : 0;
759
760 MCSymbol *&Sym = HwasanMemaccessSymbols[HwasanMemaccessTuple(
761 Reg, IsShort, AccessInfo, IsFixedShadow, FixedShadowOffset)];
762 if (!Sym) {
763 // FIXME: Make this work on non-ELF.
764 if (!TM.getTargetTriple().isOSBinFormatELF())
765 report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");
766
767 std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
768 utostr(AccessInfo);
769 if (IsFixedShadow)
770 SymName += "_fixed_" + utostr(FixedShadowOffset);
771 if (IsShort)
772 SymName += "_short_v2";
773 Sym = OutContext.getOrCreateSymbol(SymName);
774 }
775
776 EmitToStreamer(*OutStreamer,
777 MCInstBuilder(AArch64::BL)
778 .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
779}
780
// Emit the outlined HWASan check routines registered by
// LowerHWASAN_CHECK_MEMACCESS, one per unique
// (register, short?, access-info, fixed-shadow?, offset) tuple.
void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
  if (HwasanMemaccessSymbols.empty())
    return;

  const Triple &TT = TM.getTargetTriple();
  assert(TT.isOSBinFormatELF());
  // AArch64Subtarget is huge, so heap allocate it so we don't run out of stack
  // space.
  auto STI = std::make_unique<AArch64Subtarget>(
      TT, TM.getTargetCPU(), TM.getTargetCPU(), TM.getTargetFeatureString(), TM,
      true);
  this->STI = STI.get();

  // v1 runtime handler for classic checks, v2 for short-granule checks.
  MCSymbol *HwasanTagMismatchV1Sym =
      OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");
  MCSymbol *HwasanTagMismatchV2Sym =
      OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");

  const MCSymbolRefExpr *HwasanTagMismatchV1Ref =
      MCSymbolRefExpr::create(HwasanTagMismatchV1Sym, OutContext);
  const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
      MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);

  for (auto &P : HwasanMemaccessSymbols) {
    unsigned Reg = std::get<0>(P.first);
    bool IsShort = std::get<1>(P.first);
    uint32_t AccessInfo = std::get<2>(P.first);
    bool IsFixedShadow = std::get<3>(P.first);
    uint64_t FixedShadowOffset = std::get<4>(P.first);
    const MCSymbolRefExpr *HwasanTagMismatchRef =
        IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
    MCSymbol *Sym = P.second;

    // Decode the packed AccessInfo fields.
    bool HasMatchAllTag =
        (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
    uint8_t MatchAllTag =
        (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
    unsigned Size =
        1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
    bool CompileKernel =
        (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;

    // Each routine is placed in a comdat so duplicates across TUs fold.
    // NOTE(review): some getELFSection arguments are truncated in this view
    // of the file.
    OutStreamer->switchSection(OutContext.getELFSection(
        ".text.hot", ELF::SHT_PROGBITS,
        /*IsComdat=*/true));

    OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
    OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
    OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
    OutStreamer->emitLabel(Sym);

    // X16 := bits [55:4] of the pointer (strip the tag byte and divide by
    // the 16-byte granule), i.e. the shadow index.
    EmitToStreamer(MCInstBuilder(AArch64::SBFMXri)
                       .addReg(AArch64::X16)
                       .addReg(Reg)
                       .addImm(4)
                       .addImm(55));

    // Load the shadow memory tag into W16.
    if (IsFixedShadow) {
      // Aarch64 makes it difficult to embed large constants in the code.
      // Fortuitously, kShadowBaseAlignment == 32, so we use the 32-bit
      // left-shift option in the MOV instruction. Combined with the 16-bit
      // immediate, this is enough to represent any offset up to 2**48.
      emitMOVZ(AArch64::X17, FixedShadowOffset >> 32, 32);
      EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
                         .addReg(AArch64::W16)
                         .addReg(AArch64::X17)
                         .addReg(AArch64::X16)
                         .addImm(0)
                         .addImm(0));
    } else {
      // Dynamic shadow base register: X20 for short-granule mode, else X9.
      EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
                         .addReg(AArch64::W16)
                         .addReg(IsShort ? AArch64::X20 : AArch64::X9)
                         .addReg(AArch64::X16)
                         .addImm(0)
                         .addImm(0));
    }

    // Compare the memory tag against the pointer's tag byte.
    // NOTE(review): the final shifter-immediate operand of this SUBSXrs is
    // truncated in this view of the file.
    EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
                       .addReg(AArch64::XZR)
                       .addReg(AArch64::X16)
                       .addReg(Reg)
    MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
    // NOTE(review): part of this branch's symbol-expression operand is
    // truncated in this view of the file.
    EmitToStreamer(MCInstBuilder(AArch64::Bcc)
                       .addImm(AArch64CC::NE)
                       HandleMismatchOrPartialSym, OutContext)));
    // Fast path: tags matched, return to the caller.
    MCSymbol *ReturnSym = OutContext.createTempSymbol();
    OutStreamer->emitLabel(ReturnSym);
    EmitToStreamer(MCInstBuilder(AArch64::RET).addReg(AArch64::LR));
    OutStreamer->emitLabel(HandleMismatchOrPartialSym);

    if (HasMatchAllTag) {
      // X17 := pointer tag (bits 63:56); a pointer carrying the match-all
      // tag is always allowed.
      EmitToStreamer(MCInstBuilder(AArch64::UBFMXri)
                         .addReg(AArch64::X17)
                         .addReg(Reg)
                         .addImm(56)
                         .addImm(63));
      EmitToStreamer(MCInstBuilder(AArch64::SUBSXri)
                         .addReg(AArch64::XZR)
                         .addReg(AArch64::X17)
                         .addImm(MatchAllTag)
                         .addImm(0));
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::EQ)
              .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
    }

    if (IsShort) {
      // Memory tags greater than 15 cannot describe a short (partial)
      // granule, so they are immediate mismatches.
      EmitToStreamer(MCInstBuilder(AArch64::SUBSWri)
                         .addReg(AArch64::WZR)
                         .addReg(AArch64::W16)
                         .addImm(15)
                         .addImm(0));
      MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::HI)
              .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));

      // X17 := offset of the access's last byte within its 16-byte granule.
      EmitToStreamer(MCInstBuilder(AArch64::ANDXri)
                         .addReg(AArch64::X17)
                         .addReg(Reg)
                         .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
      if (Size != 1)
        EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                           .addReg(AArch64::X17)
                           .addReg(AArch64::X17)
                           .addImm(Size - 1)
                           .addImm(0));
      // The access must end strictly below the short-granule limit held in
      // the memory tag.
      EmitToStreamer(MCInstBuilder(AArch64::SUBSWrs)
                         .addReg(AArch64::WZR)
                         .addReg(AArch64::W16)
                         .addReg(AArch64::W17)
                         .addImm(0));
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::LS)
              .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));

      // For short granules the real tag lives in the granule's last byte;
      // load it and re-check against the pointer tag.
      EmitToStreamer(MCInstBuilder(AArch64::ORRXri)
                         .addReg(AArch64::X16)
                         .addReg(Reg)
                         .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
      EmitToStreamer(MCInstBuilder(AArch64::LDRBBui)
                         .addReg(AArch64::W16)
                         .addReg(AArch64::X16)
                         .addImm(0));
      // NOTE(review): the final shifter-immediate operand of this SUBSXrs is
      // truncated in this view of the file.
      EmitToStreamer(
          MCInstBuilder(AArch64::SUBSXrs)
              .addReg(AArch64::XZR)
              .addReg(AArch64::X16)
              .addReg(Reg)
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::EQ)
              .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));

      OutStreamer->emitLabel(HandleMismatchSym);
    }

    // Confirmed mismatch: build a frame and hand off to the runtime handler.
    EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
                       .addReg(AArch64::SP)
                       .addReg(AArch64::X0)
                       .addReg(AArch64::X1)
                       .addReg(AArch64::SP)
                       .addImm(-32));
    EmitToStreamer(MCInstBuilder(AArch64::STPXi)
                       .addReg(AArch64::FP)
                       .addReg(AArch64::LR)
                       .addReg(AArch64::SP)
                       .addImm(29));

    // Handler arguments: X0 = faulting address, X1 = access info.
    if (Reg != AArch64::X0)
      emitMovXReg(AArch64::X0, Reg);
    emitMOVZ(AArch64::X1, AccessInfo & HWASanAccessInfo::RuntimeMask, 0);

    if (CompileKernel) {
      // The Linux kernel's dynamic loader doesn't support GOT relative
      // relocations, but it doesn't support late binding either, so just call
      // the function directly.
      EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef));
    } else {
      // Intentionally load the GOT entry and branch to it, rather than possibly
      // late binding the function, which may clobber the registers before we
      // have a chance to save them.
      // NOTE(review): the relocation-specifier operands of the following two
      // instructions are truncated in this view of the file.
      EmitToStreamer(MCInstBuilder(AArch64::ADRP)
                         .addReg(AArch64::X16)
                         .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
                                                          OutContext)));
      EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                         .addReg(AArch64::X16)
                         .addReg(AArch64::X16)
                         .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
                                                          OutContext)));
      EmitToStreamer(MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
    }
  }
  // Drop the pointer to the heap-allocated subtarget before it is destroyed.
  this->STI = nullptr;
}
987
988static void emitAuthenticatedPointer(MCStreamer &OutStreamer,
989 MCSymbol *StubLabel,
990 const MCExpr *StubAuthPtrRef) {
991 // sym$auth_ptr$key$disc:
992 OutStreamer.emitLabel(StubLabel);
993 OutStreamer.emitValue(StubAuthPtrRef, /*size=*/8);
994}
995
996void AArch64AsmPrinter::emitEndOfAsmFile(Module &M) {
997 emitHwasanMemaccessSymbols(M);
998
999 const Triple &TT = TM.getTargetTriple();
1000 if (TT.isOSBinFormatMachO()) {
1001 // Output authenticated pointers as indirect symbols, if we have any.
1002 MachineModuleInfoMachO &MMIMacho =
1003 MMI->getObjFileInfo<MachineModuleInfoMachO>();
1004
1005 auto Stubs = MMIMacho.getAuthGVStubList();
1006
1007 if (!Stubs.empty()) {
1008 // Switch to the "__auth_ptr" section.
1009 OutStreamer->switchSection(
1010 OutContext.getMachOSection("__DATA", "__auth_ptr", MachO::S_REGULAR,
1012 emitAlignment(Align(8));
1013
1014 for (const auto &Stub : Stubs)
1015 emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
1016
1017 OutStreamer->addBlankLine();
1018 }
1019
1020 // Funny Darwin hack: This flag tells the linker that no global symbols
1021 // contain code that falls through to other global symbols (e.g. the obvious
1022 // implementation of multiple entry points). If this doesn't occur, the
1023 // linker can safely perform dead code stripping. Since LLVM never
1024 // generates code that does this, it is always safe to set.
1025 OutStreamer->emitSubsectionsViaSymbols();
1026 }
1027
1028 if (TT.isOSBinFormatELF()) {
1029 // Output authenticated pointers as indirect symbols, if we have any.
1030 MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
1031
1032 auto Stubs = MMIELF.getAuthGVStubList();
1033
1034 if (!Stubs.empty()) {
1035 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
1036 OutStreamer->switchSection(TLOF.getDataSection());
1037 emitAlignment(Align(8));
1038
1039 for (const auto &Stub : Stubs)
1040 emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
1041
1042 OutStreamer->addBlankLine();
1043 }
1044
1045 // With signed ELF GOT enabled, the linker looks at the symbol type to
1046 // choose between keys IA (for STT_FUNC) and DA (for other types). Symbols
1047 // for functions not defined in the module have STT_NOTYPE type by default.
1048 // This makes linker to emit signing schema with DA key (instead of IA) for
1049 // corresponding R_AARCH64_AUTH_GLOB_DAT dynamic reloc. To avoid that, force
1050 // all function symbols used in the module to have STT_FUNC type. See
1051 // https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#default-signing-schema
1052 const auto *PtrAuthELFGOTFlag = mdconst::extract_or_null<ConstantInt>(
1053 M.getModuleFlag("ptrauth-elf-got"));
1054 if (PtrAuthELFGOTFlag && PtrAuthELFGOTFlag->getZExtValue() == 1)
1055 for (const GlobalValue &GV : M.global_values())
1056 if (!GV.use_empty() && isa<Function>(GV) &&
1057 !GV.getName().starts_with("llvm."))
1058 OutStreamer->emitSymbolAttribute(getSymbol(&GV),
1060 }
1061
1062 // Emit stack and fault map information.
1064
1065 // If import call optimization is enabled, emit the appropriate section.
1066 // We do this whether or not we recorded any import calls.
1067 if (EnableImportCallOptimization && TT.isOSBinFormatCOFF()) {
1068 OutStreamer->switchSection(getObjFileLowering().getImportCallSection());
1069
1070 // Section always starts with some magic.
1071 constexpr char ImpCallMagic[12] = "Imp_Call_V1";
1072 OutStreamer->emitBytes(StringRef{ImpCallMagic, sizeof(ImpCallMagic)});
1073
1074 // Layout of this section is:
1075 // Per section that contains calls to imported functions:
1076 // uint32_t SectionSize: Size in bytes for information in this section.
1077 // uint32_t Section Number
1078 // Per call to imported function in section:
1079 // uint32_t Kind: the kind of imported function.
1080 // uint32_t BranchOffset: the offset of the branch instruction in its
1081 // parent section.
1082 // uint32_t TargetSymbolId: the symbol id of the called function.
1083 for (auto &[Section, CallsToImportedFuncs] :
1084 SectionToImportedFunctionCalls) {
1085 unsigned SectionSize =
1086 sizeof(uint32_t) * (2 + 3 * CallsToImportedFuncs.size());
1087 OutStreamer->emitInt32(SectionSize);
1088 OutStreamer->emitCOFFSecNumber(Section->getBeginSymbol());
1089 for (auto &[CallsiteSymbol, CalledSymbol] : CallsToImportedFuncs) {
1090 // Kind is always IMAGE_REL_ARM64_DYNAMIC_IMPORT_CALL (0x13).
1091 OutStreamer->emitInt32(0x13);
1092 OutStreamer->emitCOFFSecOffset(CallsiteSymbol);
1093 OutStreamer->emitCOFFSymbolIndex(CalledSymbol);
1094 }
1095 }
1096 }
1097}
1098
1099void AArch64AsmPrinter::emitLOHs() {
1101
1102 for (const auto &D : AArch64FI->getLOHContainer()) {
1103 for (const MachineInstr *MI : D.getArgs()) {
1104 MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
1105 assert(LabelIt != LOHInstToLabel.end() &&
1106 "Label hasn't been inserted for LOH related instruction");
1107 MCArgs.push_back(LabelIt->second);
1108 }
1109 OutStreamer->emitLOHDirective(D.getKind(), MCArgs);
1110 MCArgs.clear();
1111 }
1112}
1113
1114void AArch64AsmPrinter::emitFunctionBodyEnd() {
1115 if (!AArch64FI->getLOHRelated().empty())
1116 emitLOHs();
1117}
1118
1119/// GetCPISymbol - Return the symbol for the specified constant pool entry.
1120MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
1121 // Darwin uses a linker-private symbol name for constant-pools (to
1122 // avoid addends on the relocation?), ELF has no such concept and
1123 // uses a normal private symbol.
1124 if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
1125 return OutContext.getOrCreateSymbol(
1126 Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
1127 Twine(getFunctionNumber()) + "_" + Twine(CPID));
1128
1129 return AsmPrinter::GetCPISymbol(CPID);
1130}
1131
1132void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
1133 raw_ostream &O) {
1134 const MachineOperand &MO = MI->getOperand(OpNum);
1135 switch (MO.getType()) {
1136 default:
1137 llvm_unreachable("<unknown operand type>");
1139 Register Reg = MO.getReg();
1141 assert(!MO.getSubReg() && "Subregs should be eliminated!");
1143 break;
1144 }
1146 O << MO.getImm();
1147 break;
1148 }
1150 PrintSymbolOperand(MO, O);
1151 break;
1152 }
1154 MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
1155 Sym->print(O, MAI);
1156 break;
1157 }
1158 }
1159}
1160
1161bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
1162 raw_ostream &O) {
1163 Register Reg = MO.getReg();
1164 switch (Mode) {
1165 default:
1166 return true; // Unknown mode.
1167 case 'w':
1169 break;
1170 case 'x':
1172 break;
1173 case 't':
1175 break;
1176 }
1177
1179 return false;
1180}
1181
1182// Prints the register in MO using class RC using the offset in the
1183// new register class. This should not be used for cross class
1184// printing.
1185bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
1186 const TargetRegisterClass *RC,
1187 unsigned AltName, raw_ostream &O) {
1188 assert(MO.isReg() && "Should only get here with a register!");
1189 const TargetRegisterInfo *RI = STI->getRegisterInfo();
1190 Register Reg = MO.getReg();
1191 MCRegister RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
1192 if (!RI->regsOverlap(RegToPrint, Reg))
1193 return true;
1194 O << AArch64InstPrinter::getRegisterName(RegToPrint, AltName);
1195 return false;
1196}
1197
1198bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
1199 const char *ExtraCode, raw_ostream &O) {
1200 const MachineOperand &MO = MI->getOperand(OpNum);
1201
1202 // First try the generic code, which knows about modifiers like 'c' and 'n'.
1203 if (!AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O))
1204 return false;
1205
1206 // Does this asm operand have a single letter operand modifier?
1207 if (ExtraCode && ExtraCode[0]) {
1208 if (ExtraCode[1] != 0)
1209 return true; // Unknown modifier.
1210
1211 switch (ExtraCode[0]) {
1212 default:
1213 return true; // Unknown modifier.
1214 case 'w': // Print W register
1215 case 'x': // Print X register
1216 if (MO.isReg())
1217 return printAsmMRegister(MO, ExtraCode[0], O);
1218 if (MO.isImm() && MO.getImm() == 0) {
1219 unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
1221 return false;
1222 }
1223 printOperand(MI, OpNum, O);
1224 return false;
1225 case 'b': // Print B register.
1226 case 'h': // Print H register.
1227 case 's': // Print S register.
1228 case 'd': // Print D register.
1229 case 'q': // Print Q register.
1230 case 'z': // Print Z register.
1231 if (MO.isReg()) {
1232 const TargetRegisterClass *RC;
1233 switch (ExtraCode[0]) {
1234 case 'b':
1235 RC = &AArch64::FPR8RegClass;
1236 break;
1237 case 'h':
1238 RC = &AArch64::FPR16RegClass;
1239 break;
1240 case 's':
1241 RC = &AArch64::FPR32RegClass;
1242 break;
1243 case 'd':
1244 RC = &AArch64::FPR64RegClass;
1245 break;
1246 case 'q':
1247 RC = &AArch64::FPR128RegClass;
1248 break;
1249 case 'z':
1250 RC = &AArch64::ZPRRegClass;
1251 break;
1252 default:
1253 return true;
1254 }
1255 return printAsmRegInClass(MO, RC, AArch64::NoRegAltName, O);
1256 }
1257 printOperand(MI, OpNum, O);
1258 return false;
1259 }
1260 }
1261
1262 // According to ARM, we should emit x and v registers unless we have a
1263 // modifier.
1264 if (MO.isReg()) {
1265 Register Reg = MO.getReg();
1266
1267 // If this is a w or x register, print an x register.
1268 if (AArch64::GPR32allRegClass.contains(Reg) ||
1269 AArch64::GPR64allRegClass.contains(Reg))
1270 return printAsmMRegister(MO, 'x', O);
1271
1272 // If this is an x register tuple, print an x register.
1273 if (AArch64::GPR64x8ClassRegClass.contains(Reg))
1274 return printAsmMRegister(MO, 't', O);
1275
1276 unsigned AltName = AArch64::NoRegAltName;
1277 const TargetRegisterClass *RegClass;
1278 if (AArch64::ZPRRegClass.contains(Reg)) {
1279 RegClass = &AArch64::ZPRRegClass;
1280 } else if (AArch64::PPRRegClass.contains(Reg)) {
1281 RegClass = &AArch64::PPRRegClass;
1282 } else if (AArch64::PNRRegClass.contains(Reg)) {
1283 RegClass = &AArch64::PNRRegClass;
1284 } else {
1285 RegClass = &AArch64::FPR128RegClass;
1286 AltName = AArch64::vreg;
1287 }
1288
1289 // If this is a b, h, s, d, or q register, print it as a v register.
1290 return printAsmRegInClass(MO, RegClass, AltName, O);
1291 }
1292
1293 printOperand(MI, OpNum, O);
1294 return false;
1295}
1296
1297bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
1298 unsigned OpNum,
1299 const char *ExtraCode,
1300 raw_ostream &O) {
1301 if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
1302 return true; // Unknown modifier.
1303
1304 const MachineOperand &MO = MI->getOperand(OpNum);
1305 assert(MO.isReg() && "unexpected inline asm memory operand");
1306 O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
1307 return false;
1308}
1309
1310void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
1311 raw_ostream &OS) {
1312 unsigned NOps = MI->getNumOperands();
1313 assert(NOps == 4);
1314 OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
1315 // cast away const; DIetc do not take const operands for some reason.
1316 OS << MI->getDebugVariable()->getName();
1317 OS << " <- ";
1318 // Frame address. Currently handles register +- offset only.
1319 assert(MI->isIndirectDebugValue());
1320 OS << '[';
1321 for (unsigned I = 0, E = llvm::size(MI->debug_operands()); I < E; ++I) {
1322 if (I != 0)
1323 OS << ", ";
1324 printOperand(MI, I, OS);
1325 }
1326 OS << ']';
1327 OS << "+";
1328 printOperand(MI, NOps - 2, OS);
1329}
1330
1331void AArch64AsmPrinter::emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
1332 ArrayRef<unsigned> JumpTableIndices) {
1333 // Fast return if there is nothing to emit to avoid creating empty sections.
1334 if (JumpTableIndices.empty())
1335 return;
1336 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
1337 const auto &F = MF->getFunction();
1339
1340 MCSection *ReadOnlySec = nullptr;
1341 if (TM.Options.EnableStaticDataPartitioning) {
1342 ReadOnlySec =
1343 TLOF.getSectionForJumpTable(F, TM, &JT[JumpTableIndices.front()]);
1344 } else {
1345 ReadOnlySec = TLOF.getSectionForJumpTable(F, TM);
1346 }
1347 OutStreamer->switchSection(ReadOnlySec);
1348
1349 auto AFI = MF->getInfo<AArch64FunctionInfo>();
1350 for (unsigned JTI : JumpTableIndices) {
1351 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
1352
1353 // If this jump table was deleted, ignore it.
1354 if (JTBBs.empty()) continue;
1355
1356 unsigned Size = AFI->getJumpTableEntrySize(JTI);
1357 emitAlignment(Align(Size));
1358 OutStreamer->emitLabel(GetJTISymbol(JTI));
1359
1360 const MCSymbol *BaseSym = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1361 const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);
1362
1363 for (auto *JTBB : JTBBs) {
1364 const MCExpr *Value =
1365 MCSymbolRefExpr::create(JTBB->getSymbol(), OutContext);
1366
1367 // Each entry is:
1368 // .byte/.hword (LBB - Lbase)>>2
1369 // or plain:
1370 // .word LBB - Lbase
1371 Value = MCBinaryExpr::createSub(Value, Base, OutContext);
1372 if (Size != 4)
1374 Value, MCConstantExpr::create(2, OutContext), OutContext);
1375
1376 OutStreamer->emitValue(Value, Size);
1377 }
1378 }
1379}
1380
1381std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
1383AArch64AsmPrinter::getCodeViewJumpTableInfo(int JTI,
1384 const MachineInstr *BranchInstr,
1385 const MCSymbol *BranchLabel) const {
1386 const auto AFI = MF->getInfo<AArch64FunctionInfo>();
1387 const auto Base = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1389 switch (AFI->getJumpTableEntrySize(JTI)) {
1390 case 1:
1391 EntrySize = codeview::JumpTableEntrySize::UInt8ShiftLeft;
1392 break;
1393 case 2:
1394 EntrySize = codeview::JumpTableEntrySize::UInt16ShiftLeft;
1395 break;
1396 case 4:
1397 EntrySize = codeview::JumpTableEntrySize::Int32;
1398 break;
1399 default:
1400 llvm_unreachable("Unexpected jump table entry size");
1401 }
1402 return std::make_tuple(Base, 0, BranchLabel, EntrySize);
1403}
1404
1405void AArch64AsmPrinter::emitFunctionEntryLabel() {
1406 const Triple &TT = TM.getTargetTriple();
1407 if (TT.isOSBinFormatELF() &&
1408 (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
1409 MF->getFunction().getCallingConv() ==
1410 CallingConv::AArch64_SVE_VectorCall ||
1411 MF->getInfo<AArch64FunctionInfo>()->isSVECC())) {
1412 auto *TS =
1413 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
1414 TS->emitDirectiveVariantPCS(CurrentFnSym);
1415 }
1416
1418
1419 if (TT.isWindowsArm64EC() && !MF->getFunction().hasLocalLinkage()) {
1420 // For ARM64EC targets, a function definition's name is mangled differently
1421 // from the normal symbol, emit required aliases here.
1422 auto emitFunctionAlias = [&](MCSymbol *Src, MCSymbol *Dst) {
1423 OutStreamer->emitSymbolAttribute(Src, MCSA_WeakAntiDep);
1424 OutStreamer->emitAssignment(
1425 Src, MCSymbolRefExpr::create(Dst, MMI->getContext()));
1426 };
1427
1428 auto getSymbolFromMetadata = [&](StringRef Name) {
1429 MCSymbol *Sym = nullptr;
1430 if (MDNode *Node = MF->getFunction().getMetadata(Name)) {
1431 StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
1432 Sym = MMI->getContext().getOrCreateSymbol(NameStr);
1433 }
1434 return Sym;
1435 };
1436
1437 SmallVector<MDNode *> UnmangledNames;
1438 MF->getFunction().getMetadata("arm64ec_unmangled_name", UnmangledNames);
1439 for (MDNode *Node : UnmangledNames) {
1440 StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
1441 MCSymbol *UnmangledSym = MMI->getContext().getOrCreateSymbol(NameStr);
1442 if (std::optional<std::string> MangledName =
1443 getArm64ECMangledFunctionName(UnmangledSym->getName())) {
1444 MCSymbol *ECMangledSym =
1445 MMI->getContext().getOrCreateSymbol(*MangledName);
1446 emitFunctionAlias(UnmangledSym, ECMangledSym);
1447 }
1448 }
1449 if (MCSymbol *ECMangledSym =
1450 getSymbolFromMetadata("arm64ec_ecmangled_name"))
1451 emitFunctionAlias(ECMangledSym, CurrentFnSym);
1452 }
1453}
1454
1455void AArch64AsmPrinter::emitXXStructor(const DataLayout &DL,
1456 const Constant *CV) {
1457 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(CV))
1458 if (CPA->hasAddressDiscriminator() &&
1459 !CPA->hasSpecialAddressDiscriminator(
1462 "unexpected address discrimination value for ctors/dtors entry, only "
1463 "'ptr inttoptr (i64 1 to ptr)' is allowed");
1464 // If we have signed pointers in xxstructors list, they'll be lowered to @AUTH
1465 // MCExpr's via AArch64AsmPrinter::lowerConstantPtrAuth. It does not look at
1466 // actual address discrimination value and only checks
1467 // hasAddressDiscriminator(), so it's OK to leave special address
1468 // discrimination value here.
1470}
1471
1472void AArch64AsmPrinter::emitGlobalAlias(const Module &M,
1473 const GlobalAlias &GA) {
1474 if (auto F = dyn_cast_or_null<Function>(GA.getAliasee())) {
1475 // Global aliases must point to a definition, but unmangled patchable
1476 // symbols are special and need to point to an undefined symbol with "EXP+"
1477 // prefix. Such undefined symbol is resolved by the linker by creating
1478 // x86 thunk that jumps back to the actual EC target.
1479 if (MDNode *Node = F->getMetadata("arm64ec_exp_name")) {
1480 StringRef ExpStr = cast<MDString>(Node->getOperand(0))->getString();
1481 MCSymbol *ExpSym = MMI->getContext().getOrCreateSymbol(ExpStr);
1482 MCSymbol *Sym = MMI->getContext().getOrCreateSymbol(GA.getName());
1483
1484 OutStreamer->beginCOFFSymbolDef(ExpSym);
1485 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1486 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1488 OutStreamer->endCOFFSymbolDef();
1489
1490 OutStreamer->beginCOFFSymbolDef(Sym);
1491 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1492 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1494 OutStreamer->endCOFFSymbolDef();
1495 OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
1496 OutStreamer->emitAssignment(
1497 Sym, MCSymbolRefExpr::create(ExpSym, MMI->getContext()));
1498 return;
1499 }
1500 }
1502}
1503
1504/// Small jump tables contain an unsigned byte or half, representing the offset
1505/// from the lowest-addressed possible destination to the desired basic
1506/// block. Since all instructions are 4-byte aligned, this is further compressed
1507/// by counting in instructions rather than bytes (i.e. divided by 4). So, to
1508/// materialize the correct destination we need:
1509///
1510/// adr xDest, .LBB0_0
1511/// ldrb wScratch, [xTable, xEntry] (with "lsl #1" for ldrh).
1512/// add xDest, xDest, xScratch (with "lsl #2" for smaller entries)
1513void AArch64AsmPrinter::LowerJumpTableDest(llvm::MCStreamer &OutStreamer,
1514 const llvm::MachineInstr &MI) {
1515 Register DestReg = MI.getOperand(0).getReg();
1516 Register ScratchReg = MI.getOperand(1).getReg();
1517 Register ScratchRegW =
1518 STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
1519 Register TableReg = MI.getOperand(2).getReg();
1520 Register EntryReg = MI.getOperand(3).getReg();
1521 int JTIdx = MI.getOperand(4).getIndex();
1522 int Size = AArch64FI->getJumpTableEntrySize(JTIdx);
1523
1524 // This has to be first because the compression pass based its reachability
1525 // calculations on the start of the JumpTableDest instruction.
1526 auto Label =
1527 MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);
1528
1529 // If we don't already have a symbol to use as the base, use the ADR
1530 // instruction itself.
1531 if (!Label) {
1533 AArch64FI->setJumpTableEntryInfo(JTIdx, Size, Label);
1534 OutStreamer.emitLabel(Label);
1535 }
1536
1537 auto LabelExpr = MCSymbolRefExpr::create(Label, MF->getContext());
1538 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
1539 .addReg(DestReg)
1540 .addExpr(LabelExpr));
1541
1542 // Load the number of instruction-steps to offset from the label.
1543 unsigned LdrOpcode;
1544 switch (Size) {
1545 case 1: LdrOpcode = AArch64::LDRBBroX; break;
1546 case 2: LdrOpcode = AArch64::LDRHHroX; break;
1547 case 4: LdrOpcode = AArch64::LDRSWroX; break;
1548 default:
1549 llvm_unreachable("Unknown jump table size");
1550 }
1551
1552 EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
1553 .addReg(Size == 4 ? ScratchReg : ScratchRegW)
1554 .addReg(TableReg)
1555 .addReg(EntryReg)
1556 .addImm(0)
1557 .addImm(Size == 1 ? 0 : 1));
1558
1559 // Add to the already materialized base label address, multiplying by 4 if
1560 // compressed.
1561 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1562 .addReg(DestReg)
1563 .addReg(DestReg)
1564 .addReg(ScratchReg)
1565 .addImm(Size == 4 ? 0 : 2));
1566}
1567
1568void AArch64AsmPrinter::LowerHardenedBRJumpTable(const MachineInstr &MI) {
1569 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
1570 assert(MJTI && "Can't lower jump-table dispatch without JTI");
1571
1572 const std::vector<MachineJumpTableEntry> &JTs = MJTI->getJumpTables();
1573 assert(!JTs.empty() && "Invalid JT index for jump-table dispatch");
1574
1575 // Emit:
1576 // mov x17, #<size of table> ; depending on table size, with MOVKs
1577 // cmp x16, x17 ; or #imm if table size fits in 12-bit
1578 // csel x16, x16, xzr, ls ; check for index overflow
1579 //
1580 // adrp x17, Ltable@PAGE ; materialize table address
1581 // add x17, Ltable@PAGEOFF
1582 // ldrsw x16, [x17, x16, lsl #2] ; load table entry
1583 //
1584 // Lanchor:
1585 // adr x17, Lanchor ; compute target address
1586 // add x16, x17, x16
1587 // br x16 ; branch to target
1588
1589 MachineOperand JTOp = MI.getOperand(0);
1590
1591 unsigned JTI = JTOp.getIndex();
1592 assert(!AArch64FI->getJumpTableEntryPCRelSymbol(JTI) &&
1593 "unsupported compressed jump table");
1594
1595 const uint64_t NumTableEntries = JTs[JTI].MBBs.size();
1596
1597 // cmp only supports a 12-bit immediate. If we need more, materialize the
1598 // immediate, using x17 as a scratch register.
1599 uint64_t MaxTableEntry = NumTableEntries - 1;
1600 if (isUInt<12>(MaxTableEntry)) {
1601 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXri)
1602 .addReg(AArch64::XZR)
1603 .addReg(AArch64::X16)
1604 .addImm(MaxTableEntry)
1605 .addImm(0));
1606 } else {
1607 emitMOVZ(AArch64::X17, static_cast<uint16_t>(MaxTableEntry), 0);
1608 // It's sad that we have to manually materialize instructions, but we can't
1609 // trivially reuse the main pseudo expansion logic.
1610 // A MOVK sequence is easy enough to generate and handles the general case.
1611 for (int Offset = 16; Offset < 64; Offset += 16) {
1612 if ((MaxTableEntry >> Offset) == 0)
1613 break;
1614 emitMOVK(AArch64::X17, static_cast<uint16_t>(MaxTableEntry >> Offset),
1615 Offset);
1616 }
1617 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXrs)
1618 .addReg(AArch64::XZR)
1619 .addReg(AArch64::X16)
1620 .addReg(AArch64::X17)
1621 .addImm(0));
1622 }
1623
1624 // This picks entry #0 on failure.
1625 // We might want to trap instead.
1626 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::CSELXr)
1627 .addReg(AArch64::X16)
1628 .addReg(AArch64::X16)
1629 .addReg(AArch64::XZR)
1630 .addImm(AArch64CC::LS));
1631
1632 // Prepare the @PAGE/@PAGEOFF low/high operands.
1633 MachineOperand JTMOHi(JTOp), JTMOLo(JTOp);
1634 MCOperand JTMCHi, JTMCLo;
1635
1636 JTMOHi.setTargetFlags(AArch64II::MO_PAGE);
1637 JTMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
1638
1639 MCInstLowering.lowerOperand(JTMOHi, JTMCHi);
1640 MCInstLowering.lowerOperand(JTMOLo, JTMCLo);
1641
1642 EmitToStreamer(
1643 *OutStreamer,
1644 MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(JTMCHi));
1645
1646 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXri)
1647 .addReg(AArch64::X17)
1648 .addReg(AArch64::X17)
1649 .addOperand(JTMCLo)
1650 .addImm(0));
1651
1652 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRSWroX)
1653 .addReg(AArch64::X16)
1654 .addReg(AArch64::X17)
1655 .addReg(AArch64::X16)
1656 .addImm(0)
1657 .addImm(1));
1658
1659 MCSymbol *AdrLabel = MF->getContext().createTempSymbol();
1660 const auto *AdrLabelE = MCSymbolRefExpr::create(AdrLabel, MF->getContext());
1661 AArch64FI->setJumpTableEntryInfo(JTI, 4, AdrLabel);
1662
1663 OutStreamer->emitLabel(AdrLabel);
1664 EmitToStreamer(
1665 *OutStreamer,
1666 MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addExpr(AdrLabelE));
1667
1668 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1669 .addReg(AArch64::X16)
1670 .addReg(AArch64::X17)
1671 .addReg(AArch64::X16)
1672 .addImm(0));
1673
1674 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
1675}
1676
1677void AArch64AsmPrinter::LowerMOPS(llvm::MCStreamer &OutStreamer,
1678 const llvm::MachineInstr &MI) {
1679 unsigned Opcode = MI.getOpcode();
1680 assert(STI->hasMOPS());
1681 assert(STI->hasMTE() || Opcode != AArch64::MOPSMemorySetTaggingPseudo);
1682
1683 const auto Ops = [Opcode]() -> std::array<unsigned, 3> {
1684 if (Opcode == AArch64::MOPSMemoryCopyPseudo)
1685 return {AArch64::CPYFP, AArch64::CPYFM, AArch64::CPYFE};
1686 if (Opcode == AArch64::MOPSMemoryMovePseudo)
1687 return {AArch64::CPYP, AArch64::CPYM, AArch64::CPYE};
1688 if (Opcode == AArch64::MOPSMemorySetPseudo)
1689 return {AArch64::SETP, AArch64::SETM, AArch64::SETE};
1690 if (Opcode == AArch64::MOPSMemorySetTaggingPseudo)
1691 return {AArch64::SETGP, AArch64::SETGM, AArch64::MOPSSETGE};
1692 llvm_unreachable("Unhandled memory operation pseudo");
1693 }();
1694 const bool IsSet = Opcode == AArch64::MOPSMemorySetPseudo ||
1695 Opcode == AArch64::MOPSMemorySetTaggingPseudo;
1696
1697 for (auto Op : Ops) {
1698 int i = 0;
1699 auto MCIB = MCInstBuilder(Op);
1700 // Destination registers
1701 MCIB.addReg(MI.getOperand(i++).getReg());
1702 MCIB.addReg(MI.getOperand(i++).getReg());
1703 if (!IsSet)
1704 MCIB.addReg(MI.getOperand(i++).getReg());
1705 // Input registers
1706 MCIB.addReg(MI.getOperand(i++).getReg());
1707 MCIB.addReg(MI.getOperand(i++).getReg());
1708 MCIB.addReg(MI.getOperand(i++).getReg());
1709
1710 EmitToStreamer(OutStreamer, MCIB);
1711 }
1712}
1713
1714void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
1715 const MachineInstr &MI) {
1716 unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();
1717
1718 auto &Ctx = OutStreamer.getContext();
1719 MCSymbol *MILabel = Ctx.createTempSymbol();
1720 OutStreamer.emitLabel(MILabel);
1721
1722 SM.recordStackMap(*MILabel, MI);
1723 assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1724
1725 // Scan ahead to trim the shadow.
1726 const MachineBasicBlock &MBB = *MI.getParent();
1728 ++MII;
1729 while (NumNOPBytes > 0) {
1730 if (MII == MBB.end() || MII->isCall() ||
1731 MII->getOpcode() == AArch64::DBG_VALUE ||
1732 MII->getOpcode() == TargetOpcode::PATCHPOINT ||
1733 MII->getOpcode() == TargetOpcode::STACKMAP)
1734 break;
1735 ++MII;
1736 NumNOPBytes -= 4;
1737 }
1738
1739 // Emit nops.
1740 for (unsigned i = 0; i < NumNOPBytes; i += 4)
1741 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1742}
1743
1744// Lower a patchpoint of the form:
1745// [<def>], <id>, <numBytes>, <target>, <numArgs>
1746void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1747 const MachineInstr &MI) {
1748 auto &Ctx = OutStreamer.getContext();
1749 MCSymbol *MILabel = Ctx.createTempSymbol();
1750 OutStreamer.emitLabel(MILabel);
1751 SM.recordPatchPoint(*MILabel, MI);
1752
1753 PatchPointOpers Opers(&MI);
1754
1755 int64_t CallTarget = Opers.getCallTarget().getImm();
1756 unsigned EncodedBytes = 0;
1757 if (CallTarget) {
1758 assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
1759 "High 16 bits of call target should be zero.");
1760 Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
1761 EncodedBytes = 16;
1762 // Materialize the jump address:
1763 emitMOVZ(ScratchReg, (CallTarget >> 32) & 0xFFFF, 32);
1764 emitMOVK(ScratchReg, (CallTarget >> 16) & 0xFFFF, 16);
1765 emitMOVK(ScratchReg, CallTarget & 0xFFFF, 0);
1766 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
1767 }
1768 // Emit padding.
1769 unsigned NumBytes = Opers.getNumPatchBytes();
1770 assert(NumBytes >= EncodedBytes &&
1771 "Patchpoint can't request size less than the length of a call.");
1772 assert((NumBytes - EncodedBytes) % 4 == 0 &&
1773 "Invalid number of NOP bytes requested!");
1774 for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
1775 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1776}
1777
1778void AArch64AsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1779 const MachineInstr &MI) {
1780 StatepointOpers SOpers(&MI);
1781 if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
1782 assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1783 for (unsigned i = 0; i < PatchBytes; i += 4)
1784 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1785 } else {
1786 // Lower call target and choose correct opcode
1787 const MachineOperand &CallTarget = SOpers.getCallTarget();
1788 MCOperand CallTargetMCOp;
1789 unsigned CallOpcode;
1790 switch (CallTarget.getType()) {
1793 MCInstLowering.lowerOperand(CallTarget, CallTargetMCOp);
1794 CallOpcode = AArch64::BL;
1795 break;
1797 CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
1798 CallOpcode = AArch64::BL;
1799 break;
1801 CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
1802 CallOpcode = AArch64::BLR;
1803 break;
1804 default:
1805 llvm_unreachable("Unsupported operand type in statepoint call target");
1806 break;
1807 }
1808
1809 EmitToStreamer(OutStreamer,
1810 MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
1811 }
1812
1813 auto &Ctx = OutStreamer.getContext();
1814 MCSymbol *MILabel = Ctx.createTempSymbol();
1815 OutStreamer.emitLabel(MILabel);
1816 SM.recordStatepoint(*MILabel, MI);
1817}
1818
1819void AArch64AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI) {
1820 // FAULTING_LOAD_OP <def>, <faltinf type>, <MBB handler>,
1821 // <opcode>, <operands>
1822
1823 Register DefRegister = FaultingMI.getOperand(0).getReg();
1825 static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
1826 MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
1827 unsigned Opcode = FaultingMI.getOperand(3).getImm();
1828 unsigned OperandsBeginIdx = 4;
1829
1830 auto &Ctx = OutStreamer->getContext();
1831 MCSymbol *FaultingLabel = Ctx.createTempSymbol();
1832 OutStreamer->emitLabel(FaultingLabel);
1833
1834 assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
1835 FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);
1836
1837 MCInst MI;
1838 MI.setOpcode(Opcode);
1839
1840 if (DefRegister != (Register)0)
1841 MI.addOperand(MCOperand::createReg(DefRegister));
1842
1843 for (const MachineOperand &MO :
1844 llvm::drop_begin(FaultingMI.operands(), OperandsBeginIdx)) {
1845 MCOperand Dest;
1846 lowerOperand(MO, Dest);
1847 MI.addOperand(Dest);
1848 }
1849
1850 OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
1851 EmitToStreamer(MI);
1852}
1853
1854void AArch64AsmPrinter::emitMovXReg(Register Dest, Register Src) {
1855 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ORRXrs)
1856 .addReg(Dest)
1857 .addReg(AArch64::XZR)
1858 .addReg(Src)
1859 .addImm(0));
1860}
1861
1862void AArch64AsmPrinter::emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift) {
1863 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1864 EmitToStreamer(*OutStreamer,
1865 MCInstBuilder(Is64Bit ? AArch64::MOVZXi : AArch64::MOVZWi)
1866 .addReg(Dest)
1867 .addImm(Imm)
1868 .addImm(Shift));
1869}
1870
1871void AArch64AsmPrinter::emitMOVK(Register Dest, uint64_t Imm, unsigned Shift) {
1872 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1873 EmitToStreamer(*OutStreamer,
1874 MCInstBuilder(Is64Bit ? AArch64::MOVKXi : AArch64::MOVKWi)
1875 .addReg(Dest)
1876 .addReg(Dest)
1877 .addImm(Imm)
1878 .addImm(Shift));
1879}
1880
1881void AArch64AsmPrinter::emitAUT(AArch64PACKey::ID Key, Register Pointer,
1882 Register Disc) {
1883 bool IsZeroDisc = Disc == AArch64::XZR;
1884 unsigned Opcode = getAUTOpcodeForKey(Key, IsZeroDisc);
1885
1886 // autiza x16 ; if IsZeroDisc
1887 // autia x16, x17 ; if !IsZeroDisc
1888 MCInst AUTInst;
1889 AUTInst.setOpcode(Opcode);
1890 AUTInst.addOperand(MCOperand::createReg(Pointer));
1891 AUTInst.addOperand(MCOperand::createReg(Pointer));
1892 if (!IsZeroDisc)
1893 AUTInst.addOperand(MCOperand::createReg(Disc));
1894
1895 EmitToStreamer(AUTInst);
1896}
1897
1898void AArch64AsmPrinter::emitPAC(AArch64PACKey::ID Key, Register Pointer,
1899 Register Disc) {
1900 bool IsZeroDisc = Disc == AArch64::XZR;
1901 unsigned Opcode = getPACOpcodeForKey(Key, IsZeroDisc);
1902
1903 // paciza x16 ; if IsZeroDisc
1904 // pacia x16, x17 ; if !IsZeroDisc
1905 MCInst PACInst;
1906 PACInst.setOpcode(Opcode);
1907 PACInst.addOperand(MCOperand::createReg(Pointer));
1908 PACInst.addOperand(MCOperand::createReg(Pointer));
1909 if (!IsZeroDisc)
1910 PACInst.addOperand(MCOperand::createReg(Disc));
1911
1912 EmitToStreamer(PACInst);
1913}
1914
1915void AArch64AsmPrinter::emitBLRA(bool IsCall, AArch64PACKey::ID Key,
1916 Register Target, Register Disc) {
1917 bool IsZeroDisc = Disc == AArch64::XZR;
1918 unsigned Opcode = getBranchOpcodeForKey(IsCall, Key, IsZeroDisc);
1919
1920 // blraaz x16 ; if IsZeroDisc
1921 // blraa x16, x17 ; if !IsZeroDisc
1922 MCInst Inst;
1923 Inst.setOpcode(Opcode);
1924 Inst.addOperand(MCOperand::createReg(Target));
1925 if (!IsZeroDisc)
1926 Inst.addOperand(MCOperand::createReg(Disc));
1927 EmitToStreamer(Inst);
1928}
1929
1930void AArch64AsmPrinter::emitFMov0(const MachineInstr &MI) {
1931 Register DestReg = MI.getOperand(0).getReg();
1932 if (!STI->hasZeroCycleZeroingFPWorkaround() && STI->isNeonAvailable()) {
1933 if (STI->hasZeroCycleZeroingFPR64()) {
1934 // Convert H/S register to corresponding D register
1935 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
1936 if (AArch64::FPR16RegClass.contains(DestReg))
1937 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
1938 &AArch64::FPR64RegClass);
1939 else if (AArch64::FPR32RegClass.contains(DestReg))
1940 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
1941 &AArch64::FPR64RegClass);
1942 else
1943 assert(AArch64::FPR64RegClass.contains(DestReg));
1944
1945 MCInst MOVI;
1946 MOVI.setOpcode(AArch64::MOVID);
1947 MOVI.addOperand(MCOperand::createReg(DestReg));
1949 EmitToStreamer(*OutStreamer, MOVI);
1950 ++NumZCZeroingInstrsFPR;
1951 } else if (STI->hasZeroCycleZeroingFPR128()) {
1952 // Convert H/S/D register to corresponding Q register
1953 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
1954 if (AArch64::FPR16RegClass.contains(DestReg)) {
1955 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
1956 &AArch64::FPR128RegClass);
1957 } else if (AArch64::FPR32RegClass.contains(DestReg)) {
1958 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
1959 &AArch64::FPR128RegClass);
1960 } else {
1961 assert(AArch64::FPR64RegClass.contains(DestReg));
1962 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::dsub,
1963 &AArch64::FPR128RegClass);
1964 }
1965
1966 MCInst MOVI;
1967 MOVI.setOpcode(AArch64::MOVIv2d_ns);
1968 MOVI.addOperand(MCOperand::createReg(DestReg));
1970 EmitToStreamer(*OutStreamer, MOVI);
1971 ++NumZCZeroingInstrsFPR;
1972 } else {
1973 emitFMov0AsFMov(MI, DestReg);
1974 }
1975 } else {
1976 emitFMov0AsFMov(MI, DestReg);
1977 }
1978}
1979
1980void AArch64AsmPrinter::emitFMov0AsFMov(const MachineInstr &MI,
1981 Register DestReg) {
1982 MCInst FMov;
1983 switch (MI.getOpcode()) {
1984 default:
1985 llvm_unreachable("Unexpected opcode");
1986 case AArch64::FMOVH0:
1987 FMov.setOpcode(STI->hasFullFP16() ? AArch64::FMOVWHr : AArch64::FMOVWSr);
1988 if (!STI->hasFullFP16())
1989 DestReg = (AArch64::S0 + (DestReg - AArch64::H0));
1990 FMov.addOperand(MCOperand::createReg(DestReg));
1991 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1992 break;
1993 case AArch64::FMOVS0:
1994 FMov.setOpcode(AArch64::FMOVWSr);
1995 FMov.addOperand(MCOperand::createReg(DestReg));
1996 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1997 break;
1998 case AArch64::FMOVD0:
1999 FMov.setOpcode(AArch64::FMOVXDr);
2000 FMov.addOperand(MCOperand::createReg(DestReg));
2001 FMov.addOperand(MCOperand::createReg(AArch64::XZR));
2002 break;
2003 }
2004 EmitToStreamer(*OutStreamer, FMov);
2005}
2006
2007Register AArch64AsmPrinter::emitPtrauthDiscriminator(uint64_t Disc,
2008 Register AddrDisc,
2009 Register ScratchReg,
2010 bool MayClobberAddrDisc) {
2011 assert(isPtrauthRegSafe(ScratchReg) &&
2012 "Safe scratch register must be provided by the caller");
2013 assert(isUInt<16>(Disc) && "Constant discriminator is too wide");
2014
2015 // So far we've used NoRegister in pseudos. Now we need real encodings.
2016 if (AddrDisc == AArch64::NoRegister)
2017 AddrDisc = AArch64::XZR;
2018
2019 // If there is no constant discriminator, there's no blend involved:
2020 // just use the address discriminator register as-is (XZR or not).
2021 if (!Disc)
2022 return AddrDisc;
2023
2024 // If there's only a constant discriminator, MOV it into the scratch register.
2025 if (AddrDisc == AArch64::XZR) {
2026 emitMOVZ(ScratchReg, Disc, 0);
2027 return ScratchReg;
2028 }
2029
2030 // If there are both, emit a blend into the scratch register.
2031
2032 // Check if we can save one MOV instruction.
2033 if (MayClobberAddrDisc && isPtrauthRegSafe(AddrDisc)) {
2034 ScratchReg = AddrDisc;
2035 } else {
2036 emitMovXReg(ScratchReg, AddrDisc);
2037 assert(ScratchReg != AddrDisc &&
2038 "Forbidden to clobber AddrDisc, but have to");
2039 }
2040
2041 emitMOVK(ScratchReg, Disc, 48);
2042 return ScratchReg;
2043}
2044
2045/// Emit a code sequence to check an authenticated pointer value.
2046///
2047/// This function emits a sequence of instructions that checks if TestedReg was
2048/// authenticated successfully. On success, execution continues at the next
2049/// instruction after the sequence.
2050///
2051/// The action performed on failure depends on the OnFailure argument:
2052/// * if OnFailure is not nullptr, control is transferred to that label after
2053/// clearing the PAC field
2054/// * otherwise, BRK instruction is emitted to generate an error
2055void AArch64AsmPrinter::emitPtrauthCheckAuthenticatedValue(
2056 Register TestedReg, Register ScratchReg, AArch64PACKey::ID Key,
2057 AArch64PAuth::AuthCheckMethod Method, const MCSymbol *OnFailure) {
2058 // Insert a sequence to check if authentication of TestedReg succeeded,
2059 // such as:
2060 //
2061 // - checked and clearing:
2062 // ; x16 is TestedReg, x17 is ScratchReg
2063 // mov x17, x16
2064 // xpaci x17
2065 // cmp x16, x17
2066 // b.eq Lsuccess
2067 // mov x16, x17
2068 // b Lend
2069 // Lsuccess:
2070 // ; skipped if authentication failed
2071 // Lend:
2072 // ...
2073 //
2074 // - checked and trapping:
2075 // mov x17, x16
2076 // xpaci x17
2077 // cmp x16, x17
2078 // b.eq Lsuccess
2079 // brk #<0xc470 + aut key>
2080 // Lsuccess:
2081 // ...
2082 //
2083 // See the documentation on AuthCheckMethod enumeration constants for
2084 // the specific code sequences that can be used to perform the check.
2086
2087 if (Method == AuthCheckMethod::None)
2088 return;
2089 if (Method == AuthCheckMethod::DummyLoad) {
2090 EmitToStreamer(MCInstBuilder(AArch64::LDRWui)
2091 .addReg(getWRegFromXReg(ScratchReg))
2092 .addReg(TestedReg)
2093 .addImm(0));
2094 assert(!OnFailure && "DummyLoad always traps on error");
2095 return;
2096 }
2097
2098 MCSymbol *SuccessSym = createTempSymbol("auth_success_");
2099 if (Method == AuthCheckMethod::XPAC || Method == AuthCheckMethod::XPACHint) {
2100 // mov Xscratch, Xtested
2101 emitMovXReg(ScratchReg, TestedReg);
2102
2103 if (Method == AuthCheckMethod::XPAC) {
2104 // xpac(i|d) Xscratch
2105 unsigned XPACOpc = getXPACOpcodeForKey(Key);
2106 EmitToStreamer(
2107 MCInstBuilder(XPACOpc).addReg(ScratchReg).addReg(ScratchReg));
2108 } else {
2109 // xpaclri
2110
2111 // Note that this method applies XPAC to TestedReg instead of ScratchReg.
2112 assert(TestedReg == AArch64::LR &&
2113 "XPACHint mode is only compatible with checking the LR register");
2115 "XPACHint mode is only compatible with I-keys");
2116 EmitToStreamer(MCInstBuilder(AArch64::XPACLRI));
2117 }
2118
2119 // cmp Xtested, Xscratch
2120 EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
2121 .addReg(AArch64::XZR)
2122 .addReg(TestedReg)
2123 .addReg(ScratchReg)
2124 .addImm(0));
2125
2126 // b.eq Lsuccess
2127 EmitToStreamer(
2128 MCInstBuilder(AArch64::Bcc)
2129 .addImm(AArch64CC::EQ)
2130 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
2131 } else if (Method == AuthCheckMethod::HighBitsNoTBI) {
2132 // eor Xscratch, Xtested, Xtested, lsl #1
2133 EmitToStreamer(MCInstBuilder(AArch64::EORXrs)
2134 .addReg(ScratchReg)
2135 .addReg(TestedReg)
2136 .addReg(TestedReg)
2137 .addImm(1));
2138 // tbz Xscratch, #62, Lsuccess
2139 EmitToStreamer(
2140 MCInstBuilder(AArch64::TBZX)
2141 .addReg(ScratchReg)
2142 .addImm(62)
2143 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
2144 } else {
2145 llvm_unreachable("Unsupported check method");
2146 }
2147
2148 if (!OnFailure) {
2149 // Trapping sequences do a 'brk'.
2150 // brk #<0xc470 + aut key>
2151 EmitToStreamer(MCInstBuilder(AArch64::BRK).addImm(0xc470 | Key));
2152 } else {
2153 // Non-trapping checked sequences return the stripped result in TestedReg,
2154 // skipping over success-only code (such as re-signing the pointer) by
2155 // jumping to OnFailure label.
2156 // Note that this can introduce an authentication oracle (such as based on
2157 // the high bits of the re-signed value).
2158
2159 // FIXME: The XPAC method can be optimized by applying XPAC to TestedReg
2160 // instead of ScratchReg, thus eliminating one `mov` instruction.
2161 // Both XPAC and XPACHint can be further optimized by not using a
2162 // conditional branch jumping over an unconditional one.
2163
2164 switch (Method) {
2165 case AuthCheckMethod::XPACHint:
2166 // LR is already XPAC-ed at this point.
2167 break;
2168 case AuthCheckMethod::XPAC:
2169 // mov Xtested, Xscratch
2170 emitMovXReg(TestedReg, ScratchReg);
2171 break;
2172 default:
2173 // If Xtested was not XPAC-ed so far, emit XPAC here.
2174 // xpac(i|d) Xtested
2175 unsigned XPACOpc = getXPACOpcodeForKey(Key);
2176 EmitToStreamer(
2177 MCInstBuilder(XPACOpc).addReg(TestedReg).addReg(TestedReg));
2178 }
2179
2180 // b Lend
2181 const auto *OnFailureExpr = MCSymbolRefExpr::create(OnFailure, OutContext);
2182 EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(OnFailureExpr));
2183 }
2184
2185 // If the auth check succeeds, we can continue.
2186 // Lsuccess:
2187 OutStreamer->emitLabel(SuccessSym);
2188}
2189
2190// With Pointer Authentication, it may be needed to explicitly check the
2191// authenticated value in LR before performing a tail call.
2192// Otherwise, the callee may re-sign the invalid return address,
2193// introducing a signing oracle.
2194void AArch64AsmPrinter::emitPtrauthTailCallHardening(const MachineInstr *TC) {
2195 if (!AArch64FI->shouldSignReturnAddress(*MF))
2196 return;
2197
2198 auto LRCheckMethod = STI->getAuthenticatedLRCheckMethod(*MF);
2199 if (LRCheckMethod == AArch64PAuth::AuthCheckMethod::None)
2200 return;
2201
2202 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
2203 Register ScratchReg =
2204 TC->readsRegister(AArch64::X16, TRI) ? AArch64::X17 : AArch64::X16;
2205 assert(!TC->readsRegister(ScratchReg, TRI) &&
2206 "Neither x16 nor x17 is available as a scratch register");
2209 emitPtrauthCheckAuthenticatedValue(AArch64::LR, ScratchReg, Key,
2210 LRCheckMethod);
2211}
2212
2213bool AArch64AsmPrinter::emitDeactivationSymbolRelocation(Value *DS) {
2214 if (!DS)
2215 return false;
2216
2217 if (isa<GlobalAlias>(DS)) {
2218 // Just emit the nop directly.
2219 EmitToStreamer(MCInstBuilder(AArch64::NOP));
2220 return true;
2221 }
2222 MCSymbol *Dot = OutContext.createTempSymbol();
2223 OutStreamer->emitLabel(Dot);
2224 const MCExpr *DeactDotExpr = MCSymbolRefExpr::create(Dot, OutContext);
2225
2226 const MCExpr *DSExpr = MCSymbolRefExpr::create(
2227 OutContext.getOrCreateSymbol(DS->getName()), OutContext);
2228 OutStreamer->emitRelocDirective(*DeactDotExpr, "R_AARCH64_PATCHINST", DSExpr,
2229 SMLoc());
2230 return false;
2231}
2232
// Build a signing/authentication schema from a pseudo's constant
// discriminator and its address-discriminator register operand. The operand's
// kill flag is captured so later expansion can tell whether the address
// discriminator register may be clobbered when blending discriminators.
AArch64AsmPrinter::PtrAuthSchema::PtrAuthSchema(
    AArch64PACKey::ID Key, uint64_t IntDisc, const MachineOperand &AddrDiscOp)
    : Key(Key), IntDisc(IntDisc), AddrDisc(AddrDiscOp.getReg()),
      AddrDiscIsKilled(AddrDiscOp.isKill()) {}
2237
2238void AArch64AsmPrinter::emitPtrauthAuthResign(
2239 Register Pointer, Register Scratch, PtrAuthSchema AuthSchema,
2240 std::optional<PtrAuthSchema> SignSchema, Value *DS) {
2241 const bool IsResign = SignSchema.has_value();
2242
2243 // We expand AUT/AUTPAC into a sequence of the form
2244 //
2245 // ; authenticate x16
2246 // ; check pointer in x16
2247 // Lsuccess:
2248 // ; sign x16 (if AUTPAC)
2249 // Lend: ; if not trapping on failure
2250 //
2251 // with the checking sequence chosen depending on whether/how we should check
2252 // the pointer and whether we should trap on failure.
2253
2254 // By default, auth/resign sequences check for auth failures.
2255 bool ShouldCheck = true;
2256 // In the checked sequence, we only trap if explicitly requested.
2257 bool ShouldTrap = MF->getFunction().hasFnAttribute("ptrauth-auth-traps");
2258
2259 // On an FPAC CPU, you get traps whether you want them or not: there's
2260 // no point in emitting checks or traps.
2261 if (STI->hasFPAC())
2262 ShouldCheck = ShouldTrap = false;
2263
2264 // However, command-line flags can override this, for experimentation.
2265 switch (PtrauthAuthChecks) {
2267 break;
2269 ShouldCheck = ShouldTrap = false;
2270 break;
2272 ShouldCheck = true;
2273 ShouldTrap = false;
2274 break;
2276 ShouldCheck = ShouldTrap = true;
2277 break;
2278 }
2279
2280 // Compute aut discriminator
2281 Register AUTDiscReg =
2282 emitPtrauthDiscriminator(AuthSchema.IntDisc, AuthSchema.AddrDisc, Scratch,
2283 AuthSchema.AddrDiscIsKilled);
2284
2285 if (!emitDeactivationSymbolRelocation(DS))
2286 emitAUT(AuthSchema.Key, Pointer, AUTDiscReg);
2287
2288 // Unchecked or checked-but-non-trapping AUT is just an "AUT": we're done.
2289 if (!IsResign && (!ShouldCheck || !ShouldTrap))
2290 return;
2291
2292 MCSymbol *EndSym = nullptr;
2293
2294 if (ShouldCheck) {
2295 if (IsResign && !ShouldTrap)
2296 EndSym = createTempSymbol("resign_end_");
2297
2298 emitPtrauthCheckAuthenticatedValue(Pointer, Scratch, AuthSchema.Key,
2299 AArch64PAuth::AuthCheckMethod::XPAC,
2300 EndSym);
2301 }
2302
2303 // We already emitted unchecked and checked-but-non-trapping AUTs.
2304 // That left us with trapping AUTs, and AUTPACs.
2305 // Trapping AUTs don't need PAC: we're done.
2306 if (!IsResign)
2307 return;
2308
2309 // Compute pac discriminator
2310 Register PACDiscReg = emitPtrauthDiscriminator(SignSchema->IntDisc,
2311 SignSchema->AddrDisc, Scratch);
2312 emitPAC(SignSchema->Key, Pointer, PACDiscReg);
2313
2314 // Lend:
2315 if (EndSym)
2316 OutStreamer->emitLabel(EndSym);
2317}
2318
2319void AArch64AsmPrinter::emitPtrauthSign(const MachineInstr *MI) {
2320 Register Val = MI->getOperand(1).getReg();
2321 auto Key = (AArch64PACKey::ID)MI->getOperand(2).getImm();
2322 uint64_t Disc = MI->getOperand(3).getImm();
2323 Register AddrDisc = MI->getOperand(4).getReg();
2324 bool AddrDiscKilled = MI->getOperand(4).isKill();
2325
2326 // As long as at least one of Val and AddrDisc is in GPR64noip, a scratch
2327 // register is available.
2328 Register ScratchReg = Val == AArch64::X16 ? AArch64::X17 : AArch64::X16;
2329 assert(ScratchReg != AddrDisc &&
2330 "Neither X16 nor X17 is available as a scratch register");
2331
2332 // Compute pac discriminator
2333 Register DiscReg = emitPtrauthDiscriminator(
2334 Disc, AddrDisc, ScratchReg, /*MayClobberAddrDisc=*/AddrDiscKilled);
2335
2336 if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol()))
2337 return;
2338
2339 emitPAC(Key, Val, DiscReg);
2340}
2341
2342void AArch64AsmPrinter::emitPtrauthBranch(const MachineInstr *MI) {
2343 bool IsCall = MI->getOpcode() == AArch64::BLRA;
2344 unsigned BrTarget = MI->getOperand(0).getReg();
2345
2346 auto Key = (AArch64PACKey::ID)MI->getOperand(1).getImm();
2347 uint64_t Disc = MI->getOperand(2).getImm();
2348
2349 unsigned AddrDisc = MI->getOperand(3).getReg();
2350
2351 // Make sure AddrDisc is solely used to compute the discriminator.
2352 // While hardly meaningful, it is still possible to describe an authentication
2353 // of a pointer against its own value (instead of storage address) with
2354 // intrinsics, so use report_fatal_error instead of assert.
2355 if (BrTarget == AddrDisc)
2356 report_fatal_error("Branch target is signed with its own value");
2357
2358 // If we are printing BLRA pseudo, try to save one MOV by making use of the
2359 // fact that x16 and x17 are described as clobbered by the MI instruction and
2360 // AddrDisc is not used as any other input.
2361 //
2362 // Back in the day, emitPtrauthDiscriminator was restricted to only returning
2363 // either x16 or x17, meaning the returned register is always among the
2364 // implicit-def'ed registers of BLRA pseudo. Now this property can be violated
2365 // if isX16X17Safer predicate is false, thus manually check if AddrDisc is
2366 // among x16 and x17 to prevent clobbering unexpected registers.
2367 //
2368 // Unlike BLRA, BRA pseudo is used to perform computed goto, and thus not
2369 // declared as clobbering x16/x17.
2370 //
2371 // FIXME: Make use of `killed` flags and register masks instead.
2372 bool AddrDiscIsImplicitDef =
2373 IsCall && (AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17);
2374 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17,
2375 AddrDiscIsImplicitDef);
2376 emitBLRA(IsCall, Key, BrTarget, DiscReg);
2377}
2378
/// Add the constant Addend to register Reg in place, using Tmp as a scratch
/// register to materialize large addends. Emits nothing when Addend is 0.
void AArch64AsmPrinter::emitAddImm(MCRegister Reg, int64_t Addend,
                                   MCRegister Tmp) {
  if (Addend != 0) {
    // Negate through uint64_t so Addend == INT64_MIN is well-defined.
    const uint64_t AbsOffset = (Addend > 0 ? Addend : -((uint64_t)Addend));
    const bool IsNeg = Addend < 0;
    if (isUInt<24>(AbsOffset)) {
      // Up to 24 bits: one or two ADD/SUB immediates of 12 bits each, the
      // second one shifted left by 12.
      for (int BitPos = 0; BitPos != 24 && (AbsOffset >> BitPos);
           BitPos += 12) {
        EmitToStreamer(
            MCInstBuilder(IsNeg ? AArch64::SUBXri : AArch64::ADDXri)
                .addReg(Reg)
                .addReg(Reg)
                .addImm((AbsOffset >> BitPos) & 0xfff)
                .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, BitPos)));
      }
    } else {
      // Larger addends: build the full (possibly negative) value in Tmp with
      // MOVZ/MOVN + MOVKs, then add it as a shifted-register operand.
      const uint64_t UAddend = Addend;
      // MOVN writes the bitwise-NOT of its immediate and sets the other
      // chunks to all-ones, so for negative addends the low 16 bits are
      // emitted complemented; later MOVKs insert the raw chunks.
      EmitToStreamer(MCInstBuilder(IsNeg ? AArch64::MOVNXi : AArch64::MOVZXi)
                         .addReg(Tmp)
                         .addImm((IsNeg ? ~UAddend : UAddend) & 0xffff)
                         .addImm(/*shift=*/0));
      // A MOVK at BitPos is needed unless every chunk at and above BitPos
      // already matches the background produced by the first instruction:
      // all-zero for MOVZ, all-one for MOVN.
      auto NeedMovk = [IsNeg, UAddend](int BitPos) -> bool {
        assert(BitPos == 16 || BitPos == 32 || BitPos == 48);
        uint64_t Shifted = UAddend >> BitPos;
        if (!IsNeg)
          return Shifted != 0;
        for (int I = 0; I != 64 - BitPos; I += 16)
          if (((Shifted >> I) & 0xffff) != 0xffff)
            return true;
        return false;
      };
      for (int BitPos = 16; BitPos != 64 && NeedMovk(BitPos); BitPos += 16)
        emitMOVK(Tmp, (UAddend >> BitPos) & 0xffff, BitPos);

      EmitToStreamer(MCInstBuilder(AArch64::ADDXrs)
                         .addReg(Reg)
                         .addReg(Reg)
                         .addReg(Tmp)
                         .addImm(/*shift=*/0));
    }
  }
}
2421
2422void AArch64AsmPrinter::emitAddress(MCRegister Reg, const MCExpr *Expr,
2423 MCRegister Tmp, bool DSOLocal,
2424 const MCSubtargetInfo &STI) {
2425 MCValue Val;
2426 if (!Expr->evaluateAsRelocatable(Val, nullptr))
2427 report_fatal_error("emitAddress could not evaluate");
2428 if (DSOLocal) {
2429 EmitToStreamer(
2430 MCInstBuilder(AArch64::ADRP)
2431 .addReg(Reg)
2433 OutStreamer->getContext())));
2434 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2435 .addReg(Reg)
2436 .addReg(Reg)
2437 .addExpr(MCSpecifierExpr::create(
2438 Expr, AArch64::S_LO12, OutStreamer->getContext()))
2439 .addImm(0));
2440 } else {
2441 auto *SymRef =
2442 MCSymbolRefExpr::create(Val.getAddSym(), OutStreamer->getContext());
2443 EmitToStreamer(
2444 MCInstBuilder(AArch64::ADRP)
2445 .addReg(Reg)
2447 OutStreamer->getContext())));
2448 EmitToStreamer(
2449 MCInstBuilder(AArch64::LDRXui)
2450 .addReg(Reg)
2451 .addReg(Reg)
2453 OutStreamer->getContext())));
2454 emitAddImm(Reg, Val.getConstant(), Tmp);
2455 }
2456}
2457
2459 // IFUNCs are ELF-only.
2460 if (!TT.isOSBinFormatELF())
2461 return false;
2462
2463 // IFUNCs are supported on glibc, bionic, and some but not all of the BSDs.
2464 return TT.isOSGlibc() || TT.isAndroid() || TT.isOSFreeBSD() ||
2465 TT.isOSDragonFly() || TT.isOSNetBSD();
2466}
2467
2468// Emit an ifunc resolver that returns a signed pointer to the specified target,
2469// and return a FUNCINIT reference to the resolver. In the linked binary, this
2470// function becomes the target of an IRELATIVE relocation. This resolver is used
2471// to relocate signed pointers in global variable initializers in special cases
2472// where the standard R_AARCH64_AUTH_ABS64 relocation would not work.
2473//
2474// Example (signed null pointer, not address discriminated):
2475//
2476// .8byte .Lpauth_ifunc0
2477// .pushsection .text.startup,"ax",@progbits
2478// .Lpauth_ifunc0:
2479// mov x0, #0
2480// mov x1, #12345
2481// b __emupac_pacda
2482//
2483// Example (signed null pointer, address discriminated):
2484//
2485// .Ltmp:
2486// .8byte .Lpauth_ifunc0
2487// .pushsection .text.startup,"ax",@progbits
2488// .Lpauth_ifunc0:
2489// mov x0, #0
2490// adrp x1, .Ltmp
2491// add x1, x1, :lo12:.Ltmp
2492// b __emupac_pacda
2493// .popsection
2494//
2495// Example (signed pointer to symbol, not address discriminated):
2496//
2497// .Ltmp:
2498// .8byte .Lpauth_ifunc0
2499// .pushsection .text.startup,"ax",@progbits
2500// .Lpauth_ifunc0:
2501// adrp x0, symbol
2502// add x0, x0, :lo12:symbol
2503// mov x1, #12345
2504// b __emupac_pacda
2505// .popsection
2506//
2507// Example (signed null pointer, not address discriminated, with deactivation
2508// symbol ds):
2509//
2510// .8byte .Lpauth_ifunc0
2511// .pushsection .text.startup,"ax",@progbits
2512// .Lpauth_ifunc0:
2513// mov x0, #0
2514// mov x1, #12345
2515// .reloc ., R_AARCH64_PATCHINST, ds
2516// b __emupac_pacda
2517// ret
2518// .popsection
2519const MCExpr *AArch64AsmPrinter::emitPAuthRelocationAsIRelative(
2520 const MCExpr *Target, uint64_t Disc, AArch64PACKey::ID KeyID,
2521 bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr) {
2522 const Triple &TT = TM.getTargetTriple();
2523
2524 // We only emit an IRELATIVE relocation if the target supports IRELATIVE.
2526 return nullptr;
2527
2528 // For now, only the DA key is supported.
2529 if (KeyID != AArch64PACKey::DA)
2530 return nullptr;
2531
2532 // AArch64Subtarget is huge, so heap allocate it so we don't run out of stack
2533 // space.
2534 auto STI = std::make_unique<AArch64Subtarget>(
2535 TT, TM.getTargetCPU(), TM.getTargetCPU(), TM.getTargetFeatureString(), TM,
2536 true);
2537 this->STI = STI.get();
2538
2539 MCSymbol *Place = OutStreamer->getContext().createTempSymbol();
2540 OutStreamer->emitLabel(Place);
2541 OutStreamer->pushSection();
2542
2543 const MCSymbolELF *Group =
2544 static_cast<MCSectionELF *>(OutStreamer->getCurrentSectionOnly())
2545 ->getGroup();
2547 if (Group)
2549 OutStreamer->switchSection(OutStreamer->getContext().getELFSection(
2550 ".text.startup", ELF::SHT_PROGBITS, Flags, 0, Group, true,
2551 Group ? MCSection::NonUniqueID : PAuthIFuncNextUniqueID++, nullptr));
2552
2553 MCSymbol *IRelativeSym =
2554 OutStreamer->getContext().createLinkerPrivateSymbol("pauth_ifunc");
2555 OutStreamer->emitLabel(IRelativeSym);
2556 if (isa<MCConstantExpr>(Target)) {
2557 OutStreamer->emitInstruction(MCInstBuilder(AArch64::MOVZXi)
2558 .addReg(AArch64::X0)
2559 .addExpr(Target)
2560 .addImm(0),
2561 *STI);
2562 } else {
2563 emitAddress(AArch64::X0, Target, AArch64::X16, IsDSOLocal, *STI);
2564 }
2565 if (HasAddressDiversity) {
2566 auto *PlacePlusDisc = MCBinaryExpr::createAdd(
2567 MCSymbolRefExpr::create(Place, OutStreamer->getContext()),
2568 MCConstantExpr::create(Disc, OutStreamer->getContext()),
2569 OutStreamer->getContext());
2570 emitAddress(AArch64::X1, PlacePlusDisc, AArch64::X16, /*IsDSOLocal=*/true,
2571 *STI);
2572 } else {
2573 if (!isUInt<16>(Disc)) {
2574 OutContext.reportError(SMLoc(), "AArch64 PAC Discriminator '" +
2575 Twine(Disc) +
2576 "' out of range [0, 0xFFFF]");
2577 }
2578 emitMOVZ(AArch64::X1, Disc, 0);
2579 }
2580
2581 if (DSExpr) {
2582 MCSymbol *PrePACInst = OutStreamer->getContext().createTempSymbol();
2583 OutStreamer->emitLabel(PrePACInst);
2584
2585 auto *PrePACInstExpr =
2586 MCSymbolRefExpr::create(PrePACInst, OutStreamer->getContext());
2587 OutStreamer->emitRelocDirective(*PrePACInstExpr, "R_AARCH64_PATCHINST",
2588 DSExpr, SMLoc());
2589 }
2590
2591 // We don't know the subtarget because this is being emitted for a global
2592 // initializer. Because the performance of IFUNC resolvers is unimportant, we
2593 // always call the EmuPAC runtime, which will end up using the PAC instruction
2594 // if the target supports PAC.
2595 MCSymbol *EmuPAC =
2596 OutStreamer->getContext().getOrCreateSymbol("__emupac_pacda");
2597 const MCSymbolRefExpr *EmuPACRef =
2598 MCSymbolRefExpr::create(EmuPAC, OutStreamer->getContext());
2599 OutStreamer->emitInstruction(MCInstBuilder(AArch64::B).addExpr(EmuPACRef),
2600 *STI);
2601
2602 // We need a RET despite the above tail call because the deactivation symbol
2603 // may replace the tail call with a NOP.
2604 if (DSExpr)
2605 OutStreamer->emitInstruction(
2606 MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI);
2607 OutStreamer->popSection();
2608
2609 return MCSymbolRefExpr::create(IRelativeSym, AArch64::S_FUNCINIT,
2610 OutStreamer->getContext());
2611}
2612
2613const MCExpr *
2614AArch64AsmPrinter::lowerConstantPtrAuth(const ConstantPtrAuth &CPA) {
2615 MCContext &Ctx = OutContext;
2616
2617 // Figure out the base symbol and the addend, if any.
2618 APInt Offset(64, 0);
2619 const Value *BaseGV = CPA.getPointer()->stripAndAccumulateConstantOffsets(
2620 getDataLayout(), Offset, /*AllowNonInbounds=*/true);
2621
2622 auto *BaseGVB = dyn_cast<GlobalValue>(BaseGV);
2623
2624 const MCExpr *Sym;
2625 if (BaseGVB) {
2626 // If there is an addend, turn that into the appropriate MCExpr.
2627 Sym = MCSymbolRefExpr::create(getSymbol(BaseGVB), Ctx);
2628 if (Offset.sgt(0))
2630 Sym, MCConstantExpr::create(Offset.getSExtValue(), Ctx), Ctx);
2631 else if (Offset.slt(0))
2633 Sym, MCConstantExpr::create((-Offset).getSExtValue(), Ctx), Ctx);
2634 } else {
2635 Sym = MCConstantExpr::create(Offset.getSExtValue(), Ctx);
2636 }
2637
2638 const MCExpr *DSExpr = nullptr;
2639 if (auto *DS = dyn_cast<GlobalValue>(CPA.getDeactivationSymbol())) {
2640 if (isa<GlobalAlias>(DS))
2641 return Sym;
2642 DSExpr = MCSymbolRefExpr::create(getSymbol(DS), Ctx);
2643 }
2644
2645 uint64_t KeyID = CPA.getKey()->getZExtValue();
2646 // We later rely on valid KeyID value in AArch64PACKeyIDToString call from
2647 // AArch64AuthMCExpr::printImpl, so fail fast.
2648 if (KeyID > AArch64PACKey::LAST) {
2649 CPA.getContext().emitError("AArch64 PAC Key ID '" + Twine(KeyID) +
2650 "' out of range [0, " +
2651 Twine((unsigned)AArch64PACKey::LAST) + "]");
2652 KeyID = 0;
2653 }
2654
2655 uint64_t Disc = CPA.getDiscriminator()->getZExtValue();
2656
2657 // Check if we can represent this with an IRELATIVE and emit it if so.
2658 if (auto *IFuncSym = emitPAuthRelocationAsIRelative(
2659 Sym, Disc, AArch64PACKey::ID(KeyID), CPA.hasAddressDiscriminator(),
2660 BaseGVB && BaseGVB->isDSOLocal(), DSExpr))
2661 return IFuncSym;
2662
2663 if (!isUInt<16>(Disc)) {
2664 CPA.getContext().emitError("AArch64 PAC Discriminator '" + Twine(Disc) +
2665 "' out of range [0, 0xFFFF]");
2666 Disc = 0;
2667 }
2668
2669 if (DSExpr)
2670 report_fatal_error("deactivation symbols unsupported in constant "
2671 "expressions on this target");
2672
2673 // Finally build the complete @AUTH expr.
2674 return AArch64AuthMCExpr::create(Sym, Disc, AArch64PACKey::ID(KeyID),
2675 CPA.hasAddressDiscriminator(), Ctx);
2676}
2677
2678void AArch64AsmPrinter::LowerLOADauthptrstatic(const MachineInstr &MI) {
2679 unsigned DstReg = MI.getOperand(0).getReg();
2680 const MachineOperand &GAOp = MI.getOperand(1);
2681 const uint64_t KeyC = MI.getOperand(2).getImm();
2682 assert(KeyC <= AArch64PACKey::LAST &&
2683 "key is out of range [0, AArch64PACKey::LAST]");
2684 const auto Key = (AArch64PACKey::ID)KeyC;
2685 const uint64_t Disc = MI.getOperand(3).getImm();
2686 assert(isUInt<16>(Disc) &&
2687 "constant discriminator is out of range [0, 0xffff]");
2688
2689 // Emit instruction sequence like the following:
2690 // ADRP x16, symbol$auth_ptr$key$disc
2691 // LDR x16, [x16, :lo12:symbol$auth_ptr$key$disc]
2692 //
2693 // Where the $auth_ptr$ symbol is the stub slot containing the signed pointer
2694 // to symbol.
2695 MCSymbol *AuthPtrStubSym;
2696 if (TM.getTargetTriple().isOSBinFormatELF()) {
2697 const auto &TLOF =
2698 static_cast<const AArch64_ELFTargetObjectFile &>(getObjFileLowering());
2699
2700 assert(GAOp.getOffset() == 0 &&
2701 "non-zero offset for $auth_ptr$ stub slots is not supported");
2702 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2703 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2704 } else {
2705 assert(TM.getTargetTriple().isOSBinFormatMachO() &&
2706 "LOADauthptrstatic is implemented only for MachO/ELF");
2707
2708 const auto &TLOF = static_cast<const AArch64_MachoTargetObjectFile &>(
2709 getObjFileLowering());
2710
2711 assert(GAOp.getOffset() == 0 &&
2712 "non-zero offset for $auth_ptr$ stub slots is not supported");
2713 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2714 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2715 }
2716
2717 MachineOperand StubMOHi =
2719 MachineOperand StubMOLo = MachineOperand::CreateMCSymbol(
2720 AuthPtrStubSym, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2721 MCOperand StubMCHi, StubMCLo;
2722
2723 MCInstLowering.lowerOperand(StubMOHi, StubMCHi);
2724 MCInstLowering.lowerOperand(StubMOLo, StubMCLo);
2725
2726 EmitToStreamer(
2727 *OutStreamer,
2728 MCInstBuilder(AArch64::ADRP).addReg(DstReg).addOperand(StubMCHi));
2729
2730 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRXui)
2731 .addReg(DstReg)
2732 .addReg(DstReg)
2733 .addOperand(StubMCLo));
2734}
2735
// Lower the MOVaddrPAC / LOADgotPAC pseudos: materialize a global's address
// into x16 (directly, or through the GOT), add the operand's constant offset,
// then sign x16 with the requested PAC key and discriminator. x17 is used as
// scratch throughout the sequence.
void AArch64AsmPrinter::LowerMOVaddrPAC(const MachineInstr &MI) {
  const bool IsGOTLoad = MI.getOpcode() == AArch64::LOADgotPAC;
  const bool IsELFSignedGOT = MI.getParent()
                                  ->getParent()
                                  ->getInfo<AArch64FunctionInfo>()
                                  ->hasELFSignedGOT();
  // Operands: 0 = global, 1 = PAC key, 2 = address-discriminator register,
  // 3 = immediate discriminator.
  MachineOperand GAOp = MI.getOperand(0);
  const uint64_t KeyC = MI.getOperand(1).getImm();
  assert(KeyC <= AArch64PACKey::LAST &&
         "key is out of range [0, AArch64PACKey::LAST]");
  const auto Key = (AArch64PACKey::ID)KeyC;
  const unsigned AddrDisc = MI.getOperand(2).getReg();
  const uint64_t Disc = MI.getOperand(3).getImm();

  // The adrp/add (or GOT load) relocations must target the bare symbol; the
  // constant offset is applied separately after materialization (see below).
  const int64_t Offset = GAOp.getOffset();
  GAOp.setOffset(0);

  // Emit:
  // target materialization:
  // - via GOT:
  //   - unsigned GOT:
  //     adrp x16, :got:target
  //     ldr x16, [x16, :got_lo12:target]
  //     add offset to x16 if offset != 0
  //   - ELF signed GOT:
  //     adrp x17, :got:target
  //     add x17, x17, :got_auth_lo12:target
  //     ldr x16, [x17]
  //     aut{i|d}a x16, x17
  //     check+trap sequence (if no FPAC)
  //     add offset to x16 if offset != 0
  //
  // - direct:
  //     adrp x16, target
  //     add x16, x16, :lo12:target
  //     add offset to x16 if offset != 0
  //
  // add offset to x16:
  // - abs(offset) fits 24 bits:
  //   add/sub x16, x16, #<offset>[, #lsl 12] (up to 2 instructions)
  // - abs(offset) does not fit 24 bits:
  //   - offset < 0:
  //     movn+movk sequence filling x17 register with the offset (up to 4
  //     instructions)
  //     add x16, x16, x17
  //   - offset > 0:
  //     movz+movk sequence filling x17 register with the offset (up to 4
  //     instructions)
  //     add x16, x16, x17
  //
  // signing:
  // - 0 discriminator:
  //     paciza x16
  // - Non-0 discriminator, no address discriminator:
  //     mov x17, #Disc
  //     pacia x16, x17
  // - address discriminator (with potentially folded immediate discriminator):
  //     pacia x16, xAddrDisc

  MachineOperand GAMOHi(GAOp), GAMOLo(GAOp);
  MCOperand GAMCHi, GAMCLo;

  GAMOHi.setTargetFlags(AArch64II::MO_PAGE);
  GAMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
  if (IsGOTLoad) {
    GAMOHi.addTargetFlag(AArch64II::MO_GOT);
    GAMOLo.addTargetFlag(AArch64II::MO_GOT);
  }

  MCInstLowering.lowerOperand(GAMOHi, GAMCHi);
  MCInstLowering.lowerOperand(GAMOLo, GAMCLo);

  // For ELF signed GOT, the slot address goes to x17 so it can serve as the
  // address discriminator of the authenticating load below.
  EmitToStreamer(
      MCInstBuilder(AArch64::ADRP)
          .addReg(IsGOTLoad && IsELFSignedGOT ? AArch64::X17 : AArch64::X16)
          .addOperand(GAMCHi));

  if (IsGOTLoad) {
    if (IsELFSignedGOT) {
      EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                         .addReg(AArch64::X17)
                         .addReg(AArch64::X17)
                         .addOperand(GAMCLo)
                         .addImm(0));

      EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                         .addReg(AArch64::X16)
                         .addReg(AArch64::X17)
                         .addImm(0));

      assert(GAOp.isGlobal());
      assert(GAOp.getGlobal()->getValueType() != nullptr);

      // Signed GOT entries use the IA key for function symbols, DA for data.
      bool IsFunctionTy = GAOp.getGlobal()->getValueType()->isFunctionTy();
      auto AuthKey = IsFunctionTy ? AArch64PACKey::IA : AArch64PACKey::DA;
      emitAUT(AuthKey, AArch64::X16, AArch64::X17);

      // Without FPAC, authentication does not trap on failure, so emit an
      // explicit check-and-trap sequence.
      if (!STI->hasFPAC())
        emitPtrauthCheckAuthenticatedValue(AArch64::X16, AArch64::X17, AuthKey,
                                           AArch64PAuth::AuthCheckMethod::XPAC);
    } else {
      EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                         .addReg(AArch64::X16)
                         .addReg(AArch64::X16)
                         .addOperand(GAMCLo));
    }
  } else {
    EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                       .addReg(AArch64::X16)
                       .addReg(AArch64::X16)
                       .addOperand(GAMCLo)
                       .addImm(0));
  }

  // Apply the stripped offset, then sign the final address.
  emitAddImm(AArch64::X16, Offset, AArch64::X17);
  Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17);

  emitPAC(Key, AArch64::X16, DiscReg);
}
2855
// Lower the LOADgotAUTH pseudo: compute the address of the symbol's auth GOT
// slot in x17, load the signed pointer from it, authenticate it (x17 doubles
// as the address discriminator), and leave the result in the destination
// register. External-weak symbols branch around authentication so a null slot
// is not "authenticated" into a trap.
void AArch64AsmPrinter::LowerLOADgotAUTH(const MachineInstr &MI) {
  Register DstReg = MI.getOperand(0).getReg();
  // Without FPAC the authenticated value must first land in x16 so the
  // explicit check sequence can inspect it before it reaches DstReg.
  Register AuthResultReg = STI->hasFPAC() ? DstReg : AArch64::X16;
  const MachineOperand &GAMO = MI.getOperand(1);
  assert(GAMO.getOffset() == 0);

  if (MI.getMF()->getTarget().getCodeModel() == CodeModel::Tiny) {
    // Tiny code model: the GOT slot is reachable with a single adr.
    MCOperand GAMC;
    MCInstLowering.lowerOperand(GAMO, GAMC);
    EmitToStreamer(
        MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addOperand(GAMC));
    EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                       .addReg(AuthResultReg)
                       .addReg(AArch64::X17)
                       .addImm(0));
  } else {
    // Otherwise use the usual adrp + add page/pageoff pair.
    MachineOperand GAHiOp(GAMO);
    MachineOperand GALoOp(GAMO);
    GAHiOp.addTargetFlag(AArch64II::MO_PAGE);
    GALoOp.addTargetFlag(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);

    MCOperand GAMCHi, GAMCLo;
    MCInstLowering.lowerOperand(GAHiOp, GAMCHi);
    MCInstLowering.lowerOperand(GALoOp, GAMCLo);

    EmitToStreamer(
        MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(GAMCHi));

    EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                       .addReg(AArch64::X17)
                       .addReg(AArch64::X17)
                       .addOperand(GAMCLo)
                       .addImm(0));

    EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                       .addReg(AuthResultReg)
                       .addReg(AArch64::X17)
                       .addImm(0));
  }

  assert(GAMO.isGlobal());
  // For external-weak symbols the GOT slot may legitimately hold null; skip
  // authentication in that case by branching past it on zero.
  MCSymbol *UndefWeakSym;
  if (GAMO.getGlobal()->hasExternalWeakLinkage()) {
    UndefWeakSym = createTempSymbol("undef_weak");
    EmitToStreamer(
        MCInstBuilder(AArch64::CBZX)
            .addReg(AuthResultReg)
            .addExpr(MCSymbolRefExpr::create(UndefWeakSym, OutContext)));
  }

  assert(GAMO.getGlobal()->getValueType() != nullptr);

  // Auth GOT entries are signed with IA for functions and DA for data.
  bool IsFunctionTy = GAMO.getGlobal()->getValueType()->isFunctionTy();
  auto AuthKey = IsFunctionTy ? AArch64PACKey::IA : AArch64PACKey::DA;
  emitAUT(AuthKey, AuthResultReg, AArch64::X17);

  if (GAMO.getGlobal()->hasExternalWeakLinkage())
    OutStreamer->emitLabel(UndefWeakSym);

  if (!STI->hasFPAC()) {
    // No FPAC: authentication does not trap by itself, so verify the result
    // explicitly, then move it into the real destination register.
    emitPtrauthCheckAuthenticatedValue(AuthResultReg, AArch64::X17, AuthKey,
                                       AArch64PAuth::AuthCheckMethod::XPAC);

    emitMovXReg(DstReg, AuthResultReg);
  }
}
2922
2923const MCExpr *
2924AArch64AsmPrinter::lowerBlockAddressConstant(const BlockAddress &BA) {
2925 const MCExpr *BAE = AsmPrinter::lowerBlockAddressConstant(BA);
2926 const Function &Fn = *BA.getFunction();
2927
2928 if (std::optional<uint16_t> BADisc =
2929 STI->getPtrAuthBlockAddressDiscriminatorIfEnabled(Fn))
2930 return AArch64AuthMCExpr::create(BAE, *BADisc, AArch64PACKey::IA,
2931 /*HasAddressDiversity=*/false, OutContext);
2932
2933 return BAE;
2934}
2935
2936void AArch64AsmPrinter::emitCBPseudoExpansion(const MachineInstr *MI) {
2937 bool IsImm = false;
2938 unsigned Width = 0;
2939
2940 switch (MI->getOpcode()) {
2941 default:
2942 llvm_unreachable("This is not a CB pseudo instruction");
2943 case AArch64::CBBAssertExt:
2944 IsImm = false;
2945 Width = 8;
2946 break;
2947 case AArch64::CBHAssertExt:
2948 IsImm = false;
2949 Width = 16;
2950 break;
2951 case AArch64::CBWPrr:
2952 Width = 32;
2953 break;
2954 case AArch64::CBXPrr:
2955 Width = 64;
2956 break;
2957 case AArch64::CBWPri:
2958 IsImm = true;
2959 Width = 32;
2960 break;
2961 case AArch64::CBXPri:
2962 IsImm = true;
2963 Width = 64;
2964 break;
2965 }
2966
2968 static_cast<AArch64CC::CondCode>(MI->getOperand(0).getImm());
2969 bool NeedsRegSwap = false;
2970 bool NeedsImmDec = false;
2971 bool NeedsImmInc = false;
2972
2973#define GET_CB_OPC(IsImm, Width, ImmCond, RegCond) \
2974 (IsImm \
2975 ? (Width == 32 ? AArch64::CB##ImmCond##Wri : AArch64::CB##ImmCond##Xri) \
2976 : (Width == 8 \
2977 ? AArch64::CBB##RegCond##Wrr \
2978 : (Width == 16 ? AArch64::CBH##RegCond##Wrr \
2979 : (Width == 32 ? AArch64::CB##RegCond##Wrr \
2980 : AArch64::CB##RegCond##Xrr))))
2981 unsigned MCOpC;
2982
2983 // Decide if we need to either swap register operands or increment/decrement
2984 // immediate operands
2985 switch (CC) {
2986 default:
2987 llvm_unreachable("Invalid CB condition code");
2988 case AArch64CC::EQ:
2989 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ EQ, /* Reg-Reg */ EQ);
2990 break;
2991 case AArch64CC::NE:
2992 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ NE, /* Reg-Reg */ NE);
2993 break;
2994 case AArch64CC::HS:
2995 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ HI, /* Reg-Reg */ HS);
2996 NeedsImmDec = IsImm;
2997 break;
2998 case AArch64CC::LO:
2999 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LO, /* Reg-Reg */ HI);
3000 NeedsRegSwap = !IsImm;
3001 break;
3002 case AArch64CC::HI:
3003 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ HI, /* Reg-Reg */ HI);
3004 break;
3005 case AArch64CC::LS:
3006 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LO, /* Reg-Reg */ HS);
3007 NeedsRegSwap = !IsImm;
3008 NeedsImmInc = IsImm;
3009 break;
3010 case AArch64CC::GE:
3011 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ GT, /* Reg-Reg */ GE);
3012 NeedsImmDec = IsImm;
3013 break;
3014 case AArch64CC::LT:
3015 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LT, /* Reg-Reg */ GT);
3016 NeedsRegSwap = !IsImm;
3017 break;
3018 case AArch64CC::GT:
3019 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ GT, /* Reg-Reg */ GT);
3020 break;
3021 case AArch64CC::LE:
3022 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LT, /* Reg-Reg */ GE);
3023 NeedsRegSwap = !IsImm;
3024 NeedsImmInc = IsImm;
3025 break;
3026 }
3027#undef GET_CB_OPC
3028
3029 MCInst Inst;
3030 Inst.setOpcode(MCOpC);
3031
3032 MCOperand Lhs, Rhs, Trgt;
3033 lowerOperand(MI->getOperand(1), Lhs);
3034 lowerOperand(MI->getOperand(2), Rhs);
3035 lowerOperand(MI->getOperand(3), Trgt);
3036
3037 // Now swap, increment or decrement
3038 if (NeedsRegSwap) {
3039 assert(Lhs.isReg() && "Expected register operand for CB");
3040 assert(Rhs.isReg() && "Expected register operand for CB");
3041 Inst.addOperand(Rhs);
3042 Inst.addOperand(Lhs);
3043 } else if (NeedsImmDec) {
3044 Rhs.setImm(Rhs.getImm() - 1);
3045 Inst.addOperand(Lhs);
3046 Inst.addOperand(Rhs);
3047 } else if (NeedsImmInc) {
3048 Rhs.setImm(Rhs.getImm() + 1);
3049 Inst.addOperand(Lhs);
3050 Inst.addOperand(Rhs);
3051 } else {
3052 Inst.addOperand(Lhs);
3053 Inst.addOperand(Rhs);
3054 }
3055
3056 assert((!IsImm || (Rhs.getImm() >= 0 && Rhs.getImm() < 64)) &&
3057 "CB immediate operand out-of-bounds");
3058
3059 Inst.addOperand(Trgt);
3060 EmitToStreamer(*OutStreamer, Inst);
3061}
3062
3063// Simple pseudo-instructions have their lowering (with expansion to real
3064// instructions) auto-generated.
3065#include "AArch64GenMCPseudoLowering.inc"
3066
// Emit a single MCInst to the streamer and, in debug builds, bump the
// per-pseudo instruction counter that emitInstruction uses to verify an
// expansion does not exceed the pseudo's declared size.
void AArch64AsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
  S.emitInstruction(Inst, *STI);
#ifndef NDEBUG
  ++InstsEmitted;
#endif
}
3073
3074void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
3075 AArch64_MC::verifyInstructionPredicates(MI->getOpcode(), STI->getFeatureBits());
3076
3077#ifndef NDEBUG
3078 InstsEmitted = 0;
3079 llvm::scope_exit CheckMISize([&]() {
3080 assert(STI->getInstrInfo()->getInstSizeInBytes(*MI) >= InstsEmitted * 4);
3081 });
3082#endif
3083
3084 // Do any auto-generated pseudo lowerings.
3085 if (MCInst OutInst; lowerPseudoInstExpansion(MI, OutInst)) {
3086 EmitToStreamer(*OutStreamer, OutInst);
3087 return;
3088 }
3089
3090 if (MI->getOpcode() == AArch64::ADRP) {
3091 for (auto &Opd : MI->operands()) {
3092 if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
3093 "swift_async_extendedFramePointerFlags") {
3094 ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
3095 }
3096 }
3097 }
3098
3099 if (AArch64FI->getLOHRelated().count(MI)) {
3100 // Generate a label for LOH related instruction
3101 MCSymbol *LOHLabel = createTempSymbol("loh");
3102 // Associate the instruction with the label
3103 LOHInstToLabel[MI] = LOHLabel;
3104 OutStreamer->emitLabel(LOHLabel);
3105 }
3106
3107 AArch64TargetStreamer *TS =
3108 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
3109 // Do any manual lowerings.
3110 switch (MI->getOpcode()) {
3111 default:
3113 "Unhandled tail call instruction");
3114 break;
3115 case AArch64::HINT: {
3116 // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
3117 // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
3118 // non-empty. If MI is the initial BTI, place the
3119 // __patchable_function_entries label after BTI.
3120 if (CurrentPatchableFunctionEntrySym &&
3121 CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
3122 MI == &MF->front().front()) {
3123 int64_t Imm = MI->getOperand(0).getImm();
3124 if ((Imm & 32) && (Imm & 6)) {
3125 MCInst Inst;
3126 MCInstLowering.Lower(MI, Inst);
3127 EmitToStreamer(*OutStreamer, Inst);
3128 CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
3129 OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
3130 return;
3131 }
3132 }
3133 break;
3134 }
3135 case AArch64::MOVMCSym: {
3136 Register DestReg = MI->getOperand(0).getReg();
3137 const MachineOperand &MO_Sym = MI->getOperand(1);
3138 MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
3139 MCOperand Hi_MCSym, Lo_MCSym;
3140
3141 Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
3142 Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);
3143
3144 MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
3145 MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);
3146
3147 MCInst MovZ;
3148 MovZ.setOpcode(AArch64::MOVZXi);
3149 MovZ.addOperand(MCOperand::createReg(DestReg));
3150 MovZ.addOperand(Hi_MCSym);
3152 EmitToStreamer(*OutStreamer, MovZ);
3153
3154 MCInst MovK;
3155 MovK.setOpcode(AArch64::MOVKXi);
3156 MovK.addOperand(MCOperand::createReg(DestReg));
3157 MovK.addOperand(MCOperand::createReg(DestReg));
3158 MovK.addOperand(Lo_MCSym);
3160 EmitToStreamer(*OutStreamer, MovK);
3161 return;
3162 }
3163 case AArch64::MOVIv2d_ns:
3164 // It is generally beneficial to rewrite "fmov s0, wzr" to "movi d0, #0".
3165 // as movi is more efficient across all cores. Newer cores can eliminate
3166 // fmovs early and there is no difference with movi, but this not true for
3167 // all implementations.
3168 //
3169 // The floating-point version doesn't quite work in rare cases on older
3170 // CPUs, so on those targets we lower this instruction to movi.16b instead.
3171 if (STI->hasZeroCycleZeroingFPWorkaround() &&
3172 MI->getOperand(1).getImm() == 0) {
3173 MCInst TmpInst;
3174 TmpInst.setOpcode(AArch64::MOVIv16b_ns);
3175 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3176 TmpInst.addOperand(MCOperand::createImm(0));
3177 EmitToStreamer(*OutStreamer, TmpInst);
3178 return;
3179 }
3180 break;
3181
3182 case AArch64::DBG_VALUE:
3183 case AArch64::DBG_VALUE_LIST:
3184 if (isVerbose() && OutStreamer->hasRawTextSupport()) {
3185 SmallString<128> TmpStr;
3186 raw_svector_ostream OS(TmpStr);
3187 PrintDebugValueComment(MI, OS);
3188 OutStreamer->emitRawText(StringRef(OS.str()));
3189 }
3190 return;
3191
3192 case AArch64::EMITBKEY: {
3193 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
3194 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
3195 ExceptionHandlingType != ExceptionHandling::ARM)
3196 return;
3197
3198 if (getFunctionCFISectionType(*MF) == CFISection::None)
3199 return;
3200
3201 OutStreamer->emitCFIBKeyFrame();
3202 return;
3203 }
3204
3205 case AArch64::EMITMTETAGGED: {
3206 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
3207 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
3208 ExceptionHandlingType != ExceptionHandling::ARM)
3209 return;
3210
3211 if (getFunctionCFISectionType(*MF) != CFISection::None)
3212 OutStreamer->emitCFIMTETaggedFrame();
3213 return;
3214 }
3215
3216 case AArch64::AUTx16x17: {
3217 const Register Pointer = AArch64::X16;
3218 const Register Scratch = AArch64::X17;
3219
3220 PtrAuthSchema AuthSchema((AArch64PACKey::ID)MI->getOperand(0).getImm(),
3221 MI->getOperand(1).getImm(), MI->getOperand(2));
3222
3223 emitPtrauthAuthResign(Pointer, Scratch, AuthSchema, std::nullopt,
3224 MI->getDeactivationSymbol());
3225 return;
3226 }
3227
3228 case AArch64::AUTxMxN: {
3229 const Register Pointer = MI->getOperand(0).getReg();
3230 const Register Scratch = MI->getOperand(1).getReg();
3231
3232 PtrAuthSchema AuthSchema((AArch64PACKey::ID)MI->getOperand(3).getImm(),
3233 MI->getOperand(4).getImm(), MI->getOperand(5));
3234
3235 emitPtrauthAuthResign(Pointer, Scratch, AuthSchema, std::nullopt,
3236 MI->getDeactivationSymbol());
3237 return;
3238 }
3239
3240 case AArch64::AUTPAC: {
3241 const Register Pointer = AArch64::X16;
3242 const Register Scratch = AArch64::X17;
3243
3244 PtrAuthSchema AuthSchema((AArch64PACKey::ID)MI->getOperand(0).getImm(),
3245 MI->getOperand(1).getImm(), MI->getOperand(2));
3246
3247 PtrAuthSchema SignSchema((AArch64PACKey::ID)MI->getOperand(3).getImm(),
3248 MI->getOperand(4).getImm(), MI->getOperand(5));
3249
3250 emitPtrauthAuthResign(Pointer, Scratch, AuthSchema, SignSchema,
3251 MI->getDeactivationSymbol());
3252 return;
3253 }
3254
3255 case AArch64::PAC:
3256 emitPtrauthSign(MI);
3257 return;
3258
3259 case AArch64::LOADauthptrstatic:
3260 LowerLOADauthptrstatic(*MI);
3261 return;
3262
3263 case AArch64::LOADgotPAC:
3264 case AArch64::MOVaddrPAC:
3265 LowerMOVaddrPAC(*MI);
3266 return;
3267
3268 case AArch64::LOADgotAUTH:
3269 LowerLOADgotAUTH(*MI);
3270 return;
3271
3272 case AArch64::BRA:
3273 case AArch64::BLRA:
3274 emitPtrauthBranch(MI);
3275 return;
3276
3277 // Tail calls use pseudo instructions so they have the proper code-gen
3278 // attributes (isCall, isReturn, etc.). We lower them to the real
3279 // instruction here.
3280 case AArch64::AUTH_TCRETURN:
3281 case AArch64::AUTH_TCRETURN_BTI: {
3282 Register Callee = MI->getOperand(0).getReg();
3283 const auto Key = (AArch64PACKey::ID)MI->getOperand(2).getImm();
3284 const uint64_t Disc = MI->getOperand(3).getImm();
3285
3286 Register AddrDisc = MI->getOperand(4).getReg();
3287
3288 Register ScratchReg = Callee == AArch64::X16 ? AArch64::X17 : AArch64::X16;
3289
3290 emitPtrauthTailCallHardening(MI);
3291
3292 // See the comments in emitPtrauthBranch.
3293 if (Callee == AddrDisc)
3294 report_fatal_error("Call target is signed with its own value");
3295
3296 // After isX16X17Safer predicate was introduced, emitPtrauthDiscriminator is
3297 // no longer restricted to only reusing AddrDisc when it is X16 or X17
3298 // (which are implicit-def'ed by AUTH_TCRETURN pseudos), thus impose this
3299 // restriction manually not to clobber an unexpected register.
3300 bool AddrDiscIsImplicitDef =
3301 AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17;
3302 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, ScratchReg,
3303 AddrDiscIsImplicitDef);
3304 emitBLRA(/*IsCall*/ false, Key, Callee, DiscReg);
3305 return;
3306 }
3307
3308 case AArch64::TCRETURNri:
3309 case AArch64::TCRETURNrix16x17:
3310 case AArch64::TCRETURNrix17:
3311 case AArch64::TCRETURNrinotx16:
3312 case AArch64::TCRETURNriALL: {
3313 emitPtrauthTailCallHardening(MI);
3314
3315 recordIfImportCall(MI);
3316 MCInst TmpInst;
3317 TmpInst.setOpcode(AArch64::BR);
3318 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3319 EmitToStreamer(*OutStreamer, TmpInst);
3320 return;
3321 }
3322 case AArch64::TCRETURNdi: {
3323 emitPtrauthTailCallHardening(MI);
3324
3325 MCOperand Dest;
3326 MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
3327 recordIfImportCall(MI);
3328 MCInst TmpInst;
3329 TmpInst.setOpcode(AArch64::B);
3330 TmpInst.addOperand(Dest);
3331 EmitToStreamer(*OutStreamer, TmpInst);
3332 return;
3333 }
3334 case AArch64::SpeculationBarrierISBDSBEndBB: {
3335 // Print DSB SYS + ISB
3336 MCInst TmpInstDSB;
3337 TmpInstDSB.setOpcode(AArch64::DSB);
3338 TmpInstDSB.addOperand(MCOperand::createImm(0xf));
3339 EmitToStreamer(*OutStreamer, TmpInstDSB);
3340 MCInst TmpInstISB;
3341 TmpInstISB.setOpcode(AArch64::ISB);
3342 TmpInstISB.addOperand(MCOperand::createImm(0xf));
3343 EmitToStreamer(*OutStreamer, TmpInstISB);
3344 return;
3345 }
3346 case AArch64::SpeculationBarrierSBEndBB: {
3347 // Print SB
3348 MCInst TmpInstSB;
3349 TmpInstSB.setOpcode(AArch64::SB);
3350 EmitToStreamer(*OutStreamer, TmpInstSB);
3351 return;
3352 }
3353 case AArch64::TLSDESC_AUTH_CALLSEQ: {
3354 /// lower this to:
3355 /// adrp x0, :tlsdesc_auth:var
3356 /// ldr x16, [x0, #:tlsdesc_auth_lo12:var]
3357 /// add x0, x0, #:tlsdesc_auth_lo12:var
3358 /// blraa x16, x0
3359 /// (TPIDR_EL0 offset now in x0)
3360 const MachineOperand &MO_Sym = MI->getOperand(0);
3361 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3362 MCOperand SymTLSDescLo12, SymTLSDesc;
3363 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3364 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3365 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3366 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3367
3368 MCInst Adrp;
3369 Adrp.setOpcode(AArch64::ADRP);
3370 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3371 Adrp.addOperand(SymTLSDesc);
3372 EmitToStreamer(*OutStreamer, Adrp);
3373
3374 MCInst Ldr;
3375 Ldr.setOpcode(AArch64::LDRXui);
3376 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3377 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3378 Ldr.addOperand(SymTLSDescLo12);
3380 EmitToStreamer(*OutStreamer, Ldr);
3381
3382 MCInst Add;
3383 Add.setOpcode(AArch64::ADDXri);
3384 Add.addOperand(MCOperand::createReg(AArch64::X0));
3385 Add.addOperand(MCOperand::createReg(AArch64::X0));
3386 Add.addOperand(SymTLSDescLo12);
3388 EmitToStreamer(*OutStreamer, Add);
3389
3390 // Authenticated TLSDESC accesses are not relaxed.
3391 // Thus, do not emit .tlsdesccall for AUTH TLSDESC.
3392
3393 MCInst Blraa;
3394 Blraa.setOpcode(AArch64::BLRAA);
3395 Blraa.addOperand(MCOperand::createReg(AArch64::X16));
3396 Blraa.addOperand(MCOperand::createReg(AArch64::X0));
3397 EmitToStreamer(*OutStreamer, Blraa);
3398
3399 return;
3400 }
3401 case AArch64::TLSDESC_CALLSEQ: {
3402 /// lower this to:
3403 /// adrp x0, :tlsdesc:var
3404 /// ldr x1, [x0, #:tlsdesc_lo12:var]
3405 /// add x0, x0, #:tlsdesc_lo12:var
3406 /// .tlsdesccall var
3407 /// blr x1
3408 /// (TPIDR_EL0 offset now in x0)
3409 const MachineOperand &MO_Sym = MI->getOperand(0);
3410 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3411 MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
3412 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3413 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3414 MCInstLowering.lowerOperand(MO_Sym, Sym);
3415 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3416 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3417
3418 MCInst Adrp;
3419 Adrp.setOpcode(AArch64::ADRP);
3420 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3421 Adrp.addOperand(SymTLSDesc);
3422 EmitToStreamer(*OutStreamer, Adrp);
3423
3424 MCInst Ldr;
3425 if (STI->isTargetILP32()) {
3426 Ldr.setOpcode(AArch64::LDRWui);
3427 Ldr.addOperand(MCOperand::createReg(AArch64::W1));
3428 } else {
3429 Ldr.setOpcode(AArch64::LDRXui);
3430 Ldr.addOperand(MCOperand::createReg(AArch64::X1));
3431 }
3432 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3433 Ldr.addOperand(SymTLSDescLo12);
3435 EmitToStreamer(*OutStreamer, Ldr);
3436
3437 MCInst Add;
3438 if (STI->isTargetILP32()) {
3439 Add.setOpcode(AArch64::ADDWri);
3440 Add.addOperand(MCOperand::createReg(AArch64::W0));
3441 Add.addOperand(MCOperand::createReg(AArch64::W0));
3442 } else {
3443 Add.setOpcode(AArch64::ADDXri);
3444 Add.addOperand(MCOperand::createReg(AArch64::X0));
3445 Add.addOperand(MCOperand::createReg(AArch64::X0));
3446 }
3447 Add.addOperand(SymTLSDescLo12);
3449 EmitToStreamer(*OutStreamer, Add);
3450
3451 // Emit a relocation-annotation. This expands to no code, but requests
3452 // the following instruction gets an R_AARCH64_TLSDESC_CALL.
3453 MCInst TLSDescCall;
3454 TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
3455 TLSDescCall.addOperand(Sym);
3456 EmitToStreamer(*OutStreamer, TLSDescCall);
3457#ifndef NDEBUG
3458 --InstsEmitted; // no code emitted
3459#endif
3460
3461 MCInst Blr;
3462 Blr.setOpcode(AArch64::BLR);
3463 Blr.addOperand(MCOperand::createReg(AArch64::X1));
3464 EmitToStreamer(*OutStreamer, Blr);
3465
3466 return;
3467 }
3468
3469 case AArch64::JumpTableDest32:
3470 case AArch64::JumpTableDest16:
3471 case AArch64::JumpTableDest8:
3472 LowerJumpTableDest(*OutStreamer, *MI);
3473 return;
3474
3475 case AArch64::BR_JumpTable:
3476 LowerHardenedBRJumpTable(*MI);
3477 return;
3478
3479 case AArch64::FMOVH0:
3480 case AArch64::FMOVS0:
3481 case AArch64::FMOVD0:
3482 emitFMov0(*MI);
3483 return;
3484
3485 case AArch64::MOPSMemoryCopyPseudo:
3486 case AArch64::MOPSMemoryMovePseudo:
3487 case AArch64::MOPSMemorySetPseudo:
3488 case AArch64::MOPSMemorySetTaggingPseudo:
3489 LowerMOPS(*OutStreamer, *MI);
3490 return;
3491
3492 case TargetOpcode::STACKMAP:
3493 return LowerSTACKMAP(*OutStreamer, SM, *MI);
3494
3495 case TargetOpcode::PATCHPOINT:
3496 return LowerPATCHPOINT(*OutStreamer, SM, *MI);
3497
3498 case TargetOpcode::STATEPOINT:
3499 return LowerSTATEPOINT(*OutStreamer, SM, *MI);
3500
3501 case TargetOpcode::FAULTING_OP:
3502 return LowerFAULTING_OP(*MI);
3503
3504 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
3505 LowerPATCHABLE_FUNCTION_ENTER(*MI);
3506 return;
3507
3508 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
3509 LowerPATCHABLE_FUNCTION_EXIT(*MI);
3510 return;
3511
3512 case TargetOpcode::PATCHABLE_TAIL_CALL:
3513 LowerPATCHABLE_TAIL_CALL(*MI);
3514 return;
3515 case TargetOpcode::PATCHABLE_EVENT_CALL:
3516 return LowerPATCHABLE_EVENT_CALL(*MI, false);
3517 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
3518 return LowerPATCHABLE_EVENT_CALL(*MI, true);
3519
3520 case AArch64::KCFI_CHECK:
3521 LowerKCFI_CHECK(*MI);
3522 return;
3523
3524 case AArch64::HWASAN_CHECK_MEMACCESS:
3525 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
3526 case AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW:
3527 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW:
3528 LowerHWASAN_CHECK_MEMACCESS(*MI);
3529 return;
3530
3531 case AArch64::SEH_StackAlloc:
3532 TS->emitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
3533 return;
3534
3535 case AArch64::SEH_SaveFPLR:
3536 TS->emitARM64WinCFISaveFPLR(MI->getOperand(0).getImm());
3537 return;
3538
3539 case AArch64::SEH_SaveFPLR_X:
3540 assert(MI->getOperand(0).getImm() < 0 &&
3541 "Pre increment SEH opcode must have a negative offset");
3542 TS->emitARM64WinCFISaveFPLRX(-MI->getOperand(0).getImm());
3543 return;
3544
3545 case AArch64::SEH_SaveReg:
3546 TS->emitARM64WinCFISaveReg(MI->getOperand(0).getImm(),
3547 MI->getOperand(1).getImm());
3548 return;
3549
3550 case AArch64::SEH_SaveReg_X:
3551 assert(MI->getOperand(1).getImm() < 0 &&
3552 "Pre increment SEH opcode must have a negative offset");
3553 TS->emitARM64WinCFISaveRegX(MI->getOperand(0).getImm(),
3554 -MI->getOperand(1).getImm());
3555 return;
3556
3557 case AArch64::SEH_SaveRegP:
3558 if (MI->getOperand(1).getImm() == 30 && MI->getOperand(0).getImm() >= 19 &&
3559 MI->getOperand(0).getImm() <= 28) {
3560 assert((MI->getOperand(0).getImm() - 19) % 2 == 0 &&
3561 "Register paired with LR must be odd");
3562 TS->emitARM64WinCFISaveLRPair(MI->getOperand(0).getImm(),
3563 MI->getOperand(2).getImm());
3564 return;
3565 }
3566 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3567 "Non-consecutive registers not allowed for save_regp");
3568 TS->emitARM64WinCFISaveRegP(MI->getOperand(0).getImm(),
3569 MI->getOperand(2).getImm());
3570 return;
3571
3572 case AArch64::SEH_SaveRegP_X:
3573 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3574 "Non-consecutive registers not allowed for save_regp_x");
3575 assert(MI->getOperand(2).getImm() < 0 &&
3576 "Pre increment SEH opcode must have a negative offset");
3577 TS->emitARM64WinCFISaveRegPX(MI->getOperand(0).getImm(),
3578 -MI->getOperand(2).getImm());
3579 return;
3580
3581 case AArch64::SEH_SaveFReg:
3582 TS->emitARM64WinCFISaveFReg(MI->getOperand(0).getImm(),
3583 MI->getOperand(1).getImm());
3584 return;
3585
3586 case AArch64::SEH_SaveFReg_X:
3587 assert(MI->getOperand(1).getImm() < 0 &&
3588 "Pre increment SEH opcode must have a negative offset");
3589 TS->emitARM64WinCFISaveFRegX(MI->getOperand(0).getImm(),
3590 -MI->getOperand(1).getImm());
3591 return;
3592
3593 case AArch64::SEH_SaveFRegP:
3594 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3595 "Non-consecutive registers not allowed for save_regp");
3596 TS->emitARM64WinCFISaveFRegP(MI->getOperand(0).getImm(),
3597 MI->getOperand(2).getImm());
3598 return;
3599
3600 case AArch64::SEH_SaveFRegP_X:
3601 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3602 "Non-consecutive registers not allowed for save_regp_x");
3603 assert(MI->getOperand(2).getImm() < 0 &&
3604 "Pre increment SEH opcode must have a negative offset");
3605 TS->emitARM64WinCFISaveFRegPX(MI->getOperand(0).getImm(),
3606 -MI->getOperand(2).getImm());
3607 return;
3608
3609 case AArch64::SEH_SetFP:
3611 return;
3612
3613 case AArch64::SEH_AddFP:
3614 TS->emitARM64WinCFIAddFP(MI->getOperand(0).getImm());
3615 return;
3616
3617 case AArch64::SEH_Nop:
3618 TS->emitARM64WinCFINop();
3619 return;
3620
3621 case AArch64::SEH_PrologEnd:
3623 return;
3624
3625 case AArch64::SEH_EpilogStart:
3627 return;
3628
3629 case AArch64::SEH_EpilogEnd:
3631 return;
3632
3633 case AArch64::SEH_PACSignLR:
3635 return;
3636
3637 case AArch64::SEH_SaveAnyRegI:
3638 assert(MI->getOperand(1).getImm() <= 1008 &&
3639 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3640 TS->emitARM64WinCFISaveAnyRegI(MI->getOperand(0).getImm(),
3641 MI->getOperand(1).getImm());
3642 return;
3643
3644 case AArch64::SEH_SaveAnyRegIP:
3645 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3646 "Non-consecutive registers not allowed for save_any_reg");
3647 assert(MI->getOperand(2).getImm() <= 1008 &&
3648 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3649 TS->emitARM64WinCFISaveAnyRegIP(MI->getOperand(0).getImm(),
3650 MI->getOperand(2).getImm());
3651 return;
3652
3653 case AArch64::SEH_SaveAnyRegQP:
3654 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3655 "Non-consecutive registers not allowed for save_any_reg");
3656 assert(MI->getOperand(2).getImm() >= 0 &&
3657 "SaveAnyRegQP SEH opcode offset must be non-negative");
3658 assert(MI->getOperand(2).getImm() <= 1008 &&
3659 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3660 TS->emitARM64WinCFISaveAnyRegQP(MI->getOperand(0).getImm(),
3661 MI->getOperand(2).getImm());
3662 return;
3663
3664 case AArch64::SEH_SaveAnyRegQPX:
3665 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3666 "Non-consecutive registers not allowed for save_any_reg");
3667 assert(MI->getOperand(2).getImm() < 0 &&
3668 "SaveAnyRegQPX SEH opcode offset must be negative");
3669 assert(MI->getOperand(2).getImm() >= -1008 &&
3670 "SaveAnyRegQPX SEH opcode offset must fit into 6 bits");
3671 TS->emitARM64WinCFISaveAnyRegQPX(MI->getOperand(0).getImm(),
3672 -MI->getOperand(2).getImm());
3673 return;
3674
3675 case AArch64::SEH_AllocZ:
3676 assert(MI->getOperand(0).getImm() >= 0 &&
3677 "AllocZ SEH opcode offset must be non-negative");
3678 assert(MI->getOperand(0).getImm() <= 255 &&
3679 "AllocZ SEH opcode offset must fit into 8 bits");
3680 TS->emitARM64WinCFIAllocZ(MI->getOperand(0).getImm());
3681 return;
3682
3683 case AArch64::SEH_SaveZReg:
3684 assert(MI->getOperand(1).getImm() >= 0 &&
3685 "SaveZReg SEH opcode offset must be non-negative");
3686 assert(MI->getOperand(1).getImm() <= 255 &&
3687 "SaveZReg SEH opcode offset must fit into 8 bits");
3688 TS->emitARM64WinCFISaveZReg(MI->getOperand(0).getImm(),
3689 MI->getOperand(1).getImm());
3690 return;
3691
3692 case AArch64::SEH_SavePReg:
3693 assert(MI->getOperand(1).getImm() >= 0 &&
3694 "SavePReg SEH opcode offset must be non-negative");
3695 assert(MI->getOperand(1).getImm() <= 255 &&
3696 "SavePReg SEH opcode offset must fit into 8 bits");
3697 TS->emitARM64WinCFISavePReg(MI->getOperand(0).getImm(),
3698 MI->getOperand(1).getImm());
3699 return;
3700
3701 case AArch64::BLR:
3702 case AArch64::BR: {
3703 recordIfImportCall(MI);
3704 MCInst TmpInst;
3705 MCInstLowering.Lower(MI, TmpInst);
3706 EmitToStreamer(*OutStreamer, TmpInst);
3707 return;
3708 }
3709 case AArch64::CBWPri:
3710 case AArch64::CBXPri:
3711 case AArch64::CBBAssertExt:
3712 case AArch64::CBHAssertExt:
3713 case AArch64::CBWPrr:
3714 case AArch64::CBXPrr:
3715 emitCBPseudoExpansion(MI);
3716 return;
3717 }
3718
3719 if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol()))
3720 return;
3721
3722 // Finally, do the automated lowerings for everything else.
3723 MCInst TmpInst;
3724 MCInstLowering.Lower(MI, TmpInst);
3725 EmitToStreamer(*OutStreamer, TmpInst);
3726}
3727
3728void AArch64AsmPrinter::recordIfImportCall(
3729 const llvm::MachineInstr *BranchInst) {
3730 if (!EnableImportCallOptimization)
3731 return;
3732
3733 auto [GV, OpFlags] = BranchInst->getMF()->tryGetCalledGlobal(BranchInst);
3734 if (GV && GV->hasDLLImportStorageClass()) {
3735 auto *CallSiteSymbol = MMI->getContext().createNamedTempSymbol("impcall");
3736 OutStreamer->emitLabel(CallSiteSymbol);
3737
3738 auto *CalledSymbol = MCInstLowering.GetGlobalValueSymbol(GV, OpFlags);
3739 SectionToImportedFunctionCalls[OutStreamer->getCurrentSectionOnly()]
3740 .push_back({CallSiteSymbol, CalledSymbol});
3741 }
3742}
3743
3744void AArch64AsmPrinter::emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
3745 MCSymbol *LazyPointer) {
3746 // _ifunc:
3747 // adrp x16, lazy_pointer@GOTPAGE
3748 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3749 // ldr x16, [x16]
3750 // br x16
3751
3752 {
3753 MCInst Adrp;
3754 Adrp.setOpcode(AArch64::ADRP);
3755 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3756 MCOperand SymPage;
3757 MCInstLowering.lowerOperand(
3760 SymPage);
3761 Adrp.addOperand(SymPage);
3762 EmitToStreamer(Adrp);
3763 }
3764
3765 {
3766 MCInst Ldr;
3767 Ldr.setOpcode(AArch64::LDRXui);
3768 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3769 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3770 MCOperand SymPageOff;
3771 MCInstLowering.lowerOperand(
3774 SymPageOff);
3775 Ldr.addOperand(SymPageOff);
3777 EmitToStreamer(Ldr);
3778 }
3779
3780 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
3781 .addReg(AArch64::X16)
3782 .addReg(AArch64::X16)
3783 .addImm(0));
3784
3785 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3786 : AArch64::BR)
3787 .addReg(AArch64::X16));
3788}
3789
3790void AArch64AsmPrinter::emitMachOIFuncStubHelperBody(Module &M,
3791 const GlobalIFunc &GI,
3792 MCSymbol *LazyPointer) {
3793 // These stub helpers are only ever called once, so here we're optimizing for
3794 // minimum size by using the pre-indexed store variants, which saves a few
3795 // bytes of instructions to bump & restore sp.
3796
3797 // _ifunc.stub_helper:
3798 // stp fp, lr, [sp, #-16]!
3799 // mov fp, sp
3800 // stp x1, x0, [sp, #-16]!
3801 // stp x3, x2, [sp, #-16]!
3802 // stp x5, x4, [sp, #-16]!
3803 // stp x7, x6, [sp, #-16]!
3804 // stp d1, d0, [sp, #-16]!
3805 // stp d3, d2, [sp, #-16]!
3806 // stp d5, d4, [sp, #-16]!
3807 // stp d7, d6, [sp, #-16]!
3808 // bl _resolver
3809 // adrp x16, lazy_pointer@GOTPAGE
3810 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3811 // str x0, [x16]
3812 // mov x16, x0
3813 // ldp d7, d6, [sp], #16
3814 // ldp d5, d4, [sp], #16
3815 // ldp d3, d2, [sp], #16
3816 // ldp d1, d0, [sp], #16
3817 // ldp x7, x6, [sp], #16
3818 // ldp x5, x4, [sp], #16
3819 // ldp x3, x2, [sp], #16
3820 // ldp x1, x0, [sp], #16
3821 // ldp fp, lr, [sp], #16
3822 // br x16
3823
3824 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3825 .addReg(AArch64::SP)
3826 .addReg(AArch64::FP)
3827 .addReg(AArch64::LR)
3828 .addReg(AArch64::SP)
3829 .addImm(-2));
3830
3831 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3832 .addReg(AArch64::FP)
3833 .addReg(AArch64::SP)
3834 .addImm(0)
3835 .addImm(0));
3836
3837 for (int I = 0; I != 4; ++I)
3838 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3839 .addReg(AArch64::SP)
3840 .addReg(AArch64::X1 + 2 * I)
3841 .addReg(AArch64::X0 + 2 * I)
3842 .addReg(AArch64::SP)
3843 .addImm(-2));
3844
3845 for (int I = 0; I != 4; ++I)
3846 EmitToStreamer(MCInstBuilder(AArch64::STPDpre)
3847 .addReg(AArch64::SP)
3848 .addReg(AArch64::D1 + 2 * I)
3849 .addReg(AArch64::D0 + 2 * I)
3850 .addReg(AArch64::SP)
3851 .addImm(-2));
3852
3853 EmitToStreamer(
3854 MCInstBuilder(AArch64::BL)
3856
3857 {
3858 MCInst Adrp;
3859 Adrp.setOpcode(AArch64::ADRP);
3860 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3861 MCOperand SymPage;
3862 MCInstLowering.lowerOperand(
3863 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3865 SymPage);
3866 Adrp.addOperand(SymPage);
3867 EmitToStreamer(Adrp);
3868 }
3869
3870 {
3871 MCInst Ldr;
3872 Ldr.setOpcode(AArch64::LDRXui);
3873 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3874 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3875 MCOperand SymPageOff;
3876 MCInstLowering.lowerOperand(
3877 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3879 SymPageOff);
3880 Ldr.addOperand(SymPageOff);
3882 EmitToStreamer(Ldr);
3883 }
3884
3885 EmitToStreamer(MCInstBuilder(AArch64::STRXui)
3886 .addReg(AArch64::X0)
3887 .addReg(AArch64::X16)
3888 .addImm(0));
3889
3890 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3891 .addReg(AArch64::X16)
3892 .addReg(AArch64::X0)
3893 .addImm(0)
3894 .addImm(0));
3895
3896 for (int I = 3; I != -1; --I)
3897 EmitToStreamer(MCInstBuilder(AArch64::LDPDpost)
3898 .addReg(AArch64::SP)
3899 .addReg(AArch64::D1 + 2 * I)
3900 .addReg(AArch64::D0 + 2 * I)
3901 .addReg(AArch64::SP)
3902 .addImm(2));
3903
3904 for (int I = 3; I != -1; --I)
3905 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3906 .addReg(AArch64::SP)
3907 .addReg(AArch64::X1 + 2 * I)
3908 .addReg(AArch64::X0 + 2 * I)
3909 .addReg(AArch64::SP)
3910 .addImm(2));
3911
3912 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3913 .addReg(AArch64::SP)
3914 .addReg(AArch64::FP)
3915 .addReg(AArch64::LR)
3916 .addReg(AArch64::SP)
3917 .addImm(2));
3918
3919 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3920 : AArch64::BR)
3921 .addReg(AArch64::X16));
3922}
3923
3924const MCExpr *AArch64AsmPrinter::lowerConstant(const Constant *CV,
3925 const Constant *BaseCV,
3926 uint64_t Offset) {
3927 if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
3928 return MCSymbolRefExpr::create(MCInstLowering.GetGlobalValueSymbol(GV, 0),
3929 OutContext);
3930 }
3931
3932 return AsmPrinter::lowerConstant(CV, BaseCV, Offset);
3933}
3934
3935char AArch64AsmPrinter::ID = 0;
3936
3937INITIALIZE_PASS(AArch64AsmPrinter, "aarch64-asm-printer",
3938 "AArch64 Assembly Printer", false, false)
3939
3940// Force static initialization.
3941extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
3942LLVMInitializeAArch64AsmPrinter() {
3948}
static cl::opt< PtrauthCheckMode > PtrauthAuthChecks("aarch64-ptrauth-auth-checks", cl::Hidden, cl::values(clEnumValN(Unchecked, "none", "don't test for failure"), clEnumValN(Poison, "poison", "poison on failure"), clEnumValN(Trap, "trap", "trap on failure")), cl::desc("Check pointer authentication auth/resign failures"), cl::init(Default))
PtrauthCheckMode
@ Unchecked
#define GET_CB_OPC(IsImm, Width, ImmCond, RegCond)
static void emitAuthenticatedPointer(MCStreamer &OutStreamer, MCSymbol *StubLabel, const MCExpr *StubAuthPtrRef)
static bool targetSupportsIRelativeRelocation(const Triple &TT)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_ABI
Definition Compiler.h:213
#define LLVM_EXTERNAL_VISIBILITY
Definition Compiler.h:132
This file defines the DenseMap class.
@ Default
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
print mir2vec MIR2Vec Vocabulary Printer Pass
Definition MIR2Vec.cpp:593
Machine Check Debug Module
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
#define P(N)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:487
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool printOperand(raw_ostream &OS, const SelectionDAG *G, const SDValue Value)
This file defines the SmallString class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static bool printAsmMRegister(const X86AsmPrinter &P, const MachineOperand &MO, char Mode, raw_ostream &O)
static const AArch64AuthMCExpr * create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key, bool HasAddressDiversity, MCContext &Ctx, SMLoc Loc=SMLoc())
const SetOfInstructions & getLOHRelated() const
unsigned getJumpTableEntrySize(int Idx) const
MCSymbol * getJumpTableEntryPCRelSymbol(int Idx) const
static bool shouldSignReturnAddress(SignReturnAddress Condition, bool IsLRSpilled)
std::optional< std::string > getOutliningStyle() const
const MILOHContainer & getLOHContainer() const
void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym)
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
static bool isTailCallReturnInst(const MachineInstr &MI)
Returns true if MI is one of the TCRETURN* instructions.
AArch64MCInstLower - This class is used to lower an MachineInstr into an MCInst.
MCSymbol * GetGlobalValueSymbol(const GlobalValue *GV, unsigned TargetFlags) const
void Lower(const MachineInstr *MI, MCInst &OutMI) const
bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const
virtual void emitARM64WinCFISaveRegP(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveRegPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegQP(unsigned Reg, int Offset)
virtual void emitAttributesSubsection(StringRef VendorName, AArch64BuildAttributes::SubsectionOptional IsOptional, AArch64BuildAttributes::SubsectionType ParameterType)
Build attributes implementation.
virtual void emitARM64WinCFISavePReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegI(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFRegPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveRegX(unsigned Reg, int Offset)
virtual void emitARM64WinCFIAllocStack(unsigned Size)
virtual void emitARM64WinCFISaveFPLRX(int Offset)
virtual void emitARM64WinCFIAllocZ(int Offset)
virtual void emitDirectiveVariantPCS(MCSymbol *Symbol)
Callback used to implement the .variant_pcs directive.
virtual void emitARM64WinCFIAddFP(unsigned Size)
virtual void emitARM64WinCFISaveFPLR(int Offset)
virtual void emitARM64WinCFISaveFRegP(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegQPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFRegX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveZReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveLRPair(unsigned Reg, int Offset)
virtual void emitAttribute(StringRef VendorName, unsigned Tag, unsigned Value, std::string String)
virtual void emitARM64WinCFISaveAnyRegIP(unsigned Reg, int Offset)
void setPreservesAll()
Set by analyses that do not transform their input at all.
const T & front() const
front - Get the first element.
Definition ArrayRef.h:145
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
This class is intended to be used as a driving class for all asm writers.
Definition AsmPrinter.h:91
virtual void emitGlobalAlias(const Module &M, const GlobalAlias &GA)
virtual MCSymbol * GetCPISymbol(unsigned CPID) const
Return the symbol for the specified constant pool entry.
virtual const MCExpr * lowerConstant(const Constant *CV, const Constant *BaseCV=nullptr, uint64_t Offset=0)
Lower the specified LLVM Constant to an MCExpr.
void getAnalysisUsage(AnalysisUsage &AU) const override
Record analysis usage.
virtual void emitXXStructor(const DataLayout &DL, const Constant *CV)
Targets can override this to change how global constants that are part of a C++ static/global constru...
Definition AsmPrinter.h:636
virtual void emitFunctionEntryLabel()
EmitFunctionEntryLabel - Emit the label that is the entrypoint for the function.
virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, const char *ExtraCode, raw_ostream &OS)
Print the specified operand of MI, an INLINEASM instruction, using the specified assembler variant.
virtual const MCExpr * lowerBlockAddressConstant(const BlockAddress &BA)
Lower the specified BlockAddress to an MCExpr.
Function * getFunction() const
Definition Constants.h:940
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1065
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1068
Constant * getDeactivationSymbol() const
Definition Constants.h:1087
bool hasAddressDiscriminator() const
Whether there is any non-null address discriminator.
Definition Constants.h:1083
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1071
void recordFaultingOp(FaultKind FaultTy, const MCSymbol *FaultingLabel, const MCSymbol *HandlerLabel)
Definition FaultMaps.cpp:28
void serializeToFaultMapSection()
Definition FaultMaps.cpp:45
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:729
const Constant * getAliasee() const
Definition GlobalAlias.h:87
const Constant * getResolver() const
Definition GlobalIFunc.h:73
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasLocalLinkage() const
bool hasExternalWeakLinkage() const
Type * getValueType() const
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
static const MCBinaryExpr * createLShr(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:423
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:343
static const MCBinaryExpr * createSub(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:428
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition MCExpr.cpp:212
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
MCSectionELF * getELFSection(const Twine &Section, unsigned Type, unsigned Flags)
Definition MCContext.h:553
LLVM_ABI MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
LLVM_ABI MCSymbol * createLinkerPrivateSymbol(const Twine &Name)
Base class for the full range of assembler expressions which are needed for parsing.
Definition MCExpr.h:34
LLVM_ABI bool evaluateAsRelocatable(MCValue &Res, const MCAssembler *Asm) const
Try to evaluate the expression to a relocatable value, i.e.
Definition MCExpr.cpp:450
void addOperand(const MCOperand Op)
Definition MCInst.h:215
void setOpcode(unsigned Op)
Definition MCInst.h:201
MCSection * getDataSection() const
void setImm(int64_t Val)
Definition MCInst.h:89
static MCOperand createExpr(const MCExpr *Val)
Definition MCInst.h:166
int64_t getImm() const
Definition MCInst.h:84
static MCOperand createReg(MCRegister Reg)
Definition MCInst.h:138
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
bool isReg() const
Definition MCInst.h:65
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
static constexpr unsigned NonUniqueID
Definition MCSection.h:522
static const MCSpecifierExpr * create(const MCExpr *Expr, Spec S, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.cpp:743
Streaming machine code generation interface.
Definition MCStreamer.h:220
virtual void emitCFIBKeyFrame()
virtual bool popSection()
Restore the current and previous section from the section stack.
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
virtual void emitRelocDirective(const MCExpr &Offset, StringRef Name, const MCExpr *Expr, SMLoc Loc={})
Record a relocation described by the .reloc directive.
virtual bool hasRawTextSupport() const
Return true if this asm streamer supports emitting unformatted text to the .s file with EmitRawText.
Definition MCStreamer.h:368
MCContext & getContext() const
Definition MCStreamer.h:314
virtual void AddComment(const Twine &T, bool EOL=true)
Add a textual comment.
Definition MCStreamer.h:387
virtual void emitCFIMTETaggedFrame()
void emitValue(const MCExpr *Value, unsigned Size, SMLoc Loc=SMLoc())
virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc=SMLoc())
Emit a label for Symbol into the current section.
MCTargetStreamer * getTargetStreamer()
Definition MCStreamer.h:324
void pushSection()
Save the current and previous section on the section stack.
Definition MCStreamer.h:443
virtual void switchSection(MCSection *Section, uint32_t Subsec=0)
Set the current section where code is being emitted to Section.
MCSection * getCurrentSectionOnly() const
Definition MCStreamer.h:421
void emitRawText(const Twine &String)
If this file is backed by a assembly streamer, this dumps the specified string in the output ....
const FeatureBitset & getFeatureBits() const
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:214
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
LLVM_ABI void print(raw_ostream &OS, const MCAsmInfo *MAI) const
print - Print the value to the stream OS.
Definition MCSymbol.cpp:59
StringRef getName() const
getName - Get the symbol name.
Definition MCSymbol.h:188
const MCSymbol * getAddSym() const
Definition MCValue.h:49
int64_t getConstant() const
Definition MCValue.h:44
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
CalledGlobalInfo tryGetCalledGlobal(const MachineInstr *MI) const
Tries to get the global and target flags for a call site, if the instruction is a call to a global.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MCContext & getContext() const
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineBasicBlock & front() const
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
mop_range operands()
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const MachineOperand & getOperand(unsigned i) const
const std::vector< MachineJumpTableEntry > & getJumpTables() const
unsigned getSubReg() const
static MachineOperand CreateMCSymbol(MCSymbol *Sym, unsigned TargetFlags=0)
const GlobalValue * getGlobal() const
static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
const BlockAddress * getBlockAddress() const
void setOffset(int64_t Offset)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
@ MO_Immediate
Immediate operand.
@ MO_GlobalAddress
Address of a global value.
@ MO_BlockAddress
Address of a basic block.
@ MO_Register
Register operand.
@ MO_ExternalSymbol
Name of external global symbol.
int64_t getOffset() const
Return the offset from the symbol in this operand.
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
static SectionKind getMetadata()
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void push_back(const T &Elt)
LLVM_ABI void recordStatepoint(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a statepoint instruction.
LLVM_ABI void recordPatchPoint(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a patchpoint instruction.
LLVM_ABI void recordStackMap(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a stackmap instruction.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:140
virtual MCSection * getSectionForJumpTable(const Function &F, const TargetMachine &TM) const
Primary interface to the complete machine description for the target machine.
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
bool regsOverlap(Register RegA, Register RegB) const
Returns true if the two registers are equal or alias each other.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition Type.h:258
LLVM Value Representation.
Definition Value.h:75
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
StringRef getVendorName(unsigned const Vendor)
@ MO_NC
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow.
@ MO_G1
MO_G1 - A symbol operand with this flag (granule 1) represents the bits 16-31 of a 64-bit address,...
@ MO_S
MO_S - Indicates that the bits of the symbol operand represented by MO_G0 etc are signed.
@ MO_PAGEOFF
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page.
@ MO_GOT
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
@ MO_G0
MO_G0 - A symbol operand with this flag (granule 0) represents the bits 0-15 of a 64-bit address,...
@ MO_PAGE
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
@ MO_TLS
MO_TLS - Indicates that the operand being accessed is some kind of thread-local symbol.
AuthCheckMethod
Variants of check performed on an authenticated pointer.
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ SectionSize
Definition COFF.h:61
SymbolStorageClass
Storage class tells where and what the symbol represents.
Definition COFF.h:218
@ IMAGE_SYM_CLASS_EXTERNAL
External symbol.
Definition COFF.h:224
@ IMAGE_SYM_CLASS_STATIC
Static.
Definition COFF.h:225
@ IMAGE_SYM_DTYPE_FUNCTION
A function that returns a base type.
Definition COFF.h:276
@ SCT_COMPLEX_TYPE_SHIFT
Type is formed as (base + (derived << SCT_COMPLEX_TYPE_SHIFT))
Definition COFF.h:280
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ SHF_ALLOC
Definition ELF.h:1248
@ SHF_GROUP
Definition ELF.h:1270
@ SHF_EXECINSTR
Definition ELF.h:1251
@ GNU_PROPERTY_AARCH64_FEATURE_1_BTI
Definition ELF.h:1859
@ GNU_PROPERTY_AARCH64_FEATURE_1_PAC
Definition ELF.h:1860
@ GNU_PROPERTY_AARCH64_FEATURE_1_GCS
Definition ELF.h:1861
@ SHT_PROGBITS
Definition ELF.h:1147
@ S_REGULAR
S_REGULAR - Regular section.
Definition MachO.h:127
void emitInstruction(MCObjectStreamer &, const MCInst &Inst, const MCSubtargetInfo &STI)
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:682
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
bool empty() const
Definition BasicBlock.h:101
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI std::optional< std::string > getArm64ECMangledFunctionName(StringRef Name)
Returns the ARM64EC mangled function name unless the input is already mangled.
Definition Mangler.cpp:292
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1667
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
scope_exit(Callable) -> scope_exit< Callable >
static unsigned getXPACOpcodeForKey(AArch64PACKey::ID K)
Return XPAC opcode to be used for a ptrauth strip using the given key.
ExceptionHandling
Definition CodeGen.h:53
Target & getTheAArch64beTarget()
std::string utostr(uint64_t X, bool isNeg=false)
static unsigned getBranchOpcodeForKey(bool IsCall, AArch64PACKey::ID K, bool Zero)
Return B(L)RA opcode to be used for an authenticated branch or call using the given key,...
Target & getTheAArch64leTarget()
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
Target & getTheAArch64_32Target()
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
Target & getTheARM64_32Target()
static MCRegister getXRegFromWReg(MCRegister Reg)
@ Add
Sum of integers.
Target & getTheARM64Target()
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static MCRegister getXRegFromXRegTuple(MCRegister RegTuple)
static unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return PAC opcode to be used for a ptrauth sign using the given key, or its PAC*Z variant that doesn'...
static MCRegister getWRegFromXReg(MCRegister Reg)
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1915
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
static unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return AUT opcode to be used for a ptrauth auth using the given key, or its AUT*Z variant that doesn'...
@ MCSA_Weak
.weak
@ MCSA_WeakAntiDep
.weak_anti_dep (COFF)
@ MCSA_ELF_TypeFunction
.type _foo, STT_FUNC # aka @function
@ MCSA_Hidden
.hidden (ELF)
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:870
#define EQ(a, b)
Definition regexec.c:65
RegisterAsmPrinter - Helper template for registering a target specific assembly printer,...