//===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a printer that converts from our internal representation
// of machine-dependent LLVM code to the AArch64 assembly language.
//
//===----------------------------------------------------------------------===//
14#include "AArch64.h"
15#include "AArch64MCInstLower.h"
17#include "AArch64RegisterInfo.h"
18#include "AArch64Subtarget.h"
27#include "llvm/ADT/DenseMap.h"
28#include "llvm/ADT/ScopeExit.h"
31#include "llvm/ADT/Statistic.h"
32#include "llvm/ADT/StringRef.h"
33#include "llvm/ADT/Twine.h"
47#include "llvm/IR/DataLayout.h"
49#include "llvm/IR/Mangler.h"
50#include "llvm/IR/Module.h"
51#include "llvm/MC/MCAsmInfo.h"
52#include "llvm/MC/MCContext.h"
53#include "llvm/MC/MCExpr.h"
54#include "llvm/MC/MCInst.h"
58#include "llvm/MC/MCStreamer.h"
59#include "llvm/MC/MCSymbol.h"
60#include "llvm/MC/MCValue.h"
70#include <cassert>
71#include <cstdint>
72#include <map>
73#include <memory>
74
75using namespace llvm;
76
77#define DEBUG_TYPE "AArch64AsmPrinter"
78
79// Doesn't count FPR128 ZCZ instructions which are handled
80// by TableGen pattern matching
81STATISTIC(NumZCZeroingInstrsFPR,
82 "Number of zero-cycle FPR zeroing instructions expanded from "
83 "canonical pseudo instructions");
84
87 "aarch64-ptrauth-auth-checks", cl::Hidden,
88 cl::values(clEnumValN(Unchecked, "none", "don't test for failure"),
89 clEnumValN(Poison, "poison", "poison on failure"),
90 clEnumValN(Trap, "trap", "trap on failure")),
91 cl::desc("Check pointer authentication auth/resign failures"),
93
94namespace {
95
class AArch64AsmPrinter : public AsmPrinter {
  AArch64MCInstLower MCInstLowering; // MachineInstr -> MCInst lowering helper.
  FaultMaps FM;                      // State for fault-map emission (see LowerFAULTING_OP).
  const AArch64Subtarget *STI;       // Subtarget of the function currently being emitted.
  bool ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = false;
#ifndef NDEBUG
  // Debug-only counter of MC instructions emitted for the current pseudo.
  unsigned InstsEmitted;
#endif
  // Set from the "import-call-optimization" module flag on COFF targets.
  bool EnableImportCallOptimization = false;
  // NOTE(review): the declaring type of this member is not visible in this
  // excerpt (extraction dropped a line here) — verify against upstream.
      SectionToImportedFunctionCalls;
  unsigned PAuthIFuncNextUniqueID = 1;

public:
  static char ID;

  AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
      : AsmPrinter(TM, std::move(Streamer), ID),
        MCInstLowering(OutContext, *this), FM(*this) {}

  StringRef getPassName() const override { return "AArch64 Assembly Printer"; }

  /// Wrapper for MCInstLowering.lowerOperand() for the
  /// tblgen'erated pseudo lowering.
  bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
    return MCInstLowering.lowerOperand(MO, MCOp);
  }

  const MCExpr *lowerConstantPtrAuth(const ConstantPtrAuth &CPA) override;

  const MCExpr *lowerBlockAddressConstant(const BlockAddress &BA) override;

  void emitStartOfAsmFile(Module &M) override;
  void emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
                         ArrayRef<unsigned> JumpTableIndices) override;
  // NOTE(review): the tail of this tuple type is not visible in this excerpt
  // (extraction dropped a line) — verify against upstream.
  std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
  getCodeViewJumpTableInfo(int JTI, const MachineInstr *BranchInstr,
                           const MCSymbol *BranchLabel) const override;

  void emitFunctionEntryLabel() override;

  void emitXXStructor(const DataLayout &DL, const Constant *CV) override;

  void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);

  void LowerHardenedBRJumpTable(const MachineInstr &MI);

  void LowerMOPS(MCStreamer &OutStreamer, const MachineInstr &MI);

  void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
                     const MachineInstr &MI);
  void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                       const MachineInstr &MI);
  void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                       const MachineInstr &MI);
  void LowerFAULTING_OP(const MachineInstr &MI);

  void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
  void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
  void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);
  void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, bool Typed);

  // Key for memoizing outlined HWASan check routines:
  // (base reg, short-granule?, access info, fixed-shadow?, shadow offset).
  typedef std::tuple<unsigned, bool, uint32_t, bool, uint64_t>
      HwasanMemaccessTuple;
  std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
  void LowerKCFI_CHECK(const MachineInstr &MI);
  void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
  void emitHwasanMemaccessSymbols(Module &M);

  void emitSled(const MachineInstr &MI, SledKind Kind);

  // Returns whether Reg may be used to store sensitive temporary values when
  // expanding PtrAuth pseudos. Some OSes may take extra care to protect a
  // small subset of GPRs on context switches - use these registers then.
  //
  // If there are no preferred registers, returns true for any Reg.
  bool isPtrauthRegSafe(Register Reg) const {
    if (STI->isX16X17Safer())
      return Reg == AArch64::X16 || Reg == AArch64::X17;

    return true;
  }

  // Emit the sequence for BRA/BLRA (authenticate + branch/call).
  void emitPtrauthBranch(const MachineInstr *MI);

  // Emit a check of an authenticated value.
  // NOTE(review): one or more parameters of this declaration are not visible
  // in this excerpt (extraction dropped lines) — verify against upstream.
  void emitPtrauthCheckAuthenticatedValue(Register TestedReg,
                                          Register ScratchReg,
                                          const MCSymbol *OnFailure = nullptr);

  // Check authenticated LR before tail calling.
  void emitPtrauthTailCallHardening(const MachineInstr *TC);

  // Describes one signing/authentication schema: key, integer discriminator,
  // and address discriminator register.
  struct PtrAuthSchema {
    PtrAuthSchema(AArch64PACKey::ID Key, uint64_t IntDisc,
                  const MachineOperand &AddrDiscOp);

    // NOTE(review): a field (presumably the key) is not visible in this
    // excerpt — verify against upstream.
    uint64_t IntDisc;
    Register AddrDisc;
    bool AddrDiscIsKilled; // Whether AddrDisc may be clobbered by expansion.
  };

  // Helper for emitting AUTRELLOADPAC: increment Pointer by Addend and then by
  // a 32-bit signed value loaded from memory. The instructions emitted are
  //
  //    ldrsw Scratch, [Pointer, #Addend]!
  //    add   Pointer, Pointer, Scratch
  //
  // for small Addend value, with longer sequences required for wider Addend.
  void emitPtrauthApplyIndirectAddend(Register Pointer, Register Scratch,
                                      int64_t Addend);

  // Emit the sequence for AUT or AUTPAC. Addend if AUTRELLOADPAC
  void emitPtrauthAuthResign(Register Pointer, Register Scratch,
                             PtrAuthSchema AuthSchema,
                             std::optional<PtrAuthSchema> SignSchema,
                             std::optional<int64_t> Addend, Value *DS);

  // Emit R_AARCH64_PATCHINST, the deactivation symbol relocation. Returns true
  // if no instruction should be emitted because the deactivation symbol is
  // defined in the current module so this function emitted a NOP instead.
  bool emitDeactivationSymbolRelocation(Value *DS);

  // Emit the sequence for PAC.
  void emitPtrauthSign(const MachineInstr *MI);

  // Emit the sequence to compute the discriminator.
  //
  // The Scratch register passed to this function must be safe, as returned by
  // isPtrauthRegSafe(ScratchReg).
  //
  // The returned register is either ScratchReg, AddrDisc, or XZR. Furthermore,
  // it is guaranteed to be safe (or XZR), with the only exception of
  // passing-through an *unmodified* unsafe AddrDisc register.
  //
  // If the expanded pseudo is allowed to clobber AddrDisc register, setting
  // MayClobberAddrDisc may save one MOV instruction, provided
  // isPtrauthRegSafe(AddrDisc) is true:
  //
  //   mov   x17, x16
  //   movk  x17, #1234, lsl #48
  //   ; x16 is not used anymore
  //
  // can be replaced by
  //
  //   movk  x16, #1234, lsl #48
  Register emitPtrauthDiscriminator(uint64_t Disc, Register AddrDisc,
                                    Register ScratchReg,
                                    bool MayClobberAddrDisc = false);

  // Emit the sequence for LOADauthptrstatic
  void LowerLOADauthptrstatic(const MachineInstr &MI);

  // Emit the sequence for LOADgotPAC/MOVaddrPAC (either GOT adrp-ldr or
  // adrp-add followed by PAC sign)
  void LowerMOVaddrPAC(const MachineInstr &MI);

  // Emit the sequence for LOADgotAUTH (load signed pointer from signed ELF GOT
  // and authenticate it with, if FPAC bit is not set, check+trap sequence after
  // authenticating)
  void LowerLOADgotAUTH(const MachineInstr &MI);

  // Add the immediate Addend to Val, using Tmp as a scratch register.
  void emitAddImm(MCRegister Val, int64_t Addend, MCRegister Tmp);
  // Materialize the address denoted by Expr into Reg, using Tmp as scratch.
  void emitAddress(MCRegister Reg, const MCExpr *Expr, MCRegister Tmp,
                   bool DSOLocal, const MCSubtargetInfo &STI);

  const MCExpr *emitPAuthRelocationAsIRelative(
      const MCExpr *Target, uint64_t Disc, AArch64PACKey::ID KeyID,
      bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr);

  /// tblgen'erated driver function for lowering simple MI->MC
  /// pseudo instructions.
  bool lowerPseudoInstExpansion(const MachineInstr *MI, MCInst &Inst);

  // Emit Build Attributes
  void emitAttributes(unsigned Flags, uint64_t PAuthABIPlatform,
                      uint64_t PAuthABIVersion, AArch64TargetStreamer *TS);

  // Emit expansion of Compare-and-branch pseudo instructions
  void emitCBPseudoExpansion(const MachineInstr *MI);

  void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
  void EmitToStreamer(const MCInst &Inst) {
    EmitToStreamer(*OutStreamer, Inst);
  }

  void emitInstruction(const MachineInstr *MI) override;

  void emitFunctionHeaderComment() override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // NOTE(review): a statement (presumably delegation to the base class) is
    // not visible in this excerpt — verify against upstream.
    AU.setPreservesAll();
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Pick up optional profile info if the analyses were scheduled.
    if (auto *PSIW = getAnalysisIfAvailable<ProfileSummaryInfoWrapperPass>())
      PSI = &PSIW->getPSI();
    if (auto *SDPIW =
            getAnalysisIfAvailable<StaticDataProfileInfoWrapperPass>())
      SDPI = &SDPIW->getStaticDataProfileInfo();

    AArch64FI = MF.getInfo<AArch64FunctionInfo>();
    STI = &MF.getSubtarget<AArch64Subtarget>();

    SetupMachineFunction(MF);

    if (STI->isTargetCOFF()) {
      // Emit the COFF symbol definition for the function.
      // NOTE(review): the initializers of the storage class and Type below are
      // not visible in this excerpt (extraction dropped lines) — verify
      // against upstream.
      bool Local = MF.getFunction().hasLocalLinkage();
      int Type =

      OutStreamer->beginCOFFSymbolDef(CurrentFnSym);
      OutStreamer->emitCOFFSymbolStorageClass(Scl);
      OutStreamer->emitCOFFSymbolType(Type);
      OutStreamer->endCOFFSymbolDef();
    }

    // Emit the rest of the function body.
    emitFunctionBody();

    // Emit the XRay table for this function.
    emitXRayTable();

    // We didn't modify anything.
    return false;
  }

  const MCExpr *lowerConstant(const Constant *CV,
                              const Constant *BaseCV = nullptr,
                              uint64_t Offset = 0) override;

private:
  void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
  bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
  bool printAsmRegInClass(const MachineOperand &MO,
                          const TargetRegisterClass *RC, unsigned AltName,
                          raw_ostream &O);

  bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
                       const char *ExtraCode, raw_ostream &O) override;
  bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
                             const char *ExtraCode, raw_ostream &O) override;

  void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);

  void emitFunctionBodyEnd() override;
  void emitGlobalAlias(const Module &M, const GlobalAlias &GA) override;

  MCSymbol *GetCPISymbol(unsigned CPID) const override;
  void emitEndOfAsmFile(Module &M) override;

  // Per-function info; set in runOnMachineFunction.
  AArch64FunctionInfo *AArch64FI = nullptr;

  /// Emit the LOHs contained in AArch64FI.
  void emitLOHs();

  // Emit a 64-bit GPR-to-GPR move.
  void emitMovXReg(Register Dest, Register Src);
  // Emit MOVZ/MOVK Dest, #Imm, lsl #Shift.
  void emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift);
  void emitMOVK(Register Dest, uint64_t Imm, unsigned Shift);

  void emitAUT(AArch64PACKey::ID Key, Register Pointer, Register Disc);
  void emitPAC(AArch64PACKey::ID Key, Register Pointer, Register Disc);
  void emitBLRA(bool IsCall, AArch64PACKey::ID Key, Register Target,
                Register Disc);

  /// Emit instruction to set float register to zero.
  void emitFMov0(const MachineInstr &MI);
  void emitFMov0AsFMov(const MachineInstr &MI, Register DestReg);

  using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;

  // Labels attached to instructions participating in Linker Optimization
  // Hints (LOHs).
  MInstToMCSymbol LOHInstToLabel;

  bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const override {
    return ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags;
  }

  const MCSubtargetInfo *getIFuncMCSubtargetInfo() const override {
    assert(STI);
    return STI;
  }
  void emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
                              MCSymbol *LazyPointer) override;
  void emitMachOIFuncStubHelperBody(Module &M, const GlobalIFunc &GI,
                                    MCSymbol *LazyPointer) override;

  /// Checks if this instruction is part of a sequence that is eligible for
  /// import call optimization and, if so, records it to be emitted in the
  /// import call section.
  void recordIfImportCall(const MachineInstr *BranchInst);
};
394
395} // end anonymous namespace
396
void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
  const Triple &TT = TM.getTargetTriple();

  if (TT.isOSBinFormatCOFF()) {
    emitCOFFFeatureSymbol(M);
    emitCOFFReplaceableFunctionData(M);

    // Import call optimization is opt-in via a module flag.
    if (M.getModuleFlag("import-call-optimization"))
      EnableImportCallOptimization = true;
  }

  // Everything below (build attributes, .note.gnu.property) is ELF-only.
  if (!TT.isOSBinFormatELF())
    return;

  // For emitting build attributes and .note.gnu.property section
  auto *TS =
      static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
  // Assemble feature flags that may require creation of build attributes and a
  // note section.
  // NOTE(review): the GNUFlags |= ... updates for BTI/GCS/PAC appear to have
  // been dropped from this excerpt by extraction — verify against upstream.
  unsigned BAFlags = 0;
  unsigned GNUFlags = 0;
  if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("branch-target-enforcement"))) {
    if (!BTE->isZero()) {
      BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_BTI_Flag;
    }
  }

  if (const auto *GCS = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("guarded-control-stack"))) {
    if (!GCS->isZero()) {
      BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_GCS_Flag;
    }
  }

  if (const auto *Sign = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("sign-return-address"))) {
    if (!Sign->isZero()) {
      BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_PAC_Flag;
    }
  }

  // -1 means "flag absent"; emitAttributes maps it to 0.
  uint64_t PAuthABIPlatform = -1;
  if (const auto *PAP = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("aarch64-elf-pauthabi-platform"))) {
    PAuthABIPlatform = PAP->getZExtValue();
  }

  uint64_t PAuthABIVersion = -1;
  if (const auto *PAV = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("aarch64-elf-pauthabi-version"))) {
    PAuthABIVersion = PAV->getZExtValue();
  }

  // Emit AArch64 Build Attributes
  emitAttributes(BAFlags, PAuthABIPlatform, PAuthABIVersion, TS);
  // Emit a .note.gnu.property section with the flags.
  TS->emitNoteSection(GNUFlags, PAuthABIPlatform, PAuthABIVersion);
}
459
460void AArch64AsmPrinter::emitFunctionHeaderComment() {
461 const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
462 std::optional<std::string> OutlinerString = FI->getOutliningStyle();
463 if (OutlinerString != std::nullopt)
464 OutStreamer->getCommentOS() << ' ' << OutlinerString;
465}
466
467void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI)
468{
469 const Function &F = MF->getFunction();
470 if (F.hasFnAttribute("patchable-function-entry")) {
471 unsigned Num;
472 if (F.getFnAttribute("patchable-function-entry")
473 .getValueAsString()
474 .getAsInteger(10, Num))
475 return;
476 emitNops(Num);
477 return;
478 }
479
480 emitSled(MI, SledKind::FUNCTION_ENTER);
481}
482
// Emit an XRay sled at a function exit point.
void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI) {
  emitSled(MI, SledKind::FUNCTION_EXIT);
}
486
// Emit an XRay sled at a tail-call site.
void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI) {
  emitSled(MI, SledKind::TAIL_CALL);
}
490
491void AArch64AsmPrinter::emitSled(const MachineInstr &MI, SledKind Kind) {
492 static const int8_t NoopsInSledCount = 7;
493 // We want to emit the following pattern:
494 //
495 // .Lxray_sled_N:
496 // ALIGN
497 // B #32
498 // ; 7 NOP instructions (28 bytes)
499 // .tmpN
500 //
501 // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
502 // over the full 32 bytes (8 instructions) with the following pattern:
503 //
504 // STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
505 // LDR W17, #12 ; W17 := function ID
506 // LDR X16,#12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
507 // BLR X16 ; call the tracing trampoline
508 // ;DATA: 32 bits of function ID
509 // ;DATA: lower 32 bits of the address of the trampoline
510 // ;DATA: higher 32 bits of the address of the trampoline
511 // LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
512 //
513 OutStreamer->emitCodeAlignment(Align(4), &getSubtargetInfo());
514 auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
515 OutStreamer->emitLabel(CurSled);
516 auto Target = OutContext.createTempSymbol();
517
518 // Emit "B #32" instruction, which jumps over the next 28 bytes.
519 // The operand has to be the number of 4-byte instructions to jump over,
520 // including the current instruction.
521 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));
522
523 for (int8_t I = 0; I < NoopsInSledCount; I++)
524 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::NOP));
525
526 OutStreamer->emitLabel(Target);
527 recordSled(CurSled, MI, Kind, 2);
528}
529
// Emit AArch64 build-attribute subsections (PAuth ABI and feature-and-bits).
// NOTE(review): several statements in this function (the emitSubsection /
// emitAttribute calls and the BTI/PAC/GCS value initializers) were dropped
// from this excerpt by extraction — verify the full body against upstream.
void AArch64AsmPrinter::emitAttributes(unsigned Flags,
                                       uint64_t PAuthABIPlatform,
                                       uint64_t PAuthABIVersion,
                                       AArch64TargetStreamer *TS) {

  // uint64_t(-1) encodes "module flag absent"; normalize to 0.
  PAuthABIPlatform = (uint64_t(-1) == PAuthABIPlatform) ? 0 : PAuthABIPlatform;
  PAuthABIVersion = (uint64_t(-1) == PAuthABIVersion) ? 0 : PAuthABIVersion;

  if (PAuthABIPlatform || PAuthABIVersion) {
        AArch64BuildAttributes::SubsectionOptional::REQUIRED,
        AArch64BuildAttributes::SubsectionType::ULEB128);
                      PAuthABIPlatform, "");
             "");
  }

  unsigned BTIValue =
  unsigned PACValue =
  unsigned GCSValue =

  if (BTIValue || PACValue || GCSValue) {
        AArch64BuildAttributes::SubsectionOptional::OPTIONAL,
        AArch64BuildAttributes::SubsectionType::ULEB128);
  }
}
578
// Emit the following code for Intrinsic::{xray_customevent,xray_typedevent}
// (built-in functions __xray_customevent/__xray_typedevent).
//
// .Lxray_event_sled_N:
//   b 1f
//   save x0 and x1 (and also x2 for TYPED_EVENT_CALL)
//   set up x0 and x1 (and also x2 for TYPED_EVENT_CALL)
//   bl __xray_CustomEvent or __xray_TypedEvent
//   restore x0 and x1 (and also x2 for TYPED_EVENT_CALL)
// 1:
//
// There are 6 instructions for EVENT_CALL and 9 for TYPED_EVENT_CALL.
//
// Then record a sled of kind CUSTOM_EVENT or TYPED_EVENT.
// After patching, b .+N will become a nop.
void AArch64AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
                                                  bool Typed) {
  auto &O = *OutStreamer;
  MCSymbol *CurSled = OutContext.createTempSymbol("xray_sled_", true);
  O.emitLabel(CurSled);
  // Mach-O symbols carry a leading underscore.
  bool MachO = TM.getTargetTriple().isOSBinFormatMachO();
  auto *Sym = MCSymbolRefExpr::create(
      OutContext.getOrCreateSymbol(
          Twine(MachO ? "_" : "") +
          (Typed ? "__xray_TypedEvent" : "__xray_CustomEvent")),
      OutContext);
  if (Typed) {
    O.AddComment("Begin XRay typed event");
    // When unpatched, jump over the whole 9-instruction sled.
    EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(9));
    // Save x0/x1 (pre-indexed push) and x2 below them.
    EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
                          .addReg(AArch64::SP)
                          .addReg(AArch64::X0)
                          .addReg(AArch64::X1)
                          .addReg(AArch64::SP)
                          .addImm(-4));
    EmitToStreamer(O, MCInstBuilder(AArch64::STRXui)
                          .addReg(AArch64::X2)
                          .addReg(AArch64::SP)
                          .addImm(2));
    // Marshal the intrinsic operands into the x0/x1/x2 argument registers.
    emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
    emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
    emitMovXReg(AArch64::X2, MI.getOperand(2).getReg());
    EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
    // Restore x2, then x0/x1 (post-indexed pop).
    EmitToStreamer(O, MCInstBuilder(AArch64::LDRXui)
                          .addReg(AArch64::X2)
                          .addReg(AArch64::SP)
                          .addImm(2));
    O.AddComment("End XRay typed event");
    EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
                          .addReg(AArch64::SP)
                          .addReg(AArch64::X0)
                          .addReg(AArch64::X1)
                          .addReg(AArch64::SP)
                          .addImm(4));

    recordSled(CurSled, MI, SledKind::TYPED_EVENT, 2);
  } else {
    O.AddComment("Begin XRay custom event");
    // When unpatched, jump over the whole 6-instruction sled.
    EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(6));
    EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
                          .addReg(AArch64::SP)
                          .addReg(AArch64::X0)
                          .addReg(AArch64::X1)
                          .addReg(AArch64::SP)
                          .addImm(-2));
    emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
    emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
    EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
    O.AddComment("End XRay custom event");
    EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
                          .addReg(AArch64::SP)
                          .addReg(AArch64::X0)
                          .addReg(AArch64::X1)
                          .addReg(AArch64::SP)
                          .addImm(2));

    recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 2);
  }
}
658
// Expand a KCFI_CHECK pseudo: load the type hash stored just before the call
// target's entry, compare it against the expected hash, and BRK with an
// encoded ESR on mismatch.
void AArch64AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
  Register AddrReg = MI.getOperand(0).getReg();
  assert(std::next(MI.getIterator())->isCall() &&
         "KCFI_CHECK not followed by a call instruction");
  assert(std::next(MI.getIterator())->getOperand(0).getReg() == AddrReg &&
         "KCFI_CHECK call target doesn't match call operand");

  // Default to using the intra-procedure-call temporary registers for
  // comparing the hashes.
  unsigned ScratchRegs[] = {AArch64::W16, AArch64::W17};
  if (AddrReg == AArch64::XZR) {
    // Checking XZR makes no sense. Instead of emitting a load, zero
    // ScratchRegs[0] and use it for the ESR AddrIndex below.
    AddrReg = getXRegFromWReg(ScratchRegs[0]);
    emitMovXReg(AddrReg, AArch64::XZR);
  } else {
    // If one of the scratch registers is used for the call target (e.g.
    // with AArch64::TCRETURNriBTI), we can clobber another caller-saved
    // temporary register instead (in this case, AArch64::W9) as the check
    // is immediately followed by the call instruction.
    for (auto &Reg : ScratchRegs) {
      if (Reg == getWRegFromXReg(AddrReg)) {
        Reg = AArch64::W9;
        break;
      }
    }
    assert(ScratchRegs[0] != AddrReg && ScratchRegs[1] != AddrReg &&
           "Invalid scratch registers for KCFI_CHECK");

    // Adjust the offset for patchable-function-prefix. This assumes that
    // patchable-function-prefix is the same for all functions.
    int64_t PrefixNops =
        MI.getMF()->getFunction().getFnAttributeAsParsedInteger(
            "patchable-function-prefix");

    // Load the target function type hash.
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDURWi)
                                     .addReg(ScratchRegs[0])
                                     .addReg(AddrReg)
                                     .addImm(-(PrefixNops * 4 + 4)));
  }

  // Load the expected type hash.
  const int64_t Type = MI.getOperand(1).getImm();
  emitMOVK(ScratchRegs[1], Type & 0xFFFF, 0);
  emitMOVK(ScratchRegs[1], (Type >> 16) & 0xFFFF, 16);

  // Compare the hashes and trap if there's a mismatch.
  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSWrs)
                                   .addReg(AArch64::WZR)
                                   .addReg(ScratchRegs[0])
                                   .addReg(ScratchRegs[1])
                                   .addImm(0));

  MCSymbol *Pass = OutContext.createTempSymbol();
  EmitToStreamer(*OutStreamer,
                 MCInstBuilder(AArch64::Bcc)
                     .addImm(AArch64CC::EQ)
                     .addExpr(MCSymbolRefExpr::create(Pass, OutContext)));

  // The base ESR is 0x8000 and the register information is encoded in bits
  // 0-9 as follows:
  // - 0-4: n, where the register Xn contains the target address
  // - 5-9: m, where the register Wm contains the expected type hash
  // Where n, m are in [0, 30].
  unsigned TypeIndex = ScratchRegs[1] - AArch64::W0;
  unsigned AddrIndex;
  switch (AddrReg) {
  default:
    AddrIndex = AddrReg - AArch64::X0;
    break;
  case AArch64::FP:
    // FP/LR are not contiguous with X0..X28 in the register enum, so map
    // them to their architectural numbers explicitly.
    AddrIndex = 29;
    break;
  case AArch64::LR:
    AddrIndex = 30;
    break;
  }

  assert(AddrIndex < 31 && TypeIndex < 31);

  unsigned ESR = 0x8000 | ((TypeIndex & 31) << 5) | (AddrIndex & 31);
  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BRK).addImm(ESR));
  OutStreamer->emitLabel(Pass);
}
744
745void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
746 Register Reg = MI.getOperand(0).getReg();
747
748 // The HWASan pass won't emit a CHECK_MEMACCESS intrinsic with a pointer
749 // statically known to be zero. However, conceivably, the HWASan pass may
750 // encounter a "cannot currently statically prove to be null" pointer (and is
751 // therefore unable to omit the intrinsic) that later optimization passes
752 // convert into a statically known-null pointer.
753 if (Reg == AArch64::XZR)
754 return;
755
756 bool IsShort =
757 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES) ||
758 (MI.getOpcode() ==
759 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
760 uint32_t AccessInfo = MI.getOperand(1).getImm();
761 bool IsFixedShadow =
762 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW) ||
763 (MI.getOpcode() ==
764 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
765 uint64_t FixedShadowOffset = IsFixedShadow ? MI.getOperand(2).getImm() : 0;
766
767 MCSymbol *&Sym = HwasanMemaccessSymbols[HwasanMemaccessTuple(
768 Reg, IsShort, AccessInfo, IsFixedShadow, FixedShadowOffset)];
769 if (!Sym) {
770 // FIXME: Make this work on non-ELF.
771 if (!TM.getTargetTriple().isOSBinFormatELF())
772 report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");
773
774 std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
775 utostr(AccessInfo);
776 if (IsFixedShadow)
777 SymName += "_fixed_" + utostr(FixedShadowOffset);
778 if (IsShort)
779 SymName += "_short_v2";
780 Sym = OutContext.getOrCreateSymbol(SymName);
781 }
782
783 EmitToStreamer(*OutStreamer,
784 MCInstBuilder(AArch64::BL)
785 .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
786}
787
// Emit the outlined HWASan check routines recorded by
// LowerHWASAN_CHECK_MEMACCESS, one weak/hidden comdat function per
// configuration.
// NOTE(review): a handful of operand/argument lines in this function were
// dropped from this excerpt by extraction (marked below) — verify against
// upstream before relying on the exact instruction encodings.
void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
  if (HwasanMemaccessSymbols.empty())
    return;

  const Triple &TT = TM.getTargetTriple();
  assert(TT.isOSBinFormatELF());
  // AArch64Subtarget is huge, so heap allocate it so we don't run out of stack
  // space.
  auto STI = std::make_unique<AArch64Subtarget>(
      TT, TM.getTargetCPU(), TM.getTargetCPU(), TM.getTargetFeatureString(), TM,
      true);
  this->STI = STI.get();

  // Short-granule checks call the v2 runtime entry point; plain checks call
  // the original one.
  MCSymbol *HwasanTagMismatchV1Sym =
      OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");
  MCSymbol *HwasanTagMismatchV2Sym =
      OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");

  const MCSymbolRefExpr *HwasanTagMismatchV1Ref =
      MCSymbolRefExpr::create(HwasanTagMismatchV1Sym, OutContext);
  const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
      MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);

  for (auto &P : HwasanMemaccessSymbols) {
    unsigned Reg = std::get<0>(P.first);
    bool IsShort = std::get<1>(P.first);
    uint32_t AccessInfo = std::get<2>(P.first);
    bool IsFixedShadow = std::get<3>(P.first);
    uint64_t FixedShadowOffset = std::get<4>(P.first);
    const MCSymbolRefExpr *HwasanTagMismatchRef =
        IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
    MCSymbol *Sym = P.second;

    // Unpack the fields encoded in AccessInfo.
    bool HasMatchAllTag =
        (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
    uint8_t MatchAllTag =
        (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
    unsigned Size =
        1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
    bool CompileKernel =
        (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;

    // NOTE(review): section flags / group arguments appear truncated here.
    OutStreamer->switchSection(OutContext.getELFSection(
        ".text.hot", ELF::SHT_PROGBITS,
        /*IsComdat=*/true));

    OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
    OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
    OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
    OutStreamer->emitLabel(Sym);

    // x16 := shadow index derived from the tagged pointer.
    EmitToStreamer(MCInstBuilder(AArch64::SBFMXri)
                       .addReg(AArch64::X16)
                       .addReg(Reg)
                       .addImm(4)
                       .addImm(55));

    if (IsFixedShadow) {
      // Aarch64 makes it difficult to embed large constants in the code.
      // Fortuitously, kShadowBaseAlignment == 32, so we use the 32-bit
      // left-shift option in the MOV instruction. Combined with the 16-bit
      // immediate, this is enough to represent any offset up to 2**48.
      emitMOVZ(AArch64::X17, FixedShadowOffset >> 32, 32);
      EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
                         .addReg(AArch64::W16)
                         .addReg(AArch64::X17)
                         .addReg(AArch64::X16)
                         .addImm(0)
                         .addImm(0));
    } else {
      // Shadow base lives in x20 (short granules) or x9.
      EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
                         .addReg(AArch64::W16)
                         .addReg(IsShort ? AArch64::X20 : AArch64::X9)
                         .addReg(AArch64::X16)
                         .addImm(0)
                         .addImm(0));
    }

    // Compare the loaded shadow tag against the pointer tag.
    // NOTE(review): the shifted-operand argument of this SUBSXrs appears
    // truncated here.
    EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
                       .addReg(AArch64::XZR)
                       .addReg(AArch64::X16)
                       .addReg(Reg)
    MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
    // NOTE(review): the branch-target addExpr line appears truncated here.
    EmitToStreamer(MCInstBuilder(AArch64::Bcc)
                       .addImm(AArch64CC::NE)
                           HandleMismatchOrPartialSym, OutContext)));
    MCSymbol *ReturnSym = OutContext.createTempSymbol();
    OutStreamer->emitLabel(ReturnSym);
    // Fast path: tags match, return to the caller.
    EmitToStreamer(MCInstBuilder(AArch64::RET).addReg(AArch64::LR));
    OutStreamer->emitLabel(HandleMismatchOrPartialSym);

    if (HasMatchAllTag) {
      // A pointer whose tag equals MatchAllTag is always considered valid.
      EmitToStreamer(MCInstBuilder(AArch64::UBFMXri)
                         .addReg(AArch64::X17)
                         .addReg(Reg)
                         .addImm(56)
                         .addImm(63));
      EmitToStreamer(MCInstBuilder(AArch64::SUBSXri)
                         .addReg(AArch64::XZR)
                         .addReg(AArch64::X17)
                         .addImm(MatchAllTag)
                         .addImm(0));
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::EQ)
              .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
    }

    if (IsShort) {
      // Short-granule handling: a shadow byte in [1, 15] encodes how many
      // bytes of the granule are addressable.
      EmitToStreamer(MCInstBuilder(AArch64::SUBSWri)
                         .addReg(AArch64::WZR)
                         .addReg(AArch64::W16)
                         .addImm(15)
                         .addImm(0));
      MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::HI)
              .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));

      // x17 := offset of the last accessed byte within its granule.
      EmitToStreamer(MCInstBuilder(AArch64::ANDXri)
                         .addReg(AArch64::X17)
                         .addReg(Reg)
                         .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
      if (Size != 1)
        EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                           .addReg(AArch64::X17)
                           .addReg(AArch64::X17)
                           .addImm(Size - 1)
                           .addImm(0));
      EmitToStreamer(MCInstBuilder(AArch64::SUBSWrs)
                         .addReg(AArch64::WZR)
                         .addReg(AArch64::W16)
                         .addReg(AArch64::W17)
                         .addImm(0));
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::LS)
              .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));

      // The real tag of a short granule is stored in its last byte.
      EmitToStreamer(MCInstBuilder(AArch64::ORRXri)
                         .addReg(AArch64::X16)
                         .addReg(Reg)
                         .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
      EmitToStreamer(MCInstBuilder(AArch64::LDRBBui)
                         .addReg(AArch64::W16)
                         .addReg(AArch64::X16)
                         .addImm(0));
      // NOTE(review): the shifted-operand argument of this SUBSXrs appears
      // truncated here.
      EmitToStreamer(
          MCInstBuilder(AArch64::SUBSXrs)
              .addReg(AArch64::XZR)
              .addReg(AArch64::X16)
              .addReg(Reg)
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::EQ)
              .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));

      OutStreamer->emitLabel(HandleMismatchSym);
    }

    // Slow path: spill x0/x1 and fp/lr, marshal arguments, and call the
    // runtime mismatch handler.
    EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
                       .addReg(AArch64::SP)
                       .addReg(AArch64::X0)
                       .addReg(AArch64::X1)
                       .addReg(AArch64::SP)
                       .addImm(-32));
    EmitToStreamer(MCInstBuilder(AArch64::STPXi)
                       .addReg(AArch64::FP)
                       .addReg(AArch64::LR)
                       .addReg(AArch64::SP)
                       .addImm(29));

    if (Reg != AArch64::X0)
      emitMovXReg(AArch64::X0, Reg);
    emitMOVZ(AArch64::X1, AccessInfo & HWASanAccessInfo::RuntimeMask, 0);

    if (CompileKernel) {
      // The Linux kernel's dynamic loader doesn't support GOT relative
      // relocations, but it doesn't support late binding either, so just call
      // the function directly.
      EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef));
    } else {
      // Intentionally load the GOT entry and branch to it, rather than possibly
      // late binding the function, which may clobber the registers before we
      // have a chance to save them.
      // NOTE(review): the relocation-specifier arguments of these two
      // MCSpecifierExpr::create calls appear truncated here.
      EmitToStreamer(MCInstBuilder(AArch64::ADRP)
                         .addReg(AArch64::X16)
                         .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
                                                          OutContext)));
      EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                         .addReg(AArch64::X16)
                         .addReg(AArch64::X16)
                         .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
                                                          OutContext)));
      EmitToStreamer(MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
    }
  }
  this->STI = nullptr;
}
994
995static void emitAuthenticatedPointer(MCStreamer &OutStreamer,
996 MCSymbol *StubLabel,
997 const MCExpr *StubAuthPtrRef) {
998 // sym$auth_ptr$key$disc:
999 OutStreamer.emitLabel(StubLabel);
1000 OutStreamer.emitValue(StubAuthPtrRef, /*size=*/8);
1001}
1002
// Module epilogue: emits HWASan memaccess helper symbols, authenticated
// pointer stubs (Mach-O "__auth_ptr" section / ELF data section), the Darwin
// subsections-via-symbols flag, and — on COFF with import call optimization —
// the import-call section.
// NOTE(review): this listing comes from a doxygen extraction; several
// original lines are missing (flagged inline below). Code left byte-identical.
1003void AArch64AsmPrinter::emitEndOfAsmFile(Module &M) {
1004 emitHwasanMemaccessSymbols(M);
1005
1006 const Triple &TT = TM.getTargetTriple();
1007 if (TT.isOSBinFormatMachO()) {
1008 // Output authenticated pointers as indirect symbols, if we have any.
1009 MachineModuleInfoMachO &MMIMacho =
1010 MMI->getObjFileInfo<MachineModuleInfoMachO>();
1011
1012 auto Stubs = MMIMacho.getAuthGVStubList();
1013
1014 if (!Stubs.empty()) {
1015 // Switch to the "__auth_ptr" section.
1016 OutStreamer->switchSection(
1017 OutContext.getMachOSection("__DATA", "__auth_ptr", MachO::S_REGULAR,
// NOTE(review): original line 1018 (the remaining getMachOSection arguments)
// is missing from this extraction.
1019 emitAlignment(Align(8));
1020
1021 for (const auto &Stub : Stubs)
1022 emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
1023
1024 OutStreamer->addBlankLine();
1025 }
1026
1027 // Funny Darwin hack: This flag tells the linker that no global symbols
1028 // contain code that falls through to other global symbols (e.g. the obvious
1029 // implementation of multiple entry points). If this doesn't occur, the
1030 // linker can safely perform dead code stripping. Since LLVM never
1031 // generates code that does this, it is always safe to set.
1032 OutStreamer->emitSubsectionsViaSymbols();
1033 }
1034
1035 if (TT.isOSBinFormatELF()) {
1036 // Output authenticated pointers as indirect symbols, if we have any.
1037 MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
1038
1039 auto Stubs = MMIELF.getAuthGVStubList();
1040
1041 if (!Stubs.empty()) {
1042 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
1043 OutStreamer->switchSection(TLOF.getDataSection());
1044 emitAlignment(Align(8));
1045
1046 for (const auto &Stub : Stubs)
1047 emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
1048
1049 OutStreamer->addBlankLine();
1050 }
1051
1052 // With signed ELF GOT enabled, the linker looks at the symbol type to
1053 // choose between keys IA (for STT_FUNC) and DA (for other types). Symbols
1054 // for functions not defined in the module have STT_NOTYPE type by default.
1055 // This makes linker to emit signing schema with DA key (instead of IA) for
1056 // corresponding R_AARCH64_AUTH_GLOB_DAT dynamic reloc. To avoid that, force
1057 // all function symbols used in the module to have STT_FUNC type. See
1058 // https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#default-signing-schema
1059 const auto *PtrAuthELFGOTFlag = mdconst::extract_or_null<ConstantInt>(
1060 M.getModuleFlag("ptrauth-elf-got"))
1061 if (PtrAuthELFGOTFlag && PtrAuthELFGOTFlag->getZExtValue() == 1)
1062 for (const GlobalValue &GV : M.global_values())
1063 if (!GV.use_empty() && isa<Function>(GV) &&
1064 !GV.getName().starts_with("llvm."))
1065 OutStreamer->emitSymbolAttribute(getSymbol(&GV),
// NOTE(review): original line 1066 (the symbol-attribute argument, presumably
// the ELF function-type attribute) is missing from this extraction — confirm
// against upstream.
1067 }
1068
1069 // Emit stack and fault map information.
// NOTE(review): original line 1070 (the call that emits the stack/fault maps)
// is missing from this extraction.
1071
1072 // If import call optimization is enabled, emit the appropriate section.
1073 // We do this whether or not we recorded any import calls.
1074 if (EnableImportCallOptimization && TT.isOSBinFormatCOFF()) {
1075 OutStreamer->switchSection(getObjFileLowering().getImportCallSection());
1076
1077 // Section always starts with some magic.
1078 constexpr char ImpCallMagic[12] = "Imp_Call_V1";
1079 OutStreamer->emitBytes(StringRef{ImpCallMagic, sizeof(ImpCallMagic)});
1080
1081 // Layout of this section is:
1082 // Per section that contains calls to imported functions:
1083 // uint32_t SectionSize: Size in bytes for information in this section.
1084 // uint32_t Section Number
1085 // Per call to imported function in section:
1086 // uint32_t Kind: the kind of imported function.
1087 // uint32_t BranchOffset: the offset of the branch instruction in its
1088 // parent section.
1089 // uint32_t TargetSymbolId: the symbol id of the called function.
1090 for (auto &[Section, CallsToImportedFuncs] :
1091 SectionToImportedFunctionCalls) {
1092 unsigned SectionSize =
1093 sizeof(uint32_t) * (2 + 3 * CallsToImportedFuncs.size());
1094 OutStreamer->emitInt32(SectionSize);
1095 OutStreamer->emitCOFFSecNumber(Section->getBeginSymbol());
1096 for (auto &[CallsiteSymbol, CalledSymbol] : CallsToImportedFuncs) {
1097 // Kind is always IMAGE_REL_ARM64_DYNAMIC_IMPORT_CALL (0x13).
1098 OutStreamer->emitInt32(0x13);
1099 OutStreamer->emitCOFFSecOffset(CallsiteSymbol);
1100 OutStreamer->emitCOFFSymbolIndex(CalledSymbol);
1101 }
1102 }
1103 }
1104}
1105
// Emit all Linker Optimization Hint (LOH) directives recorded for the current
// function, translating each recorded MachineInstr to the MCSymbol label that
// was emitted for it (LOHInstToLabel).
// NOTE(review): original line 1107 — the declaration of the local MCArgs
// vector used below — is missing from this extraction.
1106void AArch64AsmPrinter::emitLOHs() {
1108
1109 for (const auto &D : AArch64FI->getLOHContainer()) {
1110 for (const MachineInstr *MI : D.getArgs()) {
1111 MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
1112 assert(LabelIt != LOHInstToLabel.end() &&
1113 "Label hasn't been inserted for LOH related instruction");
1114 MCArgs.push_back(LabelIt->second);
1115 }
1116 OutStreamer->emitLOHDirective(D.getKind(), MCArgs);
1117 MCArgs.clear();
1118 }
1119}
1120
1121void AArch64AsmPrinter::emitFunctionBodyEnd() {
1122 if (!AArch64FI->getLOHRelated().empty())
1123 emitLOHs();
1124}
1125
1126/// GetCPISymbol - Return the symbol for the specified constant pool entry.
1127MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
1128 // Darwin uses a linker-private symbol name for constant-pools (to
1129 // avoid addends on the relocation?), ELF has no such concept and
1130 // uses a normal private symbol.
1131 if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
1132 return OutContext.getOrCreateSymbol(
1133 Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
1134 Twine(getFunctionNumber()) + "_" + Twine(CPID));
1135
1136 return AsmPrinter::GetCPISymbol(CPID);
1137}
1138
// Print a single machine operand for inline-asm / comment output, dispatching
// on the operand type.
// NOTE(review): the extraction dropped the switch's case labels (original
// lines 1145, 1147, 1149, 1152, 1156, 1160) — the branches below correspond to
// register, immediate, symbol, and block-address operands; confirm against
// upstream before relying on this listing.
1139void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
1140 raw_ostream &O) {
1141 const MachineOperand &MO = MI->getOperand(OpNum);
1142 switch (MO.getType()) {
1143 default:
1144 llvm_unreachable("<unknown operand type>");
1146 Register Reg = MO.getReg();
1148 assert(!MO.getSubReg() && "Subregs should be eliminated!");
1150 break;
1151 }
1153 O << MO.getImm();
1154 break;
1155 }
1157 PrintSymbolOperand(MO, O);
1158 break;
1159 }
1161 MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
1162 Sym->print(O, MAI);
1163 break;
1164 }
1165 }
1166}
1167
// Print a GPR operand for an inline-asm modifier: 'w' (32-bit), 'x' (64-bit),
// or 't' (x8 tuple). Returns true on an unknown mode (the AsmPrinter
// convention for "error").
// NOTE(review): the extraction dropped original lines 1175, 1178, 1181 (the
// per-mode register conversion statements) and 1185 (the statement that prints
// the register) — confirm against upstream.
1168bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
1169 raw_ostream &O) {
1170 Register Reg = MO.getReg();
1171 switch (Mode) {
1172 default:
1173 return true; // Unknown mode.
1174 case 'w':
1176 break;
1177 case 'x':
1179 break;
1180 case 't':
1182 break;
1183 }
1184
1186 return false;
1187}
1188
1189// Prints the register in MO using class RC using the offset in the
1190// new register class. This should not be used for cross class
1191// printing.
1192bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
1193 const TargetRegisterClass *RC,
1194 unsigned AltName, raw_ostream &O) {
1195 assert(MO.isReg() && "Should only get here with a register!");
1196 const TargetRegisterInfo *RI = STI->getRegisterInfo();
1197 Register Reg = MO.getReg();
1198 MCRegister RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
1199 if (!RI->regsOverlap(RegToPrint, Reg))
1200 return true;
1201 O << AArch64InstPrinter::getRegisterName(RegToPrint, AltName);
1202 return false;
1203}
1204
// Lower an inline-asm operand, honoring the AArch64 single-letter modifiers:
// 'w'/'x' (GPR width), 'b'/'h'/'s'/'d'/'q'/'z' (FP/SVE register views).
// Without a modifier, registers default to the x / v / z / p views per the
// Arm inline-asm conventions. Returns true on an unknown modifier.
1205bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
1206 const char *ExtraCode, raw_ostream &O) {
1207 const MachineOperand &MO = MI->getOperand(OpNum);
1208
1209 // First try the generic code, which knows about modifiers like 'c' and 'n'.
1210 if (!AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O))
1211 return false;
1212
1213 // Does this asm operand have a single letter operand modifier?
1214 if (ExtraCode && ExtraCode[0]) {
1215 if (ExtraCode[1] != 0)
1216 return true; // Unknown modifier.
1217
1218 switch (ExtraCode[0]) {
1219 default:
1220 return true; // Unknown modifier.
1221 case 'w': // Print W register
1222 case 'x': // Print X register
1223 if (MO.isReg())
1224 return printAsmMRegister(MO, ExtraCode[0], O);
1225 if (MO.isImm() && MO.getImm() == 0) {
1226 unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
// NOTE(review): original line 1227 — which presumably prints the zero
// register selected above — is missing from this extraction.
1228 return false;
1229 }
1230 printOperand(MI, OpNum, O);
1231 return false;
1232 case 'b': // Print B register.
1233 case 'h': // Print H register.
1234 case 's': // Print S register.
1235 case 'd': // Print D register.
1236 case 'q': // Print Q register.
1237 case 'z': // Print Z register.
1238 if (MO.isReg()) {
1239 const TargetRegisterClass *RC;
1240 switch (ExtraCode[0]) {
1241 case 'b':
1242 RC = &AArch64::FPR8RegClass;
1243 break;
1244 case 'h':
1245 RC = &AArch64::FPR16RegClass;
1246 break;
1247 case 's':
1248 RC = &AArch64::FPR32RegClass;
1249 break;
1250 case 'd':
1251 RC = &AArch64::FPR64RegClass;
1252 break;
1253 case 'q':
1254 RC = &AArch64::FPR128RegClass;
1255 break;
1256 case 'z':
1257 RC = &AArch64::ZPRRegClass;
1258 break;
1259 default:
1260 return true;
1261 }
1262 return printAsmRegInClass(MO, RC, AArch64::NoRegAltName, O);
1263 }
1264 printOperand(MI, OpNum, O);
1265 return false;
1266 }
1267 }
1268
1269 // According to ARM, we should emit x and v registers unless we have a
1270 // modifier.
1271 if (MO.isReg()) {
1272 Register Reg = MO.getReg();
1273
1274 // If this is a w or x register, print an x register.
1275 if (AArch64::GPR32allRegClass.contains(Reg) ||
1276 AArch64::GPR64allRegClass.contains(Reg))
1277 return printAsmMRegister(MO, 'x', O);
1278
1279 // If this is an x register tuple, print an x register.
1280 if (AArch64::GPR64x8ClassRegClass.contains(Reg))
1281 return printAsmMRegister(MO, 't', O);
1282
1283 unsigned AltName = AArch64::NoRegAltName;
1284 const TargetRegisterClass *RegClass;
1285 if (AArch64::ZPRRegClass.contains(Reg)) {
1286 RegClass = &AArch64::ZPRRegClass;
1287 } else if (AArch64::PPRRegClass.contains(Reg)) {
1288 RegClass = &AArch64::PPRRegClass;
1289 } else if (AArch64::PNRRegClass.contains(Reg)) {
1290 RegClass = &AArch64::PNRRegClass;
1291 } else {
1292 RegClass = &AArch64::FPR128RegClass;
1293 AltName = AArch64::vreg;
1294 }
1295
1296 // If this is a b, h, s, d, or q register, print it as a v register.
1297 return printAsmRegInClass(MO, RegClass, AltName, O);
1298 }
1299
1300 printOperand(MI, OpNum, O);
1301 return false;
1302}
1303
1304bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
1305 unsigned OpNum,
1306 const char *ExtraCode,
1307 raw_ostream &O) {
1308 if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
1309 return true; // Unknown modifier.
1310
1311 const MachineOperand &MO = MI->getOperand(OpNum);
1312 assert(MO.isReg() && "unexpected inline asm memory operand");
1313 O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
1314 return false;
1315}
1316
1317void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
1318 raw_ostream &OS) {
1319 unsigned NOps = MI->getNumOperands();
1320 assert(NOps == 4);
1321 OS << '\t' << MAI.getCommentString() << "DEBUG_VALUE: ";
1322 // cast away const; DIetc do not take const operands for some reason.
1323 OS << MI->getDebugVariable()->getName();
1324 OS << " <- ";
1325 // Frame address. Currently handles register +- offset only.
1326 assert(MI->isIndirectDebugValue());
1327 OS << '[';
1328 for (unsigned I = 0, E = llvm::size(MI->debug_operands()); I < E; ++I) {
1329 if (I != 0)
1330 OS << ", ";
1331 printOperand(MI, I, OS);
1332 }
1333 OS << ']';
1334 OS << "+";
1335 printOperand(MI, NOps - 2, OS);
1336}
1337
// Emit the requested jump tables into a read-only section. Entries are
// PC-relative offsets from a base label; byte/halfword tables store the
// offset divided by 4 (instructions are 4-byte aligned), word tables store
// the raw byte offset.
// NOTE(review): the extraction dropped original line 1345 (the declaration of
// the local jump-table list 'JT' used below) and line 1380 (the shift applied
// to 'Value' when Size != 4) — confirm against upstream.
1338void AArch64AsmPrinter::emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
1339 ArrayRef<unsigned> JumpTableIndices) {
1340 // Fast return if there is nothing to emit to avoid creating empty sections.
1341 if (JumpTableIndices.empty())
1342 return;
1343 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
1344 const auto &F = MF->getFunction();
1346
1347 MCSection *ReadOnlySec = nullptr;
1348 if (TM.Options.EnableStaticDataPartitioning) {
1349 ReadOnlySec =
1350 TLOF.getSectionForJumpTable(F, TM, &JT[JumpTableIndices.front()]);
1351 } else {
1352 ReadOnlySec = TLOF.getSectionForJumpTable(F, TM);
1353 }
1354 OutStreamer->switchSection(ReadOnlySec);
1355
1356 auto AFI = MF->getInfo<AArch64FunctionInfo>();
1357 for (unsigned JTI : JumpTableIndices) {
1358 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
1359
1360 // If this jump table was deleted, ignore it.
1361 if (JTBBs.empty()) continue;
1362
1363 unsigned Size = AFI->getJumpTableEntrySize(JTI);
1364 emitAlignment(Align(Size));
1365 OutStreamer->emitLabel(GetJTISymbol(JTI));
1366
1367 const MCSymbol *BaseSym = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1368 const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);
1369
1370 for (auto *JTBB : JTBBs) {
1371 const MCExpr *Value =
1372 MCSymbolRefExpr::create(JTBB->getSymbol(), OutContext);
1373
1374 // Each entry is:
1375 // .byte/.hword (LBB - Lbase)>>2
1376 // or plain:
1377 // .word LBB - Lbase
1378 Value = MCBinaryExpr::createSub(Value, Base, OutContext);
1379 if (Size != 4)
1381 Value, MCConstantExpr::create(2, OutContext), OutContext);
1382
1383 OutStreamer->emitValue(Value, Size);
1384 }
1385 }
1386}
1387
// Describe a jump table for CodeView debug info: base symbol, base offset (0;
// entries are relative to the PC-rel base label), branch label, and the
// CodeView entry-size encoding matching the table's 1/2/4-byte entries.
// NOTE(review): the extraction dropped original line 1389 (the final element
// of the std::tuple return type) and line 1395 (the declaration of the local
// 'EntrySize') — confirm against upstream.
1388std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
1390AArch64AsmPrinter::getCodeViewJumpTableInfo(int JTI,
1391 const MachineInstr *BranchInstr,
1392 const MCSymbol *BranchLabel) const {
1393 const auto AFI = MF->getInfo<AArch64FunctionInfo>();
1394 const auto Base = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1396 switch (AFI->getJumpTableEntrySize(JTI)) {
1397 case 1:
1398 EntrySize = codeview::JumpTableEntrySize::UInt8ShiftLeft;
1399 break;
1400 case 2:
1401 EntrySize = codeview::JumpTableEntrySize::UInt16ShiftLeft;
1402 break;
1403 case 4:
1404 EntrySize = codeview::JumpTableEntrySize::Int32;
1405 break;
1406 default:
1407 llvm_unreachable("Unexpected jump table entry size");
1408 }
1409 return std::make_tuple(Base, 0, BranchLabel, EntrySize);
1410}
1411
// Emit the function entry label, plus target-specific extras: a
// .variant_pcs directive on ELF for vector/SVE calling conventions, and
// mangled/unmangled alias symbols for ARM64EC function definitions.
// NOTE(review): original line 1424 (between the variant-PCS block and the
// ARM64EC block — presumably the call to the base-class entry-label emission)
// is missing from this extraction.
1412void AArch64AsmPrinter::emitFunctionEntryLabel() {
1413 const Triple &TT = TM.getTargetTriple();
1414 if (TT.isOSBinFormatELF() &&
1415 (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
1416 MF->getFunction().getCallingConv() ==
1417 CallingConv::AArch64_SVE_VectorCall ||
1418 MF->getInfo<AArch64FunctionInfo>()->isSVECC())) {
1419 auto *TS =
1420 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
1421 TS->emitDirectiveVariantPCS(CurrentFnSym);
1422 }
1423
1425
1426 if (TT.isWindowsArm64EC() && !MF->getFunction().hasLocalLinkage()) {
1427 // For ARM64EC targets, a function definition's name is mangled differently
1428 // from the normal symbol, emit required aliases here.
1429 auto emitFunctionAlias = [&](MCSymbol *Src, MCSymbol *Dst) {
1430 OutStreamer->emitSymbolAttribute(Src, MCSA_WeakAntiDep);
1431 OutStreamer->emitAssignment(
1432 Src, MCSymbolRefExpr::create(Dst, MMI->getContext()));
1433 };
1434
1435 auto getSymbolFromMetadata = [&](StringRef Name) {
1436 MCSymbol *Sym = nullptr;
1437 if (MDNode *Node = MF->getFunction().getMetadata(Name)) {
1438 StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
1439 Sym = MMI->getContext().getOrCreateSymbol(NameStr);
1440 }
1441 return Sym;
1442 };
1443
1444 SmallVector<MDNode *> UnmangledNames;
1445 MF->getFunction().getMetadata("arm64ec_unmangled_name", UnmangledNames);
1446 for (MDNode *Node : UnmangledNames) {
1447 StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
1448 MCSymbol *UnmangledSym = MMI->getContext().getOrCreateSymbol(NameStr);
1449 if (std::optional<std::string> MangledName =
1450 getArm64ECMangledFunctionName(UnmangledSym->getName())) {
1451 MCSymbol *ECMangledSym =
1452 MMI->getContext().getOrCreateSymbol(*MangledName);
1453 emitFunctionAlias(UnmangledSym, ECMangledSym);
1454 }
1455 }
1456 if (MCSymbol *ECMangledSym =
1457 getSymbolFromMetadata("arm64ec_ecmangled_name"))
1458 emitFunctionAlias(ECMangledSym, CurrentFnSym);
1459 }
1460}
1461
// Emit a ctors/dtors list entry. Signed (ptrauth) entries are only allowed
// with no address discriminator or the special '1' discriminator; anything
// else is rejected.
// NOTE(review): the extraction dropped original lines 1467-1468 (the special
// discriminator value and the error-reporting call taking the message below)
// and line 1476 (the delegation to the base-class emitXXStructor) — confirm
// against upstream.
1462void AArch64AsmPrinter::emitXXStructor(const DataLayout &DL,
1463 const Constant *CV) {
1464 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(CV))
1465 if (CPA->hasAddressDiscriminator() &&
1466 !CPA->hasSpecialAddressDiscriminator(
1469 "unexpected address discrimination value for ctors/dtors entry, only "
1470 "'ptr inttoptr (i64 1 to ptr)' is allowed");
1471 // If we have signed pointers in xxstructors list, they'll be lowered to @AUTH
1472 // MCExpr's via AArch64AsmPrinter::lowerConstantPtrAuth. It does not look at
1473 // actual address discrimination value and only checks
1474 // hasAddressDiscriminator(), so it's OK to leave special address
1475 // discrimination value here.
1477}
1478
// Emit a global alias. For ARM64EC patchable functions (marked with
// "arm64ec_exp_name" metadata) the alias is emitted as a weak COFF symbol
// assigned to the undefined "EXP+"-prefixed symbol, which the linker resolves
// via an x86 thunk.
// NOTE(review): the extraction dropped original lines 1494 and 1500 (the
// shifted complement-type argument completing each emitCOFFSymbolType call)
// and line 1508 (the fall-through delegation to the base-class
// emitGlobalAlias) — confirm against upstream.
1479void AArch64AsmPrinter::emitGlobalAlias(const Module &M,
1480 const GlobalAlias &GA) {
1481 if (auto F = dyn_cast_or_null<Function>(GA.getAliasee())) {
1482 // Global aliases must point to a definition, but unmangled patchable
1483 // symbols are special and need to point to an undefined symbol with "EXP+"
1484 // prefix. Such undefined symbol is resolved by the linker by creating
1485 // x86 thunk that jumps back to the actual EC target.
1486 if (MDNode *Node = F->getMetadata("arm64ec_exp_name")) {
1487 StringRef ExpStr = cast<MDString>(Node->getOperand(0))->getString();
1488 MCSymbol *ExpSym = MMI->getContext().getOrCreateSymbol(ExpStr);
1489 MCSymbol *Sym = MMI->getContext().getOrCreateSymbol(GA.getName());
1490
1491 OutStreamer->beginCOFFSymbolDef(ExpSym);
1492 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1493 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1495 OutStreamer->endCOFFSymbolDef();
1496
1497 OutStreamer->beginCOFFSymbolDef(Sym);
1498 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1499 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1501 OutStreamer->endCOFFSymbolDef();
1502 OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
1503 OutStreamer->emitAssignment(
1504 Sym, MCSymbolRefExpr::create(ExpSym, MMI->getContext()));
1505 return;
1506 }
1507 }
1509}
1510
1511/// Small jump tables contain an unsigned byte or half, representing the offset
1512/// from the lowest-addressed possible destination to the desired basic
1513/// block. Since all instructions are 4-byte aligned, this is further compressed
1514/// by counting in instructions rather than bytes (i.e. divided by 4). So, to
1515/// materialize the correct destination we need:
1516///
1517/// adr xDest, .LBB0_0
1518/// ldrb wScratch, [xTable, xEntry] (with "lsl #1" for ldrh).
1519/// add xDest, xDest, xScratch (with "lsl #2" for smaller entries)
// NOTE(review): original line 1539 — the statement inside `if (!Label)` that
// creates the temp symbol assigned to Label — is missing from this extraction.
1520void AArch64AsmPrinter::LowerJumpTableDest(llvm::MCStreamer &OutStreamer,
1521 const llvm::MachineInstr &MI) {
1522 Register DestReg = MI.getOperand(0).getReg();
1523 Register ScratchReg = MI.getOperand(1).getReg();
1524 Register ScratchRegW =
1525 STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
1526 Register TableReg = MI.getOperand(2).getReg();
1527 Register EntryReg = MI.getOperand(3).getReg();
1528 int JTIdx = MI.getOperand(4).getIndex();
1529 int Size = AArch64FI->getJumpTableEntrySize(JTIdx);
1530
1531 // This has to be first because the compression pass based its reachability
1532 // calculations on the start of the JumpTableDest instruction.
1533 auto Label =
1534 MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);
1535
1536 // If we don't already have a symbol to use as the base, use the ADR
1537 // instruction itself.
1538 if (!Label) {
1540 AArch64FI->setJumpTableEntryInfo(JTIdx, Size, Label);
1541 OutStreamer.emitLabel(Label);
1542 }
1543
1544 auto LabelExpr = MCSymbolRefExpr::create(Label, MF->getContext());
1545 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
1546 .addReg(DestReg)
1547 .addExpr(LabelExpr));
1548
1549 // Load the number of instruction-steps to offset from the label.
1550 unsigned LdrOpcode;
1551 switch (Size) {
1552 case 1: LdrOpcode = AArch64::LDRBBroX; break;
1553 case 2: LdrOpcode = AArch64::LDRHHroX; break;
1554 case 4: LdrOpcode = AArch64::LDRSWroX; break;
1555 default:
1556 llvm_unreachable("Unknown jump table size");
1557 }
1558
1559 EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
1560 .addReg(Size == 4 ? ScratchReg : ScratchRegW)
1561 .addReg(TableReg)
1562 .addReg(EntryReg)
1563 .addImm(0)
1564 .addImm(Size == 1 ? 0 : 1));
1565
1566 // Add to the already materialized base label address, multiplying by 4 if
1567 // compressed.
1568 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1569 .addReg(DestReg)
1570 .addReg(DestReg)
1571 .addReg(ScratchReg)
1572 .addImm(Size == 4 ? 0 : 2));
1573}
1574
// Lower a hardened jump-table dispatch pseudo: bounds-check the index in x16
// against the table size (clamping out-of-range indexes to entry #0 via
// csel), load the 32-bit PC-relative entry, compute the destination from the
// anchor label, and branch. x16/x17 are used as the fixed scratch registers
// throughout; the exact instruction order below is load-bearing.
1575void AArch64AsmPrinter::LowerHardenedBRJumpTable(const MachineInstr &MI) {
1576 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
1577 assert(MJTI && "Can't lower jump-table dispatch without JTI");
1578
1579 const std::vector<MachineJumpTableEntry> &JTs = MJTI->getJumpTables();
1580 assert(!JTs.empty() && "Invalid JT index for jump-table dispatch");
1581
1582 // Emit:
1583 // mov x17, #<size of table> ; depending on table size, with MOVKs
1584 // cmp x16, x17 ; or #imm if table size fits in 12-bit
1585 // csel x16, x16, xzr, ls ; check for index overflow
1586 //
1587 // adrp x17, Ltable@PAGE ; materialize table address
1588 // add x17, Ltable@PAGEOFF
1589 // ldrsw x16, [x17, x16, lsl #2] ; load table entry
1590 //
1591 // Lanchor:
1592 // adr x17, Lanchor ; compute target address
1593 // add x16, x17, x16
1594 // br x16 ; branch to target
1595
1596 MachineOperand JTOp = MI.getOperand(0);
1597
1598 unsigned JTI = JTOp.getIndex();
1599 assert(!AArch64FI->getJumpTableEntryPCRelSymbol(JTI) &&
1600 "unsupported compressed jump table");
1601
1602 const uint64_t NumTableEntries = JTs[JTI].MBBs.size();
1603
1604 // cmp only supports a 12-bit immediate. If we need more, materialize the
1605 // immediate, using x17 as a scratch register.
1606 uint64_t MaxTableEntry = NumTableEntries - 1;
1607 if (isUInt<12>(MaxTableEntry)) {
1608 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXri)
1609 .addReg(AArch64::XZR)
1610 .addReg(AArch64::X16)
1611 .addImm(MaxTableEntry)
1612 .addImm(0));
1613 } else {
1614 emitMOVZ(AArch64::X17, static_cast<uint16_t>(MaxTableEntry), 0);
1615 // It's sad that we have to manually materialize instructions, but we can't
1616 // trivially reuse the main pseudo expansion logic.
1617 // A MOVK sequence is easy enough to generate and handles the general case.
1618 for (int Offset = 16; Offset < 64; Offset += 16) {
1619 if ((MaxTableEntry >> Offset) == 0)
1620 break;
1621 emitMOVK(AArch64::X17, static_cast<uint16_t>(MaxTableEntry >> Offset),
1622 Offset);
1623 }
1624 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXrs)
1625 .addReg(AArch64::XZR)
1626 .addReg(AArch64::X16)
1627 .addReg(AArch64::X17)
1628 .addImm(0));
1629 }
1630
1631 // This picks entry #0 on failure.
1632 // We might want to trap instead.
1633 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::CSELXr)
1634 .addReg(AArch64::X16)
1635 .addReg(AArch64::X16)
1636 .addReg(AArch64::XZR)
1637 .addImm(AArch64CC::LS));
1638
1639 // Prepare the @PAGE/@PAGEOFF low/high operands.
1640 MachineOperand JTMOHi(JTOp), JTMOLo(JTOp);
1641 MCOperand JTMCHi, JTMCLo;
1642
1643 JTMOHi.setTargetFlags(AArch64II::MO_PAGE);
1644 JTMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
1645
1646 MCInstLowering.lowerOperand(JTMOHi, JTMCHi);
1647 MCInstLowering.lowerOperand(JTMOLo, JTMCLo);
1648
1649 EmitToStreamer(
1650 *OutStreamer,
1651 MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(JTMCHi));
1652
1653 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXri)
1654 .addReg(AArch64::X17)
1655 .addReg(AArch64::X17)
1656 .addOperand(JTMCLo)
1657 .addImm(0));
1658
1659 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRSWroX)
1660 .addReg(AArch64::X16)
1661 .addReg(AArch64::X17)
1662 .addReg(AArch64::X16)
1663 .addImm(0)
1664 .addImm(1));
1665
1666 MCSymbol *AdrLabel = MF->getContext().createTempSymbol();
1667 const auto *AdrLabelE = MCSymbolRefExpr::create(AdrLabel, MF->getContext());
1668 AArch64FI->setJumpTableEntryInfo(JTI, 4, AdrLabel);
1669
1670 OutStreamer->emitLabel(AdrLabel);
1671 EmitToStreamer(
1672 *OutStreamer,
1673 MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addExpr(AdrLabelE));
1674
1675 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1676 .addReg(AArch64::X16)
1677 .addReg(AArch64::X17)
1678 .addReg(AArch64::X16)
1679 .addImm(0));
1680
1681 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
1682}
1683
1684void AArch64AsmPrinter::LowerMOPS(llvm::MCStreamer &OutStreamer,
1685 const llvm::MachineInstr &MI) {
1686 unsigned Opcode = MI.getOpcode();
1687 assert(STI->hasMOPS());
1688 assert(STI->hasMTE() || Opcode != AArch64::MOPSMemorySetTaggingPseudo);
1689
1690 const auto Ops = [Opcode]() -> std::array<unsigned, 3> {
1691 if (Opcode == AArch64::MOPSMemoryCopyPseudo)
1692 return {AArch64::CPYFP, AArch64::CPYFM, AArch64::CPYFE};
1693 if (Opcode == AArch64::MOPSMemoryMovePseudo)
1694 return {AArch64::CPYP, AArch64::CPYM, AArch64::CPYE};
1695 if (Opcode == AArch64::MOPSMemorySetPseudo)
1696 return {AArch64::SETP, AArch64::SETM, AArch64::SETE};
1697 if (Opcode == AArch64::MOPSMemorySetTaggingPseudo)
1698 return {AArch64::SETGP, AArch64::SETGM, AArch64::MOPSSETGE};
1699 llvm_unreachable("Unhandled memory operation pseudo");
1700 }();
1701 const bool IsSet = Opcode == AArch64::MOPSMemorySetPseudo ||
1702 Opcode == AArch64::MOPSMemorySetTaggingPseudo;
1703
1704 for (auto Op : Ops) {
1705 int i = 0;
1706 auto MCIB = MCInstBuilder(Op);
1707 // Destination registers
1708 MCIB.addReg(MI.getOperand(i++).getReg());
1709 MCIB.addReg(MI.getOperand(i++).getReg());
1710 if (!IsSet)
1711 MCIB.addReg(MI.getOperand(i++).getReg());
1712 // Input registers
1713 MCIB.addReg(MI.getOperand(i++).getReg());
1714 MCIB.addReg(MI.getOperand(i++).getReg());
1715 MCIB.addReg(MI.getOperand(i++).getReg());
1716
1717 EmitToStreamer(OutStreamer, MCIB);
1718 }
1719}
1720
// Lower a STACKMAP: record the map at a fresh temp label, then emit the
// requested NOP shadow, trimmed by scanning ahead for instructions that can
// serve as shadow bytes themselves.
// NOTE(review): original line 1734 — the declaration/initialization of the
// basic-block iterator 'MII' used below — is missing from this extraction.
1721void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
1722 const MachineInstr &MI) {
1723 unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();
1724
1725 auto &Ctx = OutStreamer.getContext();
1726 MCSymbol *MILabel = Ctx.createTempSymbol();
1727 OutStreamer.emitLabel(MILabel);
1728
1729 SM.recordStackMap(*MILabel, MI);
1730 assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1731
1732 // Scan ahead to trim the shadow.
1733 const MachineBasicBlock &MBB = *MI.getParent();
1735 ++MII;
1736 while (NumNOPBytes > 0) {
1737 if (MII == MBB.end() || MII->isCall() ||
1738 MII->getOpcode() == AArch64::DBG_VALUE ||
1739 MII->getOpcode() == TargetOpcode::PATCHPOINT ||
1740 MII->getOpcode() == TargetOpcode::STACKMAP)
1741 break;
1742 ++MII;
1743 NumNOPBytes -= 4;
1744 }
1745
1746 // Emit nops.
1747 for (unsigned i = 0; i < NumNOPBytes; i += 4)
1748 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1749}
1750
1751// Lower a patchpoint of the form:
1752// [<def>], <id>, <numBytes>, <target>, <numArgs>
1753void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1754 const MachineInstr &MI) {
1755 auto &Ctx = OutStreamer.getContext();
1756 MCSymbol *MILabel = Ctx.createTempSymbol();
1757 OutStreamer.emitLabel(MILabel);
1758 SM.recordPatchPoint(*MILabel, MI);
1759
1760 PatchPointOpers Opers(&MI);
1761
1762 int64_t CallTarget = Opers.getCallTarget().getImm();
1763 unsigned EncodedBytes = 0;
1764 if (CallTarget) {
1765 assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
1766 "High 16 bits of call target should be zero.");
1767 Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
1768 EncodedBytes = 16;
1769 // Materialize the jump address:
1770 emitMOVZ(ScratchReg, (CallTarget >> 32) & 0xFFFF, 32);
1771 emitMOVK(ScratchReg, (CallTarget >> 16) & 0xFFFF, 16);
1772 emitMOVK(ScratchReg, CallTarget & 0xFFFF, 0);
1773 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
1774 }
1775 // Emit padding.
1776 unsigned NumBytes = Opers.getNumPatchBytes();
1777 assert(NumBytes >= EncodedBytes &&
1778 "Patchpoint can't request size less than the length of a call.");
1779 assert((NumBytes - EncodedBytes) % 4 == 0 &&
1780 "Invalid number of NOP bytes requested!");
1781 for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
1782 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1783}
1784
// Lower a STATEPOINT: either emit the requested NOP patch area, or lower the
// call target (BL for symbol/immediate targets, BLR for register targets),
// then record the statepoint at a temp label placed after the call.
// NOTE(review): the extraction dropped the case labels of the operand-type
// switch (original lines 1798-1799, 1803, 1807) — the branches below handle
// lowered-operand, immediate, and register call targets respectively; confirm
// against upstream.
1785void AArch64AsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1786 const MachineInstr &MI) {
1787 StatepointOpers SOpers(&MI);
1788 if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
1789 assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1790 for (unsigned i = 0; i < PatchBytes; i += 4)
1791 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1792 } else {
1793 // Lower call target and choose correct opcode
1794 const MachineOperand &CallTarget = SOpers.getCallTarget();
1795 MCOperand CallTargetMCOp;
1796 unsigned CallOpcode;
1797 switch (CallTarget.getType()) {
1800 MCInstLowering.lowerOperand(CallTarget, CallTargetMCOp);
1801 CallOpcode = AArch64::BL;
1802 break;
1804 CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
1805 CallOpcode = AArch64::BL;
1806 break;
1808 CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
1809 CallOpcode = AArch64::BLR;
1810 break;
1811 default:
1812 llvm_unreachable("Unsupported operand type in statepoint call target");
1813 break;
1814 }
1815
1816 EmitToStreamer(OutStreamer,
1817 MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
1818 }
1819
1820 auto &Ctx = OutStreamer.getContext();
1821 MCSymbol *MILabel = Ctx.createTempSymbol();
1822 OutStreamer.emitLabel(MILabel);
1823 SM.recordStatepoint(*MILabel, MI);
1824}
1825
// Lower a FAULTING_OP pseudo: record the (faulting label, handler label) pair
// in the fault map, then emit the real instruction with its operands lowered.
// NOTE(review): original line 1831 — the declaration of 'FK' assigned from the
// static_cast below (presumably FaultMaps::FaultKind FK =) — is missing from
// this extraction.
1826void AArch64AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI) {
1827 // FAULTING_LOAD_OP <def>, <faltinf type>, <MBB handler>,
1828 // <opcode>, <operands>
1829
1830 Register DefRegister = FaultingMI.getOperand(0).getReg();
1832 static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
1833 MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
1834 unsigned Opcode = FaultingMI.getOperand(3).getImm();
1835 unsigned OperandsBeginIdx = 4;
1836
1837 auto &Ctx = OutStreamer->getContext();
1838 MCSymbol *FaultingLabel = Ctx.createTempSymbol();
1839 OutStreamer->emitLabel(FaultingLabel);
1840
1841 assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
1842 FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);
1843
1844 MCInst MI;
1845 MI.setOpcode(Opcode);
1846
1847 if (DefRegister != (Register)0)
1848 MI.addOperand(MCOperand::createReg(DefRegister));
1849
1850 for (const MachineOperand &MO :
1851 llvm::drop_begin(FaultingMI.operands(), OperandsBeginIdx)) {
1852 MCOperand Dest;
1853 lowerOperand(MO, Dest);
1854 MI.addOperand(Dest);
1855 }
1856
1857 OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
1858 EmitToStreamer(MI);
1859}
1860
1861void AArch64AsmPrinter::emitMovXReg(Register Dest, Register Src) {
1862 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ORRXrs)
1863 .addReg(Dest)
1864 .addReg(AArch64::XZR)
1865 .addReg(Src)
1866 .addImm(0));
1867}
1868
1869void AArch64AsmPrinter::emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift) {
1870 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1871 EmitToStreamer(*OutStreamer,
1872 MCInstBuilder(Is64Bit ? AArch64::MOVZXi : AArch64::MOVZWi)
1873 .addReg(Dest)
1874 .addImm(Imm)
1875 .addImm(Shift));
1876}
1877
1878void AArch64AsmPrinter::emitMOVK(Register Dest, uint64_t Imm, unsigned Shift) {
1879 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1880 EmitToStreamer(*OutStreamer,
1881 MCInstBuilder(Is64Bit ? AArch64::MOVKXi : AArch64::MOVKWi)
1882 .addReg(Dest)
1883 .addReg(Dest)
1884 .addImm(Imm)
1885 .addImm(Shift));
1886}
1887
1888void AArch64AsmPrinter::emitAUT(AArch64PACKey::ID Key, Register Pointer,
1889 Register Disc) {
1890 bool IsZeroDisc = Disc == AArch64::XZR;
1891 unsigned Opcode = getAUTOpcodeForKey(Key, IsZeroDisc);
1892
1893 // autiza x16 ; if IsZeroDisc
1894 // autia x16, x17 ; if !IsZeroDisc
1895 MCInst AUTInst;
1896 AUTInst.setOpcode(Opcode);
1897 AUTInst.addOperand(MCOperand::createReg(Pointer));
1898 AUTInst.addOperand(MCOperand::createReg(Pointer));
1899 if (!IsZeroDisc)
1900 AUTInst.addOperand(MCOperand::createReg(Disc));
1901
1902 EmitToStreamer(AUTInst);
1903}
1904
1905void AArch64AsmPrinter::emitPAC(AArch64PACKey::ID Key, Register Pointer,
1906 Register Disc) {
1907 bool IsZeroDisc = Disc == AArch64::XZR;
1908 unsigned Opcode = getPACOpcodeForKey(Key, IsZeroDisc);
1909
1910 // paciza x16 ; if IsZeroDisc
1911 // pacia x16, x17 ; if !IsZeroDisc
1912 MCInst PACInst;
1913 PACInst.setOpcode(Opcode);
1914 PACInst.addOperand(MCOperand::createReg(Pointer));
1915 PACInst.addOperand(MCOperand::createReg(Pointer));
1916 if (!IsZeroDisc)
1917 PACInst.addOperand(MCOperand::createReg(Disc));
1918
1919 EmitToStreamer(PACInst);
1920}
1921
1922void AArch64AsmPrinter::emitBLRA(bool IsCall, AArch64PACKey::ID Key,
1923 Register Target, Register Disc) {
1924 bool IsZeroDisc = Disc == AArch64::XZR;
1925 unsigned Opcode = getBranchOpcodeForKey(IsCall, Key, IsZeroDisc);
1926
1927 // blraaz x16 ; if IsZeroDisc
1928 // blraa x16, x17 ; if !IsZeroDisc
1929 MCInst Inst;
1930 Inst.setOpcode(Opcode);
1931 Inst.addOperand(MCOperand::createReg(Target));
1932 if (!IsZeroDisc)
1933 Inst.addOperand(MCOperand::createReg(Disc));
1934 EmitToStreamer(Inst);
1935}
1936
// Expand an FMOV*0 pseudo that zeroes an FP register. When NEON is usable and
// the subtarget has zero-cycle FP zeroing, widen the destination to the
// covering D/Q register and emit a vector MOVI; otherwise fall back to an
// fmov from WZR/XZR (emitFMov0AsFMov).
void AArch64AsmPrinter::emitFMov0(const MachineInstr &MI) {
  Register DestReg = MI.getOperand(0).getReg();
  if (!STI->hasZeroCycleZeroingFPWorkaround() && STI->isNeonAvailable()) {
    if (STI->hasZeroCycleZeroingFPR64()) {
      // Convert H/S register to corresponding D register
      const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
      if (AArch64::FPR16RegClass.contains(DestReg))
        DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
                                           &AArch64::FPR64RegClass);
      else if (AArch64::FPR32RegClass.contains(DestReg))
        DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
                                           &AArch64::FPR64RegClass);
      else
        assert(AArch64::FPR64RegClass.contains(DestReg));

      // movi Dd, #0
      MCInst MOVI;
      MOVI.setOpcode(AArch64::MOVID);
      MOVI.addOperand(MCOperand::createReg(DestReg));
      // NOTE(review): the immediate-operand line for MOVI appears truncated
      // in this view.
      EmitToStreamer(*OutStreamer, MOVI);
      ++NumZCZeroingInstrsFPR;
    } else if (STI->hasZeroCycleZeroingFPR128()) {
      // Convert H/S/D register to corresponding Q register
      const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
      if (AArch64::FPR16RegClass.contains(DestReg)) {
        DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
                                           &AArch64::FPR128RegClass);
      } else if (AArch64::FPR32RegClass.contains(DestReg)) {
        DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
                                           &AArch64::FPR128RegClass);
      } else {
        assert(AArch64::FPR64RegClass.contains(DestReg));
        DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::dsub,
                                           &AArch64::FPR128RegClass);
      }

      // movi Vd.2d, #0
      MCInst MOVI;
      MOVI.setOpcode(AArch64::MOVIv2d_ns);
      MOVI.addOperand(MCOperand::createReg(DestReg));
      // NOTE(review): the immediate-operand line for MOVI appears truncated
      // in this view.
      EmitToStreamer(*OutStreamer, MOVI);
      ++NumZCZeroingInstrsFPR;
    } else {
      emitFMov0AsFMov(MI, DestReg);
    }
  } else {
    emitFMov0AsFMov(MI, DestReg);
  }
}
1986
1987void AArch64AsmPrinter::emitFMov0AsFMov(const MachineInstr &MI,
1988 Register DestReg) {
1989 MCInst FMov;
1990 switch (MI.getOpcode()) {
1991 default:
1992 llvm_unreachable("Unexpected opcode");
1993 case AArch64::FMOVH0:
1994 FMov.setOpcode(STI->hasFullFP16() ? AArch64::FMOVWHr : AArch64::FMOVWSr);
1995 if (!STI->hasFullFP16())
1996 DestReg = (AArch64::S0 + (DestReg - AArch64::H0));
1997 FMov.addOperand(MCOperand::createReg(DestReg));
1998 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1999 break;
2000 case AArch64::FMOVS0:
2001 FMov.setOpcode(AArch64::FMOVWSr);
2002 FMov.addOperand(MCOperand::createReg(DestReg));
2003 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
2004 break;
2005 case AArch64::FMOVD0:
2006 FMov.setOpcode(AArch64::FMOVXDr);
2007 FMov.addOperand(MCOperand::createReg(DestReg));
2008 FMov.addOperand(MCOperand::createReg(AArch64::XZR));
2009 break;
2010 }
2011 EmitToStreamer(*OutStreamer, FMov);
2012}
2013
2014Register AArch64AsmPrinter::emitPtrauthDiscriminator(uint64_t Disc,
2015 Register AddrDisc,
2016 Register ScratchReg,
2017 bool MayClobberAddrDisc) {
2018 assert(isPtrauthRegSafe(ScratchReg) &&
2019 "Safe scratch register must be provided by the caller");
2020 assert(isUInt<16>(Disc) && "Constant discriminator is too wide");
2021
2022 // So far we've used NoRegister in pseudos. Now we need real encodings.
2023 if (AddrDisc == AArch64::NoRegister)
2024 AddrDisc = AArch64::XZR;
2025
2026 // If there is no constant discriminator, there's no blend involved:
2027 // just use the address discriminator register as-is (XZR or not).
2028 if (!Disc)
2029 return AddrDisc;
2030
2031 // If there's only a constant discriminator, MOV it into the scratch register.
2032 if (AddrDisc == AArch64::XZR) {
2033 emitMOVZ(ScratchReg, Disc, 0);
2034 return ScratchReg;
2035 }
2036
2037 // If there are both, emit a blend into the scratch register.
2038
2039 // Check if we can save one MOV instruction.
2040 if (MayClobberAddrDisc && isPtrauthRegSafe(AddrDisc)) {
2041 ScratchReg = AddrDisc;
2042 } else {
2043 emitMovXReg(ScratchReg, AddrDisc);
2044 assert(ScratchReg != AddrDisc &&
2045 "Forbidden to clobber AddrDisc, but have to");
2046 }
2047
2048 emitMOVK(ScratchReg, Disc, 48);
2049 return ScratchReg;
2050}
2051
/// Emit a code sequence to check an authenticated pointer value.
///
/// This function emits a sequence of instructions that checks if TestedReg was
/// authenticated successfully. On success, execution continues at the next
/// instruction after the sequence.
///
/// The action performed on failure depends on the OnFailure argument:
/// * if OnFailure is not nullptr, control is transferred to that label after
///   clearing the PAC field
/// * otherwise, BRK instruction is emitted to generate an error
void AArch64AsmPrinter::emitPtrauthCheckAuthenticatedValue(
    Register TestedReg, Register ScratchReg, AArch64PACKey::ID Key,
    AArch64PAuth::AuthCheckMethod Method, const MCSymbol *OnFailure) {
  // Insert a sequence to check if authentication of TestedReg succeeded,
  // such as:
  //
  // - checked and clearing:
  //     ; x16 is TestedReg, x17 is ScratchReg
  //     mov x17, x16
  //     xpaci x17
  //     cmp x16, x17
  //     b.eq Lsuccess
  //     mov x16, x17
  //     b Lend
  //   Lsuccess:
  //     ; skipped if authentication failed
  //   Lend:
  //     ...
  //
  // - checked and trapping:
  //     mov x17, x16
  //     xpaci x17
  //     cmp x16, x17
  //     b.eq Lsuccess
  //     brk #<0xc470 + aut key>
  //   Lsuccess:
  //     ...
  //
  // See the documentation on AuthCheckMethod enumeration constants for
  // the specific code sequences that can be used to perform the check.

  if (Method == AuthCheckMethod::None)
    return;
  if (Method == AuthCheckMethod::DummyLoad) {
    // A load through the tested pointer: faults (traps) if the PAC field is
    // not clear, i.e. if authentication failed.
    EmitToStreamer(MCInstBuilder(AArch64::LDRWui)
                       .addReg(getWRegFromXReg(ScratchReg))
                       .addReg(TestedReg)
                       .addImm(0));
    assert(!OnFailure && "DummyLoad always traps on error");
    return;
  }

  MCSymbol *SuccessSym = createTempSymbol("auth_success_");
  if (Method == AuthCheckMethod::XPAC || Method == AuthCheckMethod::XPACHint) {
    // mov Xscratch, Xtested
    emitMovXReg(ScratchReg, TestedReg);

    if (Method == AuthCheckMethod::XPAC) {
      // xpac(i|d) Xscratch
      unsigned XPACOpc = getXPACOpcodeForKey(Key);
      EmitToStreamer(
          MCInstBuilder(XPACOpc).addReg(ScratchReg).addReg(ScratchReg));
    } else {
      // xpaclri

      // Note that this method applies XPAC to TestedReg instead of ScratchReg.
      assert(TestedReg == AArch64::LR &&
             "XPACHint mode is only compatible with checking the LR register");
      // NOTE(review): a second assert's condition line (restricting Key to
      // the I-keys) appears truncated in this view.
             "XPACHint mode is only compatible with I-keys");
      EmitToStreamer(MCInstBuilder(AArch64::XPACLRI));
    }

    // cmp Xtested, Xscratch
    EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
                       .addReg(AArch64::XZR)
                       .addReg(TestedReg)
                       .addReg(ScratchReg)
                       .addImm(0));

    // b.eq Lsuccess
    EmitToStreamer(
        MCInstBuilder(AArch64::Bcc)
            .addImm(AArch64CC::EQ)
            .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
  } else if (Method == AuthCheckMethod::HighBitsNoTBI) {
    // eor Xscratch, Xtested, Xtested, lsl #1
    EmitToStreamer(MCInstBuilder(AArch64::EORXrs)
                       .addReg(ScratchReg)
                       .addReg(TestedReg)
                       .addReg(TestedReg)
                       .addImm(1));
    // tbz Xscratch, #62, Lsuccess
    EmitToStreamer(
        MCInstBuilder(AArch64::TBZX)
            .addReg(ScratchReg)
            .addImm(62)
            .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
  } else {
    llvm_unreachable("Unsupported check method");
  }

  if (!OnFailure) {
    // Trapping sequences do a 'brk'.
    // brk #<0xc470 + aut key>
    EmitToStreamer(MCInstBuilder(AArch64::BRK).addImm(0xc470 | Key));
  } else {
    // Non-trapping checked sequences return the stripped result in TestedReg,
    // skipping over success-only code (such as re-signing the pointer) by
    // jumping to OnFailure label.
    // Note that this can introduce an authentication oracle (such as based on
    // the high bits of the re-signed value).

    // FIXME: The XPAC method can be optimized by applying XPAC to TestedReg
    // instead of ScratchReg, thus eliminating one `mov` instruction.
    // Both XPAC and XPACHint can be further optimized by not using a
    // conditional branch jumping over an unconditional one.

    switch (Method) {
    case AuthCheckMethod::XPACHint:
      // LR is already XPAC-ed at this point.
      break;
    case AuthCheckMethod::XPAC:
      // mov Xtested, Xscratch
      emitMovXReg(TestedReg, ScratchReg);
      break;
    default:
      // If Xtested was not XPAC-ed so far, emit XPAC here.
      // xpac(i|d) Xtested
      unsigned XPACOpc = getXPACOpcodeForKey(Key);
      EmitToStreamer(
          MCInstBuilder(XPACOpc).addReg(TestedReg).addReg(TestedReg));
    }

    // b Lend
    const auto *OnFailureExpr = MCSymbolRefExpr::create(OnFailure, OutContext);
    EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(OnFailureExpr));
  }

  // If the auth check succeeds, we can continue.
  // Lsuccess:
  OutStreamer->emitLabel(SuccessSym);
}
2196
// With Pointer Authentication, it may be needed to explicitly check the
// authenticated value in LR before performing a tail call.
// Otherwise, the callee may re-sign the invalid return address,
// introducing a signing oracle.
void AArch64AsmPrinter::emitPtrauthTailCallHardening(const MachineInstr *TC) {
  // Only relevant when this function signs its return address.
  if (!AArch64FI->shouldSignReturnAddress(*MF))
    return;

  auto LRCheckMethod = STI->getAuthenticatedLRCheckMethod(*MF);
  if (LRCheckMethod == AArch64PAuth::AuthCheckMethod::None)
    return;

  // Pick whichever of x16/x17 the tail call does not read as the scratch.
  const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
  Register ScratchReg =
      TC->readsRegister(AArch64::X16, TRI) ? AArch64::X17 : AArch64::X16;
  assert(!TC->readsRegister(ScratchReg, TRI) &&
         "Neither x16 nor x17 is available as a scratch register");
  // NOTE(review): the declaration of `Key` (presumably IA/IB selection based
  // on the signing scheme) appears truncated in this view.
  emitPtrauthCheckAuthenticatedValue(AArch64::LR, ScratchReg, Key,
                                     LRCheckMethod);
}
2219
2220bool AArch64AsmPrinter::emitDeactivationSymbolRelocation(Value *DS) {
2221 if (!DS)
2222 return false;
2223
2224 if (isa<GlobalAlias>(DS)) {
2225 // Just emit the nop directly.
2226 EmitToStreamer(MCInstBuilder(AArch64::NOP));
2227 return true;
2228 }
2229 MCSymbol *Dot = OutContext.createTempSymbol();
2230 OutStreamer->emitLabel(Dot);
2231 const MCExpr *DeactDotExpr = MCSymbolRefExpr::create(Dot, OutContext);
2232
2233 const MCExpr *DSExpr = MCSymbolRefExpr::create(
2234 OutContext.getOrCreateSymbol(DS->getName()), OutContext);
2235 OutStreamer->emitRelocDirective(*DeactDotExpr, "R_AARCH64_PATCHINST", DSExpr,
2236 SMLoc());
2237 return false;
2238}
2239
// Build a signing/authentication schema from pseudo-instruction operands:
// the PAC key, the constant (integer) discriminator, and the address
// discriminator register together with whether this use kills it (so the
// lowering may clobber it).
AArch64AsmPrinter::PtrAuthSchema::PtrAuthSchema(
    AArch64PACKey::ID Key, uint64_t IntDisc, const MachineOperand &AddrDiscOp)
    : Key(Key), IntDisc(IntDisc), AddrDisc(AddrDiscOp.getReg()),
      AddrDiscIsKilled(AddrDiscOp.isKill()) {}
2244
2245void AArch64AsmPrinter::emitPtrauthApplyIndirectAddend(Register Pointer,
2246 Register Scratch,
2247 int64_t Addend) {
2248 if (isInt<9>(Addend)) {
2249 // ldrsw Scratch, [Pointer, #Addend]! ; note: Pointer+Addend is used later.
2250 EmitToStreamer(MCInstBuilder(AArch64::LDRSWpre)
2251 .addReg(Pointer)
2252 .addReg(Scratch)
2253 .addReg(Pointer)
2254 .addImm(/*simm9:*/ Addend));
2255 } else {
2256 // Pointer += Addend computation has 2 variants
2257 if (isUInt<24>(Addend)) {
2258 // Variant 1: add Pointer, Pointer, (Addend >> shift12) lsl shift12
2259 // This can take up to 2 instructions.
2260 for (int BitPos = 0; BitPos != 24 && (Addend >> BitPos); BitPos += 12) {
2261 EmitToStreamer(
2262 MCInstBuilder(AArch64::ADDXri)
2263 .addReg(Pointer)
2264 .addReg(Pointer)
2265 .addImm((Addend >> BitPos) & 0xfff)
2266 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, BitPos)));
2267 }
2268 } else {
2269 // Variant 2: accumulate constant in Scratch 16 bits at a time,
2270 // and add it to Pointer. This can take 2-5 instructions.
2271 emitMOVZ(Scratch, Addend & 0xffff, 0);
2272 for (int Offset = 16; Offset < 64; Offset += 16) {
2273 if (unsigned Fragment = (Addend >> Offset) & 0xffff)
2274 emitMOVK(Scratch, Fragment, Offset);
2275 }
2276
2277 // add Pointer, Pointer, Scratch
2278 EmitToStreamer(MCInstBuilder(AArch64::ADDXrs)
2279 .addReg(Pointer)
2280 .addReg(Pointer)
2281 .addReg(Scratch)
2282 .addImm(0));
2283 }
2284 // ldrsw Scratch, [Pointer]
2285 EmitToStreamer(MCInstBuilder(AArch64::LDRSWui)
2286 .addReg(Scratch)
2287 .addReg(Pointer)
2288 .addImm(0));
2289 }
2290 // add Pointer, Pointer, Scratch
2291 EmitToStreamer(MCInstBuilder(AArch64::ADDXrs)
2292 .addReg(Pointer)
2293 .addReg(Pointer)
2294 .addReg(Scratch)
2295 .addImm(0));
2296}
2297
// Expand an AUT (authenticate-only) or AUTPAC (authenticate-then-resign)
// pseudo on Pointer, optionally applying an indirect addend between the
// authenticate and the re-sign. Scratch is clobbered. DS, if non-null, is a
// deactivation symbol governing the AUT instruction.
void AArch64AsmPrinter::emitPtrauthAuthResign(
    Register Pointer, Register Scratch, PtrAuthSchema AuthSchema,
    std::optional<PtrAuthSchema> SignSchema, std::optional<int64_t> Addend,
    Value *DS) {
  const bool IsResign = SignSchema.has_value();
  // We expand AUT/AUTPAC into a sequence of the form
  //
  //   ; authenticate x16
  //   ; check pointer in x16
  // Lsuccess:
  //   ; sign x16 (if AUTPAC)
  // Lend:   ; if not trapping on failure
  //
  // with the checking sequence chosen depending on whether/how we should check
  // the pointer and whether we should trap on failure.

  // By default, auth/resign sequences check for auth failures.
  bool ShouldCheck = true;
  // In the checked sequence, we only trap if explicitly requested.
  bool ShouldTrap = MF->getFunction().hasFnAttribute("ptrauth-auth-traps");

  // On an FPAC CPU, you get traps whether you want them or not: there's
  // no point in emitting checks or traps.
  if (STI->hasFPAC())
    ShouldCheck = ShouldTrap = false;

  // However, command-line flags can override this, for experimentation.
  // NOTE(review): the case labels of this switch (the PtrauthAuthChecks
  // enumerators Default/Unchecked/Poison/Trap) appear truncated in this view.
  switch (PtrauthAuthChecks) {
    break;
    ShouldCheck = ShouldTrap = false;
    break;
    ShouldCheck = true;
    ShouldTrap = false;
    break;
    ShouldCheck = ShouldTrap = true;
    break;
  }

  // Compute aut discriminator
  Register AUTDiscReg =
      emitPtrauthDiscriminator(AuthSchema.IntDisc, AuthSchema.AddrDisc, Scratch,
                               AuthSchema.AddrDiscIsKilled);

  // A deactivation symbol may replace the AUT with a NOP; skip it then.
  if (!emitDeactivationSymbolRelocation(DS))
    emitAUT(AuthSchema.Key, Pointer, AUTDiscReg);

  // Unchecked or checked-but-non-trapping AUT is just an "AUT": we're done.
  if (!IsResign && (!ShouldCheck || !ShouldTrap))
    return;

  MCSymbol *EndSym = nullptr;

  if (ShouldCheck) {
    // Non-trapping resigns skip the re-sign on failure by branching to Lend.
    if (IsResign && !ShouldTrap)
      EndSym = createTempSymbol("resign_end_");

    emitPtrauthCheckAuthenticatedValue(Pointer, Scratch, AuthSchema.Key,
                                       AArch64PAuth::AuthCheckMethod::XPAC,
                                       EndSym);
  }

  // We already emitted unchecked and checked-but-non-trapping AUTs.
  // That left us with trapping AUTs, and AUTPA/AUTRELLOADPACs.
  // Trapping AUTs don't need PAC: we're done.
  if (!IsResign)
    return;

  if (Addend.has_value())
    emitPtrauthApplyIndirectAddend(Pointer, Scratch, *Addend);

  // Compute pac discriminator into x17
  Register PACDiscReg = emitPtrauthDiscriminator(SignSchema->IntDisc,
                                                 SignSchema->AddrDisc, Scratch);
  emitPAC(SignSchema->Key, Pointer, PACDiscReg);

  // Lend:
  if (EndSym)
    OutStreamer->emitLabel(EndSym);
}
2381
2382void AArch64AsmPrinter::emitPtrauthSign(const MachineInstr *MI) {
2383 Register Val = MI->getOperand(1).getReg();
2384 auto Key = (AArch64PACKey::ID)MI->getOperand(2).getImm();
2385 uint64_t Disc = MI->getOperand(3).getImm();
2386 Register AddrDisc = MI->getOperand(4).getReg();
2387 bool AddrDiscKilled = MI->getOperand(4).isKill();
2388
2389 // As long as at least one of Val and AddrDisc is in GPR64noip, a scratch
2390 // register is available.
2391 Register ScratchReg = Val == AArch64::X16 ? AArch64::X17 : AArch64::X16;
2392 assert(ScratchReg != AddrDisc &&
2393 "Neither X16 nor X17 is available as a scratch register");
2394
2395 // Compute pac discriminator
2396 Register DiscReg = emitPtrauthDiscriminator(
2397 Disc, AddrDisc, ScratchReg, /*MayClobberAddrDisc=*/AddrDiscKilled);
2398
2399 if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol()))
2400 return;
2401
2402 emitPAC(Key, Val, DiscReg);
2403}
2404
2405void AArch64AsmPrinter::emitPtrauthBranch(const MachineInstr *MI) {
2406 bool IsCall = MI->getOpcode() == AArch64::BLRA;
2407 unsigned BrTarget = MI->getOperand(0).getReg();
2408
2409 auto Key = (AArch64PACKey::ID)MI->getOperand(1).getImm();
2410 uint64_t Disc = MI->getOperand(2).getImm();
2411
2412 unsigned AddrDisc = MI->getOperand(3).getReg();
2413
2414 // Make sure AddrDisc is solely used to compute the discriminator.
2415 // While hardly meaningful, it is still possible to describe an authentication
2416 // of a pointer against its own value (instead of storage address) with
2417 // intrinsics, so use report_fatal_error instead of assert.
2418 if (BrTarget == AddrDisc)
2419 report_fatal_error("Branch target is signed with its own value");
2420
2421 // If we are printing BLRA pseudo, try to save one MOV by making use of the
2422 // fact that x16 and x17 are described as clobbered by the MI instruction and
2423 // AddrDisc is not used as any other input.
2424 //
2425 // Back in the day, emitPtrauthDiscriminator was restricted to only returning
2426 // either x16 or x17, meaning the returned register is always among the
2427 // implicit-def'ed registers of BLRA pseudo. Now this property can be violated
2428 // if isX16X17Safer predicate is false, thus manually check if AddrDisc is
2429 // among x16 and x17 to prevent clobbering unexpected registers.
2430 //
2431 // Unlike BLRA, BRA pseudo is used to perform computed goto, and thus not
2432 // declared as clobbering x16/x17.
2433 //
2434 // FIXME: Make use of `killed` flags and register masks instead.
2435 bool AddrDiscIsImplicitDef =
2436 IsCall && (AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17);
2437 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17,
2438 AddrDiscIsImplicitDef);
2439 emitBLRA(IsCall, Key, BrTarget, DiscReg);
2440}
2441
2442void AArch64AsmPrinter::emitAddImm(MCRegister Reg, int64_t Addend,
2443 MCRegister Tmp) {
2444 if (Addend != 0) {
2445 const uint64_t AbsOffset = (Addend > 0 ? Addend : -((uint64_t)Addend));
2446 const bool IsNeg = Addend < 0;
2447 if (isUInt<24>(AbsOffset)) {
2448 for (int BitPos = 0; BitPos != 24 && (AbsOffset >> BitPos);
2449 BitPos += 12) {
2450 EmitToStreamer(
2451 MCInstBuilder(IsNeg ? AArch64::SUBXri : AArch64::ADDXri)
2452 .addReg(Reg)
2453 .addReg(Reg)
2454 .addImm((AbsOffset >> BitPos) & 0xfff)
2455 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, BitPos)));
2456 }
2457 } else {
2458 const uint64_t UAddend = Addend;
2459 EmitToStreamer(MCInstBuilder(IsNeg ? AArch64::MOVNXi : AArch64::MOVZXi)
2460 .addReg(Tmp)
2461 .addImm((IsNeg ? ~UAddend : UAddend) & 0xffff)
2462 .addImm(/*shift=*/0));
2463 auto NeedMovk = [IsNeg, UAddend](int BitPos) -> bool {
2464 assert(BitPos == 16 || BitPos == 32 || BitPos == 48);
2465 uint64_t Shifted = UAddend >> BitPos;
2466 if (!IsNeg)
2467 return Shifted != 0;
2468 for (int I = 0; I != 64 - BitPos; I += 16)
2469 if (((Shifted >> I) & 0xffff) != 0xffff)
2470 return true;
2471 return false;
2472 };
2473 for (int BitPos = 16; BitPos != 64 && NeedMovk(BitPos); BitPos += 16)
2474 emitMOVK(Tmp, (UAddend >> BitPos) & 0xffff, BitPos);
2475
2476 EmitToStreamer(MCInstBuilder(AArch64::ADDXrs)
2477 .addReg(Reg)
2478 .addReg(Reg)
2479 .addReg(Tmp)
2480 .addImm(/*shift=*/0));
2481 }
2482 }
2483}
2484
// Materialize the address of Expr into Reg: ADRP+ADD for DSO-local symbols,
// ADRP+LDR through the GOT (plus any constant addend via emitAddImm)
// otherwise. Tmp is a scratch register for the addend path.
void AArch64AsmPrinter::emitAddress(MCRegister Reg, const MCExpr *Expr,
                                    MCRegister Tmp, bool DSOLocal,
                                    const MCSubtargetInfo &STI) {
  MCValue Val;
  if (!Expr->evaluateAsRelocatable(Val, nullptr))
    report_fatal_error("emitAddress could not evaluate");
  if (DSOLocal) {
    // NOTE(review): the `.addExpr(MCSpecifierExpr::create(...))` continuation
    // lines of the ADRP builders appear truncated in this view.
    EmitToStreamer(
        MCInstBuilder(AArch64::ADRP)
            .addReg(Reg)
                                 OutStreamer->getContext())));
    EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                       .addReg(Reg)
                       .addReg(Reg)
                       .addExpr(MCSpecifierExpr::create(
                           Expr, AArch64::S_LO12, OutStreamer->getContext()))
                       .addImm(0));
  } else {
    auto *SymRef =
        MCSymbolRefExpr::create(Val.getAddSym(), OutStreamer->getContext());
    EmitToStreamer(
        MCInstBuilder(AArch64::ADRP)
            .addReg(Reg)
                                 OutStreamer->getContext())));
    EmitToStreamer(
        MCInstBuilder(AArch64::LDRXui)
            .addReg(Reg)
            .addReg(Reg)
                                 OutStreamer->getContext())));
    // Apply any constant addend after loading the symbol address.
    emitAddImm(Reg, Val.getConstant(), Tmp);
  }
}
2520
  // NOTE(review): the enclosing function's signature is not visible in this
  // view; it appears to be a predicate testing whether the target triple TT
  // supports IFUNC resolvers.
  // IFUNCs are ELF-only.
  if (!TT.isOSBinFormatELF())
    return false;

  // IFUNCs are supported on glibc, bionic, and some but not all of the BSDs.
  return TT.isOSGlibc() || TT.isAndroid() || TT.isOSFreeBSD() ||
         TT.isOSDragonFly() || TT.isOSNetBSD();
}
2530
2531// Emit an ifunc resolver that returns a signed pointer to the specified target,
2532// and return a FUNCINIT reference to the resolver. In the linked binary, this
2533// function becomes the target of an IRELATIVE relocation. This resolver is used
2534// to relocate signed pointers in global variable initializers in special cases
2535// where the standard R_AARCH64_AUTH_ABS64 relocation would not work.
2536//
2537// Example (signed null pointer, not address discriminated):
2538//
2539// .8byte .Lpauth_ifunc0
2540// .pushsection .text.startup,"ax",@progbits
2541// .Lpauth_ifunc0:
2542// mov x0, #0
2543// mov x1, #12345
2544// b __emupac_pacda
2545//
2546// Example (signed null pointer, address discriminated):
2547//
2548// .Ltmp:
2549// .8byte .Lpauth_ifunc0
2550// .pushsection .text.startup,"ax",@progbits
2551// .Lpauth_ifunc0:
2552// mov x0, #0
2553// adrp x1, .Ltmp
2554// add x1, x1, :lo12:.Ltmp
2555// b __emupac_pacda
2556// .popsection
2557//
2558// Example (signed pointer to symbol, not address discriminated):
2559//
2560// .Ltmp:
2561// .8byte .Lpauth_ifunc0
2562// .pushsection .text.startup,"ax",@progbits
2563// .Lpauth_ifunc0:
2564// adrp x0, symbol
2565// add x0, x0, :lo12:symbol
2566// mov x1, #12345
2567// b __emupac_pacda
2568// .popsection
2569//
2570// Example (signed null pointer, not address discriminated, with deactivation
2571// symbol ds):
2572//
2573// .8byte .Lpauth_ifunc0
2574// .pushsection .text.startup,"ax",@progbits
2575// .Lpauth_ifunc0:
2576// mov x0, #0
2577// mov x1, #12345
2578// .reloc ., R_AARCH64_PATCHINST, ds
2579// b __emupac_pacda
2580// ret
2581// .popsection
// Emit an ifunc resolver computing a DA-key-signed pointer (see the worked
// examples in the comment block above) and return a FUNCINIT reference to it,
// or nullptr if this target/key combination cannot use the IRELATIVE scheme.
// NOTE(review): several condition/continuation lines of this function appear
// truncated in this view (IRELATIVE-support check, section-flags computation,
// and the final `return MCSpecifierExpr::create(` line).
const MCExpr *AArch64AsmPrinter::emitPAuthRelocationAsIRelative(
    const MCExpr *Target, uint64_t Disc, AArch64PACKey::ID KeyID,
    bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr) {
  const Triple &TT = TM.getTargetTriple();

  // We only emit an IRELATIVE relocation if the target supports IRELATIVE.
    return nullptr;

  // For now, only the DA key is supported.
  if (KeyID != AArch64PACKey::DA)
    return nullptr;

  // AArch64Subtarget is huge, so heap allocate it so we don't run out of stack
  // space.
  auto STI = std::make_unique<AArch64Subtarget>(
      TT, TM.getTargetCPU(), TM.getTargetCPU(), TM.getTargetFeatureString(), TM,
      true);
  this->STI = STI.get();

  // Label the relocated slot itself; it doubles as the address diversity
  // input below.
  MCSymbol *Place = OutStreamer->getContext().createTempSymbol();
  OutStreamer->emitLabel(Place);
  OutStreamer->pushSection();

  // Emit the resolver into .text.startup, inheriting any COMDAT group of the
  // current section.
  const MCSymbolELF *Group =
      static_cast<MCSectionELF *>(OutStreamer->getCurrentSectionOnly())
          ->getGroup();
  if (Group)
  OutStreamer->switchSection(OutStreamer->getContext().getELFSection(
      ".text.startup", ELF::SHT_PROGBITS, Flags, 0, Group, true,
      Group ? MCSection::NonUniqueID : PAuthIFuncNextUniqueID++, nullptr));

  MCSymbol *IRelativeSym =
      OutStreamer->getContext().createLinkerPrivateSymbol("pauth_ifunc");
  OutStreamer->emitLabel(IRelativeSym);
  // x0 = the raw (unsigned) pointer value.
  if (isa<MCConstantExpr>(Target)) {
    OutStreamer->emitInstruction(MCInstBuilder(AArch64::MOVZXi)
                                     .addReg(AArch64::X0)
                                     .addExpr(Target)
                                     .addImm(0),
                                 *STI);
  } else {
    emitAddress(AArch64::X0, Target, AArch64::X16, IsDSOLocal, *STI);
  }
  // x1 = the discriminator: slot address blended with Disc when address
  // diversity is requested, otherwise the 16-bit constant alone.
  if (HasAddressDiversity) {
    auto *PlacePlusDisc = MCBinaryExpr::createAdd(
        MCSymbolRefExpr::create(Place, OutStreamer->getContext()),
        MCConstantExpr::create(Disc, OutStreamer->getContext()),
        OutStreamer->getContext());
    emitAddress(AArch64::X1, PlacePlusDisc, AArch64::X16, /*IsDSOLocal=*/true,
                *STI);
  } else {
    if (!isUInt<16>(Disc)) {
      OutContext.reportError(SMLoc(), "AArch64 PAC Discriminator '" +
                                          Twine(Disc) +
                                          "' out of range [0, 0xFFFF]");
    }
    emitMOVZ(AArch64::X1, Disc, 0);
  }

  if (DSExpr) {
    MCSymbol *PrePACInst = OutStreamer->getContext().createTempSymbol();
    OutStreamer->emitLabel(PrePACInst);

    auto *PrePACInstExpr =
        MCSymbolRefExpr::create(PrePACInst, OutStreamer->getContext());
    OutStreamer->emitRelocDirective(*PrePACInstExpr, "R_AARCH64_PATCHINST",
                                    DSExpr, SMLoc());
  }

  // We don't know the subtarget because this is being emitted for a global
  // initializer. Because the performance of IFUNC resolvers is unimportant, we
  // always call the EmuPAC runtime, which will end up using the PAC instruction
  // if the target supports PAC.
  MCSymbol *EmuPAC =
      OutStreamer->getContext().getOrCreateSymbol("__emupac_pacda");
  const MCSymbolRefExpr *EmuPACRef =
      MCSymbolRefExpr::create(EmuPAC, OutStreamer->getContext());
  OutStreamer->emitInstruction(MCInstBuilder(AArch64::B).addExpr(EmuPACRef),
                               *STI);

  // We need a RET despite the above tail call because the deactivation symbol
  // may replace the tail call with a NOP.
  if (DSExpr)
    OutStreamer->emitInstruction(
        MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI);
  OutStreamer->popSection();

      MCSymbolRefExpr::create(IRelativeSym, OutStreamer->getContext()),
      AArch64::S_FUNCINIT, OutStreamer->getContext());
}
2676
// Lower a ptrauth constant (llvm.ptrauth signed-pointer constant) to an
// MCExpr: either an IRELATIVE-resolved ifunc reference (when representable
// that way) or an @AUTH expression understood by the assembler/linker.
// NOTE(review): the `Sym = MCBinaryExpr::createAdd(` lines for the
// positive/negative addend cases appear truncated in this view.
const MCExpr *
AArch64AsmPrinter::lowerConstantPtrAuth(const ConstantPtrAuth &CPA) {
  MCContext &Ctx = OutContext;

  // Figure out the base symbol and the addend, if any.
  APInt Offset(64, 0);
  const Value *BaseGV = CPA.getPointer()->stripAndAccumulateConstantOffsets(
      getDataLayout(), Offset, /*AllowNonInbounds=*/true);

  auto *BaseGVB = dyn_cast<GlobalValue>(BaseGV);

  const MCExpr *Sym;
  if (BaseGVB) {
    // If there is an addend, turn that into the appropriate MCExpr.
    Sym = MCSymbolRefExpr::create(getSymbol(BaseGVB), Ctx);
    if (Offset.sgt(0))
          Sym, MCConstantExpr::create(Offset.getSExtValue(), Ctx), Ctx);
    else if (Offset.slt(0))
          Sym, MCConstantExpr::create((-Offset).getSExtValue(), Ctx), Ctx);
  } else if (isa<ConstantPointerNull>(BaseGV)) {
    // Signed null pointer: the "symbol" is just the constant offset.
    Sym = MCConstantExpr::create(Offset.getSExtValue(), Ctx);
  } else {
    reportFatalUsageError("unsupported constant expression in ptrauth pointer");
  }

  const MCExpr *DSExpr = nullptr;
  if (auto *DS = dyn_cast<GlobalValue>(CPA.getDeactivationSymbol())) {
    // A GlobalAlias deactivation symbol deactivates the signing entirely:
    // emit the bare pointer value.
    if (isa<GlobalAlias>(DS))
      return Sym;
    DSExpr = MCSymbolRefExpr::create(getSymbol(DS), Ctx);
  }

  uint64_t KeyID = CPA.getKey()->getZExtValue();
  // We later rely on valid KeyID value in AArch64PACKeyIDToString call from
  // AArch64AuthMCExpr::printImpl, so fail fast.
  if (KeyID > AArch64PACKey::LAST) {
    CPA.getContext().emitError("AArch64 PAC Key ID '" + Twine(KeyID) +
                               "' out of range [0, " +
                               Twine((unsigned)AArch64PACKey::LAST) + "]");
    KeyID = 0;
  }

  uint64_t Disc = CPA.getDiscriminator()->getZExtValue();

  // Check if we can represent this with an IRELATIVE and emit it if so.
  if (auto *IFuncSym = emitPAuthRelocationAsIRelative(
          Sym, Disc, AArch64PACKey::ID(KeyID), CPA.hasAddressDiscriminator(),
          BaseGVB && BaseGVB->isDSOLocal(), DSExpr))
    return IFuncSym;

  if (!isUInt<16>(Disc)) {
    CPA.getContext().emitError("AArch64 PAC Discriminator '" + Twine(Disc) +
                               "' out of range [0, 0xFFFF]");
    Disc = 0;
  }

  if (DSExpr)
    report_fatal_error("deactivation symbols unsupported in constant "
                       "expressions on this target");

  // Finally build the complete @AUTH expr.
  return AArch64AuthMCExpr::create(Sym, Disc, AArch64PACKey::ID(KeyID),
                                   CPA.hasAddressDiscriminator(), Ctx);
}
2743
// Lower a LOADauthptrstatic pseudo: load a pre-signed pointer for the global
// in operand 1 from its $auth_ptr$ stub slot (keyed by PAC key and constant
// discriminator) via an ADRP+LDR pair.
void AArch64AsmPrinter::LowerLOADauthptrstatic(const MachineInstr &MI) {
  unsigned DstReg = MI.getOperand(0).getReg();
  const MachineOperand &GAOp = MI.getOperand(1);
  const uint64_t KeyC = MI.getOperand(2).getImm();
  assert(KeyC <= AArch64PACKey::LAST &&
         "key is out of range [0, AArch64PACKey::LAST]");
  const auto Key = (AArch64PACKey::ID)KeyC;
  const uint64_t Disc = MI.getOperand(3).getImm();
  assert(isUInt<16>(Disc) &&
         "constant discriminator is out of range [0, 0xffff]");

  // Emit instruction sequence like the following:
  //   ADRP x16, symbol$auth_ptr$key$disc
  //   LDR x16, [x16, :lo12:symbol$auth_ptr$key$disc]
  //
  // Where the $auth_ptr$ symbol is the stub slot containing the signed pointer
  // to symbol.
  MCSymbol *AuthPtrStubSym;
  if (TM.getTargetTriple().isOSBinFormatELF()) {
    const auto &TLOF =
        static_cast<const AArch64_ELFTargetObjectFile &>(getObjFileLowering());

    assert(GAOp.getOffset() == 0 &&
           "non-zero offset for $auth_ptr$ stub slots is not supported");
    const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
    AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
  } else {
    assert(TM.getTargetTriple().isOSBinFormatMachO() &&
           "LOADauthptrstatic is implemented only for MachO/ELF");

    const auto &TLOF = static_cast<const AArch64_MachoTargetObjectFile &>(
        getObjFileLowering());

    assert(GAOp.getOffset() == 0 &&
           "non-zero offset for $auth_ptr$ stub slots is not supported");
    const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
    AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
  }

  // NOTE(review): the initializer of StubMOHi (the MO_PAGE MCSymbol operand)
  // appears truncated in this view.
  MachineOperand StubMOHi =
  MachineOperand StubMOLo = MachineOperand::CreateMCSymbol(
      AuthPtrStubSym, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
  MCOperand StubMCHi, StubMCLo;

  MCInstLowering.lowerOperand(StubMOHi, StubMCHi);
  MCInstLowering.lowerOperand(StubMOLo, StubMCLo);

  EmitToStreamer(
      *OutStreamer,
      MCInstBuilder(AArch64::ADRP).addReg(DstReg).addOperand(StubMCHi));

  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRXui)
                                   .addReg(DstReg)
                                   .addReg(DstReg)
                                   .addOperand(StubMCLo));
}
2801
/// Lower MOVaddrPAC / LOADgotPAC pseudos: materialize the address of a global
/// into x16 (directly, or by loading it from the GOT), apply an optional
/// constant offset, then sign x16 with the requested key and discriminator.
/// The exact instruction sequences are enumerated in the comment block below.
void AArch64AsmPrinter::LowerMOVaddrPAC(const MachineInstr &MI) {
  const bool IsGOTLoad = MI.getOpcode() == AArch64::LOADgotPAC;
  const bool IsELFSignedGOT = MI.getParent()
                                  ->getParent()
                                  ->getInfo<AArch64FunctionInfo>()
                                  ->hasELFSignedGOT();
  // Operands: 0 = global (its offset is split out and applied separately),
  // 1 = PAC key, 2 = address-discriminator register, 3 = immediate
  // discriminator.
  MachineOperand GAOp = MI.getOperand(0);
  const uint64_t KeyC = MI.getOperand(1).getImm();
  assert(KeyC <= AArch64PACKey::LAST &&
         "key is out of range [0, AArch64PACKey::LAST]");
  const auto Key = (AArch64PACKey::ID)KeyC;
  const unsigned AddrDisc = MI.getOperand(2).getReg();
  const uint64_t Disc = MI.getOperand(3).getImm();

  // The offset cannot be folded into GOT/lo12 relocations; clear it on the
  // operand and add it explicitly after the base address is materialized.
  const int64_t Offset = GAOp.getOffset();
  GAOp.setOffset(0);

  // Emit:
  // target materialization:
  // - via GOT:
  //   - unsigned GOT:
  //     adrp x16, :got:target
  //     ldr x16, [x16, :got_lo12:target]
  //     add offset to x16 if offset != 0
  //   - ELF signed GOT:
  //     adrp x17, :got:target
  //     add x17, x17, :got_auth_lo12:target
  //     ldr x16, [x17]
  //     aut{i|d}a x16, x17
  //     check+trap sequence (if no FPAC)
  //     add offset to x16 if offset != 0
  //
  // - direct:
  //     adrp x16, target
  //     add x16, x16, :lo12:target
  //     add offset to x16 if offset != 0
  //
  // add offset to x16:
  // - abs(offset) fits 24 bits:
  //   add/sub x16, x16, #<offset>[, #lsl 12] (up to 2 instructions)
  // - abs(offset) does not fit 24 bits:
  //   - offset < 0:
  //     movn+movk sequence filling x17 register with the offset (up to 4
  //     instructions)
  //     add x16, x16, x17
  //   - offset > 0:
  //     movz+movk sequence filling x17 register with the offset (up to 4
  //     instructions)
  //     add x16, x16, x17
  //
  // signing:
  // - 0 discriminator:
  //     paciza x16
  // - Non-0 discriminator, no address discriminator:
  //     mov x17, #Disc
  //     pacia x16, x17
  // - address discriminator (with potentially folded immediate discriminator):
  //     pacia x16, xAddrDisc

  MachineOperand GAMOHi(GAOp), GAMOLo(GAOp);
  MCOperand GAMCHi, GAMCLo;

  GAMOHi.setTargetFlags(AArch64II::MO_PAGE);
  GAMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
  if (IsGOTLoad) {
    GAMOHi.addTargetFlag(AArch64II::MO_GOT);
    GAMOLo.addTargetFlag(AArch64II::MO_GOT);
  }

  MCInstLowering.lowerOperand(GAMOHi, GAMCHi);
  MCInstLowering.lowerOperand(GAMOLo, GAMCLo);

  // Signed-GOT loads keep the slot address in x17 (it doubles as the
  // address discriminator for the aut below); all other forms build in x16.
  EmitToStreamer(
      MCInstBuilder(AArch64::ADRP)
          .addReg(IsGOTLoad && IsELFSignedGOT ? AArch64::X17 : AArch64::X16)
          .addOperand(GAMCHi));

  if (IsGOTLoad) {
    if (IsELFSignedGOT) {
      EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                         .addReg(AArch64::X17)
                         .addReg(AArch64::X17)
                         .addOperand(GAMCLo)
                         .addImm(0));

      EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                         .addReg(AArch64::X16)
                         .addReg(AArch64::X17)
                         .addImm(0));

      assert(GAOp.isGlobal());
      assert(GAOp.getGlobal()->getValueType() != nullptr);

      // GOT entries for functions are signed with IA, data with DA.
      bool IsFunctionTy = GAOp.getGlobal()->getValueType()->isFunctionTy();
      auto AuthKey = IsFunctionTy ? AArch64PACKey::IA : AArch64PACKey::DA;
      emitAUT(AuthKey, AArch64::X16, AArch64::X17);

      // Without FPAC, authentication failure does not fault; emit an
      // explicit check+trap sequence instead.
      if (!STI->hasFPAC())
        emitPtrauthCheckAuthenticatedValue(AArch64::X16, AArch64::X17, AuthKey,
                                           AArch64PAuth::AuthCheckMethod::XPAC);
    } else {
      EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                         .addReg(AArch64::X16)
                         .addReg(AArch64::X16)
                         .addOperand(GAMCLo));
    }
  } else {
    EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                       .addReg(AArch64::X16)
                       .addReg(AArch64::X16)
                       .addOperand(GAMCLo)
                       .addImm(0));
  }

  // Apply the deferred constant offset, combine immediate and address
  // discriminators into DiscReg (x17 is scratch), and sign x16.
  emitAddImm(AArch64::X16, Offset, AArch64::X17);
  Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17);

  emitPAC(Key, AArch64::X16, DiscReg);
}
2921
/// Lower a LOADgotAUTH pseudo: load a signed pointer from the authenticated
/// GOT and authenticate it into the destination register. Uses x17 as the
/// slot-address scratch register (it also serves as the address discriminator
/// for the authentication).
void AArch64AsmPrinter::LowerLOADgotAUTH(const MachineInstr &MI) {
  Register DstReg = MI.getOperand(0).getReg();
  // When FPAC is unavailable we authenticate into x16 so the explicit
  // check sequence below can validate it before the final move into DstReg;
  // with FPAC no explicit check is emitted and we authenticate in place.
  Register AuthResultReg = STI->hasFPAC() ? DstReg : AArch64::X16;
  const MachineOperand &GAMO = MI.getOperand(1);
  assert(GAMO.getOffset() == 0);

  if (MI.getMF()->getTarget().getCodeModel() == CodeModel::Tiny) {
    // Tiny code model: the GOT slot is within ADR range.
    MCOperand GAMC;
    MCInstLowering.lowerOperand(GAMO, GAMC);
    EmitToStreamer(
        MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addOperand(GAMC));
    EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                       .addReg(AuthResultReg)
                       .addReg(AArch64::X17)
                       .addImm(0));
  } else {
    // Otherwise materialize the slot address as ADRP + ADD, then load.
    MachineOperand GAHiOp(GAMO);
    MachineOperand GALoOp(GAMO);
    GAHiOp.addTargetFlag(AArch64II::MO_PAGE);
    GALoOp.addTargetFlag(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);

    MCOperand GAMCHi, GAMCLo;
    MCInstLowering.lowerOperand(GAHiOp, GAMCHi);
    MCInstLowering.lowerOperand(GALoOp, GAMCLo);

    EmitToStreamer(
        MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(GAMCHi));

    EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                       .addReg(AArch64::X17)
                       .addReg(AArch64::X17)
                       .addOperand(GAMCLo)
                       .addImm(0));

    EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                       .addReg(AuthResultReg)
                       .addReg(AArch64::X17)
                       .addImm(0));
  }

  assert(GAMO.isGlobal());
  // An extern-weak global may resolve to a null GOT entry; branch around the
  // authentication (and failure check) so a null pointer passes through
  // unmodified.
  MCSymbol *UndefWeakSym;
  if (GAMO.getGlobal()->hasExternalWeakLinkage()) {
    UndefWeakSym = createTempSymbol("undef_weak");
    EmitToStreamer(
        MCInstBuilder(AArch64::CBZX)
            .addReg(AuthResultReg)
            .addExpr(MCSymbolRefExpr::create(UndefWeakSym, OutContext)));
  }

  assert(GAMO.getGlobal()->getValueType() != nullptr);

  // GOT entries for functions are signed with IA, data with DA.
  bool IsFunctionTy = GAMO.getGlobal()->getValueType()->isFunctionTy();
  auto AuthKey = IsFunctionTy ? AArch64PACKey::IA : AArch64PACKey::DA;
  emitAUT(AuthKey, AuthResultReg, AArch64::X17);

  if (GAMO.getGlobal()->hasExternalWeakLinkage())
    OutStreamer->emitLabel(UndefWeakSym);

  // Without FPAC, verify the authenticated value explicitly, then move the
  // checked result from x16 into the real destination.
  if (!STI->hasFPAC()) {
    emitPtrauthCheckAuthenticatedValue(AuthResultReg, AArch64::X17, AuthKey,
                                       AArch64PAuth::AuthCheckMethod::XPAC);

    emitMovXReg(DstReg, AuthResultReg);
  }
}
2988
2989const MCExpr *
2990AArch64AsmPrinter::lowerBlockAddressConstant(const BlockAddress &BA) {
2991 const MCExpr *BAE = AsmPrinter::lowerBlockAddressConstant(BA);
2992 const Function &Fn = *BA.getFunction();
2993
2994 if (std::optional<uint16_t> BADisc =
2995 STI->getPtrAuthBlockAddressDiscriminatorIfEnabled(Fn))
2996 return AArch64AuthMCExpr::create(BAE, *BADisc, AArch64PACKey::IA,
2997 /*HasAddressDiversity=*/false, OutContext);
2998
2999 return BAE;
3000}
3001
3002void AArch64AsmPrinter::emitCBPseudoExpansion(const MachineInstr *MI) {
3003 bool IsImm = false;
3004 unsigned Width = 0;
3005
3006 switch (MI->getOpcode()) {
3007 default:
3008 llvm_unreachable("This is not a CB pseudo instruction");
3009 case AArch64::CBBAssertExt:
3010 IsImm = false;
3011 Width = 8;
3012 break;
3013 case AArch64::CBHAssertExt:
3014 IsImm = false;
3015 Width = 16;
3016 break;
3017 case AArch64::CBWPrr:
3018 Width = 32;
3019 break;
3020 case AArch64::CBXPrr:
3021 Width = 64;
3022 break;
3023 case AArch64::CBWPri:
3024 IsImm = true;
3025 Width = 32;
3026 break;
3027 case AArch64::CBXPri:
3028 IsImm = true;
3029 Width = 64;
3030 break;
3031 }
3032
3034 static_cast<AArch64CC::CondCode>(MI->getOperand(0).getImm());
3035 bool NeedsRegSwap = false;
3036 bool NeedsImmDec = false;
3037 bool NeedsImmInc = false;
3038
3039#define GET_CB_OPC(IsImm, Width, ImmCond, RegCond) \
3040 (IsImm \
3041 ? (Width == 32 ? AArch64::CB##ImmCond##Wri : AArch64::CB##ImmCond##Xri) \
3042 : (Width == 8 \
3043 ? AArch64::CBB##RegCond##Wrr \
3044 : (Width == 16 ? AArch64::CBH##RegCond##Wrr \
3045 : (Width == 32 ? AArch64::CB##RegCond##Wrr \
3046 : AArch64::CB##RegCond##Xrr))))
3047 unsigned MCOpC;
3048
3049 // Decide if we need to either swap register operands or increment/decrement
3050 // immediate operands
3051 switch (CC) {
3052 default:
3053 llvm_unreachable("Invalid CB condition code");
3054 case AArch64CC::EQ:
3055 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ EQ, /* Reg-Reg */ EQ);
3056 break;
3057 case AArch64CC::NE:
3058 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ NE, /* Reg-Reg */ NE);
3059 break;
3060 case AArch64CC::HS:
3061 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ HI, /* Reg-Reg */ HS);
3062 NeedsImmDec = IsImm;
3063 break;
3064 case AArch64CC::LO:
3065 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LO, /* Reg-Reg */ HI);
3066 NeedsRegSwap = !IsImm;
3067 break;
3068 case AArch64CC::HI:
3069 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ HI, /* Reg-Reg */ HI);
3070 break;
3071 case AArch64CC::LS:
3072 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LO, /* Reg-Reg */ HS);
3073 NeedsRegSwap = !IsImm;
3074 NeedsImmInc = IsImm;
3075 break;
3076 case AArch64CC::GE:
3077 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ GT, /* Reg-Reg */ GE);
3078 NeedsImmDec = IsImm;
3079 break;
3080 case AArch64CC::LT:
3081 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LT, /* Reg-Reg */ GT);
3082 NeedsRegSwap = !IsImm;
3083 break;
3084 case AArch64CC::GT:
3085 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ GT, /* Reg-Reg */ GT);
3086 break;
3087 case AArch64CC::LE:
3088 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LT, /* Reg-Reg */ GE);
3089 NeedsRegSwap = !IsImm;
3090 NeedsImmInc = IsImm;
3091 break;
3092 }
3093#undef GET_CB_OPC
3094
3095 MCInst Inst;
3096 Inst.setOpcode(MCOpC);
3097
3098 MCOperand Lhs, Rhs, Trgt;
3099 lowerOperand(MI->getOperand(1), Lhs);
3100 lowerOperand(MI->getOperand(2), Rhs);
3101 lowerOperand(MI->getOperand(3), Trgt);
3102
3103 // Now swap, increment or decrement
3104 if (NeedsRegSwap) {
3105 assert(Lhs.isReg() && "Expected register operand for CB");
3106 assert(Rhs.isReg() && "Expected register operand for CB");
3107 Inst.addOperand(Rhs);
3108 Inst.addOperand(Lhs);
3109 } else if (NeedsImmDec) {
3110 Rhs.setImm(Rhs.getImm() - 1);
3111 Inst.addOperand(Lhs);
3112 Inst.addOperand(Rhs);
3113 } else if (NeedsImmInc) {
3114 Rhs.setImm(Rhs.getImm() + 1);
3115 Inst.addOperand(Lhs);
3116 Inst.addOperand(Rhs);
3117 } else {
3118 Inst.addOperand(Lhs);
3119 Inst.addOperand(Rhs);
3120 }
3121
3122 assert((!IsImm || (Rhs.getImm() >= 0 && Rhs.getImm() < 64)) &&
3123 "CB immediate operand out-of-bounds");
3124
3125 Inst.addOperand(Trgt);
3126 EmitToStreamer(*OutStreamer, Inst);
3127}
3128
3129// Simple pseudo-instructions have their lowering (with expansion to real
3130// instructions) auto-generated.
3131#include "AArch64GenMCPseudoLowering.inc"
3132
/// Emit one MCInst via the streamer. In debug builds, also count the
/// instruction so emitInstruction's scope-exit assertion can verify the
/// expansion fits within getInstSizeInBytes for the current MachineInstr.
void AArch64AsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
  S.emitInstruction(Inst, *STI);
#ifndef NDEBUG
  ++InstsEmitted; // checked against getInstSizeInBytes in emitInstruction
#endif
}
3139
3140void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
3141 AArch64_MC::verifyInstructionPredicates(MI->getOpcode(), STI->getFeatureBits());
3142
3143#ifndef NDEBUG
3144 InstsEmitted = 0;
3145 llvm::scope_exit CheckMISize([&]() {
3146 assert(STI->getInstrInfo()->getInstSizeInBytes(*MI) >= InstsEmitted * 4);
3147 });
3148#endif
3149
3150 // Do any auto-generated pseudo lowerings.
3151 if (MCInst OutInst; lowerPseudoInstExpansion(MI, OutInst)) {
3152 EmitToStreamer(*OutStreamer, OutInst);
3153 return;
3154 }
3155
3156 if (MI->getOpcode() == AArch64::ADRP) {
3157 for (auto &Opd : MI->operands()) {
3158 if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
3159 "swift_async_extendedFramePointerFlags") {
3160 ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
3161 }
3162 }
3163 }
3164
3165 if (AArch64FI->getLOHRelated().count(MI)) {
3166 // Generate a label for LOH related instruction
3167 MCSymbol *LOHLabel = createTempSymbol("loh");
3168 // Associate the instruction with the label
3169 LOHInstToLabel[MI] = LOHLabel;
3170 OutStreamer->emitLabel(LOHLabel);
3171 }
3172
3173 AArch64TargetStreamer *TS =
3174 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
3175 // Do any manual lowerings.
3176 switch (MI->getOpcode()) {
3177 default:
3179 "Unhandled tail call instruction");
3180 break;
3181 case AArch64::HINT: {
3182 // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
3183 // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
3184 // non-empty. If MI is the initial BTI, place the
3185 // __patchable_function_entries label after BTI.
3186 if (CurrentPatchableFunctionEntrySym &&
3187 CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
3188 MI == &MF->front().front()) {
3189 int64_t Imm = MI->getOperand(0).getImm();
3190 if ((Imm & 32) && (Imm & 6)) {
3191 MCInst Inst;
3192 MCInstLowering.Lower(MI, Inst);
3193 EmitToStreamer(*OutStreamer, Inst);
3194 CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
3195 OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
3196 return;
3197 }
3198 }
3199 break;
3200 }
3201 case AArch64::MOVMCSym: {
3202 Register DestReg = MI->getOperand(0).getReg();
3203 const MachineOperand &MO_Sym = MI->getOperand(1);
3204 MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
3205 MCOperand Hi_MCSym, Lo_MCSym;
3206
3207 Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
3208 Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);
3209
3210 MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
3211 MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);
3212
3213 MCInst MovZ;
3214 MovZ.setOpcode(AArch64::MOVZXi);
3215 MovZ.addOperand(MCOperand::createReg(DestReg));
3216 MovZ.addOperand(Hi_MCSym);
3218 EmitToStreamer(*OutStreamer, MovZ);
3219
3220 MCInst MovK;
3221 MovK.setOpcode(AArch64::MOVKXi);
3222 MovK.addOperand(MCOperand::createReg(DestReg));
3223 MovK.addOperand(MCOperand::createReg(DestReg));
3224 MovK.addOperand(Lo_MCSym);
3226 EmitToStreamer(*OutStreamer, MovK);
3227 return;
3228 }
3229 case AArch64::MOVIv2d_ns:
3230 // It is generally beneficial to rewrite "fmov s0, wzr" to "movi d0, #0".
3231 // as movi is more efficient across all cores. Newer cores can eliminate
3232 // fmovs early and there is no difference with movi, but this not true for
3233 // all implementations.
3234 //
3235 // The floating-point version doesn't quite work in rare cases on older
3236 // CPUs, so on those targets we lower this instruction to movi.16b instead.
3237 if (STI->hasZeroCycleZeroingFPWorkaround() &&
3238 MI->getOperand(1).getImm() == 0) {
3239 MCInst TmpInst;
3240 TmpInst.setOpcode(AArch64::MOVIv16b_ns);
3241 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3242 TmpInst.addOperand(MCOperand::createImm(0));
3243 EmitToStreamer(*OutStreamer, TmpInst);
3244 return;
3245 }
3246 break;
3247
3248 case AArch64::DBG_VALUE:
3249 case AArch64::DBG_VALUE_LIST:
3250 if (isVerbose() && OutStreamer->hasRawTextSupport()) {
3251 SmallString<128> TmpStr;
3252 raw_svector_ostream OS(TmpStr);
3253 PrintDebugValueComment(MI, OS);
3254 OutStreamer->emitRawText(StringRef(OS.str()));
3255 }
3256 return;
3257
3258 case AArch64::EMITBKEY: {
3259 ExceptionHandling ExceptionHandlingType = MAI.getExceptionHandlingType();
3260 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
3261 ExceptionHandlingType != ExceptionHandling::ARM)
3262 return;
3263
3264 if (getFunctionCFISectionType(*MF) == CFISection::None)
3265 return;
3266
3267 OutStreamer->emitCFIBKeyFrame();
3268 return;
3269 }
3270
3271 case AArch64::EMITMTETAGGED: {
3272 ExceptionHandling ExceptionHandlingType = MAI.getExceptionHandlingType();
3273 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
3274 ExceptionHandlingType != ExceptionHandling::ARM)
3275 return;
3276
3277 if (getFunctionCFISectionType(*MF) != CFISection::None)
3278 OutStreamer->emitCFIMTETaggedFrame();
3279 return;
3280 }
3281
3282 case AArch64::AUTx16x17: {
3283 const Register Pointer = AArch64::X16;
3284 const Register Scratch = AArch64::X17;
3285
3286 PtrAuthSchema AuthSchema((AArch64PACKey::ID)MI->getOperand(0).getImm(),
3287 MI->getOperand(1).getImm(), MI->getOperand(2));
3288
3289 emitPtrauthAuthResign(Pointer, Scratch, AuthSchema, std::nullopt,
3290 std::nullopt, MI->getDeactivationSymbol());
3291 return;
3292 }
3293
3294 case AArch64::AUTxMxN: {
3295 const Register Pointer = MI->getOperand(0).getReg();
3296 const Register Scratch = MI->getOperand(1).getReg();
3297
3298 PtrAuthSchema AuthSchema((AArch64PACKey::ID)MI->getOperand(3).getImm(),
3299 MI->getOperand(4).getImm(), MI->getOperand(5));
3300
3301 emitPtrauthAuthResign(Pointer, Scratch, AuthSchema, std::nullopt,
3302 std::nullopt, MI->getDeactivationSymbol());
3303 return;
3304 }
3305
3306 case AArch64::AUTPAC: {
3307 const Register Pointer = AArch64::X16;
3308 const Register Scratch = AArch64::X17;
3309
3310 PtrAuthSchema AuthSchema((AArch64PACKey::ID)MI->getOperand(0).getImm(),
3311 MI->getOperand(1).getImm(), MI->getOperand(2));
3312
3313 PtrAuthSchema SignSchema((AArch64PACKey::ID)MI->getOperand(3).getImm(),
3314 MI->getOperand(4).getImm(), MI->getOperand(5));
3315
3316 emitPtrauthAuthResign(Pointer, Scratch, AuthSchema, SignSchema,
3317 std::nullopt, MI->getDeactivationSymbol());
3318 return;
3319 }
3320
3321 case AArch64::AUTRELLOADPAC: {
3322 const Register Pointer = AArch64::X16;
3323 const Register Scratch = AArch64::X17;
3324
3325 PtrAuthSchema AuthSchema((AArch64PACKey::ID)MI->getOperand(0).getImm(),
3326 MI->getOperand(1).getImm(), MI->getOperand(2));
3327
3328 PtrAuthSchema SignSchema((AArch64PACKey::ID)MI->getOperand(3).getImm(),
3329 MI->getOperand(4).getImm(), MI->getOperand(5));
3330
3331 emitPtrauthAuthResign(Pointer, Scratch, AuthSchema, SignSchema,
3332 MI->getOperand(6).getImm(),
3333 MI->getDeactivationSymbol());
3334
3335 return;
3336 }
3337
3338 case AArch64::PAC:
3339 emitPtrauthSign(MI);
3340 return;
3341
3342 case AArch64::LOADauthptrstatic:
3343 LowerLOADauthptrstatic(*MI);
3344 return;
3345
3346 case AArch64::LOADgotPAC:
3347 case AArch64::MOVaddrPAC:
3348 LowerMOVaddrPAC(*MI);
3349 return;
3350
3351 case AArch64::LOADgotAUTH:
3352 LowerLOADgotAUTH(*MI);
3353 return;
3354
3355 case AArch64::BRA:
3356 case AArch64::BLRA:
3357 emitPtrauthBranch(MI);
3358 return;
3359
3360 // Tail calls use pseudo instructions so they have the proper code-gen
3361 // attributes (isCall, isReturn, etc.). We lower them to the real
3362 // instruction here.
3363 case AArch64::AUTH_TCRETURN:
3364 case AArch64::AUTH_TCRETURN_BTI: {
3365 Register Callee = MI->getOperand(0).getReg();
3366 const auto Key = (AArch64PACKey::ID)MI->getOperand(2).getImm();
3367 const uint64_t Disc = MI->getOperand(3).getImm();
3368
3369 Register AddrDisc = MI->getOperand(4).getReg();
3370
3371 Register ScratchReg = Callee == AArch64::X16 ? AArch64::X17 : AArch64::X16;
3372
3373 emitPtrauthTailCallHardening(MI);
3374
3375 // See the comments in emitPtrauthBranch.
3376 if (Callee == AddrDisc)
3377 report_fatal_error("Call target is signed with its own value");
3378
3379 // After isX16X17Safer predicate was introduced, emitPtrauthDiscriminator is
3380 // no longer restricted to only reusing AddrDisc when it is X16 or X17
3381 // (which are implicit-def'ed by AUTH_TCRETURN pseudos), thus impose this
3382 // restriction manually not to clobber an unexpected register.
3383 bool AddrDiscIsImplicitDef =
3384 AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17;
3385 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, ScratchReg,
3386 AddrDiscIsImplicitDef);
3387 emitBLRA(/*IsCall*/ false, Key, Callee, DiscReg);
3388 return;
3389 }
3390
3391 case AArch64::TCRETURNri:
3392 case AArch64::TCRETURNrix16x17:
3393 case AArch64::TCRETURNrix17:
3394 case AArch64::TCRETURNrinotx16:
3395 case AArch64::TCRETURNriALL: {
3396 emitPtrauthTailCallHardening(MI);
3397
3398 recordIfImportCall(MI);
3399 MCInst TmpInst;
3400 TmpInst.setOpcode(AArch64::BR);
3401 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3402 EmitToStreamer(*OutStreamer, TmpInst);
3403 return;
3404 }
3405 case AArch64::TCRETURNdi: {
3406 emitPtrauthTailCallHardening(MI);
3407
3408 MCOperand Dest;
3409 MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
3410 recordIfImportCall(MI);
3411 MCInst TmpInst;
3412 TmpInst.setOpcode(AArch64::B);
3413 TmpInst.addOperand(Dest);
3414 EmitToStreamer(*OutStreamer, TmpInst);
3415 return;
3416 }
3417 case AArch64::SpeculationBarrierISBDSBEndBB: {
3418 // Print DSB SYS + ISB
3419 MCInst TmpInstDSB;
3420 TmpInstDSB.setOpcode(AArch64::DSB);
3421 TmpInstDSB.addOperand(MCOperand::createImm(0xf));
3422 EmitToStreamer(*OutStreamer, TmpInstDSB);
3423 MCInst TmpInstISB;
3424 TmpInstISB.setOpcode(AArch64::ISB);
3425 TmpInstISB.addOperand(MCOperand::createImm(0xf));
3426 EmitToStreamer(*OutStreamer, TmpInstISB);
3427 return;
3428 }
3429 case AArch64::SpeculationBarrierSBEndBB: {
3430 // Print SB
3431 MCInst TmpInstSB;
3432 TmpInstSB.setOpcode(AArch64::SB);
3433 EmitToStreamer(*OutStreamer, TmpInstSB);
3434 return;
3435 }
3436 case AArch64::TLSDESC_AUTH_CALLSEQ: {
3437 /// lower this to:
3438 /// adrp x0, :tlsdesc_auth:var
3439 /// ldr x16, [x0, #:tlsdesc_auth_lo12:var]
3440 /// add x0, x0, #:tlsdesc_auth_lo12:var
3441 /// blraa x16, x0
3442 /// (TPIDR_EL0 offset now in x0)
3443 const MachineOperand &MO_Sym = MI->getOperand(0);
3444 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3445 MCOperand SymTLSDescLo12, SymTLSDesc;
3446 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3447 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3448 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3449 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3450
3451 MCInst Adrp;
3452 Adrp.setOpcode(AArch64::ADRP);
3453 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3454 Adrp.addOperand(SymTLSDesc);
3455 EmitToStreamer(*OutStreamer, Adrp);
3456
3457 MCInst Ldr;
3458 Ldr.setOpcode(AArch64::LDRXui);
3459 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3460 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3461 Ldr.addOperand(SymTLSDescLo12);
3463 EmitToStreamer(*OutStreamer, Ldr);
3464
3465 MCInst Add;
3466 Add.setOpcode(AArch64::ADDXri);
3467 Add.addOperand(MCOperand::createReg(AArch64::X0));
3468 Add.addOperand(MCOperand::createReg(AArch64::X0));
3469 Add.addOperand(SymTLSDescLo12);
3471 EmitToStreamer(*OutStreamer, Add);
3472
3473 // Authenticated TLSDESC accesses are not relaxed.
3474 // Thus, do not emit .tlsdesccall for AUTH TLSDESC.
3475
3476 MCInst Blraa;
3477 Blraa.setOpcode(AArch64::BLRAA);
3478 Blraa.addOperand(MCOperand::createReg(AArch64::X16));
3479 Blraa.addOperand(MCOperand::createReg(AArch64::X0));
3480 EmitToStreamer(*OutStreamer, Blraa);
3481
3482 return;
3483 }
3484 case AArch64::TLSDESC_CALLSEQ: {
3485 /// lower this to:
3486 /// adrp x0, :tlsdesc:var
3487 /// ldr x1, [x0, #:tlsdesc_lo12:var]
3488 /// add x0, x0, #:tlsdesc_lo12:var
3489 /// .tlsdesccall var
3490 /// blr x1
3491 /// (TPIDR_EL0 offset now in x0)
3492 const MachineOperand &MO_Sym = MI->getOperand(0);
3493 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3494 MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
3495 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3496 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3497 MCInstLowering.lowerOperand(MO_Sym, Sym);
3498 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3499 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3500
3501 MCInst Adrp;
3502 Adrp.setOpcode(AArch64::ADRP);
3503 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3504 Adrp.addOperand(SymTLSDesc);
3505 EmitToStreamer(*OutStreamer, Adrp);
3506
3507 MCInst Ldr;
3508 if (STI->isTargetILP32()) {
3509 Ldr.setOpcode(AArch64::LDRWui);
3510 Ldr.addOperand(MCOperand::createReg(AArch64::W1));
3511 } else {
3512 Ldr.setOpcode(AArch64::LDRXui);
3513 Ldr.addOperand(MCOperand::createReg(AArch64::X1));
3514 }
3515 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3516 Ldr.addOperand(SymTLSDescLo12);
3518 EmitToStreamer(*OutStreamer, Ldr);
3519
3520 MCInst Add;
3521 if (STI->isTargetILP32()) {
3522 Add.setOpcode(AArch64::ADDWri);
3523 Add.addOperand(MCOperand::createReg(AArch64::W0));
3524 Add.addOperand(MCOperand::createReg(AArch64::W0));
3525 } else {
3526 Add.setOpcode(AArch64::ADDXri);
3527 Add.addOperand(MCOperand::createReg(AArch64::X0));
3528 Add.addOperand(MCOperand::createReg(AArch64::X0));
3529 }
3530 Add.addOperand(SymTLSDescLo12);
3532 EmitToStreamer(*OutStreamer, Add);
3533
3534 // Emit a relocation-annotation. This expands to no code, but requests
3535 // the following instruction gets an R_AARCH64_TLSDESC_CALL.
3536 MCInst TLSDescCall;
3537 TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
3538 TLSDescCall.addOperand(Sym);
3539 EmitToStreamer(*OutStreamer, TLSDescCall);
3540#ifndef NDEBUG
3541 --InstsEmitted; // no code emitted
3542#endif
3543
3544 MCInst Blr;
3545 Blr.setOpcode(AArch64::BLR);
3546 Blr.addOperand(MCOperand::createReg(AArch64::X1));
3547 EmitToStreamer(*OutStreamer, Blr);
3548
3549 return;
3550 }
3551
3552 case AArch64::JumpTableDest32:
3553 case AArch64::JumpTableDest16:
3554 case AArch64::JumpTableDest8:
3555 LowerJumpTableDest(*OutStreamer, *MI);
3556 return;
3557
3558 case AArch64::BR_JumpTable:
3559 LowerHardenedBRJumpTable(*MI);
3560 return;
3561
3562 case AArch64::FMOVH0:
3563 case AArch64::FMOVS0:
3564 case AArch64::FMOVD0:
3565 emitFMov0(*MI);
3566 return;
3567
3568 case AArch64::MOPSMemoryCopyPseudo:
3569 case AArch64::MOPSMemoryMovePseudo:
3570 case AArch64::MOPSMemorySetPseudo:
3571 case AArch64::MOPSMemorySetTaggingPseudo:
3572 LowerMOPS(*OutStreamer, *MI);
3573 return;
3574
3575 case TargetOpcode::STACKMAP:
3576 return LowerSTACKMAP(*OutStreamer, SM, *MI);
3577
3578 case TargetOpcode::PATCHPOINT:
3579 return LowerPATCHPOINT(*OutStreamer, SM, *MI);
3580
3581 case TargetOpcode::STATEPOINT:
3582 return LowerSTATEPOINT(*OutStreamer, SM, *MI);
3583
3584 case TargetOpcode::FAULTING_OP:
3585 return LowerFAULTING_OP(*MI);
3586
3587 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
3588 LowerPATCHABLE_FUNCTION_ENTER(*MI);
3589 return;
3590
3591 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
3592 LowerPATCHABLE_FUNCTION_EXIT(*MI);
3593 return;
3594
3595 case TargetOpcode::PATCHABLE_TAIL_CALL:
3596 LowerPATCHABLE_TAIL_CALL(*MI);
3597 return;
3598 case TargetOpcode::PATCHABLE_EVENT_CALL:
3599 return LowerPATCHABLE_EVENT_CALL(*MI, false);
3600 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
3601 return LowerPATCHABLE_EVENT_CALL(*MI, true);
3602
3603 case AArch64::KCFI_CHECK:
3604 LowerKCFI_CHECK(*MI);
3605 return;
3606
3607 case AArch64::HWASAN_CHECK_MEMACCESS:
3608 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
3609 case AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW:
3610 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW:
3611 LowerHWASAN_CHECK_MEMACCESS(*MI);
3612 return;
3613
3614 case AArch64::SEH_StackAlloc:
3615 TS->emitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
3616 return;
3617
3618 case AArch64::SEH_SaveFPLR:
3619 TS->emitARM64WinCFISaveFPLR(MI->getOperand(0).getImm());
3620 return;
3621
3622 case AArch64::SEH_SaveFPLR_X:
3623 assert(MI->getOperand(0).getImm() < 0 &&
3624 "Pre increment SEH opcode must have a negative offset");
3625 TS->emitARM64WinCFISaveFPLRX(-MI->getOperand(0).getImm());
3626 return;
3627
3628 case AArch64::SEH_SaveReg:
3629 TS->emitARM64WinCFISaveReg(MI->getOperand(0).getImm(),
3630 MI->getOperand(1).getImm());
3631 return;
3632
3633 case AArch64::SEH_SaveReg_X:
3634 assert(MI->getOperand(1).getImm() < 0 &&
3635 "Pre increment SEH opcode must have a negative offset");
3636 TS->emitARM64WinCFISaveRegX(MI->getOperand(0).getImm(),
3637 -MI->getOperand(1).getImm());
3638 return;
3639
3640 case AArch64::SEH_SaveRegP:
3641 if (MI->getOperand(1).getImm() == 30 && MI->getOperand(0).getImm() >= 19 &&
3642 MI->getOperand(0).getImm() <= 28) {
3643 assert((MI->getOperand(0).getImm() - 19) % 2 == 0 &&
3644 "Register paired with LR must be odd");
3645 TS->emitARM64WinCFISaveLRPair(MI->getOperand(0).getImm(),
3646 MI->getOperand(2).getImm());
3647 return;
3648 }
3649 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3650 "Non-consecutive registers not allowed for save_regp");
3651 TS->emitARM64WinCFISaveRegP(MI->getOperand(0).getImm(),
3652 MI->getOperand(2).getImm());
3653 return;
3654
3655 case AArch64::SEH_SaveRegP_X:
3656 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3657 "Non-consecutive registers not allowed for save_regp_x");
3658 assert(MI->getOperand(2).getImm() < 0 &&
3659 "Pre increment SEH opcode must have a negative offset");
3660 TS->emitARM64WinCFISaveRegPX(MI->getOperand(0).getImm(),
3661 -MI->getOperand(2).getImm());
3662 return;
3663
3664 case AArch64::SEH_SaveFReg:
3665 TS->emitARM64WinCFISaveFReg(MI->getOperand(0).getImm(),
3666 MI->getOperand(1).getImm());
3667 return;
3668
3669 case AArch64::SEH_SaveFReg_X:
3670 assert(MI->getOperand(1).getImm() < 0 &&
3671 "Pre increment SEH opcode must have a negative offset");
3672 TS->emitARM64WinCFISaveFRegX(MI->getOperand(0).getImm(),
3673 -MI->getOperand(1).getImm());
3674 return;
3675
3676 case AArch64::SEH_SaveFRegP:
3677 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3678 "Non-consecutive registers not allowed for save_regp");
3679 TS->emitARM64WinCFISaveFRegP(MI->getOperand(0).getImm(),
3680 MI->getOperand(2).getImm());
3681 return;
3682
3683 case AArch64::SEH_SaveFRegP_X:
3684 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3685 "Non-consecutive registers not allowed for save_regp_x");
3686 assert(MI->getOperand(2).getImm() < 0 &&
3687 "Pre increment SEH opcode must have a negative offset");
3688 TS->emitARM64WinCFISaveFRegPX(MI->getOperand(0).getImm(),
3689 -MI->getOperand(2).getImm());
3690 return;
3691
3692 case AArch64::SEH_SetFP:
3694 return;
3695
3696 case AArch64::SEH_AddFP:
3697 TS->emitARM64WinCFIAddFP(MI->getOperand(0).getImm());
3698 return;
3699
3700 case AArch64::SEH_Nop:
3701 TS->emitARM64WinCFINop();
3702 return;
3703
3704 case AArch64::SEH_PrologEnd:
3706 return;
3707
3708 case AArch64::SEH_EpilogStart:
3710 return;
3711
3712 case AArch64::SEH_EpilogEnd:
3714 return;
3715
3716 case AArch64::SEH_PACSignLR:
3718 return;
3719
3720 case AArch64::SEH_SaveAnyRegI:
3721 assert(MI->getOperand(1).getImm() <= 1008 &&
3722 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3723 TS->emitARM64WinCFISaveAnyRegI(MI->getOperand(0).getImm(),
3724 MI->getOperand(1).getImm());
3725 return;
3726
3727 case AArch64::SEH_SaveAnyRegIP:
3728 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3729 "Non-consecutive registers not allowed for save_any_reg");
3730 assert(MI->getOperand(2).getImm() <= 1008 &&
3731 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3732 TS->emitARM64WinCFISaveAnyRegIP(MI->getOperand(0).getImm(),
3733 MI->getOperand(2).getImm());
3734 return;
3735
3736 case AArch64::SEH_SaveAnyRegQP:
3737 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3738 "Non-consecutive registers not allowed for save_any_reg");
3739 assert(MI->getOperand(2).getImm() >= 0 &&
3740 "SaveAnyRegQP SEH opcode offset must be non-negative");
3741 assert(MI->getOperand(2).getImm() <= 1008 &&
3742 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3743 TS->emitARM64WinCFISaveAnyRegQP(MI->getOperand(0).getImm(),
3744 MI->getOperand(2).getImm());
3745 return;
3746
3747 case AArch64::SEH_SaveAnyRegQPX:
3748 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3749 "Non-consecutive registers not allowed for save_any_reg");
3750 assert(MI->getOperand(2).getImm() < 0 &&
3751 "SaveAnyRegQPX SEH opcode offset must be negative");
3752 assert(MI->getOperand(2).getImm() >= -1008 &&
3753 "SaveAnyRegQPX SEH opcode offset must fit into 6 bits");
3754 TS->emitARM64WinCFISaveAnyRegQPX(MI->getOperand(0).getImm(),
3755 -MI->getOperand(2).getImm());
3756 return;
3757
3758 case AArch64::SEH_AllocZ:
3759 assert(MI->getOperand(0).getImm() >= 0 &&
3760 "AllocZ SEH opcode offset must be non-negative");
3761 assert(MI->getOperand(0).getImm() <= 255 &&
3762 "AllocZ SEH opcode offset must fit into 8 bits");
3763 TS->emitARM64WinCFIAllocZ(MI->getOperand(0).getImm());
3764 return;
3765
3766 case AArch64::SEH_SaveZReg:
3767 assert(MI->getOperand(1).getImm() >= 0 &&
3768 "SaveZReg SEH opcode offset must be non-negative");
3769 assert(MI->getOperand(1).getImm() <= 255 &&
3770 "SaveZReg SEH opcode offset must fit into 8 bits");
3771 TS->emitARM64WinCFISaveZReg(MI->getOperand(0).getImm(),
3772 MI->getOperand(1).getImm());
3773 return;
3774
3775 case AArch64::SEH_SavePReg:
3776 assert(MI->getOperand(1).getImm() >= 0 &&
3777 "SavePReg SEH opcode offset must be non-negative");
3778 assert(MI->getOperand(1).getImm() <= 255 &&
3779 "SavePReg SEH opcode offset must fit into 8 bits");
3780 TS->emitARM64WinCFISavePReg(MI->getOperand(0).getImm(),
3781 MI->getOperand(1).getImm());
3782 return;
3783
3784 case AArch64::BLR:
3785 case AArch64::BR: {
3786 recordIfImportCall(MI);
3787 MCInst TmpInst;
3788 MCInstLowering.Lower(MI, TmpInst);
3789 EmitToStreamer(*OutStreamer, TmpInst);
3790 return;
3791 }
3792 case AArch64::CBWPri:
3793 case AArch64::CBXPri:
3794 case AArch64::CBBAssertExt:
3795 case AArch64::CBHAssertExt:
3796 case AArch64::CBWPrr:
3797 case AArch64::CBXPrr:
3798 emitCBPseudoExpansion(MI);
3799 return;
3800 }
3801
3802 if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol()))
3803 return;
3804
3805 // Finally, do the automated lowerings for everything else.
3806 MCInst TmpInst;
3807 MCInstLowering.Lower(MI, TmpInst);
3808 EmitToStreamer(*OutStreamer, TmpInst);
3809}
3810
3811void AArch64AsmPrinter::recordIfImportCall(
3812 const llvm::MachineInstr *BranchInst) {
3813 if (!EnableImportCallOptimization)
3814 return;
3815
3816 auto [GV, OpFlags] = BranchInst->getMF()->tryGetCalledGlobal(BranchInst);
3817 if (GV && GV->hasDLLImportStorageClass()) {
3818 auto *CallSiteSymbol = MMI->getContext().createNamedTempSymbol("impcall");
3819 OutStreamer->emitLabel(CallSiteSymbol);
3820
3821 auto *CalledSymbol = MCInstLowering.GetGlobalValueSymbol(GV, OpFlags);
3822 SectionToImportedFunctionCalls[OutStreamer->getCurrentSectionOnly()]
3823 .push_back({CallSiteSymbol, CalledSymbol});
3824 }
3825}
3826
// Emit the body of a Mach-O ifunc stub: load the resolved target out of the
// lazy pointer (via the GOT) into x16 and branch to it. On arm64e the branch
// is the zero-discriminator authenticated form (BRAAZ) instead of plain BR.
 3827void AArch64AsmPrinter::emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
 3828 MCSymbol *LazyPointer) {
 3829 // _ifunc:
 3830 // adrp x16, lazy_pointer@GOTPAGE
 3831 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
 3832 // ldr x16, [x16]
 3833 // br x16
 3834
 3835 {
 3836 MCInst Adrp;
 3837 Adrp.setOpcode(AArch64::ADRP);
 3838 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
 3839 MCOperand SymPage;
 3840 MCInstLowering.lowerOperand(
// NOTE(review): the operand being lowered here (orig. lines 3841-3842) was
// lost in this extraction — presumably a MachineOperand naming LazyPointer
// with GOT-page flags, matching the "adrp ... @GOTPAGE" comment above.
// Confirm against the upstream source before relying on this listing.
 3843 SymPage);
 3844 Adrp.addOperand(SymPage);
 3845 EmitToStreamer(Adrp);
 3846 }
 3847
 3848 {
 3849 MCInst Ldr;
 3850 Ldr.setOpcode(AArch64::LDRXui);
 3851 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
 3852 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
 3853 MCOperand SymPageOff;
 3854 MCInstLowering.lowerOperand(
// NOTE(review): operand lines (orig. 3855-3856) missing from this extraction —
// presumably the LazyPointer page-offset operand; an immediate-operand line
// (orig. 3859) also appears to be missing below. Verify upstream.
 3857 SymPageOff);
 3858 Ldr.addOperand(SymPageOff);
 3860 EmitToStreamer(Ldr);
 3861 }
 3862
// ldr x16, [x16]: dereference the lazy pointer to get the resolved address.
 3863 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
 3864 .addReg(AArch64::X16)
 3865 .addReg(AArch64::X16)
 3866 .addImm(0));
 3867
// Tail-branch to the target; authenticated (BRAAZ) on arm64e triples.
 3868 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
 3869 : AArch64::BR)
 3870 .addReg(AArch64::X16));
 3871}
3872
// Emit the Mach-O ifunc lazy-resolution helper: spill all argument registers
// (x0-x7, d0-d7) and fp/lr, call the resolver, store its result (x0) through
// the lazy pointer, restore everything, and branch to the resolved target in
// x16. Uses pre/post-indexed STP/LDP so sp bookkeeping is folded into the
// load/store instructions themselves (smaller code; this runs only once).
 3873void AArch64AsmPrinter::emitMachOIFuncStubHelperBody(Module &M,
 3874 const GlobalIFunc &GI,
 3875 MCSymbol *LazyPointer) {
 3876 // These stub helpers are only ever called once, so here we're optimizing for
 3877 // minimum size by using the pre-indexed store variants, which saves a few
 3878 // bytes of instructions to bump & restore sp.
 3879
 3880 // _ifunc.stub_helper:
 3881 // stp fp, lr, [sp, #-16]!
 3882 // mov fp, sp
 3883 // stp x1, x0, [sp, #-16]!
 3884 // stp x3, x2, [sp, #-16]!
 3885 // stp x5, x4, [sp, #-16]!
 3886 // stp x7, x6, [sp, #-16]!
 3887 // stp d1, d0, [sp, #-16]!
 3888 // stp d3, d2, [sp, #-16]!
 3889 // stp d5, d4, [sp, #-16]!
 3890 // stp d7, d6, [sp, #-16]!
 3891 // bl _resolver
 3892 // adrp x16, lazy_pointer@GOTPAGE
 3893 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
 3894 // str x0, [x16]
 3895 // mov x16, x0
 3896 // ldp d7, d6, [sp], #16
 3897 // ldp d5, d4, [sp], #16
 3898 // ldp d3, d2, [sp], #16
 3899 // ldp d1, d0, [sp], #16
 3900 // ldp x7, x6, [sp], #16
 3901 // ldp x5, x4, [sp], #16
 3902 // ldp x3, x2, [sp], #16
 3903 // ldp x1, x0, [sp], #16
 3904 // ldp fp, lr, [sp], #16
 3905 // br x16
 3906
// stp fp, lr, [sp, #-16]! — the #-2 immediate is in 8-byte register units.
 3907 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
 3908 .addReg(AArch64::SP)
 3909 .addReg(AArch64::FP)
 3910 .addReg(AArch64::LR)
 3911 .addReg(AArch64::SP)
 3912 .addImm(-2));
 3913
// mov fp, sp (encoded as add fp, sp, #0).
 3914 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
 3915 .addReg(AArch64::FP)
 3916 .addReg(AArch64::SP)
 3917 .addImm(0)
 3918 .addImm(0));
 3919
// Spill GPR argument registers in pairs: x1/x0, x3/x2, x5/x4, x7/x6.
 3920 for (int I = 0; I != 4; ++I)
 3921 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
 3922 .addReg(AArch64::SP)
 3923 .addReg(AArch64::X1 + 2 * I)
 3924 .addReg(AArch64::X0 + 2 * I)
 3925 .addReg(AArch64::SP)
 3926 .addImm(-2));
 3927
// Spill FP argument registers in pairs: d1/d0 ... d7/d6.
 3928 for (int I = 0; I != 4; ++I)
 3929 EmitToStreamer(MCInstBuilder(AArch64::STPDpre)
 3930 .addReg(AArch64::SP)
 3931 .addReg(AArch64::D1 + 2 * I)
 3932 .addReg(AArch64::D0 + 2 * I)
 3933 .addReg(AArch64::SP)
 3934 .addImm(-2));
 3935
 3936 EmitToStreamer(
 3937 MCInstBuilder(AArch64::BL)
// NOTE(review): the BL target-expression line (orig. 3938) is missing from
// this extraction — per the "bl _resolver" comment above it presumably adds a
// symbol-ref expression for GI's resolver. Confirm against upstream.
 3939
 3940 {
 3941 MCInst Adrp;
 3942 Adrp.setOpcode(AArch64::ADRP);
 3943 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
 3944 MCOperand SymPage;
 3945 MCInstLowering.lowerOperand(
// The "+ 1" skips the Mach-O '_' name prefix when building the ExternalSymbol
// operand. NOTE(review): the target-flags argument line (orig. 3947) is
// missing here — presumably the GOT-page flags; verify upstream.
 3946 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
 3948 SymPage);
 3949 Adrp.addOperand(SymPage);
 3950 EmitToStreamer(Adrp);
 3951 }
 3952
 3953 {
 3954 MCInst Ldr;
 3955 Ldr.setOpcode(AArch64::LDRXui);
 3956 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
 3957 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
 3958 MCOperand SymPageOff;
 3959 MCInstLowering.lowerOperand(
// NOTE(review): flags line (orig. 3961) and an immediate-operand line
// (orig. 3964) are missing from this extraction; verify upstream.
 3960 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
 3962 SymPageOff);
 3963 Ldr.addOperand(SymPageOff);
 3965 EmitToStreamer(Ldr);
 3966 }
 3967
// str x0, [x16]: cache the resolver's result in the lazy pointer.
 3968 EmitToStreamer(MCInstBuilder(AArch64::STRXui)
 3969 .addReg(AArch64::X0)
 3970 .addReg(AArch64::X16)
 3971 .addImm(0));
 3972
// mov x16, x0 (add x16, x0, #0): keep the target while x0 is restored.
 3973 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
 3974 .addReg(AArch64::X16)
 3975 .addReg(AArch64::X0)
 3976 .addImm(0)
 3977 .addImm(0));
 3978
// Restore FP then GPR argument registers in reverse spill order.
 3979 for (int I = 3; I != -1; --I)
 3980 EmitToStreamer(MCInstBuilder(AArch64::LDPDpost)
 3981 .addReg(AArch64::SP)
 3982 .addReg(AArch64::D1 + 2 * I)
 3983 .addReg(AArch64::D0 + 2 * I)
 3984 .addReg(AArch64::SP)
 3985 .addImm(2));
 3986
 3987 for (int I = 3; I != -1; --I)
 3988 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
 3989 .addReg(AArch64::SP)
 3990 .addReg(AArch64::X1 + 2 * I)
 3991 .addReg(AArch64::X0 + 2 * I)
 3992 .addReg(AArch64::SP)
 3993 .addImm(2));
 3994
// ldp fp, lr, [sp], #16 — undo the prologue frame save.
 3995 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
 3996 .addReg(AArch64::SP)
 3997 .addReg(AArch64::FP)
 3998 .addReg(AArch64::LR)
 3999 .addReg(AArch64::SP)
 4000 .addImm(2));
 4001
// Branch to the resolved target; authenticated (BRAAZ) on arm64e triples.
 4002 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
 4003 : AArch64::BR)
 4004 .addReg(AArch64::X16));
 4005}
4006
4007const MCExpr *AArch64AsmPrinter::lowerConstant(const Constant *CV,
4008 const Constant *BaseCV,
4009 uint64_t Offset) {
4010 if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
4011 return MCSymbolRefExpr::create(MCInstLowering.GetGlobalValueSymbol(GV, 0),
4012 OutContext);
4013 }
4014
4015 return AsmPrinter::lowerConstant(CV, BaseCV, Offset);
4016}
4017
// Unique address used as the legacy-pass-manager identity for this pass.
 4018char AArch64AsmPrinter::ID = 0;
 4019
// Register the pass under the CLI name "aarch64-asm-printer".
// The two trailing 'false' flags are the INITIALIZE_PASS cfg/analysis args.
 4020INITIALIZE_PASS(AArch64AsmPrinter, "aarch64-asm-printer",
 4021 "AArch64 Assembly Printer", false, false)
4022
// Force static initialization.
// Entry point called by the target registry machinery to hook this asm
// printer up to the registered AArch64 targets.
 4024extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
 4025LLVMInitializeAArch64AsmPrinter() {
// NOTE(review): the function body (orig. lines 4026-4030) is missing from
// this extraction — presumably RegisterAsmPrinter<AArch64AsmPrinter> calls
// for the le/be/arm64/arm64_32 targets (cf. the RegisterAsmPrinter and
// getTheAArch64*Target references in the index below). Confirm upstream.
 4031}
static cl::opt< PtrauthCheckMode > PtrauthAuthChecks("aarch64-ptrauth-auth-checks", cl::Hidden, cl::values(clEnumValN(Unchecked, "none", "don't test for failure"), clEnumValN(Poison, "poison", "poison on failure"), clEnumValN(Trap, "trap", "trap on failure")), cl::desc("Check pointer authentication auth/resign failures"), cl::init(Default))
PtrauthCheckMode
@ Unchecked
#define GET_CB_OPC(IsImm, Width, ImmCond, RegCond)
static void emitAuthenticatedPointer(MCStreamer &OutStreamer, MCSymbol *StubLabel, const MCExpr *StubAuthPtrRef)
static bool targetSupportsIRelativeRelocation(const Triple &TT)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
#define X(NUM, ENUM, NAME)
Definition ELF.h:851
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_ABI
Definition Compiler.h:213
#define LLVM_EXTERNAL_VISIBILITY
Definition Compiler.h:132
This file defines the DenseMap class.
@ Default
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
print mir2vec MIR2Vec Vocabulary Printer Pass
Definition MIR2Vec.cpp:598
Machine Check Debug Module
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static constexpr unsigned SM(unsigned Version)
#define P(N)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:483
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool printOperand(raw_ostream &OS, const SelectionDAG *G, const SDValue Value)
This file defines the SmallString class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static bool printAsmMRegister(const X86AsmPrinter &P, const MachineOperand &MO, char Mode, raw_ostream &O)
static const AArch64AuthMCExpr * create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key, bool HasAddressDiversity, MCContext &Ctx, SMLoc Loc=SMLoc())
const SetOfInstructions & getLOHRelated() const
unsigned getJumpTableEntrySize(int Idx) const
MCSymbol * getJumpTableEntryPCRelSymbol(int Idx) const
static bool shouldSignReturnAddress(SignReturnAddress Condition, bool IsLRSpilled)
std::optional< std::string > getOutliningStyle() const
const MILOHContainer & getLOHContainer() const
void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym)
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
static bool isTailCallReturnInst(const MachineInstr &MI)
Returns true if MI is one of the TCRETURN* instructions.
AArch64MCInstLower - This class is used to lower an MachineInstr into an MCInst.
MCSymbol * GetGlobalValueSymbol(const GlobalValue *GV, unsigned TargetFlags) const
void Lower(const MachineInstr *MI, MCInst &OutMI) const
bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const
virtual void emitARM64WinCFISaveRegP(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveRegPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegQP(unsigned Reg, int Offset)
virtual void emitAttributesSubsection(StringRef VendorName, AArch64BuildAttributes::SubsectionOptional IsOptional, AArch64BuildAttributes::SubsectionType ParameterType)
Build attributes implementation.
virtual void emitARM64WinCFISavePReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegI(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFRegPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveRegX(unsigned Reg, int Offset)
virtual void emitARM64WinCFIAllocStack(unsigned Size)
virtual void emitARM64WinCFISaveFPLRX(int Offset)
virtual void emitARM64WinCFIAllocZ(int Offset)
virtual void emitDirectiveVariantPCS(MCSymbol *Symbol)
Callback used to implement the .variant_pcs directive.
virtual void emitARM64WinCFIAddFP(unsigned Size)
virtual void emitARM64WinCFISaveFPLR(int Offset)
virtual void emitARM64WinCFISaveFRegP(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegQPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFRegX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveZReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveLRPair(unsigned Reg, int Offset)
virtual void emitAttribute(StringRef VendorName, unsigned Tag, unsigned Value, std::string String)
virtual void emitARM64WinCFISaveAnyRegIP(unsigned Reg, int Offset)
void setPreservesAll()
Set by analyses that do not transform their input at all.
const T & front() const
front - Get the first element.
Definition ArrayRef.h:145
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
This class is intended to be used as a driving class for all asm writers.
Definition AsmPrinter.h:91
virtual void emitGlobalAlias(const Module &M, const GlobalAlias &GA)
virtual MCSymbol * GetCPISymbol(unsigned CPID) const
Return the symbol for the specified constant pool entry.
virtual const MCExpr * lowerConstant(const Constant *CV, const Constant *BaseCV=nullptr, uint64_t Offset=0)
Lower the specified LLVM Constant to an MCExpr.
void getAnalysisUsage(AnalysisUsage &AU) const override
Record analysis usage.
virtual void emitXXStructor(const DataLayout &DL, const Constant *CV)
Targets can override this to change how global constants that are part of a C++ static/global constru...
Definition AsmPrinter.h:658
virtual void emitFunctionEntryLabel()
EmitFunctionEntryLabel - Emit the label that is the entrypoint for the function.
virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, const char *ExtraCode, raw_ostream &OS)
Print the specified operand of MI, an INLINEASM instruction, using the specified assembler variant.
virtual const MCExpr * lowerBlockAddressConstant(const BlockAddress &BA)
Lower the specified BlockAddress to an MCExpr.
Function * getFunction() const
Definition Constants.h:1101
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1226
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1229
Constant * getDeactivationSymbol() const
Definition Constants.h:1248
bool hasAddressDiscriminator() const
Whether there is any non-null address discriminator.
Definition Constants.h:1244
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1232
void recordFaultingOp(FaultKind FaultTy, const MCSymbol *FaultingLabel, const MCSymbol *HandlerLabel)
Definition FaultMaps.cpp:28
void serializeToFaultMapSection()
Definition FaultMaps.cpp:45
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:272
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:728
const Constant * getAliasee() const
Definition GlobalAlias.h:87
const Constant * getResolver() const
Definition GlobalIFunc.h:73
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this GlobalObject.
bool hasLocalLinkage() const
bool hasExternalWeakLinkage() const
Type * getValueType() const
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
static const MCBinaryExpr * createLShr(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:423
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:343
static const MCBinaryExpr * createSub(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:428
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition MCExpr.cpp:212
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
MCSectionELF * getELFSection(const Twine &Section, unsigned Type, unsigned Flags)
Definition MCContext.h:550
LLVM_ABI MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
LLVM_ABI MCSymbol * createLinkerPrivateSymbol(const Twine &Name)
Base class for the full range of assembler expressions which are needed for parsing.
Definition MCExpr.h:34
LLVM_ABI bool evaluateAsRelocatable(MCValue &Res, const MCAssembler *Asm) const
Try to evaluate the expression to a relocatable value, i.e.
Definition MCExpr.cpp:450
void addOperand(const MCOperand Op)
Definition MCInst.h:215
void setOpcode(unsigned Op)
Definition MCInst.h:201
MCSection * getDataSection() const
void setImm(int64_t Val)
Definition MCInst.h:89
static MCOperand createExpr(const MCExpr *Val)
Definition MCInst.h:166
int64_t getImm() const
Definition MCInst.h:84
static MCOperand createReg(MCRegister Reg)
Definition MCInst.h:138
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
bool isReg() const
Definition MCInst.h:65
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
static constexpr unsigned NonUniqueID
Definition MCSection.h:578
static const MCSpecifierExpr * create(const MCExpr *Expr, Spec S, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.cpp:743
Streaming machine code generation interface.
Definition MCStreamer.h:222
virtual void emitCFIBKeyFrame()
virtual bool popSection()
Restore the current and previous section from the section stack.
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
virtual void emitRelocDirective(const MCExpr &Offset, StringRef Name, const MCExpr *Expr, SMLoc Loc={})
Record a relocation described by the .reloc directive.
virtual bool hasRawTextSupport() const
Return true if this asm streamer supports emitting unformatted text to the .s file with EmitRawText.
Definition MCStreamer.h:375
MCContext & getContext() const
Definition MCStreamer.h:323
virtual void AddComment(const Twine &T, bool EOL=true)
Add a textual comment.
Definition MCStreamer.h:394
virtual void emitCFIMTETaggedFrame()
void emitValue(const MCExpr *Value, unsigned Size, SMLoc Loc=SMLoc())
virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc=SMLoc())
Emit a label for Symbol into the current section.
MCTargetStreamer * getTargetStreamer()
Definition MCStreamer.h:333
void pushSection()
Save the current and previous section on the section stack.
Definition MCStreamer.h:450
virtual void switchSection(MCSection *Section, uint32_t Subsec=0)
Set the current section where code is being emitted to Section.
MCSection * getCurrentSectionOnly() const
Definition MCStreamer.h:428
void emitRawText(const Twine &String)
If this file is backed by a assembly streamer, this dumps the specified string in the output ....
const FeatureBitset & getFeatureBits() const
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:214
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
LLVM_ABI void print(raw_ostream &OS, const MCAsmInfo *MAI) const
print - Print the value to the stream OS.
Definition MCSymbol.cpp:59
StringRef getName() const
getName - Get the symbol name.
Definition MCSymbol.h:188
const MCSymbol * getAddSym() const
Definition MCValue.h:49
int64_t getConstant() const
Definition MCValue.h:44
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
CalledGlobalInfo tryGetCalledGlobal(const MachineInstr *MI) const
Tries to get the global and target flags for a call site, if the instruction is a call to a global.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MCContext & getContext() const
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineBasicBlock & front() const
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
mop_range operands()
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const MachineOperand & getOperand(unsigned i) const
const std::vector< MachineJumpTableEntry > & getJumpTables() const
unsigned getSubReg() const
static MachineOperand CreateMCSymbol(MCSymbol *Sym, unsigned TargetFlags=0)
const GlobalValue * getGlobal() const
static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
const BlockAddress * getBlockAddress() const
void setOffset(int64_t Offset)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
@ MO_Immediate
Immediate operand.
@ MO_GlobalAddress
Address of a global value.
@ MO_BlockAddress
Address of a basic block.
@ MO_Register
Register operand.
@ MO_ExternalSymbol
Name of external global symbol.
int64_t getOffset() const
Return the offset from the symbol in this operand.
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
static SectionKind getMetadata()
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void push_back(const T &Elt)
constexpr const char * data() const
Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:137
virtual MCSection * getSectionForJumpTable(const Function &F, const TargetMachine &TM) const
Primary interface to the complete machine description for the target machine.
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
bool regsOverlap(Register RegA, Register RegB) const
Returns true if the two registers are equal or alias each other.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition Type.h:275
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
StringRef getVendorName(unsigned const Vendor)
@ MO_NC
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow.
@ MO_G1
MO_G1 - A symbol operand with this flag (granule 1) represents the bits 16-31 of a 64-bit address,...
@ MO_S
MO_S - Indicates that the bits of the symbol operand represented by MO_G0 etc are signed.
@ MO_PAGEOFF
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page.
@ MO_GOT
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
@ MO_G0
MO_G0 - A symbol operand with this flag (granule 0) represents the bits 0-15 of a 64-bit address,...
@ MO_PAGE
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
@ MO_TLS
MO_TLS - Indicates that the operand being accessed is some kind of thread-local symbol.
AuthCheckMethod
Variants of check performed on an authenticated pointer.
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ SectionSize
Definition COFF.h:61
SymbolStorageClass
Storage class tells where and what the symbol represents.
Definition COFF.h:218
@ IMAGE_SYM_CLASS_EXTERNAL
External symbol.
Definition COFF.h:224
@ IMAGE_SYM_CLASS_STATIC
Static.
Definition COFF.h:225
@ IMAGE_SYM_DTYPE_FUNCTION
A function that returns a base type.
Definition COFF.h:276
@ SCT_COMPLEX_TYPE_SHIFT
Type is formed as (base + (derived << SCT_COMPLEX_TYPE_SHIFT))
Definition COFF.h:280
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ SHF_ALLOC
Definition ELF.h:1249
@ SHF_GROUP
Definition ELF.h:1271
@ SHF_EXECINSTR
Definition ELF.h:1252
@ GNU_PROPERTY_AARCH64_FEATURE_1_BTI
Definition ELF.h:1860
@ GNU_PROPERTY_AARCH64_FEATURE_1_PAC
Definition ELF.h:1861
@ GNU_PROPERTY_AARCH64_FEATURE_1_GCS
Definition ELF.h:1862
@ SHT_PROGBITS
Definition ELF.h:1148
@ S_REGULAR
S_REGULAR - Regular section.
Definition MachO.h:127
void emitInstruction(MCObjectStreamer &, const MCInst &Inst, const MCSubtargetInfo &STI)
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:683
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
bool empty() const
Definition BasicBlock.h:101
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:315
@ Offset
Definition DWP.cpp:557
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI std::optional< std::string > getArm64ECMangledFunctionName(StringRef Name)
Returns the ARM64EC mangled function name unless the input is already mangled.
Definition Mangler.cpp:292
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1668
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
scope_exit(Callable) -> scope_exit< Callable >
static unsigned getXPACOpcodeForKey(AArch64PACKey::ID K)
Return XPAC opcode to be used for a ptrauth strip using the given key.
ExceptionHandling
Definition CodeGen.h:53
Target & getTheAArch64beTarget()
std::string utostr(uint64_t X, bool isNeg=false)
static unsigned getBranchOpcodeForKey(bool IsCall, AArch64PACKey::ID K, bool Zero)
Return B(L)RA opcode to be used for an authenticated branch or call using the given key,...
Target & getTheAArch64leTarget()
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
Target & getTheAArch64_32Target()
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
Target & getTheARM64_32Target()
static MCRegister getXRegFromWReg(MCRegister Reg)
@ Add
Sum of integers.
Target & getTheARM64Target()
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static MCRegister getXRegFromXRegTuple(MCRegister RegTuple)
static unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return PAC opcode to be used for a ptrauth sign using the given key, or its PAC*Z variant that doesn'...
static MCRegister getWRegFromXReg(MCRegister Reg)
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1916
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
static unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return AUT opcode to be used for a ptrauth auth using the given key, or its AUT*Z variant that doesn'...
@ MCSA_Weak
.weak
@ MCSA_WeakAntiDep
.weak_anti_dep (COFF)
@ MCSA_ELF_TypeFunction
.type _foo, STT_FUNC # aka @function
@ MCSA_Hidden
.hidden (ELF)
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:870
#define EQ(a, b)
Definition regexec.c:65
RegisterAsmPrinter - Helper template for registering a target specific assembly printer,...