LLVM 23.0.0git
AArch64AsmPrinter.cpp
Go to the documentation of this file.
1//===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains a printer that converts from our internal representation
10// of machine-dependent LLVM code to the AArch64 assembly language.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AArch64.h"
15#include "AArch64MCInstLower.h"
17#include "AArch64RegisterInfo.h"
18#include "AArch64Subtarget.h"
27#include "llvm/ADT/DenseMap.h"
28#include "llvm/ADT/ScopeExit.h"
31#include "llvm/ADT/Statistic.h"
32#include "llvm/ADT/StringRef.h"
33#include "llvm/ADT/Twine.h"
47#include "llvm/IR/DataLayout.h"
49#include "llvm/IR/Mangler.h"
50#include "llvm/IR/Module.h"
51#include "llvm/MC/MCAsmInfo.h"
52#include "llvm/MC/MCContext.h"
53#include "llvm/MC/MCExpr.h"
54#include "llvm/MC/MCInst.h"
58#include "llvm/MC/MCStreamer.h"
59#include "llvm/MC/MCSymbol.h"
60#include "llvm/MC/MCValue.h"
70#include <cassert>
71#include <cstdint>
72#include <map>
73#include <memory>
74
75using namespace llvm;
76
77#define DEBUG_TYPE "AArch64AsmPrinter"
78
// Counts zero-cycle FPR zeroing pseudos expanded by this printer.
// Doesn't count FPR128 ZCZ instructions which are handled
// by TableGen pattern matching
STATISTIC(NumZCZeroingInstrsFPR,
          "Number of zero-cycle FPR zeroing instructions expanded from "
          "canonical pseudo instructions");

// Command-line control for how ptrauth auth/resign failures are diagnosed.
// NOTE(review): the opening declaration line of this cl::opt (its type and
// variable name) and the trailing cl::init(...) were lost in extraction —
// restore from the original source before building.
    "aarch64-ptrauth-auth-checks", cl::Hidden,
    cl::values(clEnumValN(Unchecked, "none", "don't test for failure"),
               clEnumValN(Poison, "poison", "poison on failure"),
               clEnumValN(Trap, "trap", "trap on failure")),
    cl::desc("Check pointer authentication auth/resign failures"),
namespace {

/// AArch64 implementation of AsmPrinter. Lowers machine instructions to MC
/// and emits AArch64 assembly/object output, including the target-specific
/// pseudo expansions visible below (ptrauth, HWASan checks, KCFI, XRay sleds,
/// jump tables, ...).
class AArch64AsmPrinter : public AsmPrinter {
  AArch64MCInstLower MCInstLowering;
  FaultMaps FM;
  // Subtarget of the function currently being emitted; also temporarily
  // repointed by emitHwasanMemaccessSymbols.
  const AArch64Subtarget *STI;
  bool ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = false;
#ifndef NDEBUG
  unsigned InstsEmitted;
#endif
  // Set from the "import-call-optimization" module flag on COFF targets.
  bool EnableImportCallOptimization = false;
  // NOTE(review): the type of this member was lost in extraction (elided
  // line) — presumably a map from section to recorded import-call sites.
      SectionToImportedFunctionCalls;
  unsigned PAuthIFuncNextUniqueID = 1;

public:
  static char ID;

  AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
      : AsmPrinter(TM, std::move(Streamer), ID),
        MCInstLowering(OutContext, *this), FM(*this) {}

  StringRef getPassName() const override { return "AArch64 Assembly Printer"; }

  /// Wrapper for MCInstLowering.lowerOperand() for the
  /// tblgen'erated pseudo lowering.
  bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
    return MCInstLowering.lowerOperand(MO, MCOp);
  }

  const MCExpr *lowerConstantPtrAuth(const ConstantPtrAuth &CPA) override;

  const MCExpr *lowerBlockAddressConstant(const BlockAddress &BA) override;

  void emitStartOfAsmFile(Module &M) override;
  void emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
                         ArrayRef<unsigned> JumpTableIndices) override;
  // NOTE(review): the closing element(s) of this std::tuple were lost in
  // extraction (elided line).
  std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
  getCodeViewJumpTableInfo(int JTI, const MachineInstr *BranchInstr,
                           const MCSymbol *BranchLabel) const override;

  void emitFunctionEntryLabel() override;

  void emitXXStructor(const DataLayout &DL, const Constant *CV) override;

  void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);

  void LowerHardenedBRJumpTable(const MachineInstr &MI);

  void LowerMOPS(MCStreamer &OutStreamer, const MachineInstr &MI);

  void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
                     const MachineInstr &MI);
  void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                       const MachineInstr &MI);
  void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                       const MachineInstr &MI);
  void LowerFAULTING_OP(const MachineInstr &MI);

  void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
  void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
  void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);
  void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, bool Typed);

  // Key for the cache of per-configuration HWASan check outlined symbols:
  // (register, is-short-granules, access info, is-fixed-shadow, shadow base).
  typedef std::tuple<unsigned, bool, uint32_t, bool, uint64_t>
      HwasanMemaccessTuple;
  std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
  void LowerKCFI_CHECK(const MachineInstr &MI);
  void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
  void emitHwasanMemaccessSymbols(Module &M);

  void emitSled(const MachineInstr &MI, SledKind Kind);

  // Returns whether Reg may be used to store sensitive temporary values when
  // expanding PtrAuth pseudos. Some OSes may take extra care to protect a
  // small subset of GPRs on context switches - use these registers then.
  //
  // If there are no preferred registers, returns true for any Reg.
  bool isPtrauthRegSafe(Register Reg) const {
    if (STI->isX16X17Safer())
      return Reg == AArch64::X16 || Reg == AArch64::X17;

    return true;
  }

  // Emit the sequence for BRA/BLRA (authenticate + branch/call).
  void emitPtrauthBranch(const MachineInstr *MI);

  // NOTE(review): this declaration's middle parameter lines were lost in
  // extraction (elided lines).
  void emitPtrauthCheckAuthenticatedValue(Register TestedReg,
                                          Register ScratchReg,
                                          const MCSymbol *OnFailure = nullptr);

  // Check authenticated LR before tail calling.
  void emitPtrauthTailCallHardening(const MachineInstr *TC);

  // Bundles a ptrauth signing schema: key, integer discriminator, and
  // address-discriminator register.
  struct PtrAuthSchema {
    PtrAuthSchema(AArch64PACKey::ID Key, uint64_t IntDisc,
                  const MachineOperand &AddrDiscOp);

    // NOTE(review): the field holding the Key was lost in extraction
    // (elided line).
    uint64_t IntDisc;
    Register AddrDisc;
    bool AddrDiscIsKilled;
  };

  // Emit the sequence for AUT or AUTPAC. Addend if AUTRELLOADPAC
  void emitPtrauthAuthResign(Register Pointer, Register Scratch,
                             PtrAuthSchema AuthSchema,
                             std::optional<PtrAuthSchema> SignSchema,
                             std::optional<uint64_t> Addend, Value *DS);

  // Emit R_AARCH64_PATCHINST, the deactivation symbol relocation. Returns true
  // if no instruction should be emitted because the deactivation symbol is
  // defined in the current module so this function emitted a NOP instead.
  bool emitDeactivationSymbolRelocation(Value *DS);

  // Emit the sequence for PAC.
  void emitPtrauthSign(const MachineInstr *MI);

  // Emit the sequence to compute the discriminator.
  //
  // The Scratch register passed to this function must be safe, as returned by
  // isPtrauthRegSafe(ScratchReg).
  //
  // The returned register is either ScratchReg, AddrDisc, or XZR. Furthermore,
  // it is guaranteed to be safe (or XZR), with the only exception of
  // passing-through an *unmodified* unsafe AddrDisc register.
  //
  // If the expanded pseudo is allowed to clobber AddrDisc register, setting
  // MayClobberAddrDisc may save one MOV instruction, provided
  // isPtrauthRegSafe(AddrDisc) is true:
  //
  //   mov   x17, x16
  //   movk  x17, #1234, lsl #48
  //   ; x16 is not used anymore
  //
  // can be replaced by
  //
  //   movk  x16, #1234, lsl #48
  Register emitPtrauthDiscriminator(uint64_t Disc, Register AddrDisc,
                                    Register ScratchReg,
                                    bool MayClobberAddrDisc = false);

  // Emit the sequence for LOADauthptrstatic
  void LowerLOADauthptrstatic(const MachineInstr &MI);

  // Emit the sequence for LOADgotPAC/MOVaddrPAC (either GOT adrp-ldr or
  // adrp-add followed by PAC sign)
  void LowerMOVaddrPAC(const MachineInstr &MI);

  // Emit the sequence for LOADgotAUTH (load signed pointer from signed ELF GOT
  // and authenticate it with, if FPAC bit is not set, check+trap sequence after
  // authenticating)
  void LowerLOADgotAUTH(const MachineInstr &MI);

  void emitAddImm(MCRegister Val, int64_t Addend, MCRegister Tmp);
  void emitAddress(MCRegister Reg, const MCExpr *Expr, MCRegister Tmp,
                   bool DSOLocal, const MCSubtargetInfo &STI);

  const MCExpr *emitPAuthRelocationAsIRelative(
      const MCExpr *Target, uint64_t Disc, AArch64PACKey::ID KeyID,
      bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr);

  /// tblgen'erated driver function for lowering simple MI->MC
  /// pseudo instructions.
  bool lowerPseudoInstExpansion(const MachineInstr *MI, MCInst &Inst);

  // Emit Build Attributes
  void emitAttributes(unsigned Flags, uint64_t PAuthABIPlatform,
                      uint64_t PAuthABIVersion, AArch64TargetStreamer *TS);

  // Emit expansion of Compare-and-branch pseudo instructions
  void emitCBPseudoExpansion(const MachineInstr *MI);

  void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
  // Convenience overload targeting the printer's own output streamer.
  void EmitToStreamer(const MCInst &Inst) {
    EmitToStreamer(*OutStreamer, Inst);
  }

  void emitInstruction(const MachineInstr *MI) override;

  void emitFunctionHeaderComment() override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // NOTE(review): a line (presumably the AsmPrinter::getAnalysisUsage(AU)
    // base-class call) was lost in extraction here.
    AU.setPreservesAll();
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    if (auto *PSIW = getAnalysisIfAvailable<ProfileSummaryInfoWrapperPass>())
      PSI = &PSIW->getPSI();
    if (auto *SDPIW =
            getAnalysisIfAvailable<StaticDataProfileInfoWrapperPass>())
      SDPI = &SDPIW->getStaticDataProfileInfo();

    AArch64FI = MF.getInfo<AArch64FunctionInfo>();
    STI = &MF.getSubtarget<AArch64Subtarget>();

    SetupMachineFunction(MF);

    if (STI->isTargetCOFF()) {
      bool Local = MF.getFunction().hasLocalLinkage();
      // NOTE(review): the initializers of Scl and Type (COFF storage class
      // and symbol type) were lost in extraction (elided lines).
      int Type =

      OutStreamer->beginCOFFSymbolDef(CurrentFnSym);
      OutStreamer->emitCOFFSymbolStorageClass(Scl);
      OutStreamer->emitCOFFSymbolType(Type);
      OutStreamer->endCOFFSymbolDef();
    }

    // Emit the rest of the function body.
    emitFunctionBody();

    // Emit the XRay table for this function.
    emitXRayTable();

    // We didn't modify anything.
    return false;
  }

  const MCExpr *lowerConstant(const Constant *CV,
                              const Constant *BaseCV = nullptr,
                              uint64_t Offset = 0) override;

private:
  void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
  bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
  bool printAsmRegInClass(const MachineOperand &MO,
                          const TargetRegisterClass *RC, unsigned AltName,
                          raw_ostream &O);

  bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
                       const char *ExtraCode, raw_ostream &O) override;
  bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
                             const char *ExtraCode, raw_ostream &O) override;

  void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);

  void emitFunctionBodyEnd() override;
  void emitGlobalAlias(const Module &M, const GlobalAlias &GA) override;

  MCSymbol *GetCPISymbol(unsigned CPID) const override;
  void emitEndOfAsmFile(Module &M) override;

  AArch64FunctionInfo *AArch64FI = nullptr;

  /// Emit the LOHs contained in AArch64FI.
  void emitLOHs();

  void emitMovXReg(Register Dest, Register Src);
  void emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift);
  void emitMOVK(Register Dest, uint64_t Imm, unsigned Shift);

  void emitAUT(AArch64PACKey::ID Key, Register Pointer, Register Disc);
  void emitPAC(AArch64PACKey::ID Key, Register Pointer, Register Disc);
  void emitBLRA(bool IsCall, AArch64PACKey::ID Key, Register Target,
                Register Disc);

  /// Emit instruction to set float register to zero.
  void emitFMov0(const MachineInstr &MI);
  void emitFMov0AsFMov(const MachineInstr &MI, Register DestReg);

  using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;

  // Labels attached to instructions participating in Linker Optimization
  // Hints; consumed by emitLOHs().
  MInstToMCSymbol LOHInstToLabel;

  bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const override {
    return ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags;
  }

  const MCSubtargetInfo *getIFuncMCSubtargetInfo() const override {
    assert(STI);
    return STI;
  }
  void emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
                              MCSymbol *LazyPointer) override;
  void emitMachOIFuncStubHelperBody(Module &M, const GlobalIFunc &GI,
                                    MCSymbol *LazyPointer) override;

  /// Checks if this instruction is part of a sequence that is eligible for
  /// import call optimization and, if so, records it to be emitted in the
  /// import call section.
  void recordIfImportCall(const MachineInstr *BranchInst);
};

} // end anonymous namespace
386
// Per-module prologue: COFF feature symbols / import-call opt flag, and on
// ELF the AArch64 build attributes plus the .note.gnu.property section.
void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
  const Triple &TT = TM.getTargetTriple();

  if (TT.isOSBinFormatCOFF()) {
    emitCOFFFeatureSymbol(M);
    emitCOFFReplaceableFunctionData(M);

    if (M.getModuleFlag("import-call-optimization"))
      EnableImportCallOptimization = true;
  }

  // Everything below is ELF-only.
  if (!TT.isOSBinFormatELF())
    return;

  // For emitting build attributes and .note.gnu.property section
  auto *TS =
      static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
  // Assemble feature flags that may require creation of build attributes and a
  // note section.
  unsigned BAFlags = 0;
  unsigned GNUFlags = 0;
  if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("branch-target-enforcement"))) {
    if (!BTE->isZero()) {
      BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_BTI_Flag;
      // NOTE(review): the matching "GNUFlags |= ..." update was lost in
      // extraction (elided line).
    }
  }

  if (const auto *GCS = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("guarded-control-stack"))) {
    if (!GCS->isZero()) {
      BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_GCS_Flag;
      // NOTE(review): the matching "GNUFlags |= ..." update was lost in
      // extraction (elided line).
    }
  }

  if (const auto *Sign = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("sign-return-address"))) {
    if (!Sign->isZero()) {
      BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_PAC_Flag;
      // NOTE(review): the matching "GNUFlags |= ..." update was lost in
      // extraction (elided line).
    }
  }

  // -1 means "flag absent"; emitAttributes maps it back to 0.
  uint64_t PAuthABIPlatform = -1;
  if (const auto *PAP = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("aarch64-elf-pauthabi-platform"))) {
    PAuthABIPlatform = PAP->getZExtValue();
  }

  uint64_t PAuthABIVersion = -1;
  if (const auto *PAV = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("aarch64-elf-pauthabi-version"))) {
    PAuthABIVersion = PAV->getZExtValue();
  }

  // Emit AArch64 Build Attributes
  emitAttributes(BAFlags, PAuthABIPlatform, PAuthABIVersion, TS);
  // Emit a .note.gnu.property section with the flags.
  TS->emitNoteSection(GNUFlags, PAuthABIPlatform, PAuthABIVersion);
}
449
450void AArch64AsmPrinter::emitFunctionHeaderComment() {
451 const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
452 std::optional<std::string> OutlinerString = FI->getOutliningStyle();
453 if (OutlinerString != std::nullopt)
454 OutStreamer->getCommentOS() << ' ' << OutlinerString;
455}
456
457void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI)
458{
459 const Function &F = MF->getFunction();
460 if (F.hasFnAttribute("patchable-function-entry")) {
461 unsigned Num;
462 if (F.getFnAttribute("patchable-function-entry")
463 .getValueAsString()
464 .getAsInteger(10, Num))
465 return;
466 emitNops(Num);
467 return;
468 }
469
470 emitSled(MI, SledKind::FUNCTION_ENTER);
471}
472
// Exit patch point: always an XRay exit sled (no NOP-only variant here).
void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI) {
  emitSled(MI, SledKind::FUNCTION_EXIT);
}
476
// Tail-call patch point: emit an XRay tail-call sled.
void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI) {
  emitSled(MI, SledKind::TAIL_CALL);
}
480
481void AArch64AsmPrinter::emitSled(const MachineInstr &MI, SledKind Kind) {
482 static const int8_t NoopsInSledCount = 7;
483 // We want to emit the following pattern:
484 //
485 // .Lxray_sled_N:
486 // ALIGN
487 // B #32
488 // ; 7 NOP instructions (28 bytes)
489 // .tmpN
490 //
491 // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
492 // over the full 32 bytes (8 instructions) with the following pattern:
493 //
494 // STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
495 // LDR W17, #12 ; W17 := function ID
496 // LDR X16,#12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
497 // BLR X16 ; call the tracing trampoline
498 // ;DATA: 32 bits of function ID
499 // ;DATA: lower 32 bits of the address of the trampoline
500 // ;DATA: higher 32 bits of the address of the trampoline
501 // LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
502 //
503 OutStreamer->emitCodeAlignment(Align(4), &getSubtargetInfo());
504 auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
505 OutStreamer->emitLabel(CurSled);
506 auto Target = OutContext.createTempSymbol();
507
508 // Emit "B #32" instruction, which jumps over the next 28 bytes.
509 // The operand has to be the number of 4-byte instructions to jump over,
510 // including the current instruction.
511 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));
512
513 for (int8_t I = 0; I < NoopsInSledCount; I++)
514 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::NOP));
515
516 OutStreamer->emitLabel(Target);
517 recordSled(CurSled, MI, Kind, 2);
518}
519
// Emit the AArch64 ELF build-attributes subsections: the PAuth ABI
// (platform/version) subsection when present, and the feature-and-bits
// (BTI/PAC/GCS) subsection when any flag is set.
void AArch64AsmPrinter::emitAttributes(unsigned Flags,
                                       uint64_t PAuthABIPlatform,
                                       uint64_t PAuthABIVersion,
                                       AArch64TargetStreamer *TS) {

  // uint64_t(-1) is the "flag absent" sentinel from emitStartOfAsmFile.
  PAuthABIPlatform = (uint64_t(-1) == PAuthABIPlatform) ? 0 : PAuthABIPlatform;
  PAuthABIVersion = (uint64_t(-1) == PAuthABIVersion) ? 0 : PAuthABIVersion;

  if (PAuthABIPlatform || PAuthABIVersion) {
    // NOTE(review): the TS->emitAttributesSubsection(...) /
    // TS->emitAttribute(...) call heads were lost in extraction (elided
    // lines); only trailing argument fragments remain below.
        AArch64BuildAttributes::SubsectionOptional::REQUIRED,
        AArch64BuildAttributes::SubsectionType::ULEB128);
        PAuthABIPlatform, "");
        "");
  }

  // NOTE(review): the initializers of BTIValue/PACValue/GCSValue (extracted
  // from Flags) were lost in extraction (elided lines).
  unsigned BTIValue =
  unsigned PACValue =
  unsigned GCSValue =

  if (BTIValue || PACValue || GCSValue) {
    // NOTE(review): subsection/attribute emission calls elided here as well.
        AArch64BuildAttributes::SubsectionOptional::OPTIONAL,
        AArch64BuildAttributes::SubsectionType::ULEB128);
  }
}
568
569// Emit the following code for Intrinsic::{xray_customevent,xray_typedevent}
570// (built-in functions __xray_customevent/__xray_typedevent).
571//
572// .Lxray_event_sled_N:
573// b 1f
574// save x0 and x1 (and also x2 for TYPED_EVENT_CALL)
575// set up x0 and x1 (and also x2 for TYPED_EVENT_CALL)
576// bl __xray_CustomEvent or __xray_TypedEvent
577// restore x0 and x1 (and also x2 for TYPED_EVENT_CALL)
578// 1:
579//
580// There are 6 instructions for EVENT_CALL and 9 for TYPED_EVENT_CALL.
581//
582// Then record a sled of kind CUSTOM_EVENT or TYPED_EVENT.
583// After patching, b .+N will become a nop.
// Emit the patchable XRay custom/typed event sled. See the block comment
// above for the exact instruction layout; the leading "B" skips the whole
// sequence until XRay patches it into a NOP at runtime.
void AArch64AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
                                                  bool Typed) {
  auto &O = *OutStreamer;
  MCSymbol *CurSled = OutContext.createTempSymbol("xray_sled_", true);
  O.emitLabel(CurSled);
  // Mach-O symbols carry a leading underscore.
  bool MachO = TM.getTargetTriple().isOSBinFormatMachO();
  auto *Sym = MCSymbolRefExpr::create(
      OutContext.getOrCreateSymbol(
          Twine(MachO ? "_" : "") +
          (Typed ? "__xray_TypedEvent" : "__xray_CustomEvent")),
      OutContext);
  if (Typed) {
    O.AddComment("Begin XRay typed event");
    // Skip the 8 following instructions (branch operand counts itself).
    EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(9));
    // Save x0/x1 with pre-decrement, then x2, so all three argument
    // registers survive the call.
    EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
                          .addReg(AArch64::SP)
                          .addReg(AArch64::X0)
                          .addReg(AArch64::X1)
                          .addReg(AArch64::SP)
                          .addImm(-4));
    EmitToStreamer(O, MCInstBuilder(AArch64::STRXui)
                          .addReg(AArch64::X2)
                          .addReg(AArch64::SP)
                          .addImm(2));
    // Marshal the event operands into the argument registers.
    emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
    emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
    emitMovXReg(AArch64::X2, MI.getOperand(2).getReg());
    EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
    // Restore x2, then x0/x1 with post-increment.
    EmitToStreamer(O, MCInstBuilder(AArch64::LDRXui)
                          .addReg(AArch64::X2)
                          .addReg(AArch64::SP)
                          .addImm(2));
    O.AddComment("End XRay typed event");
    EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
                          .addReg(AArch64::SP)
                          .addReg(AArch64::X0)
                          .addReg(AArch64::X1)
                          .addReg(AArch64::SP)
                          .addImm(4));

    recordSled(CurSled, MI, SledKind::TYPED_EVENT, 2);
  } else {
    O.AddComment("Begin XRay custom event");
    // Skip the 5 following instructions (branch operand counts itself).
    EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(6));
    EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
                          .addReg(AArch64::SP)
                          .addReg(AArch64::X0)
                          .addReg(AArch64::X1)
                          .addReg(AArch64::SP)
                          .addImm(-2));
    emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
    emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
    EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
    O.AddComment("End XRay custom event");
    EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
                          .addReg(AArch64::SP)
                          .addReg(AArch64::X0)
                          .addReg(AArch64::X1)
                          .addReg(AArch64::SP)
                          .addImm(2));

    recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 2);
  }
}
648
// Expand KCFI_CHECK: load the type hash stored just before the call target,
// compare it against the expected hash, and BRK with a diagnostic ESR on
// mismatch. Must immediately precede the indirect call it guards.
void AArch64AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
  Register AddrReg = MI.getOperand(0).getReg();
  assert(std::next(MI.getIterator())->isCall() &&
         "KCFI_CHECK not followed by a call instruction");
  assert(std::next(MI.getIterator())->getOperand(0).getReg() == AddrReg &&
         "KCFI_CHECK call target doesn't match call operand");

  // Default to using the intra-procedure-call temporary registers for
  // comparing the hashes.
  unsigned ScratchRegs[] = {AArch64::W16, AArch64::W17};
  if (AddrReg == AArch64::XZR) {
    // Checking XZR makes no sense. Instead of emitting a load, zero
    // ScratchRegs[0] and use it for the ESR AddrIndex below.
    AddrReg = getXRegFromWReg(ScratchRegs[0]);
    emitMovXReg(AddrReg, AArch64::XZR);
  } else {
    // If one of the scratch registers is used for the call target (e.g.
    // with AArch64::TCRETURNriBTI), we can clobber another caller-saved
    // temporary register instead (in this case, AArch64::W9) as the check
    // is immediately followed by the call instruction.
    for (auto &Reg : ScratchRegs) {
      if (Reg == getWRegFromXReg(AddrReg)) {
        Reg = AArch64::W9;
        break;
      }
    }
    assert(ScratchRegs[0] != AddrReg && ScratchRegs[1] != AddrReg &&
           "Invalid scratch registers for KCFI_CHECK");

    // Adjust the offset for patchable-function-prefix. This assumes that
    // patchable-function-prefix is the same for all functions.
    int64_t PrefixNops = 0;
    (void)MI.getMF()
        ->getFunction()
        .getFnAttribute("patchable-function-prefix")
        .getValueAsString()
        .getAsInteger(10, PrefixNops);

    // Load the target function type hash, which sits 4 bytes (plus any
    // prefix NOPs) before the target's entry point.
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDURWi)
                                     .addReg(ScratchRegs[0])
                                     .addReg(AddrReg)
                                     .addImm(-(PrefixNops * 4 + 4)));
  }

  // Load the expected type hash, 16 bits at a time via MOVK.
  const int64_t Type = MI.getOperand(1).getImm();
  emitMOVK(ScratchRegs[1], Type & 0xFFFF, 0);
  emitMOVK(ScratchRegs[1], (Type >> 16) & 0xFFFF, 16);

  // Compare the hashes and trap if there's a mismatch.
  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSWrs)
                                   .addReg(AArch64::WZR)
                                   .addReg(ScratchRegs[0])
                                   .addReg(ScratchRegs[1])
                                   .addImm(0));

  MCSymbol *Pass = OutContext.createTempSymbol();
  EmitToStreamer(*OutStreamer,
                 MCInstBuilder(AArch64::Bcc)
                     .addImm(AArch64CC::EQ)
                     .addExpr(MCSymbolRefExpr::create(Pass, OutContext)));

  // The base ESR is 0x8000 and the register information is encoded in bits
  // 0-9 as follows:
  // - 0-4: n, where the register Xn contains the target address
  // - 5-9: m, where the register Wm contains the expected type hash
  // Where n, m are in [0, 30].
  unsigned TypeIndex = ScratchRegs[1] - AArch64::W0;
  unsigned AddrIndex;
  switch (AddrReg) {
  default:
    AddrIndex = AddrReg - AArch64::X0;
    break;
  case AArch64::FP: // x29 has its own register-class entry.
    AddrIndex = 29;
    break;
  case AArch64::LR: // likewise x30.
    AddrIndex = 30;
    break;
  }

  assert(AddrIndex < 31 && TypeIndex < 31);

  unsigned ESR = 0x8000 | ((TypeIndex & 31) << 5) | (AddrIndex & 31);
  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BRK).addImm(ESR));
  OutStreamer->emitLabel(Pass);
}
737
738void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
739 Register Reg = MI.getOperand(0).getReg();
740
741 // The HWASan pass won't emit a CHECK_MEMACCESS intrinsic with a pointer
742 // statically known to be zero. However, conceivably, the HWASan pass may
743 // encounter a "cannot currently statically prove to be null" pointer (and is
744 // therefore unable to omit the intrinsic) that later optimization passes
745 // convert into a statically known-null pointer.
746 if (Reg == AArch64::XZR)
747 return;
748
749 bool IsShort =
750 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES) ||
751 (MI.getOpcode() ==
752 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
753 uint32_t AccessInfo = MI.getOperand(1).getImm();
754 bool IsFixedShadow =
755 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW) ||
756 (MI.getOpcode() ==
757 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
758 uint64_t FixedShadowOffset = IsFixedShadow ? MI.getOperand(2).getImm() : 0;
759
760 MCSymbol *&Sym = HwasanMemaccessSymbols[HwasanMemaccessTuple(
761 Reg, IsShort, AccessInfo, IsFixedShadow, FixedShadowOffset)];
762 if (!Sym) {
763 // FIXME: Make this work on non-ELF.
764 if (!TM.getTargetTriple().isOSBinFormatELF())
765 report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");
766
767 std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
768 utostr(AccessInfo);
769 if (IsFixedShadow)
770 SymName += "_fixed_" + utostr(FixedShadowOffset);
771 if (IsShort)
772 SymName += "_short_v2";
773 Sym = OutContext.getOrCreateSymbol(SymName);
774 }
775
776 EmitToStreamer(*OutStreamer,
777 MCInstBuilder(AArch64::BL)
778 .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
779}
780
// Emit the bodies of all outlined HWASan check routines requested by
// LowerHWASAN_CHECK_MEMACCESS during this module's emission. Each routine is
// a weak, hidden, comdat function that loads the shadow tag for the pointer
// in its dedicated register, compares it to the pointer's tag, and either
// returns or reports a mismatch via __hwasan_tag_mismatch(_v2).
void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
  if (HwasanMemaccessSymbols.empty())
    return;

  const Triple &TT = TM.getTargetTriple();
  assert(TT.isOSBinFormatELF());
  // AArch64Subtarget is huge, so heap allocate it so we don't run out of stack
  // space.
  auto STI = std::make_unique<AArch64Subtarget>(
      TT, TM.getTargetCPU(), TM.getTargetCPU(), TM.getTargetFeatureString(), TM,
      true);
  this->STI = STI.get();

  MCSymbol *HwasanTagMismatchV1Sym =
      OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");
  MCSymbol *HwasanTagMismatchV2Sym =
      OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");

  const MCSymbolRefExpr *HwasanTagMismatchV1Ref =
      MCSymbolRefExpr::create(HwasanTagMismatchV1Sym, OutContext);
  const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
      MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);

  for (auto &P : HwasanMemaccessSymbols) {
    // Unpack the cache key built in LowerHWASAN_CHECK_MEMACCESS.
    unsigned Reg = std::get<0>(P.first);
    bool IsShort = std::get<1>(P.first);
    uint32_t AccessInfo = std::get<2>(P.first);
    bool IsFixedShadow = std::get<3>(P.first);
    uint64_t FixedShadowOffset = std::get<4>(P.first);
    // Short-granule checks report through the v2 runtime entry point.
    const MCSymbolRefExpr *HwasanTagMismatchRef =
        IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
    MCSymbol *Sym = P.second;

    // Decode the packed AccessInfo immediate.
    bool HasMatchAllTag =
        (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
    uint8_t MatchAllTag =
        (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
    unsigned Size =
        1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
    bool CompileKernel =
        (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;

    // NOTE(review): the section-flags argument line of getELFSection was
    // lost in extraction (elided line).
    OutStreamer->switchSection(OutContext.getELFSection(
        ".text.hot", ELF::SHT_PROGBITS,
        /*IsComdat=*/true));

    OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
    OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
    OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
    OutStreamer->emitLabel(Sym);

    // x16 := shadow index derived from the pointer (bits extracted via SBFM).
    EmitToStreamer(MCInstBuilder(AArch64::SBFMXri)
                       .addReg(AArch64::X16)
                       .addReg(Reg)
                       .addImm(4)
                       .addImm(55));

    if (IsFixedShadow) {
      // Aarch64 makes it difficult to embed large constants in the code.
      // Fortuitously, kShadowBaseAlignment == 32, so we use the 32-bit
      // left-shift option in the MOV instruction. Combined with the 16-bit
      // immediate, this is enough to represent any offset up to 2**48.
      emitMOVZ(AArch64::X17, FixedShadowOffset >> 32, 32);
      EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
                         .addReg(AArch64::W16)
                         .addReg(AArch64::X17)
                         .addReg(AArch64::X16)
                         .addImm(0)
                         .addImm(0));
    } else {
      // Shadow base lives in x20 (short granules) or x9 otherwise.
      EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
                         .addReg(AArch64::W16)
                         .addReg(IsShort ? AArch64::X20 : AArch64::X9)
                         .addReg(AArch64::X16)
                         .addImm(0)
                         .addImm(0));
    }

    // Compare the loaded shadow tag with the pointer's tag.
    // NOTE(review): the closing ".addImm(...));" of this SUBSXrs and the
    // ".addExpr(MCSymbolRefExpr::create(" line of the following Bcc were
    // lost in extraction (elided lines).
    EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
                       .addReg(AArch64::XZR)
                       .addReg(AArch64::X16)
                       .addReg(Reg)
    MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
    EmitToStreamer(MCInstBuilder(AArch64::Bcc)
                       .addImm(AArch64CC::NE)
                           HandleMismatchOrPartialSym, OutContext)));
    // Fast path: tags match, just return.
    MCSymbol *ReturnSym = OutContext.createTempSymbol();
    OutStreamer->emitLabel(ReturnSym);
    EmitToStreamer(MCInstBuilder(AArch64::RET).addReg(AArch64::LR));
    OutStreamer->emitLabel(HandleMismatchOrPartialSym);

    if (HasMatchAllTag) {
      // x17 := pointer tag (top byte); if it equals the match-all tag the
      // access is exempt from checking.
      EmitToStreamer(MCInstBuilder(AArch64::UBFMXri)
                         .addReg(AArch64::X17)
                         .addReg(Reg)
                         .addImm(56)
                         .addImm(63));
      EmitToStreamer(MCInstBuilder(AArch64::SUBSXri)
                         .addReg(AArch64::XZR)
                         .addReg(AArch64::X17)
                         .addImm(MatchAllTag)
                         .addImm(0));
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::EQ)
              .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
    }

    if (IsShort) {
      // Shadow values > 15 are not short-granule sizes: real mismatch.
      EmitToStreamer(MCInstBuilder(AArch64::SUBSWri)
                         .addReg(AArch64::WZR)
                         .addReg(AArch64::W16)
                         .addImm(15)
                         .addImm(0));
      MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::HI)
              .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));

      // x17 := last byte offset of the access within its 16-byte granule.
      EmitToStreamer(MCInstBuilder(AArch64::ANDXri)
                         .addReg(AArch64::X17)
                         .addReg(Reg)
                         .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
      if (Size != 1)
        EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                           .addReg(AArch64::X17)
                           .addReg(AArch64::X17)
                           .addImm(Size - 1)
                           .addImm(0));
      // Mismatch if the access extends past the short-granule size.
      EmitToStreamer(MCInstBuilder(AArch64::SUBSWrs)
                         .addReg(AArch64::WZR)
                         .addReg(AArch64::W16)
                         .addReg(AArch64::W17)
                         .addImm(0));
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::LS)
              .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));

      // The real tag of a short granule is stored in its last byte; load it
      // and compare against the pointer's tag.
      EmitToStreamer(MCInstBuilder(AArch64::ORRXri)
                         .addReg(AArch64::X16)
                         .addReg(Reg)
                         .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
      EmitToStreamer(MCInstBuilder(AArch64::LDRBBui)
                         .addReg(AArch64::W16)
                         .addReg(AArch64::X16)
                         .addImm(0));
      // NOTE(review): the closing ".addImm(...));" of this SUBSXrs was lost
      // in extraction (elided line).
      EmitToStreamer(
          MCInstBuilder(AArch64::SUBSXrs)
              .addReg(AArch64::XZR)
              .addReg(AArch64::X16)
              .addReg(Reg)
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::EQ)
              .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));

      OutStreamer->emitLabel(HandleMismatchSym);
    }

    // Mismatch confirmed: spill x0/x1 and a frame record, then call the
    // runtime reporting entry point.
    EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
                       .addReg(AArch64::SP)
                       .addReg(AArch64::X0)
                       .addReg(AArch64::X1)
                       .addReg(AArch64::SP)
                       .addImm(-32));
    EmitToStreamer(MCInstBuilder(AArch64::STPXi)
                       .addReg(AArch64::FP)
                       .addReg(AArch64::LR)
                       .addReg(AArch64::SP)
                       .addImm(29));

    // Runtime ABI: x0 = faulting pointer, x1 = masked access info.
    if (Reg != AArch64::X0)
      emitMovXReg(AArch64::X0, Reg);
    emitMOVZ(AArch64::X1, AccessInfo & HWASanAccessInfo::RuntimeMask, 0);

    if (CompileKernel) {
      // The Linux kernel's dynamic loader doesn't support GOT relative
      // relocations, but it doesn't support late binding either, so just call
      // the function directly.
      EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef));
    } else {
      // Intentionally load the GOT entry and branch to it, rather than possibly
      // late binding the function, which may clobber the registers before we
      // have a chance to save them.
      // NOTE(review): the GOT specifier arguments of both MCSpecifierExpr
      // calls were lost in extraction (elided lines).
      EmitToStreamer(MCInstBuilder(AArch64::ADRP)
                         .addReg(AArch64::X16)
                         .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
                                                          OutContext)));
      EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                         .addReg(AArch64::X16)
                         .addReg(AArch64::X16)
                         .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
                                                          OutContext)));
      EmitToStreamer(MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
    }
  }
  // Drop the dangling pointer before the heap-allocated subtarget dies.
  this->STI = nullptr;
}
987
988static void emitAuthenticatedPointer(MCStreamer &OutStreamer,
989 MCSymbol *StubLabel,
990 const MCExpr *StubAuthPtrRef) {
991 // sym$auth_ptr$key$disc:
992 OutStreamer.emitLabel(StubLabel);
993 OutStreamer.emitValue(StubAuthPtrRef, /*size=*/8);
994}
995
996void AArch64AsmPrinter::emitEndOfAsmFile(Module &M) {
997 emitHwasanMemaccessSymbols(M);
998
999 const Triple &TT = TM.getTargetTriple();
1000 if (TT.isOSBinFormatMachO()) {
1001 // Output authenticated pointers as indirect symbols, if we have any.
1002 MachineModuleInfoMachO &MMIMacho =
1003 MMI->getObjFileInfo<MachineModuleInfoMachO>();
1004
1005 auto Stubs = MMIMacho.getAuthGVStubList();
1006
1007 if (!Stubs.empty()) {
1008 // Switch to the "__auth_ptr" section.
1009 OutStreamer->switchSection(
1010 OutContext.getMachOSection("__DATA", "__auth_ptr", MachO::S_REGULAR,
1012 emitAlignment(Align(8));
1013
1014 for (const auto &Stub : Stubs)
1015 emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
1016
1017 OutStreamer->addBlankLine();
1018 }
1019
1020 // Funny Darwin hack: This flag tells the linker that no global symbols
1021 // contain code that falls through to other global symbols (e.g. the obvious
1022 // implementation of multiple entry points). If this doesn't occur, the
1023 // linker can safely perform dead code stripping. Since LLVM never
1024 // generates code that does this, it is always safe to set.
1025 OutStreamer->emitSubsectionsViaSymbols();
1026 }
1027
1028 if (TT.isOSBinFormatELF()) {
1029 // Output authenticated pointers as indirect symbols, if we have any.
1030 MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
1031
1032 auto Stubs = MMIELF.getAuthGVStubList();
1033
1034 if (!Stubs.empty()) {
1035 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
1036 OutStreamer->switchSection(TLOF.getDataSection());
1037 emitAlignment(Align(8));
1038
1039 for (const auto &Stub : Stubs)
1040 emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
1041
1042 OutStreamer->addBlankLine();
1043 }
1044
1045 // With signed ELF GOT enabled, the linker looks at the symbol type to
1046 // choose between keys IA (for STT_FUNC) and DA (for other types). Symbols
1047 // for functions not defined in the module have STT_NOTYPE type by default.
1048 // This makes linker to emit signing schema with DA key (instead of IA) for
1049 // corresponding R_AARCH64_AUTH_GLOB_DAT dynamic reloc. To avoid that, force
1050 // all function symbols used in the module to have STT_FUNC type. See
1051 // https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#default-signing-schema
1052 const auto *PtrAuthELFGOTFlag = mdconst::extract_or_null<ConstantInt>(
1053 M.getModuleFlag("ptrauth-elf-got"));
1054 if (PtrAuthELFGOTFlag && PtrAuthELFGOTFlag->getZExtValue() == 1)
1055 for (const GlobalValue &GV : M.global_values())
1056 if (!GV.use_empty() && isa<Function>(GV) &&
1057 !GV.getName().starts_with("llvm."))
1058 OutStreamer->emitSymbolAttribute(getSymbol(&GV),
1060 }
1061
1062 // Emit stack and fault map information.
1064
1065 // If import call optimization is enabled, emit the appropriate section.
1066 // We do this whether or not we recorded any import calls.
1067 if (EnableImportCallOptimization && TT.isOSBinFormatCOFF()) {
1068 OutStreamer->switchSection(getObjFileLowering().getImportCallSection());
1069
1070 // Section always starts with some magic.
1071 constexpr char ImpCallMagic[12] = "Imp_Call_V1";
1072 OutStreamer->emitBytes(StringRef{ImpCallMagic, sizeof(ImpCallMagic)});
1073
1074 // Layout of this section is:
1075 // Per section that contains calls to imported functions:
1076 // uint32_t SectionSize: Size in bytes for information in this section.
1077 // uint32_t Section Number
1078 // Per call to imported function in section:
1079 // uint32_t Kind: the kind of imported function.
1080 // uint32_t BranchOffset: the offset of the branch instruction in its
1081 // parent section.
1082 // uint32_t TargetSymbolId: the symbol id of the called function.
1083 for (auto &[Section, CallsToImportedFuncs] :
1084 SectionToImportedFunctionCalls) {
1085 unsigned SectionSize =
1086 sizeof(uint32_t) * (2 + 3 * CallsToImportedFuncs.size());
1087 OutStreamer->emitInt32(SectionSize);
1088 OutStreamer->emitCOFFSecNumber(Section->getBeginSymbol());
1089 for (auto &[CallsiteSymbol, CalledSymbol] : CallsToImportedFuncs) {
1090 // Kind is always IMAGE_REL_ARM64_DYNAMIC_IMPORT_CALL (0x13).
1091 OutStreamer->emitInt32(0x13);
1092 OutStreamer->emitCOFFSecOffset(CallsiteSymbol);
1093 OutStreamer->emitCOFFSymbolIndex(CalledSymbol);
1094 }
1095 }
1096 }
1097}
1098
1099void AArch64AsmPrinter::emitLOHs() {
1101
1102 for (const auto &D : AArch64FI->getLOHContainer()) {
1103 for (const MachineInstr *MI : D.getArgs()) {
1104 MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
1105 assert(LabelIt != LOHInstToLabel.end() &&
1106 "Label hasn't been inserted for LOH related instruction");
1107 MCArgs.push_back(LabelIt->second);
1108 }
1109 OutStreamer->emitLOHDirective(D.getKind(), MCArgs);
1110 MCArgs.clear();
1111 }
1112}
1113
1114void AArch64AsmPrinter::emitFunctionBodyEnd() {
1115 if (!AArch64FI->getLOHRelated().empty())
1116 emitLOHs();
1117}
1118
1119/// GetCPISymbol - Return the symbol for the specified constant pool entry.
1120MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
1121 // Darwin uses a linker-private symbol name for constant-pools (to
1122 // avoid addends on the relocation?), ELF has no such concept and
1123 // uses a normal private symbol.
1124 if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
1125 return OutContext.getOrCreateSymbol(
1126 Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
1127 Twine(getFunctionNumber()) + "_" + Twine(CPID));
1128
1129 return AsmPrinter::GetCPISymbol(CPID);
1130}
1131
1132void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
1133 raw_ostream &O) {
1134 const MachineOperand &MO = MI->getOperand(OpNum);
1135 switch (MO.getType()) {
1136 default:
1137 llvm_unreachable("<unknown operand type>");
1139 Register Reg = MO.getReg();
1141 assert(!MO.getSubReg() && "Subregs should be eliminated!");
1143 break;
1144 }
1146 O << MO.getImm();
1147 break;
1148 }
1150 PrintSymbolOperand(MO, O);
1151 break;
1152 }
1154 MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
1155 Sym->print(O, MAI);
1156 break;
1157 }
1158 }
1159}
1160
1161bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
1162 raw_ostream &O) {
1163 Register Reg = MO.getReg();
1164 switch (Mode) {
1165 default:
1166 return true; // Unknown mode.
1167 case 'w':
1169 break;
1170 case 'x':
1172 break;
1173 case 't':
1175 break;
1176 }
1177
1179 return false;
1180}
1181
1182// Prints the register in MO using class RC using the offset in the
1183// new register class. This should not be used for cross class
1184// printing.
1185bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
1186 const TargetRegisterClass *RC,
1187 unsigned AltName, raw_ostream &O) {
1188 assert(MO.isReg() && "Should only get here with a register!");
1189 const TargetRegisterInfo *RI = STI->getRegisterInfo();
1190 Register Reg = MO.getReg();
1191 MCRegister RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
1192 if (!RI->regsOverlap(RegToPrint, Reg))
1193 return true;
1194 O << AArch64InstPrinter::getRegisterName(RegToPrint, AltName);
1195 return false;
1196}
1197
1198bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
1199 const char *ExtraCode, raw_ostream &O) {
1200 const MachineOperand &MO = MI->getOperand(OpNum);
1201
1202 // First try the generic code, which knows about modifiers like 'c' and 'n'.
1203 if (!AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O))
1204 return false;
1205
1206 // Does this asm operand have a single letter operand modifier?
1207 if (ExtraCode && ExtraCode[0]) {
1208 if (ExtraCode[1] != 0)
1209 return true; // Unknown modifier.
1210
1211 switch (ExtraCode[0]) {
1212 default:
1213 return true; // Unknown modifier.
1214 case 'w': // Print W register
1215 case 'x': // Print X register
1216 if (MO.isReg())
1217 return printAsmMRegister(MO, ExtraCode[0], O);
1218 if (MO.isImm() && MO.getImm() == 0) {
1219 unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
1221 return false;
1222 }
1223 printOperand(MI, OpNum, O);
1224 return false;
1225 case 'b': // Print B register.
1226 case 'h': // Print H register.
1227 case 's': // Print S register.
1228 case 'd': // Print D register.
1229 case 'q': // Print Q register.
1230 case 'z': // Print Z register.
1231 if (MO.isReg()) {
1232 const TargetRegisterClass *RC;
1233 switch (ExtraCode[0]) {
1234 case 'b':
1235 RC = &AArch64::FPR8RegClass;
1236 break;
1237 case 'h':
1238 RC = &AArch64::FPR16RegClass;
1239 break;
1240 case 's':
1241 RC = &AArch64::FPR32RegClass;
1242 break;
1243 case 'd':
1244 RC = &AArch64::FPR64RegClass;
1245 break;
1246 case 'q':
1247 RC = &AArch64::FPR128RegClass;
1248 break;
1249 case 'z':
1250 RC = &AArch64::ZPRRegClass;
1251 break;
1252 default:
1253 return true;
1254 }
1255 return printAsmRegInClass(MO, RC, AArch64::NoRegAltName, O);
1256 }
1257 printOperand(MI, OpNum, O);
1258 return false;
1259 }
1260 }
1261
1262 // According to ARM, we should emit x and v registers unless we have a
1263 // modifier.
1264 if (MO.isReg()) {
1265 Register Reg = MO.getReg();
1266
1267 // If this is a w or x register, print an x register.
1268 if (AArch64::GPR32allRegClass.contains(Reg) ||
1269 AArch64::GPR64allRegClass.contains(Reg))
1270 return printAsmMRegister(MO, 'x', O);
1271
1272 // If this is an x register tuple, print an x register.
1273 if (AArch64::GPR64x8ClassRegClass.contains(Reg))
1274 return printAsmMRegister(MO, 't', O);
1275
1276 unsigned AltName = AArch64::NoRegAltName;
1277 const TargetRegisterClass *RegClass;
1278 if (AArch64::ZPRRegClass.contains(Reg)) {
1279 RegClass = &AArch64::ZPRRegClass;
1280 } else if (AArch64::PPRRegClass.contains(Reg)) {
1281 RegClass = &AArch64::PPRRegClass;
1282 } else if (AArch64::PNRRegClass.contains(Reg)) {
1283 RegClass = &AArch64::PNRRegClass;
1284 } else {
1285 RegClass = &AArch64::FPR128RegClass;
1286 AltName = AArch64::vreg;
1287 }
1288
1289 // If this is a b, h, s, d, or q register, print it as a v register.
1290 return printAsmRegInClass(MO, RegClass, AltName, O);
1291 }
1292
1293 printOperand(MI, OpNum, O);
1294 return false;
1295}
1296
1297bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
1298 unsigned OpNum,
1299 const char *ExtraCode,
1300 raw_ostream &O) {
1301 if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
1302 return true; // Unknown modifier.
1303
1304 const MachineOperand &MO = MI->getOperand(OpNum);
1305 assert(MO.isReg() && "unexpected inline asm memory operand");
1306 O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
1307 return false;
1308}
1309
1310void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
1311 raw_ostream &OS) {
1312 unsigned NOps = MI->getNumOperands();
1313 assert(NOps == 4);
1314 OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
1315 // cast away const; DIetc do not take const operands for some reason.
1316 OS << MI->getDebugVariable()->getName();
1317 OS << " <- ";
1318 // Frame address. Currently handles register +- offset only.
1319 assert(MI->isIndirectDebugValue());
1320 OS << '[';
1321 for (unsigned I = 0, E = llvm::size(MI->debug_operands()); I < E; ++I) {
1322 if (I != 0)
1323 OS << ", ";
1324 printOperand(MI, I, OS);
1325 }
1326 OS << ']';
1327 OS << "+";
1328 printOperand(MI, NOps - 2, OS);
1329}
1330
1331void AArch64AsmPrinter::emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
1332 ArrayRef<unsigned> JumpTableIndices) {
1333 // Fast return if there is nothing to emit to avoid creating empty sections.
1334 if (JumpTableIndices.empty())
1335 return;
1336 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
1337 const auto &F = MF->getFunction();
1339
1340 MCSection *ReadOnlySec = nullptr;
1341 if (TM.Options.EnableStaticDataPartitioning) {
1342 ReadOnlySec =
1343 TLOF.getSectionForJumpTable(F, TM, &JT[JumpTableIndices.front()]);
1344 } else {
1345 ReadOnlySec = TLOF.getSectionForJumpTable(F, TM);
1346 }
1347 OutStreamer->switchSection(ReadOnlySec);
1348
1349 auto AFI = MF->getInfo<AArch64FunctionInfo>();
1350 for (unsigned JTI : JumpTableIndices) {
1351 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
1352
1353 // If this jump table was deleted, ignore it.
1354 if (JTBBs.empty()) continue;
1355
1356 unsigned Size = AFI->getJumpTableEntrySize(JTI);
1357 emitAlignment(Align(Size));
1358 OutStreamer->emitLabel(GetJTISymbol(JTI));
1359
1360 const MCSymbol *BaseSym = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1361 const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);
1362
1363 for (auto *JTBB : JTBBs) {
1364 const MCExpr *Value =
1365 MCSymbolRefExpr::create(JTBB->getSymbol(), OutContext);
1366
1367 // Each entry is:
1368 // .byte/.hword (LBB - Lbase)>>2
1369 // or plain:
1370 // .word LBB - Lbase
1371 Value = MCBinaryExpr::createSub(Value, Base, OutContext);
1372 if (Size != 4)
1374 Value, MCConstantExpr::create(2, OutContext), OutContext);
1375
1376 OutStreamer->emitValue(Value, Size);
1377 }
1378 }
1379}
1380
1381std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
1383AArch64AsmPrinter::getCodeViewJumpTableInfo(int JTI,
1384 const MachineInstr *BranchInstr,
1385 const MCSymbol *BranchLabel) const {
1386 const auto AFI = MF->getInfo<AArch64FunctionInfo>();
1387 const auto Base = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1389 switch (AFI->getJumpTableEntrySize(JTI)) {
1390 case 1:
1391 EntrySize = codeview::JumpTableEntrySize::UInt8ShiftLeft;
1392 break;
1393 case 2:
1394 EntrySize = codeview::JumpTableEntrySize::UInt16ShiftLeft;
1395 break;
1396 case 4:
1397 EntrySize = codeview::JumpTableEntrySize::Int32;
1398 break;
1399 default:
1400 llvm_unreachable("Unexpected jump table entry size");
1401 }
1402 return std::make_tuple(Base, 0, BranchLabel, EntrySize);
1403}
1404
1405void AArch64AsmPrinter::emitFunctionEntryLabel() {
1406 const Triple &TT = TM.getTargetTriple();
1407 if (TT.isOSBinFormatELF() &&
1408 (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
1409 MF->getFunction().getCallingConv() ==
1410 CallingConv::AArch64_SVE_VectorCall ||
1411 MF->getInfo<AArch64FunctionInfo>()->isSVECC())) {
1412 auto *TS =
1413 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
1414 TS->emitDirectiveVariantPCS(CurrentFnSym);
1415 }
1416
1418
1419 if (TT.isWindowsArm64EC() && !MF->getFunction().hasLocalLinkage()) {
1420 // For ARM64EC targets, a function definition's name is mangled differently
1421 // from the normal symbol, emit required aliases here.
1422 auto emitFunctionAlias = [&](MCSymbol *Src, MCSymbol *Dst) {
1423 OutStreamer->emitSymbolAttribute(Src, MCSA_WeakAntiDep);
1424 OutStreamer->emitAssignment(
1425 Src, MCSymbolRefExpr::create(Dst, MMI->getContext()));
1426 };
1427
1428 auto getSymbolFromMetadata = [&](StringRef Name) {
1429 MCSymbol *Sym = nullptr;
1430 if (MDNode *Node = MF->getFunction().getMetadata(Name)) {
1431 StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
1432 Sym = MMI->getContext().getOrCreateSymbol(NameStr);
1433 }
1434 return Sym;
1435 };
1436
1437 SmallVector<MDNode *> UnmangledNames;
1438 MF->getFunction().getMetadata("arm64ec_unmangled_name", UnmangledNames);
1439 for (MDNode *Node : UnmangledNames) {
1440 StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
1441 MCSymbol *UnmangledSym = MMI->getContext().getOrCreateSymbol(NameStr);
1442 if (std::optional<std::string> MangledName =
1443 getArm64ECMangledFunctionName(UnmangledSym->getName())) {
1444 MCSymbol *ECMangledSym =
1445 MMI->getContext().getOrCreateSymbol(*MangledName);
1446 emitFunctionAlias(UnmangledSym, ECMangledSym);
1447 }
1448 }
1449 if (MCSymbol *ECMangledSym =
1450 getSymbolFromMetadata("arm64ec_ecmangled_name"))
1451 emitFunctionAlias(ECMangledSym, CurrentFnSym);
1452 }
1453}
1454
1455void AArch64AsmPrinter::emitXXStructor(const DataLayout &DL,
1456 const Constant *CV) {
1457 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(CV))
1458 if (CPA->hasAddressDiscriminator() &&
1459 !CPA->hasSpecialAddressDiscriminator(
1462 "unexpected address discrimination value for ctors/dtors entry, only "
1463 "'ptr inttoptr (i64 1 to ptr)' is allowed");
1464 // If we have signed pointers in xxstructors list, they'll be lowered to @AUTH
1465 // MCExpr's via AArch64AsmPrinter::lowerConstantPtrAuth. It does not look at
1466 // actual address discrimination value and only checks
1467 // hasAddressDiscriminator(), so it's OK to leave special address
1468 // discrimination value here.
1470}
1471
1472void AArch64AsmPrinter::emitGlobalAlias(const Module &M,
1473 const GlobalAlias &GA) {
1474 if (auto F = dyn_cast_or_null<Function>(GA.getAliasee())) {
1475 // Global aliases must point to a definition, but unmangled patchable
1476 // symbols are special and need to point to an undefined symbol with "EXP+"
1477 // prefix. Such undefined symbol is resolved by the linker by creating
1478 // x86 thunk that jumps back to the actual EC target.
1479 if (MDNode *Node = F->getMetadata("arm64ec_exp_name")) {
1480 StringRef ExpStr = cast<MDString>(Node->getOperand(0))->getString();
1481 MCSymbol *ExpSym = MMI->getContext().getOrCreateSymbol(ExpStr);
1482 MCSymbol *Sym = MMI->getContext().getOrCreateSymbol(GA.getName());
1483
1484 OutStreamer->beginCOFFSymbolDef(ExpSym);
1485 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1486 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1488 OutStreamer->endCOFFSymbolDef();
1489
1490 OutStreamer->beginCOFFSymbolDef(Sym);
1491 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1492 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1494 OutStreamer->endCOFFSymbolDef();
1495 OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
1496 OutStreamer->emitAssignment(
1497 Sym, MCSymbolRefExpr::create(ExpSym, MMI->getContext()));
1498 return;
1499 }
1500 }
1502}
1503
1504/// Small jump tables contain an unsigned byte or half, representing the offset
1505/// from the lowest-addressed possible destination to the desired basic
1506/// block. Since all instructions are 4-byte aligned, this is further compressed
1507/// by counting in instructions rather than bytes (i.e. divided by 4). So, to
1508/// materialize the correct destination we need:
1509///
1510/// adr xDest, .LBB0_0
1511/// ldrb wScratch, [xTable, xEntry] (with "lsl #1" for ldrh).
1512/// add xDest, xDest, xScratch (with "lsl #2" for smaller entries)
1513void AArch64AsmPrinter::LowerJumpTableDest(llvm::MCStreamer &OutStreamer,
1514 const llvm::MachineInstr &MI) {
1515 Register DestReg = MI.getOperand(0).getReg();
1516 Register ScratchReg = MI.getOperand(1).getReg();
1517 Register ScratchRegW =
1518 STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
1519 Register TableReg = MI.getOperand(2).getReg();
1520 Register EntryReg = MI.getOperand(3).getReg();
1521 int JTIdx = MI.getOperand(4).getIndex();
1522 int Size = AArch64FI->getJumpTableEntrySize(JTIdx);
1523
1524 // This has to be first because the compression pass based its reachability
1525 // calculations on the start of the JumpTableDest instruction.
1526 auto Label =
1527 MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);
1528
1529 // If we don't already have a symbol to use as the base, use the ADR
1530 // instruction itself.
1531 if (!Label) {
1533 AArch64FI->setJumpTableEntryInfo(JTIdx, Size, Label);
1534 OutStreamer.emitLabel(Label);
1535 }
1536
1537 auto LabelExpr = MCSymbolRefExpr::create(Label, MF->getContext());
1538 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
1539 .addReg(DestReg)
1540 .addExpr(LabelExpr));
1541
1542 // Load the number of instruction-steps to offset from the label.
1543 unsigned LdrOpcode;
1544 switch (Size) {
1545 case 1: LdrOpcode = AArch64::LDRBBroX; break;
1546 case 2: LdrOpcode = AArch64::LDRHHroX; break;
1547 case 4: LdrOpcode = AArch64::LDRSWroX; break;
1548 default:
1549 llvm_unreachable("Unknown jump table size");
1550 }
1551
1552 EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
1553 .addReg(Size == 4 ? ScratchReg : ScratchRegW)
1554 .addReg(TableReg)
1555 .addReg(EntryReg)
1556 .addImm(0)
1557 .addImm(Size == 1 ? 0 : 1));
1558
1559 // Add to the already materialized base label address, multiplying by 4 if
1560 // compressed.
1561 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1562 .addReg(DestReg)
1563 .addReg(DestReg)
1564 .addReg(ScratchReg)
1565 .addImm(Size == 4 ? 0 : 2));
1566}
1567
1568void AArch64AsmPrinter::LowerHardenedBRJumpTable(const MachineInstr &MI) {
1569 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
1570 assert(MJTI && "Can't lower jump-table dispatch without JTI");
1571
1572 const std::vector<MachineJumpTableEntry> &JTs = MJTI->getJumpTables();
1573 assert(!JTs.empty() && "Invalid JT index for jump-table dispatch");
1574
1575 // Emit:
1576 // mov x17, #<size of table> ; depending on table size, with MOVKs
1577 // cmp x16, x17 ; or #imm if table size fits in 12-bit
1578 // csel x16, x16, xzr, ls ; check for index overflow
1579 //
1580 // adrp x17, Ltable@PAGE ; materialize table address
1581 // add x17, Ltable@PAGEOFF
1582 // ldrsw x16, [x17, x16, lsl #2] ; load table entry
1583 //
1584 // Lanchor:
1585 // adr x17, Lanchor ; compute target address
1586 // add x16, x17, x16
1587 // br x16 ; branch to target
1588
1589 MachineOperand JTOp = MI.getOperand(0);
1590
1591 unsigned JTI = JTOp.getIndex();
1592 assert(!AArch64FI->getJumpTableEntryPCRelSymbol(JTI) &&
1593 "unsupported compressed jump table");
1594
1595 const uint64_t NumTableEntries = JTs[JTI].MBBs.size();
1596
1597 // cmp only supports a 12-bit immediate. If we need more, materialize the
1598 // immediate, using x17 as a scratch register.
1599 uint64_t MaxTableEntry = NumTableEntries - 1;
1600 if (isUInt<12>(MaxTableEntry)) {
1601 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXri)
1602 .addReg(AArch64::XZR)
1603 .addReg(AArch64::X16)
1604 .addImm(MaxTableEntry)
1605 .addImm(0));
1606 } else {
1607 emitMOVZ(AArch64::X17, static_cast<uint16_t>(MaxTableEntry), 0);
1608 // It's sad that we have to manually materialize instructions, but we can't
1609 // trivially reuse the main pseudo expansion logic.
1610 // A MOVK sequence is easy enough to generate and handles the general case.
1611 for (int Offset = 16; Offset < 64; Offset += 16) {
1612 if ((MaxTableEntry >> Offset) == 0)
1613 break;
1614 emitMOVK(AArch64::X17, static_cast<uint16_t>(MaxTableEntry >> Offset),
1615 Offset);
1616 }
1617 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXrs)
1618 .addReg(AArch64::XZR)
1619 .addReg(AArch64::X16)
1620 .addReg(AArch64::X17)
1621 .addImm(0));
1622 }
1623
1624 // This picks entry #0 on failure.
1625 // We might want to trap instead.
1626 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::CSELXr)
1627 .addReg(AArch64::X16)
1628 .addReg(AArch64::X16)
1629 .addReg(AArch64::XZR)
1630 .addImm(AArch64CC::LS));
1631
1632 // Prepare the @PAGE/@PAGEOFF low/high operands.
1633 MachineOperand JTMOHi(JTOp), JTMOLo(JTOp);
1634 MCOperand JTMCHi, JTMCLo;
1635
1636 JTMOHi.setTargetFlags(AArch64II::MO_PAGE);
1637 JTMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
1638
1639 MCInstLowering.lowerOperand(JTMOHi, JTMCHi);
1640 MCInstLowering.lowerOperand(JTMOLo, JTMCLo);
1641
1642 EmitToStreamer(
1643 *OutStreamer,
1644 MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(JTMCHi));
1645
1646 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXri)
1647 .addReg(AArch64::X17)
1648 .addReg(AArch64::X17)
1649 .addOperand(JTMCLo)
1650 .addImm(0));
1651
1652 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRSWroX)
1653 .addReg(AArch64::X16)
1654 .addReg(AArch64::X17)
1655 .addReg(AArch64::X16)
1656 .addImm(0)
1657 .addImm(1));
1658
1659 MCSymbol *AdrLabel = MF->getContext().createTempSymbol();
1660 const auto *AdrLabelE = MCSymbolRefExpr::create(AdrLabel, MF->getContext());
1661 AArch64FI->setJumpTableEntryInfo(JTI, 4, AdrLabel);
1662
1663 OutStreamer->emitLabel(AdrLabel);
1664 EmitToStreamer(
1665 *OutStreamer,
1666 MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addExpr(AdrLabelE));
1667
1668 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1669 .addReg(AArch64::X16)
1670 .addReg(AArch64::X17)
1671 .addReg(AArch64::X16)
1672 .addImm(0));
1673
1674 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
1675}
1676
1677void AArch64AsmPrinter::LowerMOPS(llvm::MCStreamer &OutStreamer,
1678 const llvm::MachineInstr &MI) {
1679 unsigned Opcode = MI.getOpcode();
1680 assert(STI->hasMOPS());
1681 assert(STI->hasMTE() || Opcode != AArch64::MOPSMemorySetTaggingPseudo);
1682
1683 const auto Ops = [Opcode]() -> std::array<unsigned, 3> {
1684 if (Opcode == AArch64::MOPSMemoryCopyPseudo)
1685 return {AArch64::CPYFP, AArch64::CPYFM, AArch64::CPYFE};
1686 if (Opcode == AArch64::MOPSMemoryMovePseudo)
1687 return {AArch64::CPYP, AArch64::CPYM, AArch64::CPYE};
1688 if (Opcode == AArch64::MOPSMemorySetPseudo)
1689 return {AArch64::SETP, AArch64::SETM, AArch64::SETE};
1690 if (Opcode == AArch64::MOPSMemorySetTaggingPseudo)
1691 return {AArch64::SETGP, AArch64::SETGM, AArch64::MOPSSETGE};
1692 llvm_unreachable("Unhandled memory operation pseudo");
1693 }();
1694 const bool IsSet = Opcode == AArch64::MOPSMemorySetPseudo ||
1695 Opcode == AArch64::MOPSMemorySetTaggingPseudo;
1696
1697 for (auto Op : Ops) {
1698 int i = 0;
1699 auto MCIB = MCInstBuilder(Op);
1700 // Destination registers
1701 MCIB.addReg(MI.getOperand(i++).getReg());
1702 MCIB.addReg(MI.getOperand(i++).getReg());
1703 if (!IsSet)
1704 MCIB.addReg(MI.getOperand(i++).getReg());
1705 // Input registers
1706 MCIB.addReg(MI.getOperand(i++).getReg());
1707 MCIB.addReg(MI.getOperand(i++).getReg());
1708 MCIB.addReg(MI.getOperand(i++).getReg());
1709
1710 EmitToStreamer(OutStreamer, MCIB);
1711 }
1712}
1713
1714void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
1715 const MachineInstr &MI) {
1716 unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();
1717
1718 auto &Ctx = OutStreamer.getContext();
1719 MCSymbol *MILabel = Ctx.createTempSymbol();
1720 OutStreamer.emitLabel(MILabel);
1721
1722 SM.recordStackMap(*MILabel, MI);
1723 assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1724
1725 // Scan ahead to trim the shadow.
1726 const MachineBasicBlock &MBB = *MI.getParent();
1728 ++MII;
1729 while (NumNOPBytes > 0) {
1730 if (MII == MBB.end() || MII->isCall() ||
1731 MII->getOpcode() == AArch64::DBG_VALUE ||
1732 MII->getOpcode() == TargetOpcode::PATCHPOINT ||
1733 MII->getOpcode() == TargetOpcode::STACKMAP)
1734 break;
1735 ++MII;
1736 NumNOPBytes -= 4;
1737 }
1738
1739 // Emit nops.
1740 for (unsigned i = 0; i < NumNOPBytes; i += 4)
1741 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1742}
1743
1744// Lower a patchpoint of the form:
1745// [<def>], <id>, <numBytes>, <target>, <numArgs>
1746void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1747 const MachineInstr &MI) {
1748 auto &Ctx = OutStreamer.getContext();
1749 MCSymbol *MILabel = Ctx.createTempSymbol();
1750 OutStreamer.emitLabel(MILabel);
1751 SM.recordPatchPoint(*MILabel, MI);
1752
1753 PatchPointOpers Opers(&MI);
1754
1755 int64_t CallTarget = Opers.getCallTarget().getImm();
1756 unsigned EncodedBytes = 0;
1757 if (CallTarget) {
1758 assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
1759 "High 16 bits of call target should be zero.");
1760 Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
1761 EncodedBytes = 16;
1762 // Materialize the jump address:
1763 emitMOVZ(ScratchReg, (CallTarget >> 32) & 0xFFFF, 32);
1764 emitMOVK(ScratchReg, (CallTarget >> 16) & 0xFFFF, 16);
1765 emitMOVK(ScratchReg, CallTarget & 0xFFFF, 0);
1766 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
1767 }
1768 // Emit padding.
1769 unsigned NumBytes = Opers.getNumPatchBytes();
1770 assert(NumBytes >= EncodedBytes &&
1771 "Patchpoint can't request size less than the length of a call.");
1772 assert((NumBytes - EncodedBytes) % 4 == 0 &&
1773 "Invalid number of NOP bytes requested!");
1774 for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
1775 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1776}
1777
1778void AArch64AsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1779 const MachineInstr &MI) {
1780 StatepointOpers SOpers(&MI);
1781 if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
1782 assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1783 for (unsigned i = 0; i < PatchBytes; i += 4)
1784 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1785 } else {
1786 // Lower call target and choose correct opcode
1787 const MachineOperand &CallTarget = SOpers.getCallTarget();
1788 MCOperand CallTargetMCOp;
1789 unsigned CallOpcode;
1790 switch (CallTarget.getType()) {
1793 MCInstLowering.lowerOperand(CallTarget, CallTargetMCOp);
1794 CallOpcode = AArch64::BL;
1795 break;
1797 CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
1798 CallOpcode = AArch64::BL;
1799 break;
1801 CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
1802 CallOpcode = AArch64::BLR;
1803 break;
1804 default:
1805 llvm_unreachable("Unsupported operand type in statepoint call target");
1806 break;
1807 }
1808
1809 EmitToStreamer(OutStreamer,
1810 MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
1811 }
1812
1813 auto &Ctx = OutStreamer.getContext();
1814 MCSymbol *MILabel = Ctx.createTempSymbol();
1815 OutStreamer.emitLabel(MILabel);
1816 SM.recordStatepoint(*MILabel, MI);
1817}
1818
1819void AArch64AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI) {
1820 // FAULTING_LOAD_OP <def>, <faltinf type>, <MBB handler>,
1821 // <opcode>, <operands>
1822
1823 Register DefRegister = FaultingMI.getOperand(0).getReg();
1825 static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
1826 MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
1827 unsigned Opcode = FaultingMI.getOperand(3).getImm();
1828 unsigned OperandsBeginIdx = 4;
1829
1830 auto &Ctx = OutStreamer->getContext();
1831 MCSymbol *FaultingLabel = Ctx.createTempSymbol();
1832 OutStreamer->emitLabel(FaultingLabel);
1833
1834 assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
1835 FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);
1836
1837 MCInst MI;
1838 MI.setOpcode(Opcode);
1839
1840 if (DefRegister != (Register)0)
1841 MI.addOperand(MCOperand::createReg(DefRegister));
1842
1843 for (const MachineOperand &MO :
1844 llvm::drop_begin(FaultingMI.operands(), OperandsBeginIdx)) {
1845 MCOperand Dest;
1846 lowerOperand(MO, Dest);
1847 MI.addOperand(Dest);
1848 }
1849
1850 OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
1851 EmitToStreamer(MI);
1852}
1853
1854void AArch64AsmPrinter::emitMovXReg(Register Dest, Register Src) {
1855 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ORRXrs)
1856 .addReg(Dest)
1857 .addReg(AArch64::XZR)
1858 .addReg(Src)
1859 .addImm(0));
1860}
1861
1862void AArch64AsmPrinter::emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift) {
1863 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1864 EmitToStreamer(*OutStreamer,
1865 MCInstBuilder(Is64Bit ? AArch64::MOVZXi : AArch64::MOVZWi)
1866 .addReg(Dest)
1867 .addImm(Imm)
1868 .addImm(Shift));
1869}
1870
1871void AArch64AsmPrinter::emitMOVK(Register Dest, uint64_t Imm, unsigned Shift) {
1872 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1873 EmitToStreamer(*OutStreamer,
1874 MCInstBuilder(Is64Bit ? AArch64::MOVKXi : AArch64::MOVKWi)
1875 .addReg(Dest)
1876 .addReg(Dest)
1877 .addImm(Imm)
1878 .addImm(Shift));
1879}
1880
1881void AArch64AsmPrinter::emitAUT(AArch64PACKey::ID Key, Register Pointer,
1882 Register Disc) {
1883 bool IsZeroDisc = Disc == AArch64::XZR;
1884 unsigned Opcode = getAUTOpcodeForKey(Key, IsZeroDisc);
1885
1886 // autiza x16 ; if IsZeroDisc
1887 // autia x16, x17 ; if !IsZeroDisc
1888 MCInst AUTInst;
1889 AUTInst.setOpcode(Opcode);
1890 AUTInst.addOperand(MCOperand::createReg(Pointer));
1891 AUTInst.addOperand(MCOperand::createReg(Pointer));
1892 if (!IsZeroDisc)
1893 AUTInst.addOperand(MCOperand::createReg(Disc));
1894
1895 EmitToStreamer(AUTInst);
1896}
1897
1898void AArch64AsmPrinter::emitPAC(AArch64PACKey::ID Key, Register Pointer,
1899 Register Disc) {
1900 bool IsZeroDisc = Disc == AArch64::XZR;
1901 unsigned Opcode = getPACOpcodeForKey(Key, IsZeroDisc);
1902
1903 // paciza x16 ; if IsZeroDisc
1904 // pacia x16, x17 ; if !IsZeroDisc
1905 MCInst PACInst;
1906 PACInst.setOpcode(Opcode);
1907 PACInst.addOperand(MCOperand::createReg(Pointer));
1908 PACInst.addOperand(MCOperand::createReg(Pointer));
1909 if (!IsZeroDisc)
1910 PACInst.addOperand(MCOperand::createReg(Disc));
1911
1912 EmitToStreamer(PACInst);
1913}
1914
1915void AArch64AsmPrinter::emitBLRA(bool IsCall, AArch64PACKey::ID Key,
1916 Register Target, Register Disc) {
1917 bool IsZeroDisc = Disc == AArch64::XZR;
1918 unsigned Opcode = getBranchOpcodeForKey(IsCall, Key, IsZeroDisc);
1919
1920 // blraaz x16 ; if IsZeroDisc
1921 // blraa x16, x17 ; if !IsZeroDisc
1922 MCInst Inst;
1923 Inst.setOpcode(Opcode);
1924 Inst.addOperand(MCOperand::createReg(Target));
1925 if (!IsZeroDisc)
1926 Inst.addOperand(MCOperand::createReg(Disc));
1927 EmitToStreamer(Inst);
1928}
1929
1930void AArch64AsmPrinter::emitFMov0(const MachineInstr &MI) {
1931 Register DestReg = MI.getOperand(0).getReg();
1932 if (!STI->hasZeroCycleZeroingFPWorkaround() && STI->isNeonAvailable()) {
1933 if (STI->hasZeroCycleZeroingFPR64()) {
1934 // Convert H/S register to corresponding D register
1935 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
1936 if (AArch64::FPR16RegClass.contains(DestReg))
1937 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
1938 &AArch64::FPR64RegClass);
1939 else if (AArch64::FPR32RegClass.contains(DestReg))
1940 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
1941 &AArch64::FPR64RegClass);
1942 else
1943 assert(AArch64::FPR64RegClass.contains(DestReg));
1944
1945 MCInst MOVI;
1946 MOVI.setOpcode(AArch64::MOVID);
1947 MOVI.addOperand(MCOperand::createReg(DestReg));
1949 EmitToStreamer(*OutStreamer, MOVI);
1950 ++NumZCZeroingInstrsFPR;
1951 } else if (STI->hasZeroCycleZeroingFPR128()) {
1952 // Convert H/S/D register to corresponding Q register
1953 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
1954 if (AArch64::FPR16RegClass.contains(DestReg)) {
1955 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
1956 &AArch64::FPR128RegClass);
1957 } else if (AArch64::FPR32RegClass.contains(DestReg)) {
1958 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
1959 &AArch64::FPR128RegClass);
1960 } else {
1961 assert(AArch64::FPR64RegClass.contains(DestReg));
1962 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::dsub,
1963 &AArch64::FPR128RegClass);
1964 }
1965
1966 MCInst MOVI;
1967 MOVI.setOpcode(AArch64::MOVIv2d_ns);
1968 MOVI.addOperand(MCOperand::createReg(DestReg));
1970 EmitToStreamer(*OutStreamer, MOVI);
1971 ++NumZCZeroingInstrsFPR;
1972 } else {
1973 emitFMov0AsFMov(MI, DestReg);
1974 }
1975 } else {
1976 emitFMov0AsFMov(MI, DestReg);
1977 }
1978}
1979
1980void AArch64AsmPrinter::emitFMov0AsFMov(const MachineInstr &MI,
1981 Register DestReg) {
1982 MCInst FMov;
1983 switch (MI.getOpcode()) {
1984 default:
1985 llvm_unreachable("Unexpected opcode");
1986 case AArch64::FMOVH0:
1987 FMov.setOpcode(STI->hasFullFP16() ? AArch64::FMOVWHr : AArch64::FMOVWSr);
1988 if (!STI->hasFullFP16())
1989 DestReg = (AArch64::S0 + (DestReg - AArch64::H0));
1990 FMov.addOperand(MCOperand::createReg(DestReg));
1991 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1992 break;
1993 case AArch64::FMOVS0:
1994 FMov.setOpcode(AArch64::FMOVWSr);
1995 FMov.addOperand(MCOperand::createReg(DestReg));
1996 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1997 break;
1998 case AArch64::FMOVD0:
1999 FMov.setOpcode(AArch64::FMOVXDr);
2000 FMov.addOperand(MCOperand::createReg(DestReg));
2001 FMov.addOperand(MCOperand::createReg(AArch64::XZR));
2002 break;
2003 }
2004 EmitToStreamer(*OutStreamer, FMov);
2005}
2006
/// Materialize the discriminator for a pointer-authentication operation and
/// return the register holding it.
///
/// \param Disc constant discriminator; must fit in 16 bits (asserted), 0
///   meaning "none".
/// \param AddrDisc address-discriminator register; NoRegister/XZR meaning
///   "none".
/// \param ScratchReg register that may be clobbered to hold the blended
///   discriminator; must satisfy isPtrauthRegSafe (asserted).
/// \param MayClobberAddrDisc when true, AddrDisc may itself be overwritten
///   and reused as the result register, saving a MOV.
/// \returns XZR (no discriminator), AddrDisc (address-only), or the register
///   holding the blend of AddrDisc with Disc in bits [63:48].
Register AArch64AsmPrinter::emitPtrauthDiscriminator(uint64_t Disc,
                                                     Register AddrDisc,
                                                     Register ScratchReg,
                                                     bool MayClobberAddrDisc) {
  assert(isPtrauthRegSafe(ScratchReg) &&
         "Safe scratch register must be provided by the caller");
  assert(isUInt<16>(Disc) && "Constant discriminator is too wide");

  // So far we've used NoRegister in pseudos. Now we need real encodings.
  if (AddrDisc == AArch64::NoRegister)
    AddrDisc = AArch64::XZR;

  // If there is no constant discriminator, there's no blend involved:
  // just use the address discriminator register as-is (XZR or not).
  if (!Disc)
    return AddrDisc;

  // If there's only a constant discriminator, MOV it into the scratch register.
  if (AddrDisc == AArch64::XZR) {
    emitMOVZ(ScratchReg, Disc, 0);
    return ScratchReg;
  }

  // If there are both, emit a blend into the scratch register.

  // Check if we can save one MOV instruction.
  if (MayClobberAddrDisc && isPtrauthRegSafe(AddrDisc)) {
    ScratchReg = AddrDisc;
  } else {
    emitMovXReg(ScratchReg, AddrDisc);
    assert(ScratchReg != AddrDisc &&
           "Forbidden to clobber AddrDisc, but have to");
  }

  // Insert the constant discriminator into bits [63:48] of the result.
  emitMOVK(ScratchReg, Disc, 48);
  return ScratchReg;
}
2044
2045/// Emit a code sequence to check an authenticated pointer value.
2046///
2047/// This function emits a sequence of instructions that checks if TestedReg was
2048/// authenticated successfully. On success, execution continues at the next
2049/// instruction after the sequence.
2050///
2051/// The action performed on failure depends on the OnFailure argument:
2052/// * if OnFailure is not nullptr, control is transferred to that label after
2053/// clearing the PAC field
2054/// * otherwise, BRK instruction is emitted to generate an error
2055void AArch64AsmPrinter::emitPtrauthCheckAuthenticatedValue(
2056 Register TestedReg, Register ScratchReg, AArch64PACKey::ID Key,
2057 AArch64PAuth::AuthCheckMethod Method, const MCSymbol *OnFailure) {
2058 // Insert a sequence to check if authentication of TestedReg succeeded,
2059 // such as:
2060 //
2061 // - checked and clearing:
2062 // ; x16 is TestedReg, x17 is ScratchReg
2063 // mov x17, x16
2064 // xpaci x17
2065 // cmp x16, x17
2066 // b.eq Lsuccess
2067 // mov x16, x17
2068 // b Lend
2069 // Lsuccess:
2070 // ; skipped if authentication failed
2071 // Lend:
2072 // ...
2073 //
2074 // - checked and trapping:
2075 // mov x17, x16
2076 // xpaci x17
2077 // cmp x16, x17
2078 // b.eq Lsuccess
2079 // brk #<0xc470 + aut key>
2080 // Lsuccess:
2081 // ...
2082 //
2083 // See the documentation on AuthCheckMethod enumeration constants for
2084 // the specific code sequences that can be used to perform the check.
2086
2087 if (Method == AuthCheckMethod::None)
2088 return;
2089 if (Method == AuthCheckMethod::DummyLoad) {
2090 EmitToStreamer(MCInstBuilder(AArch64::LDRWui)
2091 .addReg(getWRegFromXReg(ScratchReg))
2092 .addReg(TestedReg)
2093 .addImm(0));
2094 assert(!OnFailure && "DummyLoad always traps on error");
2095 return;
2096 }
2097
2098 MCSymbol *SuccessSym = createTempSymbol("auth_success_");
2099 if (Method == AuthCheckMethod::XPAC || Method == AuthCheckMethod::XPACHint) {
2100 // mov Xscratch, Xtested
2101 emitMovXReg(ScratchReg, TestedReg);
2102
2103 if (Method == AuthCheckMethod::XPAC) {
2104 // xpac(i|d) Xscratch
2105 unsigned XPACOpc = getXPACOpcodeForKey(Key);
2106 EmitToStreamer(
2107 MCInstBuilder(XPACOpc).addReg(ScratchReg).addReg(ScratchReg));
2108 } else {
2109 // xpaclri
2110
2111 // Note that this method applies XPAC to TestedReg instead of ScratchReg.
2112 assert(TestedReg == AArch64::LR &&
2113 "XPACHint mode is only compatible with checking the LR register");
2115 "XPACHint mode is only compatible with I-keys");
2116 EmitToStreamer(MCInstBuilder(AArch64::XPACLRI));
2117 }
2118
2119 // cmp Xtested, Xscratch
2120 EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
2121 .addReg(AArch64::XZR)
2122 .addReg(TestedReg)
2123 .addReg(ScratchReg)
2124 .addImm(0));
2125
2126 // b.eq Lsuccess
2127 EmitToStreamer(
2128 MCInstBuilder(AArch64::Bcc)
2129 .addImm(AArch64CC::EQ)
2130 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
2131 } else if (Method == AuthCheckMethod::HighBitsNoTBI) {
2132 // eor Xscratch, Xtested, Xtested, lsl #1
2133 EmitToStreamer(MCInstBuilder(AArch64::EORXrs)
2134 .addReg(ScratchReg)
2135 .addReg(TestedReg)
2136 .addReg(TestedReg)
2137 .addImm(1));
2138 // tbz Xscratch, #62, Lsuccess
2139 EmitToStreamer(
2140 MCInstBuilder(AArch64::TBZX)
2141 .addReg(ScratchReg)
2142 .addImm(62)
2143 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
2144 } else {
2145 llvm_unreachable("Unsupported check method");
2146 }
2147
2148 if (!OnFailure) {
2149 // Trapping sequences do a 'brk'.
2150 // brk #<0xc470 + aut key>
2151 EmitToStreamer(MCInstBuilder(AArch64::BRK).addImm(0xc470 | Key));
2152 } else {
2153 // Non-trapping checked sequences return the stripped result in TestedReg,
2154 // skipping over success-only code (such as re-signing the pointer) by
2155 // jumping to OnFailure label.
2156 // Note that this can introduce an authentication oracle (such as based on
2157 // the high bits of the re-signed value).
2158
2159 // FIXME: The XPAC method can be optimized by applying XPAC to TestedReg
2160 // instead of ScratchReg, thus eliminating one `mov` instruction.
2161 // Both XPAC and XPACHint can be further optimized by not using a
2162 // conditional branch jumping over an unconditional one.
2163
2164 switch (Method) {
2165 case AuthCheckMethod::XPACHint:
2166 // LR is already XPAC-ed at this point.
2167 break;
2168 case AuthCheckMethod::XPAC:
2169 // mov Xtested, Xscratch
2170 emitMovXReg(TestedReg, ScratchReg);
2171 break;
2172 default:
2173 // If Xtested was not XPAC-ed so far, emit XPAC here.
2174 // xpac(i|d) Xtested
2175 unsigned XPACOpc = getXPACOpcodeForKey(Key);
2176 EmitToStreamer(
2177 MCInstBuilder(XPACOpc).addReg(TestedReg).addReg(TestedReg));
2178 }
2179
2180 // b Lend
2181 const auto *OnFailureExpr = MCSymbolRefExpr::create(OnFailure, OutContext);
2182 EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(OnFailureExpr));
2183 }
2184
2185 // If the auth check succeeds, we can continue.
2186 // Lsuccess:
2187 OutStreamer->emitLabel(SuccessSym);
2188}
2189
2190// With Pointer Authentication, it may be needed to explicitly check the
2191// authenticated value in LR before performing a tail call.
2192// Otherwise, the callee may re-sign the invalid return address,
2193// introducing a signing oracle.
2194void AArch64AsmPrinter::emitPtrauthTailCallHardening(const MachineInstr *TC) {
2195 if (!AArch64FI->shouldSignReturnAddress(*MF))
2196 return;
2197
2198 auto LRCheckMethod = STI->getAuthenticatedLRCheckMethod(*MF);
2199 if (LRCheckMethod == AArch64PAuth::AuthCheckMethod::None)
2200 return;
2201
2202 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
2203 Register ScratchReg =
2204 TC->readsRegister(AArch64::X16, TRI) ? AArch64::X17 : AArch64::X16;
2205 assert(!TC->readsRegister(ScratchReg, TRI) &&
2206 "Neither x16 nor x17 is available as a scratch register");
2209 emitPtrauthCheckAuthenticatedValue(AArch64::LR, ScratchReg, Key,
2210 LRCheckMethod);
2211}
2212
2213bool AArch64AsmPrinter::emitDeactivationSymbolRelocation(Value *DS) {
2214 if (!DS)
2215 return false;
2216
2217 if (isa<GlobalAlias>(DS)) {
2218 // Just emit the nop directly.
2219 EmitToStreamer(MCInstBuilder(AArch64::NOP));
2220 return true;
2221 }
2222 MCSymbol *Dot = OutContext.createTempSymbol();
2223 OutStreamer->emitLabel(Dot);
2224 const MCExpr *DeactDotExpr = MCSymbolRefExpr::create(Dot, OutContext);
2225
2226 const MCExpr *DSExpr = MCSymbolRefExpr::create(
2227 OutContext.getOrCreateSymbol(DS->getName()), OutContext);
2228 OutStreamer->emitRelocDirective(*DeactDotExpr, "R_AARCH64_PATCHINST", DSExpr,
2229 SMLoc());
2230 return false;
2231}
2232
// Build a signing/authentication schema from pseudo-instruction operands:
// the key, the constant discriminator, and the address-discriminator
// register operand (recording whether that operand is killed).
AArch64AsmPrinter::PtrAuthSchema::PtrAuthSchema(
    AArch64PACKey::ID Key, uint64_t IntDisc, const MachineOperand &AddrDiscOp)
    : Key(Key), IntDisc(IntDisc), AddrDisc(AddrDiscOp.getReg()),
      AddrDiscIsKilled(AddrDiscOp.isKill()) {}
2237
2238void AArch64AsmPrinter::emitPtrauthAuthResign(
2239 Register Pointer, Register Scratch, PtrAuthSchema AuthSchema,
2240 std::optional<PtrAuthSchema> SignSchema, std::optional<uint64_t> OptAddend,
2241 Value *DS) {
2242 const bool IsResign = SignSchema.has_value();
2243 const bool HasLoad = OptAddend.has_value();
2244 // We expand AUT/AUTPAC into a sequence of the form
2245 //
2246 // ; authenticate x16
2247 // ; check pointer in x16
2248 // Lsuccess:
2249 // ; sign x16 (if AUTPAC)
2250 // Lend: ; if not trapping on failure
2251 //
2252 // with the checking sequence chosen depending on whether/how we should check
2253 // the pointer and whether we should trap on failure.
2254
2255 // By default, auth/resign sequences check for auth failures.
2256 bool ShouldCheck = true;
2257 // In the checked sequence, we only trap if explicitly requested.
2258 bool ShouldTrap = MF->getFunction().hasFnAttribute("ptrauth-auth-traps");
2259
2260 // On an FPAC CPU, you get traps whether you want them or not: there's
2261 // no point in emitting checks or traps.
2262 if (STI->hasFPAC())
2263 ShouldCheck = ShouldTrap = false;
2264
2265 // However, command-line flags can override this, for experimentation.
2266 switch (PtrauthAuthChecks) {
2268 break;
2270 ShouldCheck = ShouldTrap = false;
2271 break;
2273 ShouldCheck = true;
2274 ShouldTrap = false;
2275 break;
2277 ShouldCheck = ShouldTrap = true;
2278 break;
2279 }
2280
2281 // Compute aut discriminator
2282 Register AUTDiscReg =
2283 emitPtrauthDiscriminator(AuthSchema.IntDisc, AuthSchema.AddrDisc, Scratch,
2284 AuthSchema.AddrDiscIsKilled);
2285
2286 if (!emitDeactivationSymbolRelocation(DS))
2287 emitAUT(AuthSchema.Key, Pointer, AUTDiscReg);
2288
2289 // Unchecked or checked-but-non-trapping AUT is just an "AUT": we're done.
2290 if (!IsResign && (!ShouldCheck || !ShouldTrap))
2291 return;
2292
2293 MCSymbol *EndSym = nullptr;
2294
2295 if (ShouldCheck) {
2296 if (IsResign && !ShouldTrap)
2297 EndSym = createTempSymbol("resign_end_");
2298
2299 emitPtrauthCheckAuthenticatedValue(Pointer, Scratch, AuthSchema.Key,
2300 AArch64PAuth::AuthCheckMethod::XPAC,
2301 EndSym);
2302 }
2303
2304 // We already emitted unchecked and checked-but-non-trapping AUTs.
2305 // That left us with trapping AUTs, and AUTPA/AUTRELLOADPACs.
2306 // Trapping AUTs don't need PAC: we're done.
2307 if (!IsResign)
2308 return;
2309
2310 if (HasLoad) {
2311 int64_t Addend = *OptAddend;
2312 // incoming rawpointer in X16, X17 is not live at this point.
2313 // LDSRWpre x17, x16, simm9 ; note: x16+simm9 used later.
2314 if (isInt<9>(Addend)) {
2315 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRSWpre)
2316 .addReg(AArch64::X16)
2317 .addReg(AArch64::X17)
2318 .addReg(AArch64::X16)
2319 .addImm(/*simm9:*/ Addend));
2320 } else {
2321 // x16 = x16 + Addend computation has 2 variants
2322 if (isUInt<24>(Addend)) {
2323 // variant 1: add x16, x16, Addend >> shift12 ls shift12
2324 // This can take upto 2 instructions.
2325 for (int BitPos = 0; BitPos != 24 && (Addend >> BitPos); BitPos += 12) {
2326 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXri)
2327 .addReg(AArch64::X16)
2328 .addReg(AArch64::X16)
2329 .addImm((Addend >> BitPos) & 0xfff)
2331 AArch64_AM::LSL, BitPos)));
2332 }
2333 } else {
2334 // variant 2: accumulate constant in X17 16 bits at a time, and add to
2335 // X16 This can take 2-5 instructions.
2336 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::MOVZXi)
2337 .addReg(AArch64::X17)
2338 .addImm(Addend & 0xffff)
2340 AArch64_AM::LSL, 0)));
2341
2342 for (int Offset = 16; Offset < 64; Offset += 16) {
2343 uint16_t Fragment = static_cast<uint16_t>(Addend >> Offset);
2344 if (!Fragment)
2345 continue;
2346 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::MOVKXi)
2347 .addReg(AArch64::X17)
2348 .addReg(AArch64::X17)
2349 .addImm(Fragment)
2350 .addImm(/*shift:*/ Offset));
2351 }
2352 // addx x16, x16, x17
2353 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXrs)
2354 .addReg(AArch64::X16)
2355 .addReg(AArch64::X16)
2356 .addReg(AArch64::X17)
2357 .addImm(0));
2358 }
2359 // ldrsw x17,x16(0)
2360 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRSWui)
2361 .addReg(AArch64::X17)
2362 .addReg(AArch64::X16)
2363 .addImm(0));
2364 }
2365 // addx x16, x16, x17
2366 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXrs)
2367 .addReg(AArch64::X16)
2368 .addReg(AArch64::X16)
2369 .addReg(AArch64::X17)
2370 .addImm(0));
2371
2372 } /* HasLoad == true */
2373
2374 // Compute pac discriminator into x17
2375 Register PACDiscReg = emitPtrauthDiscriminator(SignSchema->IntDisc,
2376 SignSchema->AddrDisc, Scratch);
2377 emitPAC(SignSchema->Key, Pointer, PACDiscReg);
2378
2379 // Lend:
2380 if (EndSym)
2381 OutStreamer->emitLabel(EndSym);
2382}
2383
2384void AArch64AsmPrinter::emitPtrauthSign(const MachineInstr *MI) {
2385 Register Val = MI->getOperand(1).getReg();
2386 auto Key = (AArch64PACKey::ID)MI->getOperand(2).getImm();
2387 uint64_t Disc = MI->getOperand(3).getImm();
2388 Register AddrDisc = MI->getOperand(4).getReg();
2389 bool AddrDiscKilled = MI->getOperand(4).isKill();
2390
2391 // As long as at least one of Val and AddrDisc is in GPR64noip, a scratch
2392 // register is available.
2393 Register ScratchReg = Val == AArch64::X16 ? AArch64::X17 : AArch64::X16;
2394 assert(ScratchReg != AddrDisc &&
2395 "Neither X16 nor X17 is available as a scratch register");
2396
2397 // Compute pac discriminator
2398 Register DiscReg = emitPtrauthDiscriminator(
2399 Disc, AddrDisc, ScratchReg, /*MayClobberAddrDisc=*/AddrDiscKilled);
2400
2401 if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol()))
2402 return;
2403
2404 emitPAC(Key, Val, DiscReg);
2405}
2406
2407void AArch64AsmPrinter::emitPtrauthBranch(const MachineInstr *MI) {
2408 bool IsCall = MI->getOpcode() == AArch64::BLRA;
2409 unsigned BrTarget = MI->getOperand(0).getReg();
2410
2411 auto Key = (AArch64PACKey::ID)MI->getOperand(1).getImm();
2412 uint64_t Disc = MI->getOperand(2).getImm();
2413
2414 unsigned AddrDisc = MI->getOperand(3).getReg();
2415
2416 // Make sure AddrDisc is solely used to compute the discriminator.
2417 // While hardly meaningful, it is still possible to describe an authentication
2418 // of a pointer against its own value (instead of storage address) with
2419 // intrinsics, so use report_fatal_error instead of assert.
2420 if (BrTarget == AddrDisc)
2421 report_fatal_error("Branch target is signed with its own value");
2422
2423 // If we are printing BLRA pseudo, try to save one MOV by making use of the
2424 // fact that x16 and x17 are described as clobbered by the MI instruction and
2425 // AddrDisc is not used as any other input.
2426 //
2427 // Back in the day, emitPtrauthDiscriminator was restricted to only returning
2428 // either x16 or x17, meaning the returned register is always among the
2429 // implicit-def'ed registers of BLRA pseudo. Now this property can be violated
2430 // if isX16X17Safer predicate is false, thus manually check if AddrDisc is
2431 // among x16 and x17 to prevent clobbering unexpected registers.
2432 //
2433 // Unlike BLRA, BRA pseudo is used to perform computed goto, and thus not
2434 // declared as clobbering x16/x17.
2435 //
2436 // FIXME: Make use of `killed` flags and register masks instead.
2437 bool AddrDiscIsImplicitDef =
2438 IsCall && (AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17);
2439 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17,
2440 AddrDiscIsImplicitDef);
2441 emitBLRA(IsCall, Key, BrTarget, DiscReg);
2442}
2443
2444void AArch64AsmPrinter::emitAddImm(MCRegister Reg, int64_t Addend,
2445 MCRegister Tmp) {
2446 if (Addend != 0) {
2447 const uint64_t AbsOffset = (Addend > 0 ? Addend : -((uint64_t)Addend));
2448 const bool IsNeg = Addend < 0;
2449 if (isUInt<24>(AbsOffset)) {
2450 for (int BitPos = 0; BitPos != 24 && (AbsOffset >> BitPos);
2451 BitPos += 12) {
2452 EmitToStreamer(
2453 MCInstBuilder(IsNeg ? AArch64::SUBXri : AArch64::ADDXri)
2454 .addReg(Reg)
2455 .addReg(Reg)
2456 .addImm((AbsOffset >> BitPos) & 0xfff)
2457 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, BitPos)));
2458 }
2459 } else {
2460 const uint64_t UAddend = Addend;
2461 EmitToStreamer(MCInstBuilder(IsNeg ? AArch64::MOVNXi : AArch64::MOVZXi)
2462 .addReg(Tmp)
2463 .addImm((IsNeg ? ~UAddend : UAddend) & 0xffff)
2464 .addImm(/*shift=*/0));
2465 auto NeedMovk = [IsNeg, UAddend](int BitPos) -> bool {
2466 assert(BitPos == 16 || BitPos == 32 || BitPos == 48);
2467 uint64_t Shifted = UAddend >> BitPos;
2468 if (!IsNeg)
2469 return Shifted != 0;
2470 for (int I = 0; I != 64 - BitPos; I += 16)
2471 if (((Shifted >> I) & 0xffff) != 0xffff)
2472 return true;
2473 return false;
2474 };
2475 for (int BitPos = 16; BitPos != 64 && NeedMovk(BitPos); BitPos += 16)
2476 emitMOVK(Tmp, (UAddend >> BitPos) & 0xffff, BitPos);
2477
2478 EmitToStreamer(MCInstBuilder(AArch64::ADDXrs)
2479 .addReg(Reg)
2480 .addReg(Reg)
2481 .addReg(Tmp)
2482 .addImm(/*shift=*/0));
2483 }
2484 }
2485}
2486
/// Materialize into \p Reg the address described by \p Expr, using \p Tmp as
/// scratch for any constant offset. DSO-local addresses are formed with
/// ADRP+ADD; others are loaded (ADRP+LDR) and then offset via emitAddImm.
void AArch64AsmPrinter::emitAddress(MCRegister Reg, const MCExpr *Expr,
                                    MCRegister Tmp, bool DSOLocal,
                                    const MCSubtargetInfo &STI) {
  MCValue Val;
  if (!Expr->evaluateAsRelocatable(Val, nullptr))
    report_fatal_error("emitAddress could not evaluate");
  if (DSOLocal) {
    // adrp Reg, <page of Expr>
    // NOTE(review): the ADRP page-specifier operand line appears to have been
    // lost in this copy (compare the S_LO12 ADD below) — confirm upstream.
    EmitToStreamer(
        MCInstBuilder(AArch64::ADRP)
            .addReg(Reg)
                                 OutStreamer->getContext())));
    // add Reg, Reg, :lo12:Expr
    EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                       .addReg(Reg)
                       .addReg(Reg)
                       .addExpr(MCSpecifierExpr::create(
                           Expr, AArch64::S_LO12, OutStreamer->getContext()))
                       .addImm(0));
  } else {
    // Non-local: go through the symbol's GOT entry.
    auto *SymRef =
        MCSymbolRefExpr::create(Val.getAddSym(), OutStreamer->getContext());
    // NOTE(review): the specifier operands of this ADRP/LDR pair (GOT page /
    // GOT lo12 of SymRef) appear truncated in this copy — confirm upstream.
    EmitToStreamer(
        MCInstBuilder(AArch64::ADRP)
            .addReg(Reg)
                                 OutStreamer->getContext())));
    EmitToStreamer(
        MCInstBuilder(AArch64::LDRXui)
            .addReg(Reg)
            .addReg(Reg)
                                 OutStreamer->getContext())));
    // Apply any constant addend carried by the relocatable expression.
    emitAddImm(Reg, Val.getConstant(), Tmp);
  }
}
2522
2524 // IFUNCs are ELF-only.
2525 if (!TT.isOSBinFormatELF())
2526 return false;
2527
2528 // IFUNCs are supported on glibc, bionic, and some but not all of the BSDs.
2529 return TT.isOSGlibc() || TT.isAndroid() || TT.isOSFreeBSD() ||
2530 TT.isOSDragonFly() || TT.isOSNetBSD();
2531}
2532
2533// Emit an ifunc resolver that returns a signed pointer to the specified target,
2534// and return a FUNCINIT reference to the resolver. In the linked binary, this
2535// function becomes the target of an IRELATIVE relocation. This resolver is used
2536// to relocate signed pointers in global variable initializers in special cases
2537// where the standard R_AARCH64_AUTH_ABS64 relocation would not work.
2538//
2539// Example (signed null pointer, not address discriminated):
2540//
2541// .8byte .Lpauth_ifunc0
2542// .pushsection .text.startup,"ax",@progbits
2543// .Lpauth_ifunc0:
2544// mov x0, #0
2545// mov x1, #12345
2546// b __emupac_pacda
2547//
2548// Example (signed null pointer, address discriminated):
2549//
2550// .Ltmp:
2551// .8byte .Lpauth_ifunc0
2552// .pushsection .text.startup,"ax",@progbits
2553// .Lpauth_ifunc0:
2554// mov x0, #0
2555// adrp x1, .Ltmp
2556// add x1, x1, :lo12:.Ltmp
2557// b __emupac_pacda
2558// .popsection
2559//
2560// Example (signed pointer to symbol, not address discriminated):
2561//
2562// .Ltmp:
2563// .8byte .Lpauth_ifunc0
2564// .pushsection .text.startup,"ax",@progbits
2565// .Lpauth_ifunc0:
2566// adrp x0, symbol
2567// add x0, x0, :lo12:symbol
2568// mov x1, #12345
2569// b __emupac_pacda
2570// .popsection
2571//
2572// Example (signed null pointer, not address discriminated, with deactivation
2573// symbol ds):
2574//
2575// .8byte .Lpauth_ifunc0
2576// .pushsection .text.startup,"ax",@progbits
2577// .Lpauth_ifunc0:
2578// mov x0, #0
2579// mov x1, #12345
2580// .reloc ., R_AARCH64_PATCHINST, ds
2581// b __emupac_pacda
2582// ret
2583// .popsection
2584const MCExpr *AArch64AsmPrinter::emitPAuthRelocationAsIRelative(
2585 const MCExpr *Target, uint64_t Disc, AArch64PACKey::ID KeyID,
2586 bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr) {
2587 const Triple &TT = TM.getTargetTriple();
2588
2589 // We only emit an IRELATIVE relocation if the target supports IRELATIVE.
2591 return nullptr;
2592
2593 // For now, only the DA key is supported.
2594 if (KeyID != AArch64PACKey::DA)
2595 return nullptr;
2596
2597 // AArch64Subtarget is huge, so heap allocate it so we don't run out of stack
2598 // space.
2599 auto STI = std::make_unique<AArch64Subtarget>(
2600 TT, TM.getTargetCPU(), TM.getTargetCPU(), TM.getTargetFeatureString(), TM,
2601 true);
2602 this->STI = STI.get();
2603
2604 MCSymbol *Place = OutStreamer->getContext().createTempSymbol();
2605 OutStreamer->emitLabel(Place);
2606 OutStreamer->pushSection();
2607
2608 const MCSymbolELF *Group =
2609 static_cast<MCSectionELF *>(OutStreamer->getCurrentSectionOnly())
2610 ->getGroup();
2612 if (Group)
2614 OutStreamer->switchSection(OutStreamer->getContext().getELFSection(
2615 ".text.startup", ELF::SHT_PROGBITS, Flags, 0, Group, true,
2616 Group ? MCSection::NonUniqueID : PAuthIFuncNextUniqueID++, nullptr));
2617
2618 MCSymbol *IRelativeSym =
2619 OutStreamer->getContext().createLinkerPrivateSymbol("pauth_ifunc");
2620 OutStreamer->emitLabel(IRelativeSym);
2621 if (isa<MCConstantExpr>(Target)) {
2622 OutStreamer->emitInstruction(MCInstBuilder(AArch64::MOVZXi)
2623 .addReg(AArch64::X0)
2624 .addExpr(Target)
2625 .addImm(0),
2626 *STI);
2627 } else {
2628 emitAddress(AArch64::X0, Target, AArch64::X16, IsDSOLocal, *STI);
2629 }
2630 if (HasAddressDiversity) {
2631 auto *PlacePlusDisc = MCBinaryExpr::createAdd(
2632 MCSymbolRefExpr::create(Place, OutStreamer->getContext()),
2633 MCConstantExpr::create(Disc, OutStreamer->getContext()),
2634 OutStreamer->getContext());
2635 emitAddress(AArch64::X1, PlacePlusDisc, AArch64::X16, /*IsDSOLocal=*/true,
2636 *STI);
2637 } else {
2638 if (!isUInt<16>(Disc)) {
2639 OutContext.reportError(SMLoc(), "AArch64 PAC Discriminator '" +
2640 Twine(Disc) +
2641 "' out of range [0, 0xFFFF]");
2642 }
2643 emitMOVZ(AArch64::X1, Disc, 0);
2644 }
2645
2646 if (DSExpr) {
2647 MCSymbol *PrePACInst = OutStreamer->getContext().createTempSymbol();
2648 OutStreamer->emitLabel(PrePACInst);
2649
2650 auto *PrePACInstExpr =
2651 MCSymbolRefExpr::create(PrePACInst, OutStreamer->getContext());
2652 OutStreamer->emitRelocDirective(*PrePACInstExpr, "R_AARCH64_PATCHINST",
2653 DSExpr, SMLoc());
2654 }
2655
2656 // We don't know the subtarget because this is being emitted for a global
2657 // initializer. Because the performance of IFUNC resolvers is unimportant, we
2658 // always call the EmuPAC runtime, which will end up using the PAC instruction
2659 // if the target supports PAC.
2660 MCSymbol *EmuPAC =
2661 OutStreamer->getContext().getOrCreateSymbol("__emupac_pacda");
2662 const MCSymbolRefExpr *EmuPACRef =
2663 MCSymbolRefExpr::create(EmuPAC, OutStreamer->getContext());
2664 OutStreamer->emitInstruction(MCInstBuilder(AArch64::B).addExpr(EmuPACRef),
2665 *STI);
2666
2667 // We need a RET despite the above tail call because the deactivation symbol
2668 // may replace the tail call with a NOP.
2669 if (DSExpr)
2670 OutStreamer->emitInstruction(
2671 MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI);
2672 OutStreamer->popSection();
2673
2675 MCSymbolRefExpr::create(IRelativeSym, OutStreamer->getContext()),
2676 AArch64::S_FUNCINIT, OutStreamer->getContext());
2677}
2678
2679const MCExpr *
2680AArch64AsmPrinter::lowerConstantPtrAuth(const ConstantPtrAuth &CPA) {
2681 MCContext &Ctx = OutContext;
2682
2683 // Figure out the base symbol and the addend, if any.
2684 APInt Offset(64, 0);
2685 const Value *BaseGV = CPA.getPointer()->stripAndAccumulateConstantOffsets(
2686 getDataLayout(), Offset, /*AllowNonInbounds=*/true);
2687
2688 auto *BaseGVB = dyn_cast<GlobalValue>(BaseGV);
2689
2690 const MCExpr *Sym;
2691 if (BaseGVB) {
2692 // If there is an addend, turn that into the appropriate MCExpr.
2693 Sym = MCSymbolRefExpr::create(getSymbol(BaseGVB), Ctx);
2694 if (Offset.sgt(0))
2696 Sym, MCConstantExpr::create(Offset.getSExtValue(), Ctx), Ctx);
2697 else if (Offset.slt(0))
2699 Sym, MCConstantExpr::create((-Offset).getSExtValue(), Ctx), Ctx);
2700 } else {
2701 Sym = MCConstantExpr::create(Offset.getSExtValue(), Ctx);
2702 }
2703
2704 const MCExpr *DSExpr = nullptr;
2705 if (auto *DS = dyn_cast<GlobalValue>(CPA.getDeactivationSymbol())) {
2706 if (isa<GlobalAlias>(DS))
2707 return Sym;
2708 DSExpr = MCSymbolRefExpr::create(getSymbol(DS), Ctx);
2709 }
2710
2711 uint64_t KeyID = CPA.getKey()->getZExtValue();
2712 // We later rely on valid KeyID value in AArch64PACKeyIDToString call from
2713 // AArch64AuthMCExpr::printImpl, so fail fast.
2714 if (KeyID > AArch64PACKey::LAST) {
2715 CPA.getContext().emitError("AArch64 PAC Key ID '" + Twine(KeyID) +
2716 "' out of range [0, " +
2717 Twine((unsigned)AArch64PACKey::LAST) + "]");
2718 KeyID = 0;
2719 }
2720
2721 uint64_t Disc = CPA.getDiscriminator()->getZExtValue();
2722
2723 // Check if we can represent this with an IRELATIVE and emit it if so.
2724 if (auto *IFuncSym = emitPAuthRelocationAsIRelative(
2725 Sym, Disc, AArch64PACKey::ID(KeyID), CPA.hasAddressDiscriminator(),
2726 BaseGVB && BaseGVB->isDSOLocal(), DSExpr))
2727 return IFuncSym;
2728
2729 if (!isUInt<16>(Disc)) {
2730 CPA.getContext().emitError("AArch64 PAC Discriminator '" + Twine(Disc) +
2731 "' out of range [0, 0xFFFF]");
2732 Disc = 0;
2733 }
2734
2735 if (DSExpr)
2736 report_fatal_error("deactivation symbols unsupported in constant "
2737 "expressions on this target");
2738
2739 // Finally build the complete @AUTH expr.
2740 return AArch64AuthMCExpr::create(Sym, Disc, AArch64PACKey::ID(KeyID),
2741 CPA.hasAddressDiscriminator(), Ctx);
2742}
2743
2744void AArch64AsmPrinter::LowerLOADauthptrstatic(const MachineInstr &MI) {
2745 unsigned DstReg = MI.getOperand(0).getReg();
2746 const MachineOperand &GAOp = MI.getOperand(1);
2747 const uint64_t KeyC = MI.getOperand(2).getImm();
2748 assert(KeyC <= AArch64PACKey::LAST &&
2749 "key is out of range [0, AArch64PACKey::LAST]");
2750 const auto Key = (AArch64PACKey::ID)KeyC;
2751 const uint64_t Disc = MI.getOperand(3).getImm();
2752 assert(isUInt<16>(Disc) &&
2753 "constant discriminator is out of range [0, 0xffff]");
2754
2755 // Emit instruction sequence like the following:
2756 // ADRP x16, symbol$auth_ptr$key$disc
2757 // LDR x16, [x16, :lo12:symbol$auth_ptr$key$disc]
2758 //
2759 // Where the $auth_ptr$ symbol is the stub slot containing the signed pointer
2760 // to symbol.
2761 MCSymbol *AuthPtrStubSym;
2762 if (TM.getTargetTriple().isOSBinFormatELF()) {
2763 const auto &TLOF =
2764 static_cast<const AArch64_ELFTargetObjectFile &>(getObjFileLowering());
2765
2766 assert(GAOp.getOffset() == 0 &&
2767 "non-zero offset for $auth_ptr$ stub slots is not supported");
2768 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2769 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2770 } else {
2771 assert(TM.getTargetTriple().isOSBinFormatMachO() &&
2772 "LOADauthptrstatic is implemented only for MachO/ELF");
2773
2774 const auto &TLOF = static_cast<const AArch64_MachoTargetObjectFile &>(
2775 getObjFileLowering());
2776
2777 assert(GAOp.getOffset() == 0 &&
2778 "non-zero offset for $auth_ptr$ stub slots is not supported");
2779 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2780 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2781 }
2782
2783 MachineOperand StubMOHi =
2785 MachineOperand StubMOLo = MachineOperand::CreateMCSymbol(
2786 AuthPtrStubSym, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2787 MCOperand StubMCHi, StubMCLo;
2788
2789 MCInstLowering.lowerOperand(StubMOHi, StubMCHi);
2790 MCInstLowering.lowerOperand(StubMOLo, StubMCLo);
2791
2792 EmitToStreamer(
2793 *OutStreamer,
2794 MCInstBuilder(AArch64::ADRP).addReg(DstReg).addOperand(StubMCHi));
2795
2796 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRXui)
2797 .addReg(DstReg)
2798 .addReg(DstReg)
2799 .addOperand(StubMCLo));
2800}
2801
// Lower the MOVaddrPAC / LOADgotPAC pseudos: materialize the address of the
// global operand into x16 (directly, or by loading it from the GOT), add the
// operand's constant offset, and finally PAC-sign x16 with the requested key
// and discriminator. x16/x17 serve as scratch throughout; the exact
// instruction-selection table is in the block comment below.
void AArch64AsmPrinter::LowerMOVaddrPAC(const MachineInstr &MI) {
  const bool IsGOTLoad = MI.getOpcode() == AArch64::LOADgotPAC;
  // ELF signed-GOT entries are themselves signed and must be authenticated
  // after the load (see the "ELF signed GOT" sequence below).
  const bool IsELFSignedGOT = MI.getParent()
                                  ->getParent()
                                  ->getInfo<AArch64FunctionInfo>()
                                  ->hasELFSignedGOT();
  MachineOperand GAOp = MI.getOperand(0);
  const uint64_t KeyC = MI.getOperand(1).getImm();
  assert(KeyC <= AArch64PACKey::LAST &&
         "key is out of range [0, AArch64PACKey::LAST]");
  const auto Key = (AArch64PACKey::ID)KeyC;
  const unsigned AddrDisc = MI.getOperand(2).getReg();
  const uint64_t Disc = MI.getOperand(3).getImm();

  // Strip the offset off the symbol reference: it is applied separately,
  // after materialization, via emitAddImm (see "add offset to x16" below).
  const int64_t Offset = GAOp.getOffset();
  GAOp.setOffset(0);

  // Emit:
  // target materialization:
  // - via GOT:
  //   - unsigned GOT:
  //     adrp x16, :got:target
  //     ldr x16, [x16, :got_lo12:target]
  //     add offset to x16 if offset != 0
  //   - ELF signed GOT:
  //     adrp x17, :got:target
  //     add x17, x17, :got_auth_lo12:target
  //     ldr x16, [x17]
  //     aut{i|d}a x16, x17
  //     check+trap sequence (if no FPAC)
  //     add offset to x16 if offset != 0
  //
  // - direct:
  //     adrp x16, target
  //     add x16, x16, :lo12:target
  //     add offset to x16 if offset != 0
  //
  // add offset to x16:
  // - abs(offset) fits 24 bits:
  //     add/sub x16, x16, #<offset>[, #lsl 12] (up to 2 instructions)
  // - abs(offset) does not fit 24 bits:
  //   - offset < 0:
  //       movn+movk sequence filling x17 register with the offset (up to 4
  //       instructions)
  //       add x16, x16, x17
  //   - offset > 0:
  //       movz+movk sequence filling x17 register with the offset (up to 4
  //       instructions)
  //       add x16, x16, x17
  //
  // signing:
  // - 0 discriminator:
  //     paciza x16
  // - Non-0 discriminator, no address discriminator:
  //     mov x17, #Disc
  //     pacia x16, x17
  // - address discriminator (with potentially folded immediate discriminator):
  //     pacia x16, xAddrDisc

  MachineOperand GAMOHi(GAOp), GAMOLo(GAOp);
  MCOperand GAMCHi, GAMCLo;

  GAMOHi.setTargetFlags(AArch64II::MO_PAGE);
  GAMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
  if (IsGOTLoad) {
    GAMOHi.addTargetFlag(AArch64II::MO_GOT);
    GAMOLo.addTargetFlag(AArch64II::MO_GOT);
  }

  MCInstLowering.lowerOperand(GAMOHi, GAMCHi);
  MCInstLowering.lowerOperand(GAMOLo, GAMCLo);

  // For the ELF signed-GOT form, the GOT slot address is built in x17 (it is
  // passed to emitAUT below — presumably as the address discriminator of the
  // signed entry; confirm against emitAUT's definition). Otherwise the
  // address is built directly in x16.
  EmitToStreamer(
      MCInstBuilder(AArch64::ADRP)
          .addReg(IsGOTLoad && IsELFSignedGOT ? AArch64::X17 : AArch64::X16)
          .addOperand(GAMCHi));

  if (IsGOTLoad) {
    if (IsELFSignedGOT) {
      EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                         .addReg(AArch64::X17)
                         .addReg(AArch64::X17)
                         .addOperand(GAMCLo)
                         .addImm(0));

      EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                         .addReg(AArch64::X16)
                         .addReg(AArch64::X17)
                         .addImm(0));

      assert(GAOp.isGlobal());
      assert(GAOp.getGlobal()->getValueType() != nullptr);

      // Signed GOT entries use the IA key for function pointers and the DA
      // key for data pointers.
      bool IsFunctionTy = GAOp.getGlobal()->getValueType()->isFunctionTy();
      auto AuthKey = IsFunctionTy ? AArch64PACKey::IA : AArch64PACKey::DA;
      emitAUT(AuthKey, AArch64::X16, AArch64::X17);

      // Without FEAT_FPAC a failed authentication does not fault on its own,
      // so emit an explicit check-and-trap sequence (cf. "check+trap
      // sequence (if no FPAC)" above).
      if (!STI->hasFPAC())
        emitPtrauthCheckAuthenticatedValue(AArch64::X16, AArch64::X17, AuthKey,
                                           AArch64PAuth::AuthCheckMethod::XPAC);
    } else {
      EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                         .addReg(AArch64::X16)
                         .addReg(AArch64::X16)
                         .addOperand(GAMCLo));
    }
  } else {
    EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                       .addReg(AArch64::X16)
                       .addReg(AArch64::X16)
                       .addOperand(GAMCLo)
                       .addImm(0));
  }

  // Apply the stripped constant offset, then compute the discriminator
  // (x17 is available as scratch) and sign x16 in place.
  emitAddImm(AArch64::X16, Offset, AArch64::X17);
  Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17);

  emitPAC(Key, AArch64::X16, DiscReg);
}
2921
// Lower the LOADgotAUTH pseudo: load the global's signed pointer from its
// GOT slot and authenticate it into the destination register.
// - CodeModel::Tiny addresses the slot with a single ADR; otherwise
//   ADRP + ADD is used. The slot address lives in x17 in both cases.
// - extern_weak globals may resolve to a null GOT entry: a CBZ skips the
//   authentication so that null is propagated unauthenticated.
// - Without FEAT_FPAC, the authenticated value is verified with an explicit
//   check sequence and then copied from the x16 staging register to DstReg.
void AArch64AsmPrinter::LowerLOADgotAUTH(const MachineInstr &MI) {
  Register DstReg = MI.getOperand(0).getReg();
  // With FPAC the hardware faults on authentication failure, so we can
  // authenticate straight into DstReg; otherwise stage the value in x16 for
  // the explicit check at the end.
  Register AuthResultReg = STI->hasFPAC() ? DstReg : AArch64::X16;
  const MachineOperand &GAMO = MI.getOperand(1);
  assert(GAMO.getOffset() == 0);

  if (MI.getMF()->getTarget().getCodeModel() == CodeModel::Tiny) {
    // adr x17, :got_auth:sym ; ldr xN, [x17]
    MCOperand GAMC;
    MCInstLowering.lowerOperand(GAMO, GAMC);
    EmitToStreamer(
        MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addOperand(GAMC));
    EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                       .addReg(AuthResultReg)
                       .addReg(AArch64::X17)
                       .addImm(0));
  } else {
    // adrp x17, page ; add x17, x17, pageoff ; ldr xN, [x17]
    MachineOperand GAHiOp(GAMO);
    MachineOperand GALoOp(GAMO);
    GAHiOp.addTargetFlag(AArch64II::MO_PAGE);
    GALoOp.addTargetFlag(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);

    MCOperand GAMCHi, GAMCLo;
    MCInstLowering.lowerOperand(GAHiOp, GAMCHi);
    MCInstLowering.lowerOperand(GALoOp, GAMCLo);

    EmitToStreamer(
        MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(GAMCHi));

    EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                       .addReg(AArch64::X17)
                       .addReg(AArch64::X17)
                       .addOperand(GAMCLo)
                       .addImm(0));

    EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                       .addReg(AuthResultReg)
                       .addReg(AArch64::X17)
                       .addImm(0));
  }

  assert(GAMO.isGlobal());
  // For extern_weak symbols, branch over the AUT when the loaded slot value
  // is zero; the label is emitted right after the AUT below.
  MCSymbol *UndefWeakSym;
  if (GAMO.getGlobal()->hasExternalWeakLinkage()) {
    UndefWeakSym = createTempSymbol("undef_weak");
    EmitToStreamer(
        MCInstBuilder(AArch64::CBZX)
            .addReg(AuthResultReg)
            .addExpr(MCSymbolRefExpr::create(UndefWeakSym, OutContext)));
  }

  assert(GAMO.getGlobal()->getValueType() != nullptr);

  // IA key for function pointers, DA key for data pointers; x17 still holds
  // the GOT slot address at this point.
  bool IsFunctionTy = GAMO.getGlobal()->getValueType()->isFunctionTy();
  auto AuthKey = IsFunctionTy ? AArch64PACKey::IA : AArch64PACKey::DA;
  emitAUT(AuthKey, AuthResultReg, AArch64::X17);

  if (GAMO.getGlobal()->hasExternalWeakLinkage())
    OutStreamer->emitLabel(UndefWeakSym);

  if (!STI->hasFPAC()) {
    // No hardware faulting on bad authentication: verify explicitly, then
    // move the staged result into the real destination.
    emitPtrauthCheckAuthenticatedValue(AuthResultReg, AArch64::X17, AuthKey,
                                       AArch64PAuth::AuthCheckMethod::XPAC);

    emitMovXReg(DstReg, AuthResultReg);
  }
}
2988
2989const MCExpr *
2990AArch64AsmPrinter::lowerBlockAddressConstant(const BlockAddress &BA) {
2991 const MCExpr *BAE = AsmPrinter::lowerBlockAddressConstant(BA);
2992 const Function &Fn = *BA.getFunction();
2993
2994 if (std::optional<uint16_t> BADisc =
2995 STI->getPtrAuthBlockAddressDiscriminatorIfEnabled(Fn))
2996 return AArch64AuthMCExpr::create(BAE, *BADisc, AArch64PACKey::IA,
2997 /*HasAddressDiversity=*/false, OutContext);
2998
2999 return BAE;
3000}
3001
// Expand a CB* compare-and-branch pseudo into a real CB instruction. The
// hardware encodes only a subset of condition codes, so unsupported
// conditions are rewritten onto supported ones by either swapping the two
// register operands or incrementing/decrementing the immediate operand (see
// the per-condition equivalences in the switch below).
void AArch64AsmPrinter::emitCBPseudoExpansion(const MachineInstr *MI) {
  // Operand kind (register-immediate vs. register-register) and comparison
  // width in bits, derived from the pseudo opcode.
  bool IsImm = false;
  unsigned Width = 0;

  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("This is not a CB pseudo instruction");
  case AArch64::CBBAssertExt:
    IsImm = false;
    Width = 8;
    break;
  case AArch64::CBHAssertExt:
    IsImm = false;
    Width = 16;
    break;
  case AArch64::CBWPrr:
    Width = 32;
    break;
  case AArch64::CBXPrr:
    Width = 64;
    break;
  case AArch64::CBWPri:
    IsImm = true;
    Width = 32;
    break;
  case AArch64::CBXPri:
    IsImm = true;
    Width = 64;
    break;
  }

  // NOTE(review): extraction appears to have dropped the declaration
  // "AArch64CC::CondCode CC =" preceding this static_cast (the switch below
  // reads `CC`) — confirm against upstream.
  static_cast<AArch64CC::CondCode>(MI->getOperand(0).getImm());
  bool NeedsRegSwap = false;
  bool NeedsImmDec = false;
  bool NeedsImmInc = false;

// Select the concrete CB opcode for an operand kind, width and condition.
// Reg-imm compares exist only in 32/64-bit forms; reg-reg compares also have
// byte (CBB) and halfword (CBH) forms.
#define GET_CB_OPC(IsImm, Width, ImmCond, RegCond)                             \
  (IsImm                                                                       \
       ? (Width == 32 ? AArch64::CB##ImmCond##Wri : AArch64::CB##ImmCond##Xri) \
       : (Width == 8                                                           \
              ? AArch64::CBB##RegCond##Wrr                                     \
              : (Width == 16 ? AArch64::CBH##RegCond##Wrr                      \
                             : (Width == 32 ? AArch64::CB##RegCond##Wrr        \
                                            : AArch64::CB##RegCond##Xrr))))
  unsigned MCOpC;

  // Decide if we need to either swap register operands or increment/decrement
  // immediate operands
  switch (CC) {
  default:
    llvm_unreachable("Invalid CB condition code");
  case AArch64CC::EQ:
    MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ EQ, /* Reg-Reg */ EQ);
    break;
  case AArch64CC::NE:
    MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ NE, /* Reg-Reg */ NE);
    break;
  case AArch64CC::HS:
    // x hs imm  <=>  x hi (imm - 1)
    MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ HI, /* Reg-Reg */ HS);
    NeedsImmDec = IsImm;
    break;
  case AArch64CC::LO:
    // a lo b  <=>  b hi a
    MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LO, /* Reg-Reg */ HI);
    NeedsRegSwap = !IsImm;
    break;
  case AArch64CC::HI:
    MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ HI, /* Reg-Reg */ HI);
    break;
  case AArch64CC::LS:
    // a ls b  <=>  b hs a;  x ls imm  <=>  x lo (imm + 1)
    MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LO, /* Reg-Reg */ HS);
    NeedsRegSwap = !IsImm;
    NeedsImmInc = IsImm;
    break;
  case AArch64CC::GE:
    // x ge imm  <=>  x gt (imm - 1)
    MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ GT, /* Reg-Reg */ GE);
    NeedsImmDec = IsImm;
    break;
  case AArch64CC::LT:
    // a lt b  <=>  b gt a
    MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LT, /* Reg-Reg */ GT);
    NeedsRegSwap = !IsImm;
    break;
  case AArch64CC::GT:
    MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ GT, /* Reg-Reg */ GT);
    break;
  case AArch64CC::LE:
    // a le b  <=>  b ge a;  x le imm  <=>  x lt (imm + 1)
    MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LT, /* Reg-Reg */ GE);
    NeedsRegSwap = !IsImm;
    NeedsImmInc = IsImm;
    break;
  }
#undef GET_CB_OPC

  MCInst Inst;
  Inst.setOpcode(MCOpC);

  MCOperand Lhs, Rhs, Trgt;
  lowerOperand(MI->getOperand(1), Lhs);
  lowerOperand(MI->getOperand(2), Rhs);
  lowerOperand(MI->getOperand(3), Trgt);

  // Now swap, increment or decrement
  if (NeedsRegSwap) {
    assert(Lhs.isReg() && "Expected register operand for CB");
    assert(Rhs.isReg() && "Expected register operand for CB");
    Inst.addOperand(Rhs);
    Inst.addOperand(Lhs);
  } else if (NeedsImmDec) {
    Rhs.setImm(Rhs.getImm() - 1);
    Inst.addOperand(Lhs);
    Inst.addOperand(Rhs);
  } else if (NeedsImmInc) {
    Rhs.setImm(Rhs.getImm() + 1);
    Inst.addOperand(Lhs);
    Inst.addOperand(Rhs);
  } else {
    Inst.addOperand(Lhs);
    Inst.addOperand(Rhs);
  }

  // After any inc/dec adjustment the immediate must still be encodable.
  assert((!IsImm || (Rhs.getImm() >= 0 && Rhs.getImm() < 64)) &&
         "CB immediate operand out-of-bounds");

  Inst.addOperand(Trgt);
  EmitToStreamer(*OutStreamer, Inst);
}
3128
3129// Simple pseudo-instructions have their lowering (with expansion to real
3130// instructions) auto-generated.
3131#include "AArch64GenMCPseudoLowering.inc"
3132
3133void AArch64AsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
3134 S.emitInstruction(Inst, *STI);
3135#ifndef NDEBUG
3136 ++InstsEmitted;
3137#endif
3138}
3139
3140void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
3141 AArch64_MC::verifyInstructionPredicates(MI->getOpcode(), STI->getFeatureBits());
3142
3143#ifndef NDEBUG
3144 InstsEmitted = 0;
3145 llvm::scope_exit CheckMISize([&]() {
3146 assert(STI->getInstrInfo()->getInstSizeInBytes(*MI) >= InstsEmitted * 4);
3147 });
3148#endif
3149
3150 // Do any auto-generated pseudo lowerings.
3151 if (MCInst OutInst; lowerPseudoInstExpansion(MI, OutInst)) {
3152 EmitToStreamer(*OutStreamer, OutInst);
3153 return;
3154 }
3155
3156 if (MI->getOpcode() == AArch64::ADRP) {
3157 for (auto &Opd : MI->operands()) {
3158 if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
3159 "swift_async_extendedFramePointerFlags") {
3160 ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
3161 }
3162 }
3163 }
3164
3165 if (AArch64FI->getLOHRelated().count(MI)) {
3166 // Generate a label for LOH related instruction
3167 MCSymbol *LOHLabel = createTempSymbol("loh");
3168 // Associate the instruction with the label
3169 LOHInstToLabel[MI] = LOHLabel;
3170 OutStreamer->emitLabel(LOHLabel);
3171 }
3172
3173 AArch64TargetStreamer *TS =
3174 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
3175 // Do any manual lowerings.
3176 switch (MI->getOpcode()) {
3177 default:
3179 "Unhandled tail call instruction");
3180 break;
3181 case AArch64::HINT: {
3182 // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
3183 // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
3184 // non-empty. If MI is the initial BTI, place the
3185 // __patchable_function_entries label after BTI.
3186 if (CurrentPatchableFunctionEntrySym &&
3187 CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
3188 MI == &MF->front().front()) {
3189 int64_t Imm = MI->getOperand(0).getImm();
3190 if ((Imm & 32) && (Imm & 6)) {
3191 MCInst Inst;
3192 MCInstLowering.Lower(MI, Inst);
3193 EmitToStreamer(*OutStreamer, Inst);
3194 CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
3195 OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
3196 return;
3197 }
3198 }
3199 break;
3200 }
3201 case AArch64::MOVMCSym: {
3202 Register DestReg = MI->getOperand(0).getReg();
3203 const MachineOperand &MO_Sym = MI->getOperand(1);
3204 MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
3205 MCOperand Hi_MCSym, Lo_MCSym;
3206
3207 Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
3208 Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);
3209
3210 MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
3211 MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);
3212
3213 MCInst MovZ;
3214 MovZ.setOpcode(AArch64::MOVZXi);
3215 MovZ.addOperand(MCOperand::createReg(DestReg));
3216 MovZ.addOperand(Hi_MCSym);
3218 EmitToStreamer(*OutStreamer, MovZ);
3219
3220 MCInst MovK;
3221 MovK.setOpcode(AArch64::MOVKXi);
3222 MovK.addOperand(MCOperand::createReg(DestReg));
3223 MovK.addOperand(MCOperand::createReg(DestReg));
3224 MovK.addOperand(Lo_MCSym);
3226 EmitToStreamer(*OutStreamer, MovK);
3227 return;
3228 }
3229 case AArch64::MOVIv2d_ns:
3230 // It is generally beneficial to rewrite "fmov s0, wzr" to "movi d0, #0".
3231 // as movi is more efficient across all cores. Newer cores can eliminate
3232 // fmovs early and there is no difference with movi, but this not true for
3233 // all implementations.
3234 //
3235 // The floating-point version doesn't quite work in rare cases on older
3236 // CPUs, so on those targets we lower this instruction to movi.16b instead.
3237 if (STI->hasZeroCycleZeroingFPWorkaround() &&
3238 MI->getOperand(1).getImm() == 0) {
3239 MCInst TmpInst;
3240 TmpInst.setOpcode(AArch64::MOVIv16b_ns);
3241 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3242 TmpInst.addOperand(MCOperand::createImm(0));
3243 EmitToStreamer(*OutStreamer, TmpInst);
3244 return;
3245 }
3246 break;
3247
3248 case AArch64::DBG_VALUE:
3249 case AArch64::DBG_VALUE_LIST:
3250 if (isVerbose() && OutStreamer->hasRawTextSupport()) {
3251 SmallString<128> TmpStr;
3252 raw_svector_ostream OS(TmpStr);
3253 PrintDebugValueComment(MI, OS);
3254 OutStreamer->emitRawText(StringRef(OS.str()));
3255 }
3256 return;
3257
3258 case AArch64::EMITBKEY: {
3259 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
3260 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
3261 ExceptionHandlingType != ExceptionHandling::ARM)
3262 return;
3263
3264 if (getFunctionCFISectionType(*MF) == CFISection::None)
3265 return;
3266
3267 OutStreamer->emitCFIBKeyFrame();
3268 return;
3269 }
3270
3271 case AArch64::EMITMTETAGGED: {
3272 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
3273 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
3274 ExceptionHandlingType != ExceptionHandling::ARM)
3275 return;
3276
3277 if (getFunctionCFISectionType(*MF) != CFISection::None)
3278 OutStreamer->emitCFIMTETaggedFrame();
3279 return;
3280 }
3281
3282 case AArch64::AUTx16x17: {
3283 const Register Pointer = AArch64::X16;
3284 const Register Scratch = AArch64::X17;
3285
3286 PtrAuthSchema AuthSchema((AArch64PACKey::ID)MI->getOperand(0).getImm(),
3287 MI->getOperand(1).getImm(), MI->getOperand(2));
3288
3289 emitPtrauthAuthResign(Pointer, Scratch, AuthSchema, std::nullopt,
3290 std::nullopt, MI->getDeactivationSymbol());
3291 return;
3292 }
3293
3294 case AArch64::AUTxMxN: {
3295 const Register Pointer = MI->getOperand(0).getReg();
3296 const Register Scratch = MI->getOperand(1).getReg();
3297
3298 PtrAuthSchema AuthSchema((AArch64PACKey::ID)MI->getOperand(3).getImm(),
3299 MI->getOperand(4).getImm(), MI->getOperand(5));
3300
3301 emitPtrauthAuthResign(Pointer, Scratch, AuthSchema, std::nullopt,
3302 std::nullopt, MI->getDeactivationSymbol());
3303 return;
3304 }
3305
3306 case AArch64::AUTPAC: {
3307 const Register Pointer = AArch64::X16;
3308 const Register Scratch = AArch64::X17;
3309
3310 PtrAuthSchema AuthSchema((AArch64PACKey::ID)MI->getOperand(0).getImm(),
3311 MI->getOperand(1).getImm(), MI->getOperand(2));
3312
3313 PtrAuthSchema SignSchema((AArch64PACKey::ID)MI->getOperand(3).getImm(),
3314 MI->getOperand(4).getImm(), MI->getOperand(5));
3315
3316 emitPtrauthAuthResign(Pointer, Scratch, AuthSchema, SignSchema,
3317 std::nullopt, MI->getDeactivationSymbol());
3318 return;
3319 }
3320
3321 case AArch64::AUTRELLOADPAC: {
3322 const Register Pointer = AArch64::X16;
3323 const Register Scratch = AArch64::X17;
3324
3325 PtrAuthSchema AuthSchema((AArch64PACKey::ID)MI->getOperand(0).getImm(),
3326 MI->getOperand(1).getImm(), MI->getOperand(2));
3327
3328 PtrAuthSchema SignSchema((AArch64PACKey::ID)MI->getOperand(3).getImm(),
3329 MI->getOperand(4).getImm(), MI->getOperand(5));
3330
3331 emitPtrauthAuthResign(Pointer, Scratch, AuthSchema, SignSchema,
3332 MI->getOperand(6).getImm(),
3333 MI->getDeactivationSymbol());
3334
3335 return;
3336 }
3337
3338 case AArch64::PAC:
3339 emitPtrauthSign(MI);
3340 return;
3341
3342 case AArch64::LOADauthptrstatic:
3343 LowerLOADauthptrstatic(*MI);
3344 return;
3345
3346 case AArch64::LOADgotPAC:
3347 case AArch64::MOVaddrPAC:
3348 LowerMOVaddrPAC(*MI);
3349 return;
3350
3351 case AArch64::LOADgotAUTH:
3352 LowerLOADgotAUTH(*MI);
3353 return;
3354
3355 case AArch64::BRA:
3356 case AArch64::BLRA:
3357 emitPtrauthBranch(MI);
3358 return;
3359
3360 // Tail calls use pseudo instructions so they have the proper code-gen
3361 // attributes (isCall, isReturn, etc.). We lower them to the real
3362 // instruction here.
3363 case AArch64::AUTH_TCRETURN:
3364 case AArch64::AUTH_TCRETURN_BTI: {
3365 Register Callee = MI->getOperand(0).getReg();
3366 const auto Key = (AArch64PACKey::ID)MI->getOperand(2).getImm();
3367 const uint64_t Disc = MI->getOperand(3).getImm();
3368
3369 Register AddrDisc = MI->getOperand(4).getReg();
3370
3371 Register ScratchReg = Callee == AArch64::X16 ? AArch64::X17 : AArch64::X16;
3372
3373 emitPtrauthTailCallHardening(MI);
3374
3375 // See the comments in emitPtrauthBranch.
3376 if (Callee == AddrDisc)
3377 report_fatal_error("Call target is signed with its own value");
3378
3379 // After isX16X17Safer predicate was introduced, emitPtrauthDiscriminator is
3380 // no longer restricted to only reusing AddrDisc when it is X16 or X17
3381 // (which are implicit-def'ed by AUTH_TCRETURN pseudos), thus impose this
3382 // restriction manually not to clobber an unexpected register.
3383 bool AddrDiscIsImplicitDef =
3384 AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17;
3385 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, ScratchReg,
3386 AddrDiscIsImplicitDef);
3387 emitBLRA(/*IsCall*/ false, Key, Callee, DiscReg);
3388 return;
3389 }
3390
3391 case AArch64::TCRETURNri:
3392 case AArch64::TCRETURNrix16x17:
3393 case AArch64::TCRETURNrix17:
3394 case AArch64::TCRETURNrinotx16:
3395 case AArch64::TCRETURNriALL: {
3396 emitPtrauthTailCallHardening(MI);
3397
3398 recordIfImportCall(MI);
3399 MCInst TmpInst;
3400 TmpInst.setOpcode(AArch64::BR);
3401 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3402 EmitToStreamer(*OutStreamer, TmpInst);
3403 return;
3404 }
3405 case AArch64::TCRETURNdi: {
3406 emitPtrauthTailCallHardening(MI);
3407
3408 MCOperand Dest;
3409 MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
3410 recordIfImportCall(MI);
3411 MCInst TmpInst;
3412 TmpInst.setOpcode(AArch64::B);
3413 TmpInst.addOperand(Dest);
3414 EmitToStreamer(*OutStreamer, TmpInst);
3415 return;
3416 }
3417 case AArch64::SpeculationBarrierISBDSBEndBB: {
3418 // Print DSB SYS + ISB
3419 MCInst TmpInstDSB;
3420 TmpInstDSB.setOpcode(AArch64::DSB);
3421 TmpInstDSB.addOperand(MCOperand::createImm(0xf));
3422 EmitToStreamer(*OutStreamer, TmpInstDSB);
3423 MCInst TmpInstISB;
3424 TmpInstISB.setOpcode(AArch64::ISB);
3425 TmpInstISB.addOperand(MCOperand::createImm(0xf));
3426 EmitToStreamer(*OutStreamer, TmpInstISB);
3427 return;
3428 }
3429 case AArch64::SpeculationBarrierSBEndBB: {
3430 // Print SB
3431 MCInst TmpInstSB;
3432 TmpInstSB.setOpcode(AArch64::SB);
3433 EmitToStreamer(*OutStreamer, TmpInstSB);
3434 return;
3435 }
3436 case AArch64::TLSDESC_AUTH_CALLSEQ: {
3437 /// lower this to:
3438 /// adrp x0, :tlsdesc_auth:var
3439 /// ldr x16, [x0, #:tlsdesc_auth_lo12:var]
3440 /// add x0, x0, #:tlsdesc_auth_lo12:var
3441 /// blraa x16, x0
3442 /// (TPIDR_EL0 offset now in x0)
3443 const MachineOperand &MO_Sym = MI->getOperand(0);
3444 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3445 MCOperand SymTLSDescLo12, SymTLSDesc;
3446 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3447 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3448 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3449 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3450
3451 MCInst Adrp;
3452 Adrp.setOpcode(AArch64::ADRP);
3453 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3454 Adrp.addOperand(SymTLSDesc);
3455 EmitToStreamer(*OutStreamer, Adrp);
3456
3457 MCInst Ldr;
3458 Ldr.setOpcode(AArch64::LDRXui);
3459 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3460 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3461 Ldr.addOperand(SymTLSDescLo12);
3463 EmitToStreamer(*OutStreamer, Ldr);
3464
3465 MCInst Add;
3466 Add.setOpcode(AArch64::ADDXri);
3467 Add.addOperand(MCOperand::createReg(AArch64::X0));
3468 Add.addOperand(MCOperand::createReg(AArch64::X0));
3469 Add.addOperand(SymTLSDescLo12);
3471 EmitToStreamer(*OutStreamer, Add);
3472
3473 // Authenticated TLSDESC accesses are not relaxed.
3474 // Thus, do not emit .tlsdesccall for AUTH TLSDESC.
3475
3476 MCInst Blraa;
3477 Blraa.setOpcode(AArch64::BLRAA);
3478 Blraa.addOperand(MCOperand::createReg(AArch64::X16));
3479 Blraa.addOperand(MCOperand::createReg(AArch64::X0));
3480 EmitToStreamer(*OutStreamer, Blraa);
3481
3482 return;
3483 }
3484 case AArch64::TLSDESC_CALLSEQ: {
3485 /// lower this to:
3486 /// adrp x0, :tlsdesc:var
3487 /// ldr x1, [x0, #:tlsdesc_lo12:var]
3488 /// add x0, x0, #:tlsdesc_lo12:var
3489 /// .tlsdesccall var
3490 /// blr x1
3491 /// (TPIDR_EL0 offset now in x0)
3492 const MachineOperand &MO_Sym = MI->getOperand(0);
3493 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3494 MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
3495 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3496 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3497 MCInstLowering.lowerOperand(MO_Sym, Sym);
3498 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3499 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3500
3501 MCInst Adrp;
3502 Adrp.setOpcode(AArch64::ADRP);
3503 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3504 Adrp.addOperand(SymTLSDesc);
3505 EmitToStreamer(*OutStreamer, Adrp);
3506
3507 MCInst Ldr;
3508 if (STI->isTargetILP32()) {
3509 Ldr.setOpcode(AArch64::LDRWui);
3510 Ldr.addOperand(MCOperand::createReg(AArch64::W1));
3511 } else {
3512 Ldr.setOpcode(AArch64::LDRXui);
3513 Ldr.addOperand(MCOperand::createReg(AArch64::X1));
3514 }
3515 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3516 Ldr.addOperand(SymTLSDescLo12);
3518 EmitToStreamer(*OutStreamer, Ldr);
3519
3520 MCInst Add;
3521 if (STI->isTargetILP32()) {
3522 Add.setOpcode(AArch64::ADDWri);
3523 Add.addOperand(MCOperand::createReg(AArch64::W0));
3524 Add.addOperand(MCOperand::createReg(AArch64::W0));
3525 } else {
3526 Add.setOpcode(AArch64::ADDXri);
3527 Add.addOperand(MCOperand::createReg(AArch64::X0));
3528 Add.addOperand(MCOperand::createReg(AArch64::X0));
3529 }
3530 Add.addOperand(SymTLSDescLo12);
3532 EmitToStreamer(*OutStreamer, Add);
3533
3534 // Emit a relocation-annotation. This expands to no code, but requests
3535 // the following instruction gets an R_AARCH64_TLSDESC_CALL.
3536 MCInst TLSDescCall;
3537 TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
3538 TLSDescCall.addOperand(Sym);
3539 EmitToStreamer(*OutStreamer, TLSDescCall);
3540#ifndef NDEBUG
3541 --InstsEmitted; // no code emitted
3542#endif
3543
3544 MCInst Blr;
3545 Blr.setOpcode(AArch64::BLR);
3546 Blr.addOperand(MCOperand::createReg(AArch64::X1));
3547 EmitToStreamer(*OutStreamer, Blr);
3548
3549 return;
3550 }
3551
3552 case AArch64::JumpTableDest32:
3553 case AArch64::JumpTableDest16:
3554 case AArch64::JumpTableDest8:
3555 LowerJumpTableDest(*OutStreamer, *MI);
3556 return;
3557
3558 case AArch64::BR_JumpTable:
3559 LowerHardenedBRJumpTable(*MI);
3560 return;
3561
3562 case AArch64::FMOVH0:
3563 case AArch64::FMOVS0:
3564 case AArch64::FMOVD0:
3565 emitFMov0(*MI);
3566 return;
3567
3568 case AArch64::MOPSMemoryCopyPseudo:
3569 case AArch64::MOPSMemoryMovePseudo:
3570 case AArch64::MOPSMemorySetPseudo:
3571 case AArch64::MOPSMemorySetTaggingPseudo:
3572 LowerMOPS(*OutStreamer, *MI);
3573 return;
3574
3575 case TargetOpcode::STACKMAP:
3576 return LowerSTACKMAP(*OutStreamer, SM, *MI);
3577
3578 case TargetOpcode::PATCHPOINT:
3579 return LowerPATCHPOINT(*OutStreamer, SM, *MI);
3580
3581 case TargetOpcode::STATEPOINT:
3582 return LowerSTATEPOINT(*OutStreamer, SM, *MI);
3583
3584 case TargetOpcode::FAULTING_OP:
3585 return LowerFAULTING_OP(*MI);
3586
3587 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
3588 LowerPATCHABLE_FUNCTION_ENTER(*MI);
3589 return;
3590
3591 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
3592 LowerPATCHABLE_FUNCTION_EXIT(*MI);
3593 return;
3594
3595 case TargetOpcode::PATCHABLE_TAIL_CALL:
3596 LowerPATCHABLE_TAIL_CALL(*MI);
3597 return;
3598 case TargetOpcode::PATCHABLE_EVENT_CALL:
3599 return LowerPATCHABLE_EVENT_CALL(*MI, false);
3600 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
3601 return LowerPATCHABLE_EVENT_CALL(*MI, true);
3602
3603 case AArch64::KCFI_CHECK:
3604 LowerKCFI_CHECK(*MI);
3605 return;
3606
3607 case AArch64::HWASAN_CHECK_MEMACCESS:
3608 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
3609 case AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW:
3610 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW:
3611 LowerHWASAN_CHECK_MEMACCESS(*MI);
3612 return;
3613
3614 case AArch64::SEH_StackAlloc:
3615 TS->emitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
3616 return;
3617
3618 case AArch64::SEH_SaveFPLR:
3619 TS->emitARM64WinCFISaveFPLR(MI->getOperand(0).getImm());
3620 return;
3621
3622 case AArch64::SEH_SaveFPLR_X:
3623 assert(MI->getOperand(0).getImm() < 0 &&
3624 "Pre increment SEH opcode must have a negative offset");
3625 TS->emitARM64WinCFISaveFPLRX(-MI->getOperand(0).getImm());
3626 return;
3627
3628 case AArch64::SEH_SaveReg:
3629 TS->emitARM64WinCFISaveReg(MI->getOperand(0).getImm(),
3630 MI->getOperand(1).getImm());
3631 return;
3632
3633 case AArch64::SEH_SaveReg_X:
3634 assert(MI->getOperand(1).getImm() < 0 &&
3635 "Pre increment SEH opcode must have a negative offset");
3636 TS->emitARM64WinCFISaveRegX(MI->getOperand(0).getImm(),
3637 -MI->getOperand(1).getImm());
3638 return;
3639
3640 case AArch64::SEH_SaveRegP:
3641 if (MI->getOperand(1).getImm() == 30 && MI->getOperand(0).getImm() >= 19 &&
3642 MI->getOperand(0).getImm() <= 28) {
3643 assert((MI->getOperand(0).getImm() - 19) % 2 == 0 &&
3644 "Register paired with LR must be odd");
3645 TS->emitARM64WinCFISaveLRPair(MI->getOperand(0).getImm(),
3646 MI->getOperand(2).getImm());
3647 return;
3648 }
3649 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3650 "Non-consecutive registers not allowed for save_regp");
3651 TS->emitARM64WinCFISaveRegP(MI->getOperand(0).getImm(),
3652 MI->getOperand(2).getImm());
3653 return;
3654
3655 case AArch64::SEH_SaveRegP_X:
3656 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3657 "Non-consecutive registers not allowed for save_regp_x");
3658 assert(MI->getOperand(2).getImm() < 0 &&
3659 "Pre increment SEH opcode must have a negative offset");
3660 TS->emitARM64WinCFISaveRegPX(MI->getOperand(0).getImm(),
3661 -MI->getOperand(2).getImm());
3662 return;
3663
3664 case AArch64::SEH_SaveFReg:
3665 TS->emitARM64WinCFISaveFReg(MI->getOperand(0).getImm(),
3666 MI->getOperand(1).getImm());
3667 return;
3668
3669 case AArch64::SEH_SaveFReg_X:
3670 assert(MI->getOperand(1).getImm() < 0 &&
3671 "Pre increment SEH opcode must have a negative offset");
3672 TS->emitARM64WinCFISaveFRegX(MI->getOperand(0).getImm(),
3673 -MI->getOperand(1).getImm());
3674 return;
3675
3676 case AArch64::SEH_SaveFRegP:
3677 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3678 "Non-consecutive registers not allowed for save_regp");
3679 TS->emitARM64WinCFISaveFRegP(MI->getOperand(0).getImm(),
3680 MI->getOperand(2).getImm());
3681 return;
3682
3683 case AArch64::SEH_SaveFRegP_X:
3684 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3685 "Non-consecutive registers not allowed for save_regp_x");
3686 assert(MI->getOperand(2).getImm() < 0 &&
3687 "Pre increment SEH opcode must have a negative offset");
3688 TS->emitARM64WinCFISaveFRegPX(MI->getOperand(0).getImm(),
3689 -MI->getOperand(2).getImm());
3690 return;
3691
3692 case AArch64::SEH_SetFP:
3694 return;
3695
3696 case AArch64::SEH_AddFP:
3697 TS->emitARM64WinCFIAddFP(MI->getOperand(0).getImm());
3698 return;
3699
3700 case AArch64::SEH_Nop:
3701 TS->emitARM64WinCFINop();
3702 return;
3703
3704 case AArch64::SEH_PrologEnd:
3706 return;
3707
3708 case AArch64::SEH_EpilogStart:
3710 return;
3711
3712 case AArch64::SEH_EpilogEnd:
3714 return;
3715
3716 case AArch64::SEH_PACSignLR:
3718 return;
3719
3720 case AArch64::SEH_SaveAnyRegI:
3721 assert(MI->getOperand(1).getImm() <= 1008 &&
3722 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3723 TS->emitARM64WinCFISaveAnyRegI(MI->getOperand(0).getImm(),
3724 MI->getOperand(1).getImm());
3725 return;
3726
3727 case AArch64::SEH_SaveAnyRegIP:
3728 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3729 "Non-consecutive registers not allowed for save_any_reg");
3730 assert(MI->getOperand(2).getImm() <= 1008 &&
3731 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3732 TS->emitARM64WinCFISaveAnyRegIP(MI->getOperand(0).getImm(),
3733 MI->getOperand(2).getImm());
3734 return;
3735
3736 case AArch64::SEH_SaveAnyRegQP:
3737 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3738 "Non-consecutive registers not allowed for save_any_reg");
3739 assert(MI->getOperand(2).getImm() >= 0 &&
3740 "SaveAnyRegQP SEH opcode offset must be non-negative");
3741 assert(MI->getOperand(2).getImm() <= 1008 &&
3742 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3743 TS->emitARM64WinCFISaveAnyRegQP(MI->getOperand(0).getImm(),
3744 MI->getOperand(2).getImm());
3745 return;
3746
3747 case AArch64::SEH_SaveAnyRegQPX:
3748 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3749 "Non-consecutive registers not allowed for save_any_reg");
3750 assert(MI->getOperand(2).getImm() < 0 &&
3751 "SaveAnyRegQPX SEH opcode offset must be negative");
3752 assert(MI->getOperand(2).getImm() >= -1008 &&
3753 "SaveAnyRegQPX SEH opcode offset must fit into 6 bits");
3754 TS->emitARM64WinCFISaveAnyRegQPX(MI->getOperand(0).getImm(),
3755 -MI->getOperand(2).getImm());
3756 return;
3757
3758 case AArch64::SEH_AllocZ:
3759 assert(MI->getOperand(0).getImm() >= 0 &&
3760 "AllocZ SEH opcode offset must be non-negative");
3761 assert(MI->getOperand(0).getImm() <= 255 &&
3762 "AllocZ SEH opcode offset must fit into 8 bits");
3763 TS->emitARM64WinCFIAllocZ(MI->getOperand(0).getImm());
3764 return;
3765
3766 case AArch64::SEH_SaveZReg:
3767 assert(MI->getOperand(1).getImm() >= 0 &&
3768 "SaveZReg SEH opcode offset must be non-negative");
3769 assert(MI->getOperand(1).getImm() <= 255 &&
3770 "SaveZReg SEH opcode offset must fit into 8 bits");
3771 TS->emitARM64WinCFISaveZReg(MI->getOperand(0).getImm(),
3772 MI->getOperand(1).getImm());
3773 return;
3774
3775 case AArch64::SEH_SavePReg:
3776 assert(MI->getOperand(1).getImm() >= 0 &&
3777 "SavePReg SEH opcode offset must be non-negative");
3778 assert(MI->getOperand(1).getImm() <= 255 &&
3779 "SavePReg SEH opcode offset must fit into 8 bits");
3780 TS->emitARM64WinCFISavePReg(MI->getOperand(0).getImm(),
3781 MI->getOperand(1).getImm());
3782 return;
3783
3784 case AArch64::BLR:
3785 case AArch64::BR: {
3786 recordIfImportCall(MI);
3787 MCInst TmpInst;
3788 MCInstLowering.Lower(MI, TmpInst);
3789 EmitToStreamer(*OutStreamer, TmpInst);
3790 return;
3791 }
3792 case AArch64::CBWPri:
3793 case AArch64::CBXPri:
3794 case AArch64::CBBAssertExt:
3795 case AArch64::CBHAssertExt:
3796 case AArch64::CBWPrr:
3797 case AArch64::CBXPrr:
3798 emitCBPseudoExpansion(MI);
3799 return;
3800 }
3801
3802 if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol()))
3803 return;
3804
3805 // Finally, do the automated lowerings for everything else.
3806 MCInst TmpInst;
3807 MCInstLowering.Lower(MI, TmpInst);
3808 EmitToStreamer(*OutStreamer, TmpInst);
3809}
3810
3811void AArch64AsmPrinter::recordIfImportCall(
3812 const llvm::MachineInstr *BranchInst) {
3813 if (!EnableImportCallOptimization)
3814 return;
3815
3816 auto [GV, OpFlags] = BranchInst->getMF()->tryGetCalledGlobal(BranchInst);
3817 if (GV && GV->hasDLLImportStorageClass()) {
3818 auto *CallSiteSymbol = MMI->getContext().createNamedTempSymbol("impcall");
3819 OutStreamer->emitLabel(CallSiteSymbol);
3820
3821 auto *CalledSymbol = MCInstLowering.GetGlobalValueSymbol(GV, OpFlags);
3822 SectionToImportedFunctionCalls[OutStreamer->getCurrentSectionOnly()]
3823 .push_back({CallSiteSymbol, CalledSymbol});
3824 }
3825}
3826
/// Emit the Mach-O ifunc stub body: per the inline listing below, it loads
/// the resolved function pointer out of the lazy-pointer slot (found via the
/// GOT) into x16 and branches to it. On arm64e the branch is the
/// zero-discriminator authenticated form (BRAAZ); otherwise a plain BR.
3827 void AArch64AsmPrinter::emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
3828 MCSymbol *LazyPointer) {
3829 // _ifunc:
3830 // adrp x16, lazy_pointer@GOTPAGE
3831 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3832 // ldr x16, [x16]
3833 // br x16
3834
// adrp x16, lazy_pointer@GOTPAGE
3835 {
3836 MCInst Adrp;
3837 Adrp.setOpcode(AArch64::ADRP);
3838 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3839 MCOperand SymPage;
3840 MCInstLowering.lowerOperand(
// NOTE(review): the MachineOperand argument to lowerOperand (doc lines
// 3841-3842) was lost in extraction -- presumably a CreateES of the lazy
// pointer with GOT-page flags; confirm against upstream.
3843 SymPage);
3844 Adrp.addOperand(SymPage);
3845 EmitToStreamer(Adrp);
3846 }
3847
// ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3848 {
3849 MCInst Ldr;
3850 Ldr.setOpcode(AArch64::LDRXui);
3851 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3852 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3853 MCOperand SymPageOff;
3854 MCInstLowering.lowerOperand(
// NOTE(review): operand argument (doc lines 3855-3856) and doc line 3859
// are missing from this extraction -- confirm against upstream.
3857 SymPageOff);
3858 Ldr.addOperand(SymPageOff);
3860 EmitToStreamer(Ldr);
3861 }
3862
// ldr x16, [x16] -- dereference the lazy pointer to get the resolved target.
3863 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
3864 .addReg(AArch64::X16)
3865 .addReg(AArch64::X16)
3866 .addImm(0));
3867
// Tail-branch to the target; arm64e uses the authenticated BRAAZ form.
3868 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3869 : AArch64::BR)
3870 .addReg(AArch64::X16));
3871 }
3872
/// Emit the Mach-O ifunc stub-helper body: save all argument registers
/// (x0-x7, d0-d7) and fp/lr, call the resolver, cache its result in the
/// lazy-pointer slot, restore everything, and branch to the resolved target
/// via x16 (BRAAZ on arm64e, BR otherwise). Pre/post-indexed pair ops are
/// used so no separate sp adjustment instructions are needed.
3873 void AArch64AsmPrinter::emitMachOIFuncStubHelperBody(Module &M,
3874 const GlobalIFunc &GI,
3875 MCSymbol *LazyPointer) {
3876 // These stub helpers are only ever called once, so here we're optimizing for
3877 // minimum size by using the pre-indexed store variants, which saves a few
3878 // bytes of instructions to bump & restore sp.
3879
3880 // _ifunc.stub_helper:
3881 // stp fp, lr, [sp, #-16]!
3882 // mov fp, sp
3883 // stp x1, x0, [sp, #-16]!
3884 // stp x3, x2, [sp, #-16]!
3885 // stp x5, x4, [sp, #-16]!
3886 // stp x7, x6, [sp, #-16]!
3887 // stp d1, d0, [sp, #-16]!
3888 // stp d3, d2, [sp, #-16]!
3889 // stp d5, d4, [sp, #-16]!
3890 // stp d7, d6, [sp, #-16]!
3891 // bl _resolver
3892 // adrp x16, lazy_pointer@GOTPAGE
3893 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3894 // str x0, [x16]
3895 // mov x16, x0
3896 // ldp d7, d6, [sp], #16
3897 // ldp d5, d4, [sp], #16
3898 // ldp d3, d2, [sp], #16
3899 // ldp d1, d0, [sp], #16
3900 // ldp x7, x6, [sp], #16
3901 // ldp x5, x4, [sp], #16
3902 // ldp x3, x2, [sp], #16
3903 // ldp x1, x0, [sp], #16
3904 // ldp fp, lr, [sp], #16
3905 // br x16
3906
// stp fp, lr, [sp, #-16]! -- pre-indexed pair store; imm is in 8-byte units.
3907 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3908 .addReg(AArch64::SP)
3909 .addReg(AArch64::FP)
3910 .addReg(AArch64::LR)
3911 .addReg(AArch64::SP)
3912 .addImm(-2));
3913
// mov fp, sp (encoded as add fp, sp, #0).
3914 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3915 .addReg(AArch64::FP)
3916 .addReg(AArch64::SP)
3917 .addImm(0)
3918 .addImm(0));
3919
// Spill the integer argument registers x0-x7 in pairs.
3920 for (int I = 0; I != 4; ++I)
3921 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3922 .addReg(AArch64::SP)
3923 .addReg(AArch64::X1 + 2 * I)
3924 .addReg(AArch64::X0 + 2 * I)
3925 .addReg(AArch64::SP)
3926 .addImm(-2));
3927
// Spill the FP argument registers d0-d7 in pairs.
3928 for (int I = 0; I != 4; ++I)
3929 EmitToStreamer(MCInstBuilder(AArch64::STPDpre)
3930 .addReg(AArch64::SP)
3931 .addReg(AArch64::D1 + 2 * I)
3932 .addReg(AArch64::D0 + 2 * I)
3933 .addReg(AArch64::SP)
3934 .addImm(-2));
3935
// bl _resolver
3936 EmitToStreamer(
3937 MCInstBuilder(AArch64::BL)
// NOTE(review): the BL target operand (doc line 3938) was lost in
// extraction -- presumably the resolver symbol expression; confirm
// against upstream.
3939
// adrp x16, lazy_pointer@GOTPAGE
3940 {
3941 MCInst Adrp;
3942 Adrp.setOpcode(AArch64::ADRP);
3943 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3944 MCOperand SymPage;
3945 MCInstLowering.lowerOperand(
// The "+ 1" skips the leading mangling character of the symbol name.
3946 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
// NOTE(review): the target-flags argument (doc line 3947) is missing from
// this extraction -- confirm against upstream.
3948 SymPage);
3949 Adrp.addOperand(SymPage);
3950 EmitToStreamer(Adrp);
3951 }
3952
// ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3953 {
3954 MCInst Ldr;
3955 Ldr.setOpcode(AArch64::LDRXui);
3956 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3957 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3958 MCOperand SymPageOff;
3959 MCInstLowering.lowerOperand(
3960 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
// NOTE(review): doc lines 3961 and 3964 are missing from this extraction.
3962 SymPageOff);
3963 Ldr.addOperand(SymPageOff);
3965 EmitToStreamer(Ldr);
3966 }
3967
// str x0, [x16] -- cache the resolver's result in the lazy-pointer slot.
3968 EmitToStreamer(MCInstBuilder(AArch64::STRXui)
3969 .addReg(AArch64::X0)
3970 .addReg(AArch64::X16)
3971 .addImm(0));
3972
// mov x16, x0 (encoded as add x16, x0, #0) so x0 can be restored below.
3973 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3974 .addReg(AArch64::X16)
3975 .addReg(AArch64::X0)
3976 .addImm(0)
3977 .addImm(0));
3978
// Restore d0-d7 in reverse spill order (post-indexed pair loads).
3979 for (int I = 3; I != -1; --I)
3980 EmitToStreamer(MCInstBuilder(AArch64::LDPDpost)
3981 .addReg(AArch64::SP)
3982 .addReg(AArch64::D1 + 2 * I)
3983 .addReg(AArch64::D0 + 2 * I)
3984 .addReg(AArch64::SP)
3985 .addImm(2));
3986
// Restore x0-x7 in reverse spill order.
3987 for (int I = 3; I != -1; --I)
3988 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3989 .addReg(AArch64::SP)
3990 .addReg(AArch64::X1 + 2 * I)
3991 .addReg(AArch64::X0 + 2 * I)
3992 .addReg(AArch64::SP)
3993 .addImm(2));
3994
// ldp fp, lr, [sp], #16
3995 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3996 .addReg(AArch64::SP)
3997 .addReg(AArch64::FP)
3998 .addReg(AArch64::LR)
3999 .addReg(AArch64::SP)
4000 .addImm(2));
4001
// Branch to the resolved target; arm64e uses the authenticated BRAAZ form.
4002 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
4003 : AArch64::BR)
4004 .addReg(AArch64::X16));
4005 }
4006
4007const MCExpr *AArch64AsmPrinter::lowerConstant(const Constant *CV,
4008 const Constant *BaseCV,
4009 uint64_t Offset) {
4010 if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
4011 return MCSymbolRefExpr::create(MCInstLowering.GetGlobalValueSymbol(GV, 0),
4012 OutContext);
4013 }
4014
4015 return AsmPrinter::lowerConstant(CV, BaseCV, Offset);
4016}
4017
// Static pass identifier; its address identifies the pass to the legacy
// pass infrastructure.
4018 char AArch64AsmPrinter::ID = 0;
4019
// Register the AArch64 assembly printer with the pass registry.
4020 INITIALIZE_PASS(AArch64AsmPrinter, "aarch64-asm-printer",
4021 "AArch64 Assembly Printer", false, false)
4022
4023 // Force static initialization.
4024 extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
4025 LLVMInitializeAArch64AsmPrinter() {
// NOTE(review): the function body (doc lines 4026-4030) was lost in
// extraction -- presumably RegisterAsmPrinter instantiations for the
// AArch64/ARM64 target variants; confirm against upstream.
4031 }
static cl::opt< PtrauthCheckMode > PtrauthAuthChecks("aarch64-ptrauth-auth-checks", cl::Hidden, cl::values(clEnumValN(Unchecked, "none", "don't test for failure"), clEnumValN(Poison, "poison", "poison on failure"), clEnumValN(Trap, "trap", "trap on failure")), cl::desc("Check pointer authentication auth/resign failures"), cl::init(Default))
PtrauthCheckMode
@ Unchecked
#define GET_CB_OPC(IsImm, Width, ImmCond, RegCond)
static void emitAuthenticatedPointer(MCStreamer &OutStreamer, MCSymbol *StubLabel, const MCExpr *StubAuthPtrRef)
static bool targetSupportsIRelativeRelocation(const Triple &TT)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_ABI
Definition Compiler.h:213
#define LLVM_EXTERNAL_VISIBILITY
Definition Compiler.h:132
This file defines the DenseMap class.
@ Default
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
print mir2vec MIR2Vec Vocabulary Printer Pass
Definition MIR2Vec.cpp:598
Machine Check Debug Module
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static constexpr unsigned SM(unsigned Version)
#define P(N)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:487
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool printOperand(raw_ostream &OS, const SelectionDAG *G, const SDValue Value)
This file defines the SmallString class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static bool printAsmMRegister(const X86AsmPrinter &P, const MachineOperand &MO, char Mode, raw_ostream &O)
static const AArch64AuthMCExpr * create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key, bool HasAddressDiversity, MCContext &Ctx, SMLoc Loc=SMLoc())
const SetOfInstructions & getLOHRelated() const
unsigned getJumpTableEntrySize(int Idx) const
MCSymbol * getJumpTableEntryPCRelSymbol(int Idx) const
static bool shouldSignReturnAddress(SignReturnAddress Condition, bool IsLRSpilled)
std::optional< std::string > getOutliningStyle() const
const MILOHContainer & getLOHContainer() const
void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym)
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
static bool isTailCallReturnInst(const MachineInstr &MI)
Returns true if MI is one of the TCRETURN* instructions.
AArch64MCInstLower - This class is used to lower an MachineInstr into an MCInst.
MCSymbol * GetGlobalValueSymbol(const GlobalValue *GV, unsigned TargetFlags) const
void Lower(const MachineInstr *MI, MCInst &OutMI) const
bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const
virtual void emitARM64WinCFISaveRegP(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveRegPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegQP(unsigned Reg, int Offset)
virtual void emitAttributesSubsection(StringRef VendorName, AArch64BuildAttributes::SubsectionOptional IsOptional, AArch64BuildAttributes::SubsectionType ParameterType)
Build attributes implementation.
virtual void emitARM64WinCFISavePReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegI(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFRegPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveRegX(unsigned Reg, int Offset)
virtual void emitARM64WinCFIAllocStack(unsigned Size)
virtual void emitARM64WinCFISaveFPLRX(int Offset)
virtual void emitARM64WinCFIAllocZ(int Offset)
virtual void emitDirectiveVariantPCS(MCSymbol *Symbol)
Callback used to implement the .variant_pcs directive.
virtual void emitARM64WinCFIAddFP(unsigned Size)
virtual void emitARM64WinCFISaveFPLR(int Offset)
virtual void emitARM64WinCFISaveFRegP(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegQPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFRegX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveZReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveLRPair(unsigned Reg, int Offset)
virtual void emitAttribute(StringRef VendorName, unsigned Tag, unsigned Value, std::string String)
virtual void emitARM64WinCFISaveAnyRegIP(unsigned Reg, int Offset)
void setPreservesAll()
Set by analyses that do not transform their input at all.
const T & front() const
front - Get the first element.
Definition ArrayRef.h:145
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
This class is intended to be used as a driving class for all asm writers.
Definition AsmPrinter.h:91
virtual void emitGlobalAlias(const Module &M, const GlobalAlias &GA)
virtual MCSymbol * GetCPISymbol(unsigned CPID) const
Return the symbol for the specified constant pool entry.
virtual const MCExpr * lowerConstant(const Constant *CV, const Constant *BaseCV=nullptr, uint64_t Offset=0)
Lower the specified LLVM Constant to an MCExpr.
void getAnalysisUsage(AnalysisUsage &AU) const override
Record analysis usage.
virtual void emitXXStructor(const DataLayout &DL, const Constant *CV)
Targets can override this to change how global constants that are part of a C++ static/global constru...
Definition AsmPrinter.h:650
virtual void emitFunctionEntryLabel()
EmitFunctionEntryLabel - Emit the label that is the entrypoint for the function.
virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, const char *ExtraCode, raw_ostream &OS)
Print the specified operand of MI, an INLINEASM instruction, using the specified assembler variant.
virtual const MCExpr * lowerBlockAddressConstant(const BlockAddress &BA)
Lower the specified BlockAddress to an MCExpr.
Function * getFunction() const
Definition Constants.h:940
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1065
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1068
Constant * getDeactivationSymbol() const
Definition Constants.h:1087
bool hasAddressDiscriminator() const
Whether there is any non-null address discriminator.
Definition Constants.h:1083
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1071
void recordFaultingOp(FaultKind FaultTy, const MCSymbol *FaultingLabel, const MCSymbol *HandlerLabel)
Definition FaultMaps.cpp:28
void serializeToFaultMapSection()
Definition FaultMaps.cpp:45
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:272
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:729
const Constant * getAliasee() const
Definition GlobalAlias.h:87
const Constant * getResolver() const
Definition GlobalIFunc.h:73
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:577
bool hasLocalLinkage() const
bool hasExternalWeakLinkage() const
Type * getValueType() const
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
static const MCBinaryExpr * createLShr(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:423
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:343
static const MCBinaryExpr * createSub(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:428
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition MCExpr.cpp:212
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
MCSectionELF * getELFSection(const Twine &Section, unsigned Type, unsigned Flags)
Definition MCContext.h:553
LLVM_ABI MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
LLVM_ABI MCSymbol * createLinkerPrivateSymbol(const Twine &Name)
Base class for the full range of assembler expressions which are needed for parsing.
Definition MCExpr.h:34
LLVM_ABI bool evaluateAsRelocatable(MCValue &Res, const MCAssembler *Asm) const
Try to evaluate the expression to a relocatable value, i.e.
Definition MCExpr.cpp:450
void addOperand(const MCOperand Op)
Definition MCInst.h:215
void setOpcode(unsigned Op)
Definition MCInst.h:201
MCSection * getDataSection() const
void setImm(int64_t Val)
Definition MCInst.h:89
static MCOperand createExpr(const MCExpr *Val)
Definition MCInst.h:166
int64_t getImm() const
Definition MCInst.h:84
static MCOperand createReg(MCRegister Reg)
Definition MCInst.h:138
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
bool isReg() const
Definition MCInst.h:65
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
static constexpr unsigned NonUniqueID
Definition MCSection.h:521
static const MCSpecifierExpr * create(const MCExpr *Expr, Spec S, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.cpp:743
Streaming machine code generation interface.
Definition MCStreamer.h:221
virtual void emitCFIBKeyFrame()
virtual bool popSection()
Restore the current and previous section from the section stack.
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
virtual void emitRelocDirective(const MCExpr &Offset, StringRef Name, const MCExpr *Expr, SMLoc Loc={})
Record a relocation described by the .reloc directive.
virtual bool hasRawTextSupport() const
Return true if this asm streamer supports emitting unformatted text to the .s file with EmitRawText.
Definition MCStreamer.h:374
MCContext & getContext() const
Definition MCStreamer.h:322
virtual void AddComment(const Twine &T, bool EOL=true)
Add a textual comment.
Definition MCStreamer.h:393
virtual void emitCFIMTETaggedFrame()
void emitValue(const MCExpr *Value, unsigned Size, SMLoc Loc=SMLoc())
virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc=SMLoc())
Emit a label for Symbol into the current section.
MCTargetStreamer * getTargetStreamer()
Definition MCStreamer.h:332
void pushSection()
Save the current and previous section on the section stack.
Definition MCStreamer.h:449
virtual void switchSection(MCSection *Section, uint32_t Subsec=0)
Set the current section where code is being emitted to Section.
MCSection * getCurrentSectionOnly() const
Definition MCStreamer.h:427
void emitRawText(const Twine &String)
If this file is backed by a assembly streamer, this dumps the specified string in the output ....
const FeatureBitset & getFeatureBits() const
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:214
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
LLVM_ABI void print(raw_ostream &OS, const MCAsmInfo *MAI) const
print - Print the value to the stream OS.
Definition MCSymbol.cpp:59
StringRef getName() const
getName - Get the symbol name.
Definition MCSymbol.h:188
const MCSymbol * getAddSym() const
Definition MCValue.h:49
int64_t getConstant() const
Definition MCValue.h:44
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
CalledGlobalInfo tryGetCalledGlobal(const MachineInstr *MI) const
Tries to get the global and target flags for a call site, if the instruction is a call to a global.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MCContext & getContext() const
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineBasicBlock & front() const
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
mop_range operands()
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const MachineOperand & getOperand(unsigned i) const
const std::vector< MachineJumpTableEntry > & getJumpTables() const
unsigned getSubReg() const
static MachineOperand CreateMCSymbol(MCSymbol *Sym, unsigned TargetFlags=0)
const GlobalValue * getGlobal() const
static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
const BlockAddress * getBlockAddress() const
void setOffset(int64_t Offset)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
@ MO_Immediate
Immediate operand.
@ MO_GlobalAddress
Address of a global value.
@ MO_BlockAddress
Address of a basic block.
@ MO_Register
Register operand.
@ MO_ExternalSymbol
Name of external global symbol.
int64_t getOffset() const
Return the offset from the symbol in this operand.
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
static SectionKind getMetadata()
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void push_back(const T &Elt)
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:137
virtual MCSection * getSectionForJumpTable(const Function &F, const TargetMachine &TM) const
Primary interface to the complete machine description for the target machine.
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
bool regsOverlap(Register RegA, Register RegB) const
Returns true if the two registers are equal or alias each other.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition Type.h:258
LLVM Value Representation.
Definition Value.h:75
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
StringRef getVendorName(unsigned const Vendor)
@ MO_NC
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow.
@ MO_G1
MO_G1 - A symbol operand with this flag (granule 1) represents the bits 16-31 of a 64-bit address,...
@ MO_S
MO_S - Indicates that the bits of the symbol operand represented by MO_G0 etc are signed.
@ MO_PAGEOFF
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page.
@ MO_GOT
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
@ MO_G0
MO_G0 - A symbol operand with this flag (granule 0) represents the bits 0-15 of a 64-bit address,...
@ MO_PAGE
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
@ MO_TLS
MO_TLS - Indicates that the operand being accessed is some kind of thread-local symbol.
AuthCheckMethod
Variants of check performed on an authenticated pointer.
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ SectionSize
Definition COFF.h:61
SymbolStorageClass
Storage class tells where and what the symbol represents.
Definition COFF.h:218
@ IMAGE_SYM_CLASS_EXTERNAL
External symbol.
Definition COFF.h:224
@ IMAGE_SYM_CLASS_STATIC
Static.
Definition COFF.h:225
@ IMAGE_SYM_DTYPE_FUNCTION
A function that returns a base type.
Definition COFF.h:276
@ SCT_COMPLEX_TYPE_SHIFT
Type is formed as (base + (derived << SCT_COMPLEX_TYPE_SHIFT))
Definition COFF.h:280
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ SHF_ALLOC
Definition ELF.h:1250
@ SHF_GROUP
Definition ELF.h:1272
@ SHF_EXECINSTR
Definition ELF.h:1253
@ GNU_PROPERTY_AARCH64_FEATURE_1_BTI
Definition ELF.h:1861
@ GNU_PROPERTY_AARCH64_FEATURE_1_PAC
Definition ELF.h:1862
@ GNU_PROPERTY_AARCH64_FEATURE_1_GCS
Definition ELF.h:1863
@ SHT_PROGBITS
Definition ELF.h:1149
@ S_REGULAR
S_REGULAR - Regular section.
Definition MachO.h:127
void emitInstruction(MCObjectStreamer &, const MCInst &Inst, const MCSubtargetInfo &STI)
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:683
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
bool empty() const
Definition BasicBlock.h:101
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI std::optional< std::string > getArm64ECMangledFunctionName(StringRef Name)
Returns the ARM64EC mangled function name unless the input is already mangled.
Definition Mangler.cpp:292
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1669
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
scope_exit(Callable) -> scope_exit< Callable >
static unsigned getXPACOpcodeForKey(AArch64PACKey::ID K)
Return XPAC opcode to be used for a ptrauth strip using the given key.
ExceptionHandling
Definition CodeGen.h:53
Target & getTheAArch64beTarget()
std::string utostr(uint64_t X, bool isNeg=false)
static unsigned getBranchOpcodeForKey(bool IsCall, AArch64PACKey::ID K, bool Zero)
Return B(L)RA opcode to be used for an authenticated branch or call using the given key,...
Target & getTheAArch64leTarget()
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
Target & getTheAArch64_32Target()
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
Target & getTheARM64_32Target()
static MCRegister getXRegFromWReg(MCRegister Reg)
@ Add
Sum of integers.
Target & getTheARM64Target()
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static MCRegister getXRegFromXRegTuple(MCRegister RegTuple)
static unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return PAC opcode to be used for a ptrauth sign using the given key, or its PAC*Z variant that doesn'...
static MCRegister getWRegFromXReg(MCRegister Reg)
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1917
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
static unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return AUT opcode to be used for a ptrauth auth using the given key, or its AUT*Z variant that doesn'...
@ MCSA_Weak
.weak
@ MCSA_WeakAntiDep
.weak_anti_dep (COFF)
@ MCSA_ELF_TypeFunction
.type _foo, STT_FUNC # aka @function
@ MCSA_Hidden
.hidden (ELF)
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:870
#define EQ(a, b)
Definition regexec.c:65
RegisterAsmPrinter - Helper template for registering a target specific assembly printer,...