LLVM 23.0.0git
AArch64AsmPrinter.cpp
Go to the documentation of this file.
1//===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains a printer that converts from our internal representation
10// of machine-dependent LLVM code to the AArch64 assembly language.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AArch64.h"
15#include "AArch64MCInstLower.h"
17#include "AArch64RegisterInfo.h"
18#include "AArch64Subtarget.h"
27#include "llvm/ADT/DenseMap.h"
28#include "llvm/ADT/ScopeExit.h"
31#include "llvm/ADT/Statistic.h"
32#include "llvm/ADT/StringRef.h"
33#include "llvm/ADT/Twine.h"
47#include "llvm/IR/DataLayout.h"
49#include "llvm/IR/Mangler.h"
50#include "llvm/IR/Module.h"
51#include "llvm/MC/MCAsmInfo.h"
52#include "llvm/MC/MCContext.h"
53#include "llvm/MC/MCExpr.h"
54#include "llvm/MC/MCInst.h"
58#include "llvm/MC/MCStreamer.h"
59#include "llvm/MC/MCSymbol.h"
60#include "llvm/MC/MCValue.h"
70#include <cassert>
71#include <cstdint>
72#include <map>
73#include <memory>
74
75using namespace llvm;
76
77#define DEBUG_TYPE "AArch64AsmPrinter"
78
79// Doesn't count FPR128 ZCZ instructions which are handled
80// by TableGen pattern matching
81STATISTIC(NumZCZeroingInstrsFPR,
82 "Number of zero-cycle FPR zeroing instructions expanded from "
83 "canonical pseudo instructions");
84
87 "aarch64-ptrauth-auth-checks", cl::Hidden,
88 cl::values(clEnumValN(Unchecked, "none", "don't test for failure"),
89 clEnumValN(Poison, "poison", "poison on failure"),
90 clEnumValN(Trap, "trap", "trap on failure")),
91 cl::desc("Check pointer authentication auth/resign failures"),
93
94namespace {
95
// AArch64AsmPrinter: lowers MachineInstrs to MCInsts and emits AArch64
// assembly. Handles XRay sleds, KCFI and HWASan check expansion,
// pointer-authentication pseudo expansion, and COFF/ELF/Mach-O
// file-level emission.
96class AArch64AsmPrinter : public AsmPrinter {
97 AArch64MCInstLower MCInstLowering;
98 FaultMaps FM;
// Subtarget of the function currently being emitted; set in
// runOnMachineFunction and temporarily repointed by
// emitHwasanMemaccessSymbols.
99 const AArch64Subtarget *STI;
100 bool ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = false;
101#ifndef NDEBUG
// NOTE(review): debug-only instruction counter; its updates are not
// visible in this excerpt.
102 unsigned InstsEmitted;
103#endif
104 bool EnableImportCallOptimization = false;
// NOTE(review): the type of this member is declared on an elided line —
// confirm against the full source.
106 SectionToImportedFunctionCalls;
107 unsigned PAuthIFuncNextUniqueID = 1;
108
109public:
110 static char ID;
111
112 AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
113 : AsmPrinter(TM, std::move(Streamer), ID),
114 MCInstLowering(OutContext, *this), FM(*this) {}
115
116 StringRef getPassName() const override { return "AArch64 Assembly Printer"; }
117
118 /// Wrapper for MCInstLowering.lowerOperand() for the
119 /// tblgen'erated pseudo lowering.
120 bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
121 return MCInstLowering.lowerOperand(MO, MCOp);
122 }
123
124 const MCExpr *lowerConstantPtrAuth(const ConstantPtrAuth &CPA) override;
125
126 const MCExpr *lowerBlockAddressConstant(const BlockAddress &BA) override;
127
128 void emitStartOfAsmFile(Module &M) override;
129 void emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
130 ArrayRef<unsigned> JumpTableIndices) override;
131 std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
133 getCodeViewJumpTableInfo(int JTI, const MachineInstr *BranchInstr,
134 const MCSymbol *BranchLabel) const override;
135
136 void emitFunctionEntryLabel() override;
137
138 void emitXXStructor(const DataLayout &DL, const Constant *CV) override;
139
140 void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);
141
142 void LowerHardenedBRJumpTable(const MachineInstr &MI);
143
144 void LowerMOPS(MCStreamer &OutStreamer, const MachineInstr &MI);
145
146 void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
147 const MachineInstr &MI);
148 void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
149 const MachineInstr &MI);
150 void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
151 const MachineInstr &MI);
152 void LowerFAULTING_OP(const MachineInstr &MI);
153
154 void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
155 void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
156 void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);
157 void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, bool Typed);
158
// Key for the outlined HWASan check routines:
// (reg, is-short-granules, access-info, is-fixed-shadow, shadow-offset).
159 typedef std::tuple<unsigned, bool, uint32_t, bool, uint64_t>
160 HwasanMemaccessTuple;
161 std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
162 void LowerKCFI_CHECK(const MachineInstr &MI);
163 void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
164 void emitHwasanMemaccessSymbols(Module &M);
165
166 void emitSled(const MachineInstr &MI, SledKind Kind);
167
168 // Returns whether Reg may be used to store sensitive temporary values when
169 // expanding PtrAuth pseudos. Some OSes may take extra care to protect a
170 // small subset of GPRs on context switches - use these registers then.
171 //
172 // If there are no preferred registers, returns true for any Reg.
173 bool isPtrauthRegSafe(Register Reg) const {
174 if (STI->isX16X17Safer())
175 return Reg == AArch64::X16 || Reg == AArch64::X17;
176
177 return true;
178 }
179
180 // Emit the sequence for BRA/BLRA (authenticate + branch/call).
181 void emitPtrauthBranch(const MachineInstr *MI);
182
183 void emitPtrauthCheckAuthenticatedValue(Register TestedReg,
184 Register ScratchReg,
187 const MCSymbol *OnFailure = nullptr);
188
189 // Check authenticated LR before tail calling.
190 void emitPtrauthTailCallHardening(const MachineInstr *TC);
191
192 // Emit the sequence for AUT or AUTPAC.
193 void emitPtrauthAuthResign(Register AUTVal, AArch64PACKey::ID AUTKey,
194 uint64_t AUTDisc,
195 const MachineOperand *AUTAddrDisc,
196 Register Scratch,
197 std::optional<AArch64PACKey::ID> PACKey,
198 uint64_t PACDisc, Register PACAddrDisc, Value *DS);
199
200 // Emit R_AARCH64_PATCHINST, the deactivation symbol relocation. Returns true
201 // if no instruction should be emitted because the deactivation symbol is
202 // defined in the current module so this function emitted a NOP instead.
203 bool emitDeactivationSymbolRelocation(Value *DS);
204
205 // Emit the sequence for PAC.
206 void emitPtrauthSign(const MachineInstr *MI);
207
208 // Emit the sequence to compute the discriminator.
209 //
210 // The Scratch register passed to this function must be safe, as returned by
211 // isPtrauthRegSafe(ScratchReg).
212 //
213 // The returned register is either ScratchReg, AddrDisc, or XZR. Furthermore,
214 // it is guaranteed to be safe (or XZR), with the only exception of
215 // passing-through an *unmodified* unsafe AddrDisc register.
216 //
217 // If the expanded pseudo is allowed to clobber AddrDisc register, setting
218 // MayClobberAddrDisc may save one MOV instruction, provided
219 // isPtrauthRegSafe(AddrDisc) is true:
220 //
221 // mov x17, x16
222 // movk x17, #1234, lsl #48
223 // ; x16 is not used anymore
224 //
225 // can be replaced by
226 //
227 // movk x16, #1234, lsl #48
228 Register emitPtrauthDiscriminator(uint64_t Disc, Register AddrDisc,
229 Register ScratchReg,
230 bool MayClobberAddrDisc = false);
231
232 // Emit the sequence for LOADauthptrstatic
233 void LowerLOADauthptrstatic(const MachineInstr &MI);
234
235 // Emit the sequence for LOADgotPAC/MOVaddrPAC (either GOT adrp-ldr or
236 // adrp-add followed by PAC sign)
237 void LowerMOVaddrPAC(const MachineInstr &MI);
238
239 // Emit the sequence for LOADgotAUTH (load signed pointer from signed ELF GOT
240 // and authenticate it with, if FPAC bit is not set, check+trap sequence after
241 // authenticating)
242 void LowerLOADgotAUTH(const MachineInstr &MI);
243
244 void emitAddImm(MCRegister Val, int64_t Addend, MCRegister Tmp);
245 void emitAddress(MCRegister Reg, const MCExpr *Expr, MCRegister Tmp,
246 bool DSOLocal, const MCSubtargetInfo &STI);
247
248 const MCExpr *emitPAuthRelocationAsIRelative(
249 const MCExpr *Target, uint64_t Disc, AArch64PACKey::ID KeyID,
250 bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr);
251
252 /// tblgen'erated driver function for lowering simple MI->MC
253 /// pseudo instructions.
254 bool lowerPseudoInstExpansion(const MachineInstr *MI, MCInst &Inst);
255
256 // Emit Build Attributes
257 void emitAttributes(unsigned Flags, uint64_t PAuthABIPlatform,
258 uint64_t PAuthABIVersion, AArch64TargetStreamer *TS);
259
260 // Emit expansion of Compare-and-branch pseudo instructions
261 void emitCBPseudoExpansion(const MachineInstr *MI);
262
263 void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
264 void EmitToStreamer(const MCInst &Inst) {
265 EmitToStreamer(*OutStreamer, Inst);
266 }
267
268 void emitInstruction(const MachineInstr *MI) override;
269
270 void emitFunctionHeaderComment() override;
271
272 void getAnalysisUsage(AnalysisUsage &AU) const override {
274 AU.setPreservesAll();
275 }
276
277 bool runOnMachineFunction(MachineFunction &MF) override {
278 if (auto *PSIW = getAnalysisIfAvailable<ProfileSummaryInfoWrapperPass>())
279 PSI = &PSIW->getPSI();
280 if (auto *SDPIW =
281 getAnalysisIfAvailable<StaticDataProfileInfoWrapperPass>())
282 SDPI = &SDPIW->getStaticDataProfileInfo();
283
284 AArch64FI = MF.getInfo<AArch64FunctionInfo>();
285 STI = &MF.getSubtarget<AArch64Subtarget>();
286
287 SetupMachineFunction(MF);
288
// On COFF, emit the symbol definition record (storage class + type)
// before the function body. NOTE(review): the computation of Scl and the
// right-hand side of Type are on elided lines — confirm against the full
// source.
289 if (STI->isTargetCOFF()) {
290 bool Local = MF.getFunction().hasLocalLinkage();
293 int Type =
295
296 OutStreamer->beginCOFFSymbolDef(CurrentFnSym);
297 OutStreamer->emitCOFFSymbolStorageClass(Scl);
298 OutStreamer->emitCOFFSymbolType(Type);
299 OutStreamer->endCOFFSymbolDef();
300 }
301
302 // Emit the rest of the function body.
303 emitFunctionBody();
304
305 // Emit the XRay table for this function.
306 emitXRayTable();
307
308 // We didn't modify anything.
309 return false;
310 }
311
312 const MCExpr *lowerConstant(const Constant *CV,
313 const Constant *BaseCV = nullptr,
314 uint64_t Offset = 0) override;
315
316private:
317 void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
318 bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
319 bool printAsmRegInClass(const MachineOperand &MO,
320 const TargetRegisterClass *RC, unsigned AltName,
321 raw_ostream &O);
322
323 bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
324 const char *ExtraCode, raw_ostream &O) override;
325 bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
326 const char *ExtraCode, raw_ostream &O) override;
327
328 void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
329
330 void emitFunctionBodyEnd() override;
331 void emitGlobalAlias(const Module &M, const GlobalAlias &GA) override;
332
333 MCSymbol *GetCPISymbol(unsigned CPID) const override;
334 void emitEndOfAsmFile(Module &M) override;
335
336 AArch64FunctionInfo *AArch64FI = nullptr;
337
338 /// Emit the LOHs contained in AArch64FI.
339 void emitLOHs();
340
341 void emitMovXReg(Register Dest, Register Src);
342 void emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift);
343 void emitMOVK(Register Dest, uint64_t Imm, unsigned Shift);
344
345 void emitAUT(AArch64PACKey::ID Key, Register Pointer, Register Disc);
346 void emitPAC(AArch64PACKey::ID Key, Register Pointer, Register Disc);
347 void emitBLRA(bool IsCall, AArch64PACKey::ID Key, Register Target,
348 Register Disc);
349
350 /// Emit instruction to set float register to zero.
351 void emitFMov0(const MachineInstr &MI);
352 void emitFMov0AsFMov(const MachineInstr &MI, Register DestReg);
353
354 using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;
355
356 MInstToMCSymbol LOHInstToLabel;
357
358 bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const override {
359 return ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags;
360 }
361
362 const MCSubtargetInfo *getIFuncMCSubtargetInfo() const override {
363 assert(STI);
364 return STI;
365 }
366 void emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
367 MCSymbol *LazyPointer) override;
368 void emitMachOIFuncStubHelperBody(Module &M, const GlobalIFunc &GI,
369 MCSymbol *LazyPointer) override;
370
371 /// Checks if this instruction is part of a sequence that is eligible for
372 /// import call optimization and, if so, records it to be emitted in the
373 /// import call section.
374 void recordIfImportCall(const MachineInstr *BranchInst);
375};
376
377} // end anonymous namespace
378
// Emit file-prologue data: on COFF, feature symbols/replaceable-function data
// and the import-call-optimization flag; on ELF, AArch64 build attributes and
// the .note.gnu.property section derived from module flags.
// NOTE(review): the GNUFlags |= updates inside the three feature blocks are
// on elided lines — confirm against the full source.
379void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
380 const Triple &TT = TM.getTargetTriple();
381
382 if (TT.isOSBinFormatCOFF()) {
383 emitCOFFFeatureSymbol(M);
384 emitCOFFReplaceableFunctionData(M);
385
386 if (M.getModuleFlag("import-call-optimization"))
387 EnableImportCallOptimization = true;
388 }
389
// Everything below is ELF-only.
390 if (!TT.isOSBinFormatELF())
391 return;
392
393 // For emitting build attributes and .note.gnu.property section
394 auto *TS =
395 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
396 // Assemble feature flags that may require creation of build attributes and a
397 // note section.
398 unsigned BAFlags = 0;
399 unsigned GNUFlags = 0;
400 if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
401 M.getModuleFlag("branch-target-enforcement"))) {
402 if (!BTE->isZero()) {
403 BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_BTI_Flag;
405 }
406 }
407
408 if (const auto *GCS = mdconst::extract_or_null<ConstantInt>(
409 M.getModuleFlag("guarded-control-stack"))) {
410 if (!GCS->isZero()) {
411 BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_GCS_Flag;
413 }
414 }
415
416 if (const auto *Sign = mdconst::extract_or_null<ConstantInt>(
417 M.getModuleFlag("sign-return-address"))) {
418 if (!Sign->isZero()) {
419 BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_PAC_Flag;
421 }
422 }
423
// -1 means "module flag absent"; emitAttributes normalizes it to 0.
424 uint64_t PAuthABIPlatform = -1;
425 if (const auto *PAP = mdconst::extract_or_null<ConstantInt>(
426 M.getModuleFlag("aarch64-elf-pauthabi-platform"))) {
427 PAuthABIPlatform = PAP->getZExtValue();
428 }
429
430 uint64_t PAuthABIVersion = -1;
431 if (const auto *PAV = mdconst::extract_or_null<ConstantInt>(
432 M.getModuleFlag("aarch64-elf-pauthabi-version"))) {
433 PAuthABIVersion = PAV->getZExtValue();
434 }
435
436 // Emit AArch64 Build Attributes
437 emitAttributes(BAFlags, PAuthABIPlatform, PAuthABIVersion, TS);
438 // Emit a .note.gnu.property section with the flags.
439 TS->emitNoteSection(GNUFlags, PAuthABIPlatform, PAuthABIVersion);
440}
441
442void AArch64AsmPrinter::emitFunctionHeaderComment() {
443 const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
444 std::optional<std::string> OutlinerString = FI->getOutliningStyle();
445 if (OutlinerString != std::nullopt)
446 OutStreamer->getCommentOS() << ' ' << OutlinerString;
447}
448
449void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI)
450{
451 const Function &F = MF->getFunction();
452 if (F.hasFnAttribute("patchable-function-entry")) {
453 unsigned Num;
454 if (F.getFnAttribute("patchable-function-entry")
455 .getValueAsString()
456 .getAsInteger(10, Num))
457 return;
458 emitNops(Num);
459 return;
460 }
461
462 emitSled(MI, SledKind::FUNCTION_ENTER);
463}
464
465void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI) {
466 emitSled(MI, SledKind::FUNCTION_EXIT);
467}
468
469void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI) {
470 emitSled(MI, SledKind::TAIL_CALL);
471}
472
473void AArch64AsmPrinter::emitSled(const MachineInstr &MI, SledKind Kind) {
474 static const int8_t NoopsInSledCount = 7;
475 // We want to emit the following pattern:
476 //
477 // .Lxray_sled_N:
478 // ALIGN
479 // B #32
480 // ; 7 NOP instructions (28 bytes)
481 // .tmpN
482 //
483 // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
484 // over the full 32 bytes (8 instructions) with the following pattern:
485 //
486 // STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
487 // LDR W17, #12 ; W17 := function ID
488 // LDR X16,#12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
489 // BLR X16 ; call the tracing trampoline
490 // ;DATA: 32 bits of function ID
491 // ;DATA: lower 32 bits of the address of the trampoline
492 // ;DATA: higher 32 bits of the address of the trampoline
493 // LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
494 //
495 OutStreamer->emitCodeAlignment(Align(4), &getSubtargetInfo());
496 auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
497 OutStreamer->emitLabel(CurSled);
498 auto Target = OutContext.createTempSymbol();
499
500 // Emit "B #32" instruction, which jumps over the next 28 bytes.
501 // The operand has to be the number of 4-byte instructions to jump over,
502 // including the current instruction.
503 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));
504
505 for (int8_t I = 0; I < NoopsInSledCount; I++)
506 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::NOP));
507
508 OutStreamer->emitLabel(Target);
509 recordSled(CurSled, MI, Kind, 2);
510}
511
// Emit AArch64 build-attribute subsections: a REQUIRED ULEB128 subsection for
// the PAuth ABI platform/version when either is nonzero, and an OPTIONAL
// ULEB128 subsection for the BTI/PAC/GCS feature bits when any is set.
// A platform/version of uint64_t(-1) means "unset" and is normalized to 0.
// NOTE(review): the TS->emit* calls and the BTI/PAC/GCS value computations
// are on elided lines — confirm against the full source.
512void AArch64AsmPrinter::emitAttributes(unsigned Flags,
513 uint64_t PAuthABIPlatform,
514 uint64_t PAuthABIVersion,
515 AArch64TargetStreamer *TS) {
516
517 PAuthABIPlatform = (uint64_t(-1) == PAuthABIPlatform) ? 0 : PAuthABIPlatform;
518 PAuthABIVersion = (uint64_t(-1) == PAuthABIVersion) ? 0 : PAuthABIVersion;
519
520 if (PAuthABIPlatform || PAuthABIVersion) {
524 AArch64BuildAttributes::SubsectionOptional::REQUIRED,
525 AArch64BuildAttributes::SubsectionType::ULEB128);
529 PAuthABIPlatform, "");
533 "");
534 }
535
536 unsigned BTIValue =
538 unsigned PACValue =
540 unsigned GCSValue =
542
543 if (BTIValue || PACValue || GCSValue) {
547 AArch64BuildAttributes::SubsectionOptional::OPTIONAL,
548 AArch64BuildAttributes::SubsectionType::ULEB128);
558 }
559}
560
561// Emit the following code for Intrinsic::{xray_customevent,xray_typedevent}
562// (built-in functions __xray_customevent/__xray_typedevent).
563//
564// .Lxray_event_sled_N:
565// b 1f
566// save x0 and x1 (and also x2 for TYPED_EVENT_CALL)
567// set up x0 and x1 (and also x2 for TYPED_EVENT_CALL)
568// bl __xray_CustomEvent or __xray_TypedEvent
569// restore x0 and x1 (and also x2 for TYPED_EVENT_CALL)
570// 1:
571//
572// There are 6 instructions for EVENT_CALL and 9 for TYPED_EVENT_CALL.
573//
574// Then record a sled of kind CUSTOM_EVENT or TYPED_EVENT.
575// After patching, b .+N will become a nop.
576void AArch64AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
577 bool Typed) {
578 auto &O = *OutStreamer;
579 MCSymbol *CurSled = OutContext.createTempSymbol("xray_sled_", true);
580 O.emitLabel(CurSled);
// Mach-O symbols carry a leading underscore.
581 bool MachO = TM.getTargetTriple().isOSBinFormatMachO();
582 auto *Sym = MCSymbolRefExpr::create(
583 OutContext.getOrCreateSymbol(
584 Twine(MachO ? "_" : "") +
585 (Typed ? "__xray_TypedEvent" : "__xray_CustomEvent")),
586 OutContext);
// Typed events save/restore and pass three argument registers (x0-x2);
// custom events use two (x0-x1). The leading B skips the whole sequence
// until patched (9 or 6 instructions respectively, plus itself... see the
// comment block above this function for the exact layout).
587 if (Typed) {
588 O.AddComment("Begin XRay typed event");
589 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(9));
590 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
591 .addReg(AArch64::SP)
592 .addReg(AArch64::X0)
593 .addReg(AArch64::X1)
594 .addReg(AArch64::SP)
595 .addImm(-4));
596 EmitToStreamer(O, MCInstBuilder(AArch64::STRXui)
597 .addReg(AArch64::X2)
598 .addReg(AArch64::SP)
599 .addImm(2));
600 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
601 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
602 emitMovXReg(AArch64::X2, MI.getOperand(2).getReg());
603 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
604 EmitToStreamer(O, MCInstBuilder(AArch64::LDRXui)
605 .addReg(AArch64::X2)
606 .addReg(AArch64::SP)
607 .addImm(2));
608 O.AddComment("End XRay typed event");
609 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
610 .addReg(AArch64::SP)
611 .addReg(AArch64::X0)
612 .addReg(AArch64::X1)
613 .addReg(AArch64::SP)
614 .addImm(4));
615
616 recordSled(CurSled, MI, SledKind::TYPED_EVENT, 2);
617 } else {
618 O.AddComment("Begin XRay custom event");
619 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(6));
620 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
621 .addReg(AArch64::SP)
622 .addReg(AArch64::X0)
623 .addReg(AArch64::X1)
624 .addReg(AArch64::SP)
625 .addImm(-2));
626 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
627 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
628 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
629 O.AddComment("End XRay custom event");
630 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
631 .addReg(AArch64::SP)
632 .addReg(AArch64::X0)
633 .addReg(AArch64::X1)
634 .addReg(AArch64::SP)
635 .addImm(2));
636
637 recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 2);
638 }
639}
640
// Expand KCFI_CHECK: load the callee's KCFI type hash from just before its
// entry point, compare it against the expected hash encoded in the pseudo,
// and BRK with an ESR that encodes the involved registers on mismatch.
641void AArch64AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
642 Register AddrReg = MI.getOperand(0).getReg();
643 assert(std::next(MI.getIterator())->isCall() &&
644 "KCFI_CHECK not followed by a call instruction");
645 assert(std::next(MI.getIterator())->getOperand(0).getReg() == AddrReg &&
646 "KCFI_CHECK call target doesn't match call operand");
647
648 // Default to using the intra-procedure-call temporary registers for
649 // comparing the hashes.
650 unsigned ScratchRegs[] = {AArch64::W16, AArch64::W17};
651 if (AddrReg == AArch64::XZR) {
652 // Checking XZR makes no sense. Instead of emitting a load, zero
653 // ScratchRegs[0] and use it for the ESR AddrIndex below.
654 AddrReg = getXRegFromWReg(ScratchRegs[0]);
655 emitMovXReg(AddrReg, AArch64::XZR);
656 } else {
657 // If one of the scratch registers is used for the call target (e.g.
658 // with AArch64::TCRETURNriBTI), we can clobber another caller-saved
659 // temporary register instead (in this case, AArch64::W9) as the check
660 // is immediately followed by the call instruction.
661 for (auto &Reg : ScratchRegs) {
662 if (Reg == getWRegFromXReg(AddrReg)) {
663 Reg = AArch64::W9;
664 break;
665 }
666 }
667 assert(ScratchRegs[0] != AddrReg && ScratchRegs[1] != AddrReg &&
668 "Invalid scratch registers for KCFI_CHECK");
669
670 // Adjust the offset for patchable-function-prefix. This assumes that
671 // patchable-function-prefix is the same for all functions.
672 int64_t PrefixNops = 0;
673 (void)MI.getMF()
674 ->getFunction()
675 .getFnAttribute("patchable-function-prefix")
676 .getValueAsString()
677 .getAsInteger(10, PrefixNops)
678
679 // Load the target function type hash.
680 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDURWi)
681 .addReg(ScratchRegs[0])
682 .addReg(AddrReg)
683 .addImm(-(PrefixNops * 4 + 4)));
684 }
685
686 // Load the expected type hash.
// Materialized with two MOVKs (low then high 16 bits).
687 const int64_t Type = MI.getOperand(1).getImm();
688 emitMOVK(ScratchRegs[1], Type & 0xFFFF, 0);
689 emitMOVK(ScratchRegs[1], (Type >> 16) & 0xFFFF, 16);
690
691 // Compare the hashes and trap if there's a mismatch.
692 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSWrs)
693 .addReg(AArch64::WZR)
694 .addReg(ScratchRegs[0])
695 .addReg(ScratchRegs[1])
696 .addImm(0));
697
698 MCSymbol *Pass = OutContext.createTempSymbol();
699 EmitToStreamer(*OutStreamer,
700 MCInstBuilder(AArch64::Bcc)
701 .addImm(AArch64CC::EQ)
702 .addExpr(MCSymbolRefExpr::create(Pass, OutContext)));
703
704 // The base ESR is 0x8000 and the register information is encoded in bits
705 // 0-9 as follows:
706 // - 0-4: n, where the register Xn contains the target address
707 // - 5-9: m, where the register Wm contains the expected type hash
708 // Where n, m are in [0, 30].
709 unsigned TypeIndex = ScratchRegs[1] - AArch64::W0;
710 unsigned AddrIndex;
711 switch (AddrReg) {
712 default:
713 AddrIndex = AddrReg - AArch64::X0;
714 break;
715 case AArch64::FP:
716 AddrIndex = 29;
717 break;
718 case AArch64::LR:
719 AddrIndex = 30;
720 break;
721 }
722
723 assert(AddrIndex < 31 && TypeIndex < 31);
724
725 unsigned ESR = 0x8000 | ((TypeIndex & 31) << 5) | (AddrIndex & 31);
726 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BRK).addImm(ESR));
727 OutStreamer->emitLabel(Pass);
728}
729
730void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
731 Register Reg = MI.getOperand(0).getReg();
732
733 // The HWASan pass won't emit a CHECK_MEMACCESS intrinsic with a pointer
734 // statically known to be zero. However, conceivably, the HWASan pass may
735 // encounter a "cannot currently statically prove to be null" pointer (and is
736 // therefore unable to omit the intrinsic) that later optimization passes
737 // convert into a statically known-null pointer.
738 if (Reg == AArch64::XZR)
739 return;
740
741 bool IsShort =
742 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES) ||
743 (MI.getOpcode() ==
744 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
745 uint32_t AccessInfo = MI.getOperand(1).getImm();
746 bool IsFixedShadow =
747 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW) ||
748 (MI.getOpcode() ==
749 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
750 uint64_t FixedShadowOffset = IsFixedShadow ? MI.getOperand(2).getImm() : 0;
751
752 MCSymbol *&Sym = HwasanMemaccessSymbols[HwasanMemaccessTuple(
753 Reg, IsShort, AccessInfo, IsFixedShadow, FixedShadowOffset)];
754 if (!Sym) {
755 // FIXME: Make this work on non-ELF.
756 if (!TM.getTargetTriple().isOSBinFormatELF())
757 report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");
758
759 std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
760 utostr(AccessInfo);
761 if (IsFixedShadow)
762 SymName += "_fixed_" + utostr(FixedShadowOffset);
763 if (IsShort)
764 SymName += "_short_v2";
765 Sym = OutContext.getOrCreateSymbol(SymName);
766 }
767
768 EmitToStreamer(*OutStreamer,
769 MCInstBuilder(AArch64::BL)
770 .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
771}
772
// Emit the body of every outlined HWASan check routine recorded by
// LowerHWASAN_CHECK_MEMACCESS: each is a weak, hidden function in a comdat
// .text.hot section that loads the shadow tag for the pointer in its fixed
// register, returns on match, and otherwise tail-calls/branches to
// __hwasan_tag_mismatch(_v2). NOTE(review): several argument lines of the
// MCInst builders below (shadow compare shift, section flags, relocation
// specifiers) are elided in this excerpt — confirm against the full source.
773void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
774 if (HwasanMemaccessSymbols.empty())
775 return;
776
777 const Triple &TT = TM.getTargetTriple();
778 assert(TT.isOSBinFormatELF());
779 // AArch64Subtarget is huge, so heap allocate it so we don't run out of stack
780 // space.
781 auto STI = std::make_unique<AArch64Subtarget>(
782 TT, TM.getTargetCPU(), TM.getTargetCPU(), TM.getTargetFeatureString(), TM,
783 true);
// Temporarily point the member STI at this ad-hoc subtarget; restored to
// nullptr at the end of this function.
784 this->STI = STI.get();
785
786 MCSymbol *HwasanTagMismatchV1Sym =
787 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");
788 MCSymbol *HwasanTagMismatchV2Sym =
789 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");
790
791 const MCSymbolRefExpr *HwasanTagMismatchV1Ref =
792 MCSymbolRefExpr::create(HwasanTagMismatchV1Sym, OutContext);
793 const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
794 MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);
795
// One routine per memoized tuple.
796 for (auto &P : HwasanMemaccessSymbols) {
797 unsigned Reg = std::get<0>(P.first);
798 bool IsShort = std::get<1>(P.first);
799 uint32_t AccessInfo = std::get<2>(P.first);
800 bool IsFixedShadow = std::get<3>(P.first);
801 uint64_t FixedShadowOffset = std::get<4>(P.first);
802 const MCSymbolRefExpr *HwasanTagMismatchRef =
803 IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
804 MCSymbol *Sym = P.second;
805
// Decode the packed AccessInfo fields.
806 bool HasMatchAllTag =
807 (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
808 uint8_t MatchAllTag =
809 (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
810 unsigned Size =
811 1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
812 bool CompileKernel =
813 (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;
814
815 OutStreamer->switchSection(OutContext.getELFSection(
816 ".text.hot", ELF::SHT_PROGBITS,
818 /*IsComdat=*/true));
819
820 OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
821 OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
822 OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
823 OutStreamer->emitLabel(Sym);
824
825 EmitToStreamer(MCInstBuilder(AArch64::SBFMXri)
826 .addReg(AArch64::X16)
827 .addReg(Reg)
828 .addImm(4)
829 .addImm(55));
830
831 if (IsFixedShadow) {
832 // Aarch64 makes it difficult to embed large constants in the code.
833 // Fortuitously, kShadowBaseAlignment == 32, so we use the 32-bit
834 // left-shift option in the MOV instruction. Combined with the 16-bit
835 // immediate, this is enough to represent any offset up to 2**48.
836 emitMOVZ(AArch64::X17, FixedShadowOffset >> 32, 32);
837 EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
838 .addReg(AArch64::W16)
839 .addReg(AArch64::X17)
840 .addReg(AArch64::X16)
841 .addImm(0)
842 .addImm(0));
843 } else {
844 EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
845 .addReg(AArch64::W16)
846 .addReg(IsShort ? AArch64::X20 : AArch64::X9)
847 .addReg(AArch64::X16)
848 .addImm(0)
849 .addImm(0));
850 }
851
852 EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
853 .addReg(AArch64::XZR)
854 .addReg(AArch64::X16)
855 .addReg(Reg)
857 MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
858 EmitToStreamer(MCInstBuilder(AArch64::Bcc)
859 .addImm(AArch64CC::NE)
861 HandleMismatchOrPartialSym, OutContext)));
// Fast path: the tags matched, just return.
862 MCSymbol *ReturnSym = OutContext.createTempSymbol();
863 OutStreamer->emitLabel(ReturnSym);
864 EmitToStreamer(MCInstBuilder(AArch64::RET).addReg(AArch64::LR));
865 OutStreamer->emitLabel(HandleMismatchOrPartialSym);
866
// Optional "match-all" escape tag: treat it as a match.
867 if (HasMatchAllTag) {
868 EmitToStreamer(MCInstBuilder(AArch64::UBFMXri)
869 .addReg(AArch64::X17)
870 .addReg(Reg)
871 .addImm(56)
872 .addImm(63));
873 EmitToStreamer(MCInstBuilder(AArch64::SUBSXri)
874 .addReg(AArch64::XZR)
875 .addReg(AArch64::X17)
876 .addImm(MatchAllTag)
877 .addImm(0));
878 EmitToStreamer(
879 MCInstBuilder(AArch64::Bcc)
880 .addImm(AArch64CC::EQ)
881 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
882 }
883
// Short-granule handling: re-check partially-tagged granules before
// declaring a mismatch.
884 if (IsShort) {
885 EmitToStreamer(MCInstBuilder(AArch64::SUBSWri)
886 .addReg(AArch64::WZR)
887 .addReg(AArch64::W16)
888 .addImm(15)
889 .addImm(0));
890 MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
891 EmitToStreamer(
892 MCInstBuilder(AArch64::Bcc)
893 .addImm(AArch64CC::HI)
894 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));
895
896 EmitToStreamer(MCInstBuilder(AArch64::ANDXri)
897 .addReg(AArch64::X17)
898 .addReg(Reg)
899 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
900 if (Size != 1)
901 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
902 .addReg(AArch64::X17)
903 .addReg(AArch64::X17)
904 .addImm(Size - 1)
905 .addImm(0));
906 EmitToStreamer(MCInstBuilder(AArch64::SUBSWrs)
907 .addReg(AArch64::WZR)
908 .addReg(AArch64::W16)
909 .addReg(AArch64::W17)
910 .addImm(0));
911 EmitToStreamer(
912 MCInstBuilder(AArch64::Bcc)
913 .addImm(AArch64CC::LS)
914 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));
915
916 EmitToStreamer(MCInstBuilder(AArch64::ORRXri)
917 .addReg(AArch64::X16)
918 .addReg(Reg)
919 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
920 EmitToStreamer(MCInstBuilder(AArch64::LDRBBui)
921 .addReg(AArch64::W16)
922 .addReg(AArch64::X16)
923 .addImm(0));
924 EmitToStreamer(
925 MCInstBuilder(AArch64::SUBSXrs)
926 .addReg(AArch64::XZR)
927 .addReg(AArch64::X16)
928 .addReg(Reg)
930 EmitToStreamer(
931 MCInstBuilder(AArch64::Bcc)
932 .addImm(AArch64CC::EQ)
933 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
934
935 OutStreamer->emitLabel(HandleMismatchSym);
936 }
937
// Slow path: spill x0/x1 and fp/lr, marshal args, and hand off to the
// runtime's tag-mismatch handler.
938 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
939 .addReg(AArch64::SP)
940 .addReg(AArch64::X0)
941 .addReg(AArch64::X1)
942 .addReg(AArch64::SP)
943 .addImm(-32));
944 EmitToStreamer(MCInstBuilder(AArch64::STPXi)
945 .addReg(AArch64::FP)
946 .addReg(AArch64::LR)
947 .addReg(AArch64::SP)
948 .addImm(29));
949
950 if (Reg != AArch64::X0)
951 emitMovXReg(AArch64::X0, Reg);
952 emitMOVZ(AArch64::X1, AccessInfo & HWASanAccessInfo::RuntimeMask, 0);
953
954 if (CompileKernel) {
955 // The Linux kernel's dynamic loader doesn't support GOT relative
956 // relocations, but it doesn't support late binding either, so just call
957 // the function directly.
958 EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef));
959 } else {
960 // Intentionally load the GOT entry and branch to it, rather than possibly
961 // late binding the function, which may clobber the registers before we
962 // have a chance to save them.
963 EmitToStreamer(MCInstBuilder(AArch64::ADRP)
964 .addReg(AArch64::X16)
965 .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
967 OutContext)));
968 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
969 .addReg(AArch64::X16)
970 .addReg(AArch64::X16)
971 .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
973 OutContext)));
974 EmitToStreamer(MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
975 }
976 }
977 this->STI = nullptr;
978}
979
980static void emitAuthenticatedPointer(MCStreamer &OutStreamer,
981 MCSymbol *StubLabel,
982 const MCExpr *StubAuthPtrRef) {
983 // sym$auth_ptr$key$disc:
984 OutStreamer.emitLabel(StubLabel);
985 OutStreamer.emitValue(StubAuthPtrRef, /*size=*/8);
986}
987
/// Module-level epilogue emission. In order: HWASan memaccess helper
/// symbols, Mach-O / ELF authenticated-pointer ($auth_ptr$) stubs,
/// STT_FUNC forcing for signed-ELF-GOT builds, and the COFF import-call
/// section for import call optimization.
/// NOTE(review): several continuation lines are missing from this
/// extraction (original lines 1003, 1051, 1055) -- e.g. the section-type
/// argument to getMachOSection, the attribute argument of
/// emitSymbolAttribute, and the stackmap/faultmap emission calls.
/// Verify against the upstream file before editing.
988 void AArch64AsmPrinter::emitEndOfAsmFile(Module &M) {
989   emitHwasanMemaccessSymbols(M);
990
991   const Triple &TT = TM.getTargetTriple();
992   if (TT.isOSBinFormatMachO()) {
993     // Output authenticated pointers as indirect symbols, if we have any.
994     MachineModuleInfoMachO &MMIMacho =
995         MMI->getObjFileInfo<MachineModuleInfoMachO>();
996
997     auto Stubs = MMIMacho.getAuthGVStubList();
998
999     if (!Stubs.empty()) {
1000       // Switch to the "__auth_ptr" section.
1001       OutStreamer->switchSection(
1002           OutContext.getMachOSection("__DATA", "__auth_ptr", MachO::S_REGULAR,
1004       emitAlignment(Align(8));
1005
1006       for (const auto &Stub : Stubs)
1007         emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
1008
1009       OutStreamer->addBlankLine();
1010     }
1011
1012     // Funny Darwin hack: This flag tells the linker that no global symbols
1013     // contain code that falls through to other global symbols (e.g. the obvious
1014     // implementation of multiple entry points). If this doesn't occur, the
1015     // linker can safely perform dead code stripping. Since LLVM never
1016     // generates code that does this, it is always safe to set.
1017     OutStreamer->emitSubsectionsViaSymbols();
1018   }
1019
1020   if (TT.isOSBinFormatELF()) {
1021     // Output authenticated pointers as indirect symbols, if we have any.
1022     MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
1023
1024     auto Stubs = MMIELF.getAuthGVStubList();
1025
1026     if (!Stubs.empty()) {
1027       const TargetLoweringObjectFile &TLOF = getObjFileLowering();
1028       OutStreamer->switchSection(TLOF.getDataSection());
1029       emitAlignment(Align(8));
1030
1031       for (const auto &Stub : Stubs)
1032         emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
1033
1034       OutStreamer->addBlankLine();
1035     }
1036
1037     // With signed ELF GOT enabled, the linker looks at the symbol type to
1038     // choose between keys IA (for STT_FUNC) and DA (for other types). Symbols
1039     // for functions not defined in the module have STT_NOTYPE type by default.
1040     // This makes linker to emit signing schema with DA key (instead of IA) for
1041     // corresponding R_AARCH64_AUTH_GLOB_DAT dynamic reloc. To avoid that, force
1042     // all function symbols used in the module to have STT_FUNC type. See
1043     // https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#default-signing-schema
1044     const auto *PtrAuthELFGOTFlag = mdconst::extract_or_null<ConstantInt>(
1045         M.getModuleFlag("ptrauth-elf-got"));
1046     if (PtrAuthELFGOTFlag && PtrAuthELFGOTFlag->getZExtValue() == 1)
1047       for (const GlobalValue &GV : M.global_values())
1048         if (!GV.use_empty() && isa<Function>(GV) &&
1049             !GV.getName().starts_with("llvm."))
1050           OutStreamer->emitSymbolAttribute(getSymbol(&GV),
1052   }
1053
1054   // Emit stack and fault map information.
1056
1057   // If import call optimization is enabled, emit the appropriate section.
1058   // We do this whether or not we recorded any import calls.
1059   if (EnableImportCallOptimization && TT.isOSBinFormatCOFF()) {
1060     OutStreamer->switchSection(getObjFileLowering().getImportCallSection());
1061
1062     // Section always starts with some magic.
1063     constexpr char ImpCallMagic[12] = "Imp_Call_V1";
1064     OutStreamer->emitBytes(StringRef{ImpCallMagic, sizeof(ImpCallMagic)});
1065
1066     // Layout of this section is:
1067     // Per section that contains calls to imported functions:
1068     //  uint32_t SectionSize: Size in bytes for information in this section.
1069     //  uint32_t Section Number
1070     // Per call to imported function in section:
1071     //  uint32_t Kind: the kind of imported function.
1072     //  uint32_t BranchOffset: the offset of the branch instruction in its
1073     //                          parent section.
1074     //  uint32_t TargetSymbolId: the symbol id of the called function.
1075     for (auto &[Section, CallsToImportedFuncs] :
1076          SectionToImportedFunctionCalls) {
1077       unsigned SectionSize =
1078           sizeof(uint32_t) * (2 + 3 * CallsToImportedFuncs.size());
1079       OutStreamer->emitInt32(SectionSize);
1080       OutStreamer->emitCOFFSecNumber(Section->getBeginSymbol());
1081       for (auto &[CallsiteSymbol, CalledSymbol] : CallsToImportedFuncs) {
1082         // Kind is always IMAGE_REL_ARM64_DYNAMIC_IMPORT_CALL (0x13).
1083         OutStreamer->emitInt32(0x13);
1084         OutStreamer->emitCOFFSecOffset(CallsiteSymbol);
1085         OutStreamer->emitCOFFSymbolIndex(CalledSymbol);
1086       }
1087     }
1088   }
1089 }
1090
/// Flush all Linker Optimization Hint directives recorded for the current
/// function: for each LOH, resolve its argument instructions to the labels
/// emitted for them (LOHInstToLabel) and emit one .loh directive.
/// NOTE(review): original line 1092 (the declaration of MCArgs, presumably
/// a SmallVector of MCSymbol*) is missing from this extraction.
1091 void AArch64AsmPrinter::emitLOHs() {
1093
1094   for (const auto &D : AArch64FI->getLOHContainer()) {
1095     for (const MachineInstr *MI : D.getArgs()) {
1096       MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
1097       assert(LabelIt != LOHInstToLabel.end() &&
1098              "Label hasn't been inserted for LOH related instruction");
1099       MCArgs.push_back(LabelIt->second);
1100     }
1101     OutStreamer->emitLOHDirective(D.getKind(), MCArgs);
      // Reuse the argument buffer across LOHs instead of reallocating.
1102     MCArgs.clear();
1103   }
1104 }
1105
1106void AArch64AsmPrinter::emitFunctionBodyEnd() {
1107 if (!AArch64FI->getLOHRelated().empty())
1108 emitLOHs();
1109}
1110
1111/// GetCPISymbol - Return the symbol for the specified constant pool entry.
1112MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
1113 // Darwin uses a linker-private symbol name for constant-pools (to
1114 // avoid addends on the relocation?), ELF has no such concept and
1115 // uses a normal private symbol.
1116 if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
1117 return OutContext.getOrCreateSymbol(
1118 Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
1119 Twine(getFunctionNumber()) + "_" + Twine(CPID));
1120
1121 return AsmPrinter::GetCPISymbol(CPID);
1122}
1123
/// Print a single machine operand (register, immediate, symbol, or block
/// address) in textual form for inline-asm lowering.
/// NOTE(review): the `case MachineOperand::MO_...:` labels of this switch
/// (original lines 1130, 1132, 1134, 1137, 1141, 1145) and the register
/// printing statement are missing from this extraction; the remaining
/// bodies correspond to register, immediate, global/symbol, and
/// block-address operands respectively.
1124 void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
1125                                      raw_ostream &O) {
1126   const MachineOperand &MO = MI->getOperand(OpNum);
1127   switch (MO.getType()) {
1128   default:
1129     llvm_unreachable("<unknown operand type>");
1131     Register Reg = MO.getReg();
1133     assert(!MO.getSubReg() && "Subregs should be eliminated!");
1135     break;
1136   }
1138     O << MO.getImm();
1139     break;
1140   }
1142     PrintSymbolOperand(MO, O);
1143     break;
1144   }
1146     MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
1147     Sym->print(O, MAI);
1148     break;
1149   }
1150   }
1151 }
1152
/// Print the register operand MO in the width requested by Mode:
/// 'w' (32-bit), 'x' (64-bit), or 't' (x8 tuple's first register).
/// Returns true on failure (unknown mode), false on success -- the
/// inline-asm printing convention used throughout this file.
/// NOTE(review): the per-mode register conversion statements (original
/// lines 1160, 1163, 1166) and the final register-name print (line 1170)
/// are missing from this extraction.
1153 bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
1154                                           raw_ostream &O) {
1155   Register Reg = MO.getReg();
1156   switch (Mode) {
1157   default:
1158     return true; // Unknown mode.
1159   case 'w':
1161     break;
1162   case 'x':
1164     break;
1165   case 't':
1167     break;
1168   }
1169
1171   return false;
1172 }
1173
1174// Prints the register in MO using class RC using the offset in the
1175// new register class. This should not be used for cross class
1176// printing.
1177bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
1178 const TargetRegisterClass *RC,
1179 unsigned AltName, raw_ostream &O) {
1180 assert(MO.isReg() && "Should only get here with a register!");
1181 const TargetRegisterInfo *RI = STI->getRegisterInfo();
1182 Register Reg = MO.getReg();
1183 MCRegister RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
1184 if (!RI->regsOverlap(RegToPrint, Reg))
1185 return true;
1186 O << AArch64InstPrinter::getRegisterName(RegToPrint, AltName);
1187 return false;
1188}
1189
/// Handle inline-asm operand modifiers for AArch64: 'w'/'x' select GPR
/// width, 'b'/'h'/'s'/'d'/'q'/'z' select FPR/SVE register views; with no
/// modifier, GPRs print as x registers and FPRs as v registers.
/// Returns true on failure (unknown modifier), false on success.
/// NOTE(review): original line 1212 (printing the zero register name in
/// the "immediate 0 as wzr/xzr" case) is missing from this extraction.
1190 bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
1191                                         const char *ExtraCode, raw_ostream &O) {
1192   const MachineOperand &MO = MI->getOperand(OpNum);
1193
1194   // First try the generic code, which knows about modifiers like 'c' and 'n'.
1195   if (!AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O))
1196     return false;
1197
1198   // Does this asm operand have a single letter operand modifier?
1199   if (ExtraCode && ExtraCode[0]) {
1200     if (ExtraCode[1] != 0)
1201       return true; // Unknown modifier.
1202
1203     switch (ExtraCode[0]) {
1204     default:
1205       return true; // Unknown modifier.
1206     case 'w': // Print W register
1207     case 'x': // Print X register
1208       if (MO.isReg())
1209         return printAsmMRegister(MO, ExtraCode[0], O);
      // An immediate zero is printed as the corresponding zero register.
1210       if (MO.isImm() && MO.getImm() == 0) {
1211         unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
1213         return false;
1214       }
1215       printOperand(MI, OpNum, O);
1216       return false;
1217     case 'b': // Print B register.
1218     case 'h': // Print H register.
1219     case 's': // Print S register.
1220     case 'd': // Print D register.
1221     case 'q': // Print Q register.
1222     case 'z': // Print Z register.
1223       if (MO.isReg()) {
1224         const TargetRegisterClass *RC;
1225         switch (ExtraCode[0]) {
1226         case 'b':
1227           RC = &AArch64::FPR8RegClass;
1228           break;
1229         case 'h':
1230           RC = &AArch64::FPR16RegClass;
1231           break;
1232         case 's':
1233           RC = &AArch64::FPR32RegClass;
1234           break;
1235         case 'd':
1236           RC = &AArch64::FPR64RegClass;
1237           break;
1238         case 'q':
1239           RC = &AArch64::FPR128RegClass;
1240           break;
1241         case 'z':
1242           RC = &AArch64::ZPRRegClass;
1243           break;
1244         default:
1245           return true;
1246         }
1247         return printAsmRegInClass(MO, RC, AArch64::NoRegAltName, O);
1248       }
1249       printOperand(MI, OpNum, O);
1250       return false;
1251     }
1252   }
1253
1254   // According to ARM, we should emit x and v registers unless we have a
1255   // modifier.
1256   if (MO.isReg()) {
1257     Register Reg = MO.getReg();
1258
1259     // If this is a w or x register, print an x register.
1260     if (AArch64::GPR32allRegClass.contains(Reg) ||
1261         AArch64::GPR64allRegClass.contains(Reg))
1262       return printAsmMRegister(MO, 'x', O);
1263
1264     // If this is an x register tuple, print an x register.
1265     if (AArch64::GPR64x8ClassRegClass.contains(Reg))
1266       return printAsmMRegister(MO, 't', O);
1267
1268     unsigned AltName = AArch64::NoRegAltName;
1269     const TargetRegisterClass *RegClass;
1270     if (AArch64::ZPRRegClass.contains(Reg)) {
1271       RegClass = &AArch64::ZPRRegClass;
1272     } else if (AArch64::PPRRegClass.contains(Reg)) {
1273       RegClass = &AArch64::PPRRegClass;
1274     } else if (AArch64::PNRRegClass.contains(Reg)) {
1275       RegClass = &AArch64::PNRRegClass;
1276     } else {
1277       RegClass = &AArch64::FPR128RegClass;
1278       AltName = AArch64::vreg;
1279     }
1280
1281     // If this is a b, h, s, d, or q register, print it as a v register.
1282     return printAsmRegInClass(MO, RegClass, AltName, O);
1283   }
1284
1285   printOperand(MI, OpNum, O);
1286   return false;
1287 }
1288
1289bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
1290 unsigned OpNum,
1291 const char *ExtraCode,
1292 raw_ostream &O) {
1293 if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
1294 return true; // Unknown modifier.
1295
1296 const MachineOperand &MO = MI->getOperand(OpNum);
1297 assert(MO.isReg() && "unexpected inline asm memory operand");
1298 O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
1299 return false;
1300}
1301
1302void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
1303 raw_ostream &OS) {
1304 unsigned NOps = MI->getNumOperands();
1305 assert(NOps == 4);
1306 OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
1307 // cast away const; DIetc do not take const operands for some reason.
1308 OS << MI->getDebugVariable()->getName();
1309 OS << " <- ";
1310 // Frame address. Currently handles register +- offset only.
1311 assert(MI->isIndirectDebugValue());
1312 OS << '[';
1313 for (unsigned I = 0, E = llvm::size(MI->debug_operands()); I < E; ++I) {
1314 if (I != 0)
1315 OS << ", ";
1316 printOperand(MI, I, OS);
1317 }
1318 OS << ']';
1319 OS << "+";
1320 printOperand(MI, NOps - 2, OS);
1321}
1322
/// Emit the requested jump tables into the appropriate read-only section.
/// Entries are PC-relative to a per-table base label; 1- and 2-byte
/// entries are additionally divided by 4 (instruction count).
/// NOTE(review): original line 1330 (the local `JT` binding of
/// MJTI.getJumpTables()) and line 1365 (the shift expression applied when
/// Size != 4, presumably MCBinaryExpr::createLShr) are missing from this
/// extraction.
1323 void AArch64AsmPrinter::emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
1324                                           ArrayRef<unsigned> JumpTableIndices) {
1325   // Fast return if there is nothing to emit to avoid creating empty sections.
1326   if (JumpTableIndices.empty())
1327     return;
1328   const TargetLoweringObjectFile &TLOF = getObjFileLowering();
1329   const auto &F = MF->getFunction();
1331
1332   MCSection *ReadOnlySec = nullptr;
1333   if (TM.Options.EnableStaticDataPartitioning) {
      // Partitioned builds pick a section based on the first table's hotness.
1334     ReadOnlySec =
1335         TLOF.getSectionForJumpTable(F, TM, &JT[JumpTableIndices.front()]);
1336   } else {
1337     ReadOnlySec = TLOF.getSectionForJumpTable(F, TM);
1338   }
1339   OutStreamer->switchSection(ReadOnlySec);
1340
1341   auto AFI = MF->getInfo<AArch64FunctionInfo>();
1342   for (unsigned JTI : JumpTableIndices) {
1343     const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
1344
1345     // If this jump table was deleted, ignore it.
1346     if (JTBBs.empty()) continue;
1347
1348     unsigned Size = AFI->getJumpTableEntrySize(JTI);
1349     emitAlignment(Align(Size));
1350     OutStreamer->emitLabel(GetJTISymbol(JTI));
1351
1352     const MCSymbol *BaseSym = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1353     const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);
1354
1355     for (auto *JTBB : JTBBs) {
1356       const MCExpr *Value =
1357           MCSymbolRefExpr::create(JTBB->getSymbol(), OutContext);
1358
1359       // Each entry is:
1360       //     .byte/.hword  (LBB - Lbase)>>2
1361       // or plain:
1362       //     .word LBB - Lbase
1363       Value = MCBinaryExpr::createSub(Value, Base, OutContext);
1364       if (Size != 4)
1366             Value, MCConstantExpr::create(2, OutContext), OutContext);
1367
1368       OutStreamer->emitValue(Value, Size);
1369     }
1370   }
1371 }
1372
/// Describe a jump table for CodeView debug info: base symbol, base
/// offset (always 0 -- entries are relative to the per-table base label),
/// branch label, and the CodeView entry-size/shift encoding matching the
/// 1/2/4-byte entries emitted by emitJumpTableImpl.
/// NOTE(review): original line 1374 (the trailing element of the return
/// tuple type, codeview::JumpTableEntrySize) and line 1380 (the EntrySize
/// local declaration) are missing from this extraction.
1373 std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
1375 AArch64AsmPrinter::getCodeViewJumpTableInfo(int JTI,
1376                                             const MachineInstr *BranchInstr,
1377                                             const MCSymbol *BranchLabel) const {
1378   const auto AFI = MF->getInfo<AArch64FunctionInfo>();
1379   const auto Base = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1381   switch (AFI->getJumpTableEntrySize(JTI)) {
1382   case 1:
1383     EntrySize = codeview::JumpTableEntrySize::UInt8ShiftLeft;
1384     break;
1385   case 2:
1386     EntrySize = codeview::JumpTableEntrySize::UInt16ShiftLeft;
1387     break;
1388   case 4:
1389     EntrySize = codeview::JumpTableEntrySize::Int32;
1390     break;
1391   default:
1392     llvm_unreachable("Unexpected jump table entry size");
1393   }
1394   return std::make_tuple(Base, 0, BranchLabel, EntrySize);
1395 }
1396
/// Emit the function entry label, plus target extras: a .variant_pcs
/// directive for ELF vector-call / SVE-CC functions, and ARM64EC mangled
/// name aliases on Windows.
/// NOTE(review): original line 1409 (presumably the call to
/// AsmPrinter::emitFunctionEntryLabel() that actually emits the label) is
/// missing from this extraction.
1397 void AArch64AsmPrinter::emitFunctionEntryLabel() {
1398   const Triple &TT = TM.getTargetTriple();
1399   if (TT.isOSBinFormatELF() &&
1400       (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
1401        MF->getFunction().getCallingConv() ==
1402            CallingConv::AArch64_SVE_VectorCall ||
1403        MF->getInfo<AArch64FunctionInfo>()->isSVECC())) {
1404     auto *TS =
1405         static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
1406     TS->emitDirectiveVariantPCS(CurrentFnSym);
1407   }
1408
1410
1411   if (TT.isWindowsArm64EC() && !MF->getFunction().hasLocalLinkage()) {
1412     // For ARM64EC targets, a function definition's name is mangled differently
1413     // from the normal symbol, emit required aliases here.
1414     auto emitFunctionAlias = [&](MCSymbol *Src, MCSymbol *Dst) {
1415       OutStreamer->emitSymbolAttribute(Src, MCSA_WeakAntiDep);
1416       OutStreamer->emitAssignment(
1417           Src, MCSymbolRefExpr::create(Dst, MMI->getContext()));
1418     };
1419
      // Fetch a symbol named by single-operand string metadata, or null.
1420     auto getSymbolFromMetadata = [&](StringRef Name) {
1421       MCSymbol *Sym = nullptr;
1422       if (MDNode *Node = MF->getFunction().getMetadata(Name)) {
1423         StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
1424         Sym = MMI->getContext().getOrCreateSymbol(NameStr);
1425       }
1426       return Sym;
1427     };
1428
1429     SmallVector<MDNode *> UnmangledNames;
1430     MF->getFunction().getMetadata("arm64ec_unmangled_name", UnmangledNames);
1431     for (MDNode *Node : UnmangledNames) {
1432       StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
1433       MCSymbol *UnmangledSym = MMI->getContext().getOrCreateSymbol(NameStr);
1434       if (std::optional<std::string> MangledName =
1435               getArm64ECMangledFunctionName(UnmangledSym->getName())) {
1436         MCSymbol *ECMangledSym =
1437             MMI->getContext().getOrCreateSymbol(*MangledName);
1438         emitFunctionAlias(UnmangledSym, ECMangledSym);
1439       }
1440     }
1441     if (MCSymbol *ECMangledSym =
1442             getSymbolFromMetadata("arm64ec_ecmangled_name"))
1443       emitFunctionAlias(ECMangledSym, CurrentFnSym);
1444   }
1445 }
1446
/// Emit one global ctors/dtors table entry, validating that any signed
/// (ptrauth) entry uses only the special address discriminator.
/// NOTE(review): original lines 1452-1453 (the discriminator constant
/// argument and the report_fatal_error call) and line 1461 (the delegating
/// call to AsmPrinter::emitXXStructor) are missing from this extraction.
1447 void AArch64AsmPrinter::emitXXStructor(const DataLayout &DL,
1448                                        const Constant *CV) {
1449   if (const auto *CPA = dyn_cast<ConstantPtrAuth>(CV))
1450     if (CPA->hasAddressDiscriminator() &&
1451         !CPA->hasSpecialAddressDiscriminator(
1454           "unexpected address discrimination value for ctors/dtors entry, only "
1455           "'ptr inttoptr (i64 1 to ptr)' is allowed");
1456   // If we have signed pointers in xxstructors list, they'll be lowered to @AUTH
1457   // MCExpr's via AArch64AsmPrinter::lowerConstantPtrAuth. It does not look at
1458   // actual address discrimination value and only checks
1459   // hasAddressDiscriminator(), so it's OK to leave special address
1460   // discrimination value here.
1462 }
1463
/// Emit a global alias; ARM64EC "exp" aliases get special COFF handling
/// (weak assignment to an undefined "EXP+"-prefixed symbol resolved by
/// the linker), all other aliases fall through to the base class.
/// NOTE(review): original lines 1479 and 1485 (the shifted
/// SCT_COMPLEX_TYPE argument closing the emitCOFFSymbolType calls) and
/// line 1493 (the delegating AsmPrinter::emitGlobalAlias call) are
/// missing from this extraction.
1464 void AArch64AsmPrinter::emitGlobalAlias(const Module &M,
1465                                         const GlobalAlias &GA) {
1466   if (auto F = dyn_cast_or_null<Function>(GA.getAliasee())) {
1467     // Global aliases must point to a definition, but unmangled patchable
1468     // symbols are special and need to point to an undefined symbol with "EXP+"
1469     // prefix. Such undefined symbol is resolved by the linker by creating
1470     // x86 thunk that jumps back to the actual EC target.
1471     if (MDNode *Node = F->getMetadata("arm64ec_exp_name")) {
1472       StringRef ExpStr = cast<MDString>(Node->getOperand(0))->getString();
1473       MCSymbol *ExpSym = MMI->getContext().getOrCreateSymbol(ExpStr);
1474       MCSymbol *Sym = MMI->getContext().getOrCreateSymbol(GA.getName());
1475
1476       OutStreamer->beginCOFFSymbolDef(ExpSym);
1477       OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1478       OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1480       OutStreamer->endCOFFSymbolDef();
1481
1482       OutStreamer->beginCOFFSymbolDef(Sym);
1483       OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1484       OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1486       OutStreamer->endCOFFSymbolDef();
1487       OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
1488       OutStreamer->emitAssignment(
1489           Sym, MCSymbolRefExpr::create(ExpSym, MMI->getContext()));
1490       return;
1491     }
1492   }
1494 }
1495
1496 /// Small jump tables contain an unsigned byte or half, representing the offset
1497 /// from the lowest-addressed possible destination to the desired basic
1498 /// block. Since all instructions are 4-byte aligned, this is further compressed
1499 /// by counting in instructions rather than bytes (i.e. divided by 4). So, to
1500 /// materialize the correct destination we need:
1501 ///
1502 ///             adr xDest, .LBB0_0
1503 ///             ldrb wScratch, [xTable, xEntry]   (with "lsl #1" for ldrh).
1504 ///             add xDest, xDest, xScratch (with "lsl #2" for smaller entries)
/// NOTE(review): original line 1524 (creating the temp symbol assigned to
/// Label when none exists yet) is missing from this extraction.
1505 void AArch64AsmPrinter::LowerJumpTableDest(llvm::MCStreamer &OutStreamer,
1506                                            const llvm::MachineInstr &MI) {
1507   Register DestReg = MI.getOperand(0).getReg();
1508   Register ScratchReg = MI.getOperand(1).getReg();
1509   Register ScratchRegW =
1510       STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
1511   Register TableReg = MI.getOperand(2).getReg();
1512   Register EntryReg = MI.getOperand(3).getReg();
1513   int JTIdx = MI.getOperand(4).getIndex();
1514   int Size = AArch64FI->getJumpTableEntrySize(JTIdx);
1515
1516   // This has to be first because the compression pass based its reachability
1517   // calculations on the start of the JumpTableDest instruction.
1518   auto Label =
1519       MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);
1520
1521   // If we don't already have a symbol to use as the base, use the ADR
1522   // instruction itself.
1523   if (!Label) {
1525     AArch64FI->setJumpTableEntryInfo(JTIdx, Size, Label);
1526     OutStreamer.emitLabel(Label);
1527   }
1528
1529   auto LabelExpr = MCSymbolRefExpr::create(Label, MF->getContext());
1530   EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
1531                                   .addReg(DestReg)
1532                                   .addExpr(LabelExpr));
1533
1534   // Load the number of instruction-steps to offset from the label.
1535   unsigned LdrOpcode;
1536   switch (Size) {
1537   case 1: LdrOpcode = AArch64::LDRBBroX; break;
1538   case 2: LdrOpcode = AArch64::LDRHHroX; break;
1539   case 4: LdrOpcode = AArch64::LDRSWroX; break;
1540   default:
1541     llvm_unreachable("Unknown jump table size");
1542   }
1543
  // 1- and 2-byte entries load into the W sub-register; 2/4-byte entries
  // scale the index by the entry size (lsl #1 on the index operand).
1544   EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
1545                                   .addReg(Size == 4 ? ScratchReg : ScratchRegW)
1546                                   .addReg(TableReg)
1547                                   .addReg(EntryReg)
1548                                   .addImm(0)
1549                                   .addImm(Size == 1 ? 0 : 1));
1550
1551   // Add to the already materialized base label address, multiplying by 4 if
1552   // compressed.
1553   EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1554                                   .addReg(DestReg)
1555                                   .addReg(DestReg)
1556                                   .addReg(ScratchReg)
1557                                   .addImm(Size == 4 ? 0 : 2));
1558 }
1559
/// Expand the hardened jump-table dispatch pseudo: bounds-check the index
/// in x16 against the table size (clamping to entry 0 on overflow),
/// load the PC-relative entry, compute the target relative to an anchor
/// label, and branch. Uses only x16/x17 so no other state is clobbered.
/// The emission order below is load-bearing (the anchor label must sit
/// exactly on the ADR); do not reorder.
1560 void AArch64AsmPrinter::LowerHardenedBRJumpTable(const MachineInstr &MI) {
1561   const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
1562   assert(MJTI && "Can't lower jump-table dispatch without JTI");
1563
1564   const std::vector<MachineJumpTableEntry> &JTs = MJTI->getJumpTables();
1565   assert(!JTs.empty() && "Invalid JT index for jump-table dispatch");
1566
1567   // Emit:
1568   //     mov x17, #<size of table>     ; depending on table size, with MOVKs
1569   //     cmp x16, x17                  ; or #imm if table size fits in 12-bit
1570   //     csel x16, x16, xzr, ls        ; check for index overflow
1571   //
1572   //     adrp x17, Ltable@PAGE         ; materialize table address
1573   //     add x17, Ltable@PAGEOFF
1574   //     ldrsw x16, [x17, x16, lsl #2] ; load table entry
1575   //
1576   //   Lanchor:
1577   //     adr x17, Lanchor              ; compute target address
1578   //     add x16, x17, x16
1579   //     br x16                        ; branch to target
1580
1581   MachineOperand JTOp = MI.getOperand(0);
1582
1583   unsigned JTI = JTOp.getIndex();
1584   assert(!AArch64FI->getJumpTableEntryPCRelSymbol(JTI) &&
1585          "unsupported compressed jump table");
1586
1587   const uint64_t NumTableEntries = JTs[JTI].MBBs.size();
1588
1589   // cmp only supports a 12-bit immediate.  If we need more, materialize the
1590   // immediate, using x17 as a scratch register.
1591   uint64_t MaxTableEntry = NumTableEntries - 1;
1592   if (isUInt<12>(MaxTableEntry)) {
      // cmp x16, #MaxTableEntry (SUBS into xzr).
1593     EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXri)
1594                                      .addReg(AArch64::XZR)
1595                                      .addReg(AArch64::X16)
1596                                      .addImm(MaxTableEntry)
1597                                      .addImm(0));
1598   } else {
1599     emitMOVZ(AArch64::X17, static_cast<uint16_t>(MaxTableEntry), 0);
1600     // It's sad that we have to manually materialize instructions, but we can't
1601     // trivially reuse the main pseudo expansion logic.
1602     // A MOVK sequence is easy enough to generate and handles the general case.
1603     for (int Offset = 16; Offset < 64; Offset += 16) {
1604       if ((MaxTableEntry >> Offset) == 0)
1605         break;
1606       emitMOVK(AArch64::X17, static_cast<uint16_t>(MaxTableEntry >> Offset),
1607                Offset);
1608     }
1609     EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXrs)
1610                                      .addReg(AArch64::XZR)
1611                                      .addReg(AArch64::X16)
1612                                      .addReg(AArch64::X17)
1613                                      .addImm(0));
1614   }
1615
1616   // This picks entry #0 on failure.
1617   // We might want to trap instead.
1618   EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::CSELXr)
1619                                    .addReg(AArch64::X16)
1620                                    .addReg(AArch64::X16)
1621                                    .addReg(AArch64::XZR)
1622                                    .addImm(AArch64CC::LS));
1623
1624   // Prepare the @PAGE/@PAGEOFF low/high operands.
1625   MachineOperand JTMOHi(JTOp), JTMOLo(JTOp);
1626   MCOperand JTMCHi, JTMCLo;
1627
1628   JTMOHi.setTargetFlags(AArch64II::MO_PAGE);
1629   JTMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
1630
1631   MCInstLowering.lowerOperand(JTMOHi, JTMCHi);
1632   MCInstLowering.lowerOperand(JTMOLo, JTMCLo);
1633
1634   EmitToStreamer(
1635       *OutStreamer,
1636       MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(JTMCHi));
1637
1638   EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXri)
1639                                    .addReg(AArch64::X17)
1640                                    .addReg(AArch64::X17)
1641                                    .addOperand(JTMCLo)
1642                                    .addImm(0));
1643
1644   EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRSWroX)
1645                                    .addReg(AArch64::X16)
1646                                    .addReg(AArch64::X17)
1647                                    .addReg(AArch64::X16)
1648                                    .addImm(0)
1649                                    .addImm(1));
1650
  // Anchor label: table entries are encoded relative to this ADR.
1651   MCSymbol *AdrLabel = MF->getContext().createTempSymbol();
1652   const auto *AdrLabelE = MCSymbolRefExpr::create(AdrLabel, MF->getContext());
1653   AArch64FI->setJumpTableEntryInfo(JTI, 4, AdrLabel);
1654
1655   OutStreamer->emitLabel(AdrLabel);
1656   EmitToStreamer(
1657       *OutStreamer,
1658       MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addExpr(AdrLabelE));
1659
1660   EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1661                                    .addReg(AArch64::X16)
1662                                    .addReg(AArch64::X17)
1663                                    .addReg(AArch64::X16)
1664                                    .addImm(0));
1665
1666   EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
1667 }
1668
1669void AArch64AsmPrinter::LowerMOPS(llvm::MCStreamer &OutStreamer,
1670 const llvm::MachineInstr &MI) {
1671 unsigned Opcode = MI.getOpcode();
1672 assert(STI->hasMOPS());
1673 assert(STI->hasMTE() || Opcode != AArch64::MOPSMemorySetTaggingPseudo);
1674
1675 const auto Ops = [Opcode]() -> std::array<unsigned, 3> {
1676 if (Opcode == AArch64::MOPSMemoryCopyPseudo)
1677 return {AArch64::CPYFP, AArch64::CPYFM, AArch64::CPYFE};
1678 if (Opcode == AArch64::MOPSMemoryMovePseudo)
1679 return {AArch64::CPYP, AArch64::CPYM, AArch64::CPYE};
1680 if (Opcode == AArch64::MOPSMemorySetPseudo)
1681 return {AArch64::SETP, AArch64::SETM, AArch64::SETE};
1682 if (Opcode == AArch64::MOPSMemorySetTaggingPseudo)
1683 return {AArch64::SETGP, AArch64::SETGM, AArch64::MOPSSETGE};
1684 llvm_unreachable("Unhandled memory operation pseudo");
1685 }();
1686 const bool IsSet = Opcode == AArch64::MOPSMemorySetPseudo ||
1687 Opcode == AArch64::MOPSMemorySetTaggingPseudo;
1688
1689 for (auto Op : Ops) {
1690 int i = 0;
1691 auto MCIB = MCInstBuilder(Op);
1692 // Destination registers
1693 MCIB.addReg(MI.getOperand(i++).getReg());
1694 MCIB.addReg(MI.getOperand(i++).getReg());
1695 if (!IsSet)
1696 MCIB.addReg(MI.getOperand(i++).getReg());
1697 // Input registers
1698 MCIB.addReg(MI.getOperand(i++).getReg());
1699 MCIB.addReg(MI.getOperand(i++).getReg());
1700 MCIB.addReg(MI.getOperand(i++).getReg());
1701
1702 EmitToStreamer(OutStreamer, MCIB);
1703 }
1704}
1705
/// Lower a STACKMAP: record the label in the stackmap table, then emit
/// the requested NOP shadow, trimmed by any following real instructions
/// that already occupy shadow space.
/// NOTE(review): original line 1719 (the declaration of the iterator MII,
/// presumably MachineBasicBlock::const_iterator MII(MI)) is missing from
/// this extraction.
1706 void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
1707                                       const MachineInstr &MI) {
1708   unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();
1709
1710   auto &Ctx = OutStreamer.getContext();
1711   MCSymbol *MILabel = Ctx.createTempSymbol();
1712   OutStreamer.emitLabel(MILabel);
1713
1714   SM.recordStackMap(*MILabel, MI);
1715   assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1716
1717   // Scan ahead to trim the shadow.
1718   const MachineBasicBlock &MBB = *MI.getParent();
1720   ++MII;
1721   while (NumNOPBytes > 0) {
      // Stop trimming at anything that can't stand in for shadow bytes.
1722     if (MII == MBB.end() || MII->isCall() ||
1723         MII->getOpcode() == AArch64::DBG_VALUE ||
1724         MII->getOpcode() == TargetOpcode::PATCHPOINT ||
1725         MII->getOpcode() == TargetOpcode::STACKMAP)
1726       break;
1727     ++MII;
1728     NumNOPBytes -= 4;
1729   }
1730
1731   // Emit nops.
1732   for (unsigned i = 0; i < NumNOPBytes; i += 4)
1733     EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1734 }
1735
1736// Lower a patchpoint of the form:
1737// [<def>], <id>, <numBytes>, <target>, <numArgs>
1738void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1739 const MachineInstr &MI) {
1740 auto &Ctx = OutStreamer.getContext();
1741 MCSymbol *MILabel = Ctx.createTempSymbol();
1742 OutStreamer.emitLabel(MILabel);
1743 SM.recordPatchPoint(*MILabel, MI);
1744
1745 PatchPointOpers Opers(&MI);
1746
1747 int64_t CallTarget = Opers.getCallTarget().getImm();
1748 unsigned EncodedBytes = 0;
1749 if (CallTarget) {
1750 assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
1751 "High 16 bits of call target should be zero.");
1752 Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
1753 EncodedBytes = 16;
1754 // Materialize the jump address:
1755 emitMOVZ(ScratchReg, (CallTarget >> 32) & 0xFFFF, 32);
1756 emitMOVK(ScratchReg, (CallTarget >> 16) & 0xFFFF, 16);
1757 emitMOVK(ScratchReg, CallTarget & 0xFFFF, 0);
1758 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
1759 }
1760 // Emit padding.
1761 unsigned NumBytes = Opers.getNumPatchBytes();
1762 assert(NumBytes >= EncodedBytes &&
1763 "Patchpoint can't request size less than the length of a call.");
1764 assert((NumBytes - EncodedBytes) % 4 == 0 &&
1765 "Invalid number of NOP bytes requested!");
1766 for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
1767 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1768}
1769
/// Lower a STATEPOINT: either emit the requested NOP patch region, or
/// emit the actual call (BL for direct/immediate targets, BLR for
/// register targets), then record the statepoint label.
/// NOTE(review): the `case MachineOperand::MO_...:` labels of the switch
/// (original lines 1783-1784, 1788, 1792) are missing from this
/// extraction; the bodies correspond to global/symbol, immediate, and
/// register call targets respectively.
1770 void AArch64AsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1771                                         const MachineInstr &MI) {
1772   StatepointOpers SOpers(&MI);
1773   if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
1774     assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1775     for (unsigned i = 0; i < PatchBytes; i += 4)
1776       EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1777   } else {
1778     // Lower call target and choose correct opcode
1779     const MachineOperand &CallTarget = SOpers.getCallTarget();
1780     MCOperand CallTargetMCOp;
1781     unsigned CallOpcode;
1782     switch (CallTarget.getType()) {
1785       MCInstLowering.lowerOperand(CallTarget, CallTargetMCOp);
1786       CallOpcode = AArch64::BL;
1787       break;
1789       CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
1790       CallOpcode = AArch64::BL;
1791       break;
1793       CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
1794       CallOpcode = AArch64::BLR;
1795       break;
1796     default:
1797       llvm_unreachable("Unsupported operand type in statepoint call target");
1798       break;
1799     }
1800
1801     EmitToStreamer(OutStreamer,
1802                    MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
1803   }
1804
  // The statepoint label is recorded after the call/NOP region.
1805   auto &Ctx = OutStreamer.getContext();
1806   MCSymbol *MILabel = Ctx.createTempSymbol();
1807   OutStreamer.emitLabel(MILabel);
1808   SM.recordStatepoint(*MILabel, MI);
1809 }
1810
/// Lower a FAULTING_OP pseudo: record the (faulting label, handler label,
/// fault kind) triple in the fault map, then emit the wrapped real
/// instruction with its operands lowered.
/// NOTE(review): original line 1816 (the declaration of FK, presumably
/// `FaultMaps::FaultKind FK =`) is missing from this extraction.
1811 void AArch64AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI) {
1812   // FAULTING_LOAD_OP <def>, <faltinf type>, <MBB handler>,
1813   //                  <opcode>, <operands>
1814
1815   Register DefRegister = FaultingMI.getOperand(0).getReg();
1817       static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
1818   MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
1819   unsigned Opcode = FaultingMI.getOperand(3).getImm();
1820   unsigned OperandsBeginIdx = 4;
1821
1822   auto &Ctx = OutStreamer->getContext();
1823   MCSymbol *FaultingLabel = Ctx.createTempSymbol();
1824   OutStreamer->emitLabel(FaultingLabel);
1825
1826   assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
1827   FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);
1828
1829   MCInst MI;
1830   MI.setOpcode(Opcode);
1831
  // A register of 0 means the wrapped instruction has no definition.
1832   if (DefRegister != (Register)0)
1833     MI.addOperand(MCOperand::createReg(DefRegister));
1834
1835   for (const MachineOperand &MO :
1836        llvm::drop_begin(FaultingMI.operands(), OperandsBeginIdx)) {
1837     MCOperand Dest;
1838     lowerOperand(MO, Dest);
1839     MI.addOperand(Dest);
1840   }
1841
1842   OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
1843   EmitToStreamer(MI);
1844 }
1845
1846void AArch64AsmPrinter::emitMovXReg(Register Dest, Register Src) {
1847 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ORRXrs)
1848 .addReg(Dest)
1849 .addReg(AArch64::XZR)
1850 .addReg(Src)
1851 .addImm(0));
1852}
1853
1854void AArch64AsmPrinter::emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift) {
1855 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1856 EmitToStreamer(*OutStreamer,
1857 MCInstBuilder(Is64Bit ? AArch64::MOVZXi : AArch64::MOVZWi)
1858 .addReg(Dest)
1859 .addImm(Imm)
1860 .addImm(Shift));
1861}
1862
1863void AArch64AsmPrinter::emitMOVK(Register Dest, uint64_t Imm, unsigned Shift) {
1864 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1865 EmitToStreamer(*OutStreamer,
1866 MCInstBuilder(Is64Bit ? AArch64::MOVKXi : AArch64::MOVKWi)
1867 .addReg(Dest)
1868 .addReg(Dest)
1869 .addImm(Imm)
1870 .addImm(Shift));
1871}
1872
1873void AArch64AsmPrinter::emitAUT(AArch64PACKey::ID Key, Register Pointer,
1874 Register Disc) {
1875 bool IsZeroDisc = Disc == AArch64::XZR;
1876 unsigned Opcode = getAUTOpcodeForKey(Key, IsZeroDisc);
1877
1878 // autiza x16 ; if IsZeroDisc
1879 // autia x16, x17 ; if !IsZeroDisc
1880 MCInst AUTInst;
1881 AUTInst.setOpcode(Opcode);
1882 AUTInst.addOperand(MCOperand::createReg(Pointer));
1883 AUTInst.addOperand(MCOperand::createReg(Pointer));
1884 if (!IsZeroDisc)
1885 AUTInst.addOperand(MCOperand::createReg(Disc));
1886
1887 EmitToStreamer(AUTInst);
1888}
1889
1890void AArch64AsmPrinter::emitPAC(AArch64PACKey::ID Key, Register Pointer,
1891 Register Disc) {
1892 bool IsZeroDisc = Disc == AArch64::XZR;
1893 unsigned Opcode = getPACOpcodeForKey(Key, IsZeroDisc);
1894
1895 // paciza x16 ; if IsZeroDisc
1896 // pacia x16, x17 ; if !IsZeroDisc
1897 MCInst PACInst;
1898 PACInst.setOpcode(Opcode);
1899 PACInst.addOperand(MCOperand::createReg(Pointer));
1900 PACInst.addOperand(MCOperand::createReg(Pointer));
1901 if (!IsZeroDisc)
1902 PACInst.addOperand(MCOperand::createReg(Disc));
1903
1904 EmitToStreamer(PACInst);
1905}
1906
1907void AArch64AsmPrinter::emitBLRA(bool IsCall, AArch64PACKey::ID Key,
1908 Register Target, Register Disc) {
1909 bool IsZeroDisc = Disc == AArch64::XZR;
1910 unsigned Opcode = getBranchOpcodeForKey(IsCall, Key, IsZeroDisc);
1911
1912 // blraaz x16 ; if IsZeroDisc
1913 // blraa x16, x17 ; if !IsZeroDisc
1914 MCInst Inst;
1915 Inst.setOpcode(Opcode);
1916 Inst.addOperand(MCOperand::createReg(Target));
1917 if (!IsZeroDisc)
1918 Inst.addOperand(MCOperand::createReg(Disc));
1919 EmitToStreamer(Inst);
1920}
1921
1922void AArch64AsmPrinter::emitFMov0(const MachineInstr &MI) {
1923 Register DestReg = MI.getOperand(0).getReg();
1924 if (!STI->hasZeroCycleZeroingFPWorkaround() && STI->isNeonAvailable()) {
1925 if (STI->hasZeroCycleZeroingFPR64()) {
1926 // Convert H/S register to corresponding D register
1927 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
1928 if (AArch64::FPR16RegClass.contains(DestReg))
1929 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
1930 &AArch64::FPR64RegClass);
1931 else if (AArch64::FPR32RegClass.contains(DestReg))
1932 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
1933 &AArch64::FPR64RegClass);
1934 else
1935 assert(AArch64::FPR64RegClass.contains(DestReg));
1936
1937 MCInst MOVI;
1938 MOVI.setOpcode(AArch64::MOVID);
1939 MOVI.addOperand(MCOperand::createReg(DestReg));
1941 EmitToStreamer(*OutStreamer, MOVI);
1942 ++NumZCZeroingInstrsFPR;
1943 } else if (STI->hasZeroCycleZeroingFPR128()) {
1944 // Convert H/S/D register to corresponding Q register
1945 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
1946 if (AArch64::FPR16RegClass.contains(DestReg)) {
1947 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
1948 &AArch64::FPR128RegClass);
1949 } else if (AArch64::FPR32RegClass.contains(DestReg)) {
1950 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
1951 &AArch64::FPR128RegClass);
1952 } else {
1953 assert(AArch64::FPR64RegClass.contains(DestReg));
1954 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::dsub,
1955 &AArch64::FPR128RegClass);
1956 }
1957
1958 MCInst MOVI;
1959 MOVI.setOpcode(AArch64::MOVIv2d_ns);
1960 MOVI.addOperand(MCOperand::createReg(DestReg));
1962 EmitToStreamer(*OutStreamer, MOVI);
1963 ++NumZCZeroingInstrsFPR;
1964 } else {
1965 emitFMov0AsFMov(MI, DestReg);
1966 }
1967 } else {
1968 emitFMov0AsFMov(MI, DestReg);
1969 }
1970}
1971
1972void AArch64AsmPrinter::emitFMov0AsFMov(const MachineInstr &MI,
1973 Register DestReg) {
1974 MCInst FMov;
1975 switch (MI.getOpcode()) {
1976 default:
1977 llvm_unreachable("Unexpected opcode");
1978 case AArch64::FMOVH0:
1979 FMov.setOpcode(STI->hasFullFP16() ? AArch64::FMOVWHr : AArch64::FMOVWSr);
1980 if (!STI->hasFullFP16())
1981 DestReg = (AArch64::S0 + (DestReg - AArch64::H0));
1982 FMov.addOperand(MCOperand::createReg(DestReg));
1983 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1984 break;
1985 case AArch64::FMOVS0:
1986 FMov.setOpcode(AArch64::FMOVWSr);
1987 FMov.addOperand(MCOperand::createReg(DestReg));
1988 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1989 break;
1990 case AArch64::FMOVD0:
1991 FMov.setOpcode(AArch64::FMOVXDr);
1992 FMov.addOperand(MCOperand::createReg(DestReg));
1993 FMov.addOperand(MCOperand::createReg(AArch64::XZR));
1994 break;
1995 }
1996 EmitToStreamer(*OutStreamer, FMov);
1997}
1998
/// Materialize the discriminator for a pointer-authentication operation and
/// return the register holding it.
///
/// \param Disc constant (immediate) discriminator; must fit in 16 bits.
/// \param AddrDisc address-discriminator register (NoRegister or XZR if none).
/// \param ScratchReg register the blended discriminator may be built in;
///        must satisfy isPtrauthRegSafe.
/// \param MayClobberAddrDisc if true, AddrDisc itself may be reused as the
///        result register, saving a mov.
/// \returns AddrDisc, ScratchReg, or XZR depending on which inputs exist.
Register AArch64AsmPrinter::emitPtrauthDiscriminator(uint64_t Disc,
                                                     Register AddrDisc,
                                                     Register ScratchReg,
                                                     bool MayClobberAddrDisc) {
  assert(isPtrauthRegSafe(ScratchReg) &&
         "Safe scratch register must be provided by the caller");
  assert(isUInt<16>(Disc) && "Constant discriminator is too wide");
  // So far we've used NoRegister in pseudos. Now we need real encodings.
  if (AddrDisc == AArch64::NoRegister)
    AddrDisc = AArch64::XZR;

  // If there is no constant discriminator, there's no blend involved:
  // just use the address discriminator register as-is (XZR or not).
  if (!Disc)
    return AddrDisc;

  // If there's only a constant discriminator, MOV it into the scratch register.
  if (AddrDisc == AArch64::XZR) {
    emitMOVZ(ScratchReg, Disc, 0);
    return ScratchReg;
  }

  // If there are both, emit a blend into the scratch register.

  // Check if we can save one MOV instruction.
  if (MayClobberAddrDisc && isPtrauthRegSafe(AddrDisc)) {
    // Blend directly in AddrDisc; no copy needed.
    ScratchReg = AddrDisc;
  } else {
    emitMovXReg(ScratchReg, AddrDisc);
    // NOTE(review): this assert sits after the mov; it only fires in asserts
    // builds, documenting that ScratchReg must differ from AddrDisc here.
    assert(ScratchReg != AddrDisc &&
           "Forbidden to clobber AddrDisc, but have to");
  }

  // Blend: constant discriminator goes in the top 16 bits.
  emitMOVK(ScratchReg, Disc, 48);
  return ScratchReg;
}
2036
2037/// Emit a code sequence to check an authenticated pointer value.
2038///
2039/// This function emits a sequence of instructions that checks if TestedReg was
2040/// authenticated successfully. On success, execution continues at the next
2041/// instruction after the sequence.
2042///
2043/// The action performed on failure depends on the OnFailure argument:
2044/// * if OnFailure is not nullptr, control is transferred to that label after
2045/// clearing the PAC field
2046/// * otherwise, BRK instruction is emitted to generate an error
2047void AArch64AsmPrinter::emitPtrauthCheckAuthenticatedValue(
2048 Register TestedReg, Register ScratchReg, AArch64PACKey::ID Key,
2049 AArch64PAuth::AuthCheckMethod Method, const MCSymbol *OnFailure) {
2050 // Insert a sequence to check if authentication of TestedReg succeeded,
2051 // such as:
2052 //
2053 // - checked and clearing:
2054 // ; x16 is TestedReg, x17 is ScratchReg
2055 // mov x17, x16
2056 // xpaci x17
2057 // cmp x16, x17
2058 // b.eq Lsuccess
2059 // mov x16, x17
2060 // b Lend
2061 // Lsuccess:
2062 // ; skipped if authentication failed
2063 // Lend:
2064 // ...
2065 //
2066 // - checked and trapping:
2067 // mov x17, x16
2068 // xpaci x17
2069 // cmp x16, x17
2070 // b.eq Lsuccess
2071 // brk #<0xc470 + aut key>
2072 // Lsuccess:
2073 // ...
2074 //
2075 // See the documentation on AuthCheckMethod enumeration constants for
2076 // the specific code sequences that can be used to perform the check.
2078
2079 if (Method == AuthCheckMethod::None)
2080 return;
2081 if (Method == AuthCheckMethod::DummyLoad) {
2082 EmitToStreamer(MCInstBuilder(AArch64::LDRWui)
2083 .addReg(getWRegFromXReg(ScratchReg))
2084 .addReg(TestedReg)
2085 .addImm(0));
2086 assert(!OnFailure && "DummyLoad always traps on error");
2087 return;
2088 }
2089
2090 MCSymbol *SuccessSym = createTempSymbol("auth_success_");
2091 if (Method == AuthCheckMethod::XPAC || Method == AuthCheckMethod::XPACHint) {
2092 // mov Xscratch, Xtested
2093 emitMovXReg(ScratchReg, TestedReg);
2094
2095 if (Method == AuthCheckMethod::XPAC) {
2096 // xpac(i|d) Xscratch
2097 unsigned XPACOpc = getXPACOpcodeForKey(Key);
2098 EmitToStreamer(
2099 MCInstBuilder(XPACOpc).addReg(ScratchReg).addReg(ScratchReg));
2100 } else {
2101 // xpaclri
2102
2103 // Note that this method applies XPAC to TestedReg instead of ScratchReg.
2104 assert(TestedReg == AArch64::LR &&
2105 "XPACHint mode is only compatible with checking the LR register");
2107 "XPACHint mode is only compatible with I-keys");
2108 EmitToStreamer(MCInstBuilder(AArch64::XPACLRI));
2109 }
2110
2111 // cmp Xtested, Xscratch
2112 EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
2113 .addReg(AArch64::XZR)
2114 .addReg(TestedReg)
2115 .addReg(ScratchReg)
2116 .addImm(0));
2117
2118 // b.eq Lsuccess
2119 EmitToStreamer(
2120 MCInstBuilder(AArch64::Bcc)
2121 .addImm(AArch64CC::EQ)
2122 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
2123 } else if (Method == AuthCheckMethod::HighBitsNoTBI) {
2124 // eor Xscratch, Xtested, Xtested, lsl #1
2125 EmitToStreamer(MCInstBuilder(AArch64::EORXrs)
2126 .addReg(ScratchReg)
2127 .addReg(TestedReg)
2128 .addReg(TestedReg)
2129 .addImm(1));
2130 // tbz Xscratch, #62, Lsuccess
2131 EmitToStreamer(
2132 MCInstBuilder(AArch64::TBZX)
2133 .addReg(ScratchReg)
2134 .addImm(62)
2135 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
2136 } else {
2137 llvm_unreachable("Unsupported check method");
2138 }
2139
2140 if (!OnFailure) {
2141 // Trapping sequences do a 'brk'.
2142 // brk #<0xc470 + aut key>
2143 EmitToStreamer(MCInstBuilder(AArch64::BRK).addImm(0xc470 | Key));
2144 } else {
2145 // Non-trapping checked sequences return the stripped result in TestedReg,
2146 // skipping over success-only code (such as re-signing the pointer) by
2147 // jumping to OnFailure label.
2148 // Note that this can introduce an authentication oracle (such as based on
2149 // the high bits of the re-signed value).
2150
2151 // FIXME: The XPAC method can be optimized by applying XPAC to TestedReg
2152 // instead of ScratchReg, thus eliminating one `mov` instruction.
2153 // Both XPAC and XPACHint can be further optimized by not using a
2154 // conditional branch jumping over an unconditional one.
2155
2156 switch (Method) {
2157 case AuthCheckMethod::XPACHint:
2158 // LR is already XPAC-ed at this point.
2159 break;
2160 case AuthCheckMethod::XPAC:
2161 // mov Xtested, Xscratch
2162 emitMovXReg(TestedReg, ScratchReg);
2163 break;
2164 default:
2165 // If Xtested was not XPAC-ed so far, emit XPAC here.
2166 // xpac(i|d) Xtested
2167 unsigned XPACOpc = getXPACOpcodeForKey(Key);
2168 EmitToStreamer(
2169 MCInstBuilder(XPACOpc).addReg(TestedReg).addReg(TestedReg));
2170 }
2171
2172 // b Lend
2173 const auto *OnFailureExpr = MCSymbolRefExpr::create(OnFailure, OutContext);
2174 EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(OnFailureExpr));
2175 }
2176
2177 // If the auth check succeeds, we can continue.
2178 // Lsuccess:
2179 OutStreamer->emitLabel(SuccessSym);
2180}
2181
2182// With Pointer Authentication, it may be needed to explicitly check the
2183// authenticated value in LR before performing a tail call.
2184// Otherwise, the callee may re-sign the invalid return address,
2185// introducing a signing oracle.
2186void AArch64AsmPrinter::emitPtrauthTailCallHardening(const MachineInstr *TC) {
2187 if (!AArch64FI->shouldSignReturnAddress(*MF))
2188 return;
2189
2190 auto LRCheckMethod = STI->getAuthenticatedLRCheckMethod(*MF);
2191 if (LRCheckMethod == AArch64PAuth::AuthCheckMethod::None)
2192 return;
2193
2194 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
2195 Register ScratchReg =
2196 TC->readsRegister(AArch64::X16, TRI) ? AArch64::X17 : AArch64::X16;
2197 assert(!TC->readsRegister(ScratchReg, TRI) &&
2198 "Neither x16 nor x17 is available as a scratch register");
2201 emitPtrauthCheckAuthenticatedValue(AArch64::LR, ScratchReg, Key,
2202 LRCheckMethod);
2203}
2204
2205bool AArch64AsmPrinter::emitDeactivationSymbolRelocation(Value *DS) {
2206 if (!DS)
2207 return false;
2208
2209 if (isa<GlobalAlias>(DS)) {
2210 // Just emit the nop directly.
2211 EmitToStreamer(MCInstBuilder(AArch64::NOP));
2212 return true;
2213 }
2214 MCSymbol *Dot = OutContext.createTempSymbol();
2215 OutStreamer->emitLabel(Dot);
2216 const MCExpr *DeactDotExpr = MCSymbolRefExpr::create(Dot, OutContext);
2217
2218 const MCExpr *DSExpr = MCSymbolRefExpr::create(
2219 OutContext.getOrCreateSymbol(DS->getName()), OutContext);
2220 OutStreamer->emitRelocDirective(*DeactDotExpr, "R_AARCH64_PATCHINST", DSExpr,
2221 SMLoc());
2222 return false;
2223}
2224
2225void AArch64AsmPrinter::emitPtrauthAuthResign(
2226 Register AUTVal, AArch64PACKey::ID AUTKey, uint64_t AUTDisc,
2227 const MachineOperand *AUTAddrDisc, Register Scratch,
2228 std::optional<AArch64PACKey::ID> PACKey, uint64_t PACDisc,
2229 Register PACAddrDisc, Value *DS) {
2230 const bool IsAUTPAC = PACKey.has_value();
2231
2232 // We expand AUT/AUTPAC into a sequence of the form
2233 //
2234 // ; authenticate x16
2235 // ; check pointer in x16
2236 // Lsuccess:
2237 // ; sign x16 (if AUTPAC)
2238 // Lend: ; if not trapping on failure
2239 //
2240 // with the checking sequence chosen depending on whether/how we should check
2241 // the pointer and whether we should trap on failure.
2242
2243 // By default, auth/resign sequences check for auth failures.
2244 bool ShouldCheck = true;
2245 // In the checked sequence, we only trap if explicitly requested.
2246 bool ShouldTrap = MF->getFunction().hasFnAttribute("ptrauth-auth-traps");
2247
2248 // On an FPAC CPU, you get traps whether you want them or not: there's
2249 // no point in emitting checks or traps.
2250 if (STI->hasFPAC())
2251 ShouldCheck = ShouldTrap = false;
2252
2253 // However, command-line flags can override this, for experimentation.
2254 switch (PtrauthAuthChecks) {
2256 break;
2258 ShouldCheck = ShouldTrap = false;
2259 break;
2261 ShouldCheck = true;
2262 ShouldTrap = false;
2263 break;
2265 ShouldCheck = ShouldTrap = true;
2266 break;
2267 }
2268
2269 // Compute aut discriminator
2270 Register AUTDiscReg = emitPtrauthDiscriminator(
2271 AUTDisc, AUTAddrDisc->getReg(), Scratch, AUTAddrDisc->isKill());
2272
2273 if (!emitDeactivationSymbolRelocation(DS))
2274 emitAUT(AUTKey, AUTVal, AUTDiscReg);
2275
2276 // Unchecked or checked-but-non-trapping AUT is just an "AUT": we're done.
2277 if (!IsAUTPAC && (!ShouldCheck || !ShouldTrap))
2278 return;
2279
2280 MCSymbol *EndSym = nullptr;
2281
2282 if (ShouldCheck) {
2283 if (IsAUTPAC && !ShouldTrap)
2284 EndSym = createTempSymbol("resign_end_");
2285
2286 emitPtrauthCheckAuthenticatedValue(
2287 AUTVal, Scratch, AUTKey, AArch64PAuth::AuthCheckMethod::XPAC, EndSym);
2288 }
2289
2290 // We already emitted unchecked and checked-but-non-trapping AUTs.
2291 // That left us with trapping AUTs, and AUTPACs.
2292 // Trapping AUTs don't need PAC: we're done.
2293 if (!IsAUTPAC)
2294 return;
2295
2296 // Compute pac discriminator
2297 Register PACDiscReg = emitPtrauthDiscriminator(PACDisc, PACAddrDisc, Scratch);
2298 emitPAC(*PACKey, AUTVal, PACDiscReg);
2299
2300 // Lend:
2301 if (EndSym)
2302 OutStreamer->emitLabel(EndSym);
2303}
2304
2305void AArch64AsmPrinter::emitPtrauthSign(const MachineInstr *MI) {
2306 Register Val = MI->getOperand(1).getReg();
2307 auto Key = (AArch64PACKey::ID)MI->getOperand(2).getImm();
2308 uint64_t Disc = MI->getOperand(3).getImm();
2309 Register AddrDisc = MI->getOperand(4).getReg();
2310 bool AddrDiscKilled = MI->getOperand(4).isKill();
2311
2312 // As long as at least one of Val and AddrDisc is in GPR64noip, a scratch
2313 // register is available.
2314 Register ScratchReg = Val == AArch64::X16 ? AArch64::X17 : AArch64::X16;
2315 assert(ScratchReg != AddrDisc &&
2316 "Neither X16 nor X17 is available as a scratch register");
2317
2318 // Compute pac discriminator
2319 Register DiscReg = emitPtrauthDiscriminator(
2320 Disc, AddrDisc, ScratchReg, /*MayClobberAddrDisc=*/AddrDiscKilled);
2321
2322 if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol()))
2323 return;
2324
2325 emitPAC(Key, Val, DiscReg);
2326}
2327
// Expand a BRA/BLRA pseudo into an authenticated branch or call: compute the
// discriminator (possibly blending into x17), then emit the bra*/blra*
// instruction via emitBLRA.
void AArch64AsmPrinter::emitPtrauthBranch(const MachineInstr *MI) {
  bool IsCall = MI->getOpcode() == AArch64::BLRA;
  unsigned BrTarget = MI->getOperand(0).getReg();

  auto Key = (AArch64PACKey::ID)MI->getOperand(1).getImm();
  uint64_t Disc = MI->getOperand(2).getImm();

  unsigned AddrDisc = MI->getOperand(3).getReg();

  // Make sure AddrDisc is solely used to compute the discriminator.
  // While hardly meaningful, it is still possible to describe an authentication
  // of a pointer against its own value (instead of storage address) with
  // intrinsics, so use report_fatal_error instead of assert.
  if (BrTarget == AddrDisc)
    report_fatal_error("Branch target is signed with its own value");

  // If we are printing BLRA pseudo, try to save one MOV by making use of the
  // fact that x16 and x17 are described as clobbered by the MI instruction and
  // AddrDisc is not used as any other input.
  //
  // Back in the day, emitPtrauthDiscriminator was restricted to only returning
  // either x16 or x17, meaning the returned register is always among the
  // implicit-def'ed registers of BLRA pseudo. Now this property can be violated
  // if isX16X17Safer predicate is false, thus manually check if AddrDisc is
  // among x16 and x17 to prevent clobbering unexpected registers.
  //
  // Unlike BLRA, BRA pseudo is used to perform computed goto, and thus not
  // declared as clobbering x16/x17.
  //
  // FIXME: Make use of `killed` flags and register masks instead.
  bool AddrDiscIsImplicitDef =
      IsCall && (AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17);
  // x17 serves as the scratch register for any blend; the discriminator may
  // end up in x17, in AddrDisc itself, or be XZR.
  Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17,
                                              AddrDiscIsImplicitDef);
  emitBLRA(IsCall, Key, BrTarget, DiscReg);
}
2364
// Add a constant Addend to register Reg in place. Small offsets use one or
// two add/sub immediates; larger ones materialize the addend in Tmp with a
// movz/movn + movk sequence and add it. Emits nothing when Addend == 0.
void AArch64AsmPrinter::emitAddImm(MCRegister Reg, int64_t Addend,
                                   MCRegister Tmp) {
  if (Addend != 0) {
    const uint64_t AbsOffset = (Addend > 0 ? Addend : -((uint64_t)Addend));
    const bool IsNeg = Addend < 0;
    if (isUInt<24>(AbsOffset)) {
      // Up to two ADD/SUB immediates: low 12 bits, then bits 12-23 with an
      // LSL #12 shifter. Chunks that are zero are skipped.
      for (int BitPos = 0; BitPos != 24 && (AbsOffset >> BitPos);
           BitPos += 12) {
        EmitToStreamer(
            MCInstBuilder(IsNeg ? AArch64::SUBXri : AArch64::ADDXri)
                .addReg(Reg)
                .addReg(Reg)
                .addImm((AbsOffset >> BitPos) & 0xfff)
                .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, BitPos)));
      }
    } else {
      // Materialize the full 64-bit addend into Tmp. MOVN seeds the negative
      // case with all-ones outside the inverted low chunk.
      const uint64_t UAddend = Addend;
      EmitToStreamer(MCInstBuilder(IsNeg ? AArch64::MOVNXi : AArch64::MOVZXi)
                         .addReg(Tmp)
                         .addImm((IsNeg ? ~UAddend : UAddend) & 0xffff)
                         .addImm(/*shift=*/0));
      // A MOVK for a chunk is needed unless every remaining higher chunk
      // already holds the value MOVZ/MOVN established (0 resp. 0xffff).
      auto NeedMovk = [IsNeg, UAddend](int BitPos) -> bool {
        assert(BitPos == 16 || BitPos == 32 || BitPos == 48);
        uint64_t Shifted = UAddend >> BitPos;
        if (!IsNeg)
          return Shifted != 0;
        for (int I = 0; I != 64 - BitPos; I += 16)
          if (((Shifted >> I) & 0xffff) != 0xffff)
            return true;
        return false;
      };
      for (int BitPos = 16; BitPos != 64 && NeedMovk(BitPos); BitPos += 16)
        emitMOVK(Tmp, (UAddend >> BitPos) & 0xffff, BitPos);

      // add Reg, Reg, Tmp
      EmitToStreamer(MCInstBuilder(AArch64::ADDXrs)
                         .addReg(Reg)
                         .addReg(Reg)
                         .addReg(Tmp)
                         .addImm(/*shift=*/0));
    }
  }
}
2407
2408void AArch64AsmPrinter::emitAddress(MCRegister Reg, const MCExpr *Expr,
2409 MCRegister Tmp, bool DSOLocal,
2410 const MCSubtargetInfo &STI) {
2411 MCValue Val;
2412 if (!Expr->evaluateAsRelocatable(Val, nullptr))
2413 report_fatal_error("emitAddress could not evaluate");
2414 if (DSOLocal) {
2415 EmitToStreamer(
2416 MCInstBuilder(AArch64::ADRP)
2417 .addReg(Reg)
2419 OutStreamer->getContext())));
2420 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2421 .addReg(Reg)
2422 .addReg(Reg)
2423 .addExpr(MCSpecifierExpr::create(
2424 Expr, AArch64::S_LO12, OutStreamer->getContext()))
2425 .addImm(0));
2426 } else {
2427 auto *SymRef =
2428 MCSymbolRefExpr::create(Val.getAddSym(), OutStreamer->getContext());
2429 EmitToStreamer(
2430 MCInstBuilder(AArch64::ADRP)
2431 .addReg(Reg)
2433 OutStreamer->getContext())));
2434 EmitToStreamer(
2435 MCInstBuilder(AArch64::LDRXui)
2436 .addReg(Reg)
2437 .addReg(Reg)
2439 OutStreamer->getContext())));
2440 emitAddImm(Reg, Val.getConstant(), Tmp);
2441 }
2442}
2443
2445 // IFUNCs are ELF-only.
2446 if (!TT.isOSBinFormatELF())
2447 return false;
2448
2449 // IFUNCs are supported on glibc, bionic, and some but not all of the BSDs.
2450 return TT.isOSGlibc() || TT.isAndroid() || TT.isOSFreeBSD() ||
2451 TT.isOSDragonFly() || TT.isOSNetBSD();
2452}
2453
2454// Emit an ifunc resolver that returns a signed pointer to the specified target,
2455// and return a FUNCINIT reference to the resolver. In the linked binary, this
2456// function becomes the target of an IRELATIVE relocation. This resolver is used
2457// to relocate signed pointers in global variable initializers in special cases
2458// where the standard R_AARCH64_AUTH_ABS64 relocation would not work.
2459//
2460// Example (signed null pointer, not address discriminated):
2461//
2462// .8byte .Lpauth_ifunc0
2463// .pushsection .text.startup,"ax",@progbits
2464// .Lpauth_ifunc0:
2465// mov x0, #0
2466// mov x1, #12345
2467// b __emupac_pacda
2468//
2469// Example (signed null pointer, address discriminated):
2470//
2471// .Ltmp:
2472// .8byte .Lpauth_ifunc0
2473// .pushsection .text.startup,"ax",@progbits
2474// .Lpauth_ifunc0:
2475// mov x0, #0
2476// adrp x1, .Ltmp
2477// add x1, x1, :lo12:.Ltmp
2478// b __emupac_pacda
2479// .popsection
2480//
2481// Example (signed pointer to symbol, not address discriminated):
2482//
2483// .Ltmp:
2484// .8byte .Lpauth_ifunc0
2485// .pushsection .text.startup,"ax",@progbits
2486// .Lpauth_ifunc0:
2487// adrp x0, symbol
2488// add x0, x0, :lo12:symbol
2489// mov x1, #12345
2490// b __emupac_pacda
2491// .popsection
2492//
2493// Example (signed null pointer, not address discriminated, with deactivation
2494// symbol ds):
2495//
2496// .8byte .Lpauth_ifunc0
2497// .pushsection .text.startup,"ax",@progbits
2498// .Lpauth_ifunc0:
2499// mov x0, #0
2500// mov x1, #12345
2501// .reloc ., R_AARCH64_PATCHINST, ds
2502// b __emupac_pacda
2503// ret
2504// .popsection
2505const MCExpr *AArch64AsmPrinter::emitPAuthRelocationAsIRelative(
2506 const MCExpr *Target, uint64_t Disc, AArch64PACKey::ID KeyID,
2507 bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr) {
2508 const Triple &TT = TM.getTargetTriple();
2509
2510 // We only emit an IRELATIVE relocation if the target supports IRELATIVE.
2512 return nullptr;
2513
2514 // For now, only the DA key is supported.
2515 if (KeyID != AArch64PACKey::DA)
2516 return nullptr;
2517
2518 // AArch64Subtarget is huge, so heap allocate it so we don't run out of stack
2519 // space.
2520 auto STI = std::make_unique<AArch64Subtarget>(
2521 TT, TM.getTargetCPU(), TM.getTargetCPU(), TM.getTargetFeatureString(), TM,
2522 true);
2523 this->STI = STI.get();
2524
2525 MCSymbol *Place = OutStreamer->getContext().createTempSymbol();
2526 OutStreamer->emitLabel(Place);
2527 OutStreamer->pushSection();
2528
2529 const MCSymbolELF *Group =
2530 static_cast<MCSectionELF *>(OutStreamer->getCurrentSectionOnly())
2531 ->getGroup();
2533 if (Group)
2535 OutStreamer->switchSection(OutStreamer->getContext().getELFSection(
2536 ".text.startup", ELF::SHT_PROGBITS, Flags, 0, Group, true,
2537 Group ? MCSection::NonUniqueID : PAuthIFuncNextUniqueID++, nullptr));
2538
2539 MCSymbol *IRelativeSym =
2540 OutStreamer->getContext().createLinkerPrivateSymbol("pauth_ifunc");
2541 OutStreamer->emitLabel(IRelativeSym);
2542 if (isa<MCConstantExpr>(Target)) {
2543 OutStreamer->emitInstruction(MCInstBuilder(AArch64::MOVZXi)
2544 .addReg(AArch64::X0)
2545 .addExpr(Target)
2546 .addImm(0),
2547 *STI);
2548 } else {
2549 emitAddress(AArch64::X0, Target, AArch64::X16, IsDSOLocal, *STI);
2550 }
2551 if (HasAddressDiversity) {
2552 auto *PlacePlusDisc = MCBinaryExpr::createAdd(
2553 MCSymbolRefExpr::create(Place, OutStreamer->getContext()),
2554 MCConstantExpr::create(Disc, OutStreamer->getContext()),
2555 OutStreamer->getContext());
2556 emitAddress(AArch64::X1, PlacePlusDisc, AArch64::X16, /*IsDSOLocal=*/true,
2557 *STI);
2558 } else {
2559 if (!isUInt<16>(Disc)) {
2560 OutContext.reportError(SMLoc(), "AArch64 PAC Discriminator '" +
2561 Twine(Disc) +
2562 "' out of range [0, 0xFFFF]");
2563 }
2564 emitMOVZ(AArch64::X1, Disc, 0);
2565 }
2566
2567 if (DSExpr) {
2568 MCSymbol *PrePACInst = OutStreamer->getContext().createTempSymbol();
2569 OutStreamer->emitLabel(PrePACInst);
2570
2571 auto *PrePACInstExpr =
2572 MCSymbolRefExpr::create(PrePACInst, OutStreamer->getContext());
2573 OutStreamer->emitRelocDirective(*PrePACInstExpr, "R_AARCH64_PATCHINST",
2574 DSExpr, SMLoc());
2575 }
2576
2577 // We don't know the subtarget because this is being emitted for a global
2578 // initializer. Because the performance of IFUNC resolvers is unimportant, we
2579 // always call the EmuPAC runtime, which will end up using the PAC instruction
2580 // if the target supports PAC.
2581 MCSymbol *EmuPAC =
2582 OutStreamer->getContext().getOrCreateSymbol("__emupac_pacda");
2583 const MCSymbolRefExpr *EmuPACRef =
2584 MCSymbolRefExpr::create(EmuPAC, OutStreamer->getContext());
2585 OutStreamer->emitInstruction(MCInstBuilder(AArch64::B).addExpr(EmuPACRef),
2586 *STI);
2587
2588 // We need a RET despite the above tail call because the deactivation symbol
2589 // may replace the tail call with a NOP.
2590 if (DSExpr)
2591 OutStreamer->emitInstruction(
2592 MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI);
2593 OutStreamer->popSection();
2594
2595 return MCSymbolRefExpr::create(IRelativeSym, AArch64::S_FUNCINIT,
2596 OutStreamer->getContext());
2597}
2598
2599const MCExpr *
2600AArch64AsmPrinter::lowerConstantPtrAuth(const ConstantPtrAuth &CPA) {
2601 MCContext &Ctx = OutContext;
2602
2603 // Figure out the base symbol and the addend, if any.
2604 APInt Offset(64, 0);
2605 const Value *BaseGV = CPA.getPointer()->stripAndAccumulateConstantOffsets(
2606 getDataLayout(), Offset, /*AllowNonInbounds=*/true);
2607
2608 auto *BaseGVB = dyn_cast<GlobalValue>(BaseGV);
2609
2610 const MCExpr *Sym;
2611 if (BaseGVB) {
2612 // If there is an addend, turn that into the appropriate MCExpr.
2613 Sym = MCSymbolRefExpr::create(getSymbol(BaseGVB), Ctx);
2614 if (Offset.sgt(0))
2616 Sym, MCConstantExpr::create(Offset.getSExtValue(), Ctx), Ctx);
2617 else if (Offset.slt(0))
2619 Sym, MCConstantExpr::create((-Offset).getSExtValue(), Ctx), Ctx);
2620 } else {
2621 Sym = MCConstantExpr::create(Offset.getSExtValue(), Ctx);
2622 }
2623
2624 const MCExpr *DSExpr = nullptr;
2625 if (auto *DS = dyn_cast<GlobalValue>(CPA.getDeactivationSymbol())) {
2626 if (isa<GlobalAlias>(DS))
2627 return Sym;
2628 DSExpr = MCSymbolRefExpr::create(getSymbol(DS), Ctx);
2629 }
2630
2631 uint64_t KeyID = CPA.getKey()->getZExtValue();
2632 // We later rely on valid KeyID value in AArch64PACKeyIDToString call from
2633 // AArch64AuthMCExpr::printImpl, so fail fast.
2634 if (KeyID > AArch64PACKey::LAST) {
2635 CPA.getContext().emitError("AArch64 PAC Key ID '" + Twine(KeyID) +
2636 "' out of range [0, " +
2637 Twine((unsigned)AArch64PACKey::LAST) + "]");
2638 KeyID = 0;
2639 }
2640
2641 uint64_t Disc = CPA.getDiscriminator()->getZExtValue();
2642
2643 // Check if we can represent this with an IRELATIVE and emit it if so.
2644 if (auto *IFuncSym = emitPAuthRelocationAsIRelative(
2645 Sym, Disc, AArch64PACKey::ID(KeyID), CPA.hasAddressDiscriminator(),
2646 BaseGVB && BaseGVB->isDSOLocal(), DSExpr))
2647 return IFuncSym;
2648
2649 if (!isUInt<16>(Disc)) {
2650 CPA.getContext().emitError("AArch64 PAC Discriminator '" + Twine(Disc) +
2651 "' out of range [0, 0xFFFF]");
2652 Disc = 0;
2653 }
2654
2655 if (DSExpr)
2656 report_fatal_error("deactivation symbols unsupported in constant "
2657 "expressions on this target");
2658
2659 // Finally build the complete @AUTH expr.
2660 return AArch64AuthMCExpr::create(Sym, Disc, AArch64PACKey::ID(KeyID),
2661 CPA.hasAddressDiscriminator(), Ctx);
2662}
2663
2664void AArch64AsmPrinter::LowerLOADauthptrstatic(const MachineInstr &MI) {
2665 unsigned DstReg = MI.getOperand(0).getReg();
2666 const MachineOperand &GAOp = MI.getOperand(1);
2667 const uint64_t KeyC = MI.getOperand(2).getImm();
2668 assert(KeyC <= AArch64PACKey::LAST &&
2669 "key is out of range [0, AArch64PACKey::LAST]");
2670 const auto Key = (AArch64PACKey::ID)KeyC;
2671 const uint64_t Disc = MI.getOperand(3).getImm();
2672 assert(isUInt<16>(Disc) &&
2673 "constant discriminator is out of range [0, 0xffff]");
2674
2675 // Emit instruction sequence like the following:
2676 // ADRP x16, symbol$auth_ptr$key$disc
2677 // LDR x16, [x16, :lo12:symbol$auth_ptr$key$disc]
2678 //
2679 // Where the $auth_ptr$ symbol is the stub slot containing the signed pointer
2680 // to symbol.
2681 MCSymbol *AuthPtrStubSym;
2682 if (TM.getTargetTriple().isOSBinFormatELF()) {
2683 const auto &TLOF =
2684 static_cast<const AArch64_ELFTargetObjectFile &>(getObjFileLowering());
2685
2686 assert(GAOp.getOffset() == 0 &&
2687 "non-zero offset for $auth_ptr$ stub slots is not supported");
2688 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2689 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2690 } else {
2691 assert(TM.getTargetTriple().isOSBinFormatMachO() &&
2692 "LOADauthptrstatic is implemented only for MachO/ELF");
2693
2694 const auto &TLOF = static_cast<const AArch64_MachoTargetObjectFile &>(
2695 getObjFileLowering());
2696
2697 assert(GAOp.getOffset() == 0 &&
2698 "non-zero offset for $auth_ptr$ stub slots is not supported");
2699 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2700 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2701 }
2702
2703 MachineOperand StubMOHi =
2705 MachineOperand StubMOLo = MachineOperand::CreateMCSymbol(
2706 AuthPtrStubSym, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2707 MCOperand StubMCHi, StubMCLo;
2708
2709 MCInstLowering.lowerOperand(StubMOHi, StubMCHi);
2710 MCInstLowering.lowerOperand(StubMOLo, StubMCLo);
2711
2712 EmitToStreamer(
2713 *OutStreamer,
2714 MCInstBuilder(AArch64::ADRP).addReg(DstReg).addOperand(StubMCHi));
2715
2716 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRXui)
2717 .addReg(DstReg)
2718 .addReg(DstReg)
2719 .addOperand(StubMCLo));
2720}
2721
// Lower the MOVaddrPAC / LOADgotPAC pseudos: materialize the address of a
// global (directly, or by loading it from the GOT) into x16, apply any
// constant offset, and sign the result with the requested PAC key and
// discriminator. x16/x17 are used as scratch throughout; the pseudo's
// contract reserves them (see the expansion comment below for the exact
// instruction sequences).
void AArch64AsmPrinter::LowerMOVaddrPAC(const MachineInstr &MI) {
  const bool IsGOTLoad = MI.getOpcode() == AArch64::LOADgotPAC;
  // ELF signed-GOT functions keep authenticated pointers in the GOT; those
  // entries must be authenticated after loading (see the IsELFSignedGOT
  // branch below).
  const bool IsELFSignedGOT = MI.getParent()
                                  ->getParent()
                                  ->getInfo<AArch64FunctionInfo>()
                                  ->hasELFSignedGOT();
  MachineOperand GAOp = MI.getOperand(0);
  const uint64_t KeyC = MI.getOperand(1).getImm();
  assert(KeyC <= AArch64PACKey::LAST &&
         "key is out of range [0, AArch64PACKey::LAST]");
  const auto Key = (AArch64PACKey::ID)KeyC;
  const unsigned AddrDisc = MI.getOperand(2).getReg();
  const uint64_t Disc = MI.getOperand(3).getImm();

  // The offset is folded into an explicit add after materialization (below),
  // so strip it from the global-address operand before lowering.
  const int64_t Offset = GAOp.getOffset();
  GAOp.setOffset(0);

  // Emit:
  // target materialization:
  // - via GOT:
  //   - unsigned GOT:
  //     adrp x16, :got:target
  //     ldr x16, [x16, :got_lo12:target]
  //     add offset to x16 if offset != 0
  //   - ELF signed GOT:
  //     adrp x17, :got:target
  //     add x17, x17, :got_auth_lo12:target
  //     ldr x16, [x17]
  //     aut{i|d}a x16, x17
  //     check+trap sequence (if no FPAC)
  //     add offset to x16 if offset != 0
  //
  // - direct:
  //     adrp x16, target
  //     add x16, x16, :lo12:target
  //     add offset to x16 if offset != 0
  //
  // add offset to x16:
  // - abs(offset) fits 24 bits:
  //   add/sub x16, x16, #<offset>[, #lsl 12] (up to 2 instructions)
  // - abs(offset) does not fit 24 bits:
  //   - offset < 0:
  //     movn+movk sequence filling x17 register with the offset (up to 4
  //     instructions)
  //     add x16, x16, x17
  //   - offset > 0:
  //     movz+movk sequence filling x17 register with the offset (up to 4
  //     instructions)
  //     add x16, x16, x17
  //
  // signing:
  // - 0 discriminator:
  //     paciza x16
  // - Non-0 discriminator, no address discriminator:
  //     mov x17, #Disc
  //     pacia x16, x17
  // - address discriminator (with potentially folded immediate discriminator):
  //     pacia x16, xAddrDisc

  MachineOperand GAMOHi(GAOp), GAMOLo(GAOp);
  MCOperand GAMCHi, GAMCLo;

  GAMOHi.setTargetFlags(AArch64II::MO_PAGE);
  GAMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
  if (IsGOTLoad) {
    GAMOHi.addTargetFlag(AArch64II::MO_GOT);
    GAMOLo.addTargetFlag(AArch64II::MO_GOT);
  }

  MCInstLowering.lowerOperand(GAMOHi, GAMCHi);
  MCInstLowering.lowerOperand(GAMOLo, GAMCLo);

  // For a signed GOT the ADRP targets x17 (the GOT slot address, later the
  // auth discriminator); otherwise the value is built directly in x16.
  EmitToStreamer(
      MCInstBuilder(AArch64::ADRP)
          .addReg(IsGOTLoad && IsELFSignedGOT ? AArch64::X17 : AArch64::X16)
          .addOperand(GAMCHi));

  if (IsGOTLoad) {
    if (IsELFSignedGOT) {
      EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                         .addReg(AArch64::X17)
                         .addReg(AArch64::X17)
                         .addOperand(GAMCLo)
                         .addImm(0));

      EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                         .addReg(AArch64::X16)
                         .addReg(AArch64::X17)
                         .addImm(0));

      assert(GAOp.isGlobal());
      assert(GAOp.getGlobal()->getValueType() != nullptr);

      // Signed GOT entries use the IA key for functions and DA for data;
      // the discriminator is the slot address (x17).
      bool IsFunctionTy = GAOp.getGlobal()->getValueType()->isFunctionTy();
      auto AuthKey = IsFunctionTy ? AArch64PACKey::IA : AArch64PACKey::DA;
      emitAUT(AuthKey, AArch64::X16, AArch64::X17);

      // Without FPAC, a failed authentication does not fault by itself, so
      // emit an explicit check+trap sequence.
      if (!STI->hasFPAC())
        emitPtrauthCheckAuthenticatedValue(AArch64::X16, AArch64::X17, AuthKey,
                                           AArch64PAuth::AuthCheckMethod::XPAC);
    } else {
      EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                         .addReg(AArch64::X16)
                         .addReg(AArch64::X16)
                         .addOperand(GAMCLo));
    }
  } else {
    EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                       .addReg(AArch64::X16)
                       .addReg(AArch64::X16)
                       .addOperand(GAMCLo)
                       .addImm(0));
  }

  // Apply the (possibly zero) constant offset, then sign the final pointer.
  emitAddImm(AArch64::X16, Offset, AArch64::X17);
  Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17);

  emitPAC(Key, AArch64::X16, DiscReg);
}
2841
// Lower the LOADgotAUTH pseudo: load a global's address from a signed GOT
// slot and authenticate it, leaving the raw pointer in the destination
// register. x17 is used as scratch (and as the address discriminator, since
// the slot address diversifies the signature).
void AArch64AsmPrinter::LowerLOADgotAUTH(const MachineInstr &MI) {
  Register DstReg = MI.getOperand(0).getReg();
  // Without FPAC the check sequence below may clobber/inspect the value, so
  // authenticate in x16 first and only move to DstReg once checked.
  Register AuthResultReg = STI->hasFPAC() ? DstReg : AArch64::X16;
  const MachineOperand &GAMO = MI.getOperand(1);
  assert(GAMO.getOffset() == 0);

  if (MI.getMF()->getTarget().getCodeModel() == CodeModel::Tiny) {
    // Tiny code model: the GOT slot is within ADR range.
    MCOperand GAMC;
    MCInstLowering.lowerOperand(GAMO, GAMC);
    EmitToStreamer(
        MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addOperand(GAMC));
    EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                       .addReg(AuthResultReg)
                       .addReg(AArch64::X17)
                       .addImm(0));
  } else {
    // Small/large code models: adrp+add to form the slot address in x17,
    // then load the signed pointer from it.
    MachineOperand GAHiOp(GAMO);
    MachineOperand GALoOp(GAMO);
    GAHiOp.addTargetFlag(AArch64II::MO_PAGE);
    GALoOp.addTargetFlag(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);

    MCOperand GAMCHi, GAMCLo;
    MCInstLowering.lowerOperand(GAHiOp, GAMCHi);
    MCInstLowering.lowerOperand(GALoOp, GAMCLo);

    EmitToStreamer(
        MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(GAMCHi));

    EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                       .addReg(AArch64::X17)
                       .addReg(AArch64::X17)
                       .addOperand(GAMCLo)
                       .addImm(0));

    EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                       .addReg(AuthResultReg)
                       .addReg(AArch64::X17)
                       .addImm(0));
  }

  assert(GAMO.isGlobal());
  // An undefined extern_weak symbol yields a null GOT entry; skip the
  // authentication (which would fault/poison on null) by branching over it.
  MCSymbol *UndefWeakSym;
  if (GAMO.getGlobal()->hasExternalWeakLinkage()) {
    UndefWeakSym = createTempSymbol("undef_weak");
    EmitToStreamer(
        MCInstBuilder(AArch64::CBZX)
            .addReg(AuthResultReg)
            .addExpr(MCSymbolRefExpr::create(UndefWeakSym, OutContext)));
  }

  assert(GAMO.getGlobal()->getValueType() != nullptr);

  // IA key for function symbols, DA for data, discriminated by the GOT slot
  // address still held in x17.
  bool IsFunctionTy = GAMO.getGlobal()->getValueType()->isFunctionTy();
  auto AuthKey = IsFunctionTy ? AArch64PACKey::IA : AArch64PACKey::DA;
  emitAUT(AuthKey, AuthResultReg, AArch64::X17);

  if (GAMO.getGlobal()->hasExternalWeakLinkage())
    OutStreamer->emitLabel(UndefWeakSym);

  if (!STI->hasFPAC()) {
    // No FPAC: explicitly verify the authenticated value, then move the
    // (now-trusted) result into the real destination register.
    emitPtrauthCheckAuthenticatedValue(AuthResultReg, AArch64::X17, AuthKey,
                                       AArch64PAuth::AuthCheckMethod::XPAC);

    emitMovXReg(DstReg, AuthResultReg);
  }
}
2908
2909const MCExpr *
2910AArch64AsmPrinter::lowerBlockAddressConstant(const BlockAddress &BA) {
2911 const MCExpr *BAE = AsmPrinter::lowerBlockAddressConstant(BA);
2912 const Function &Fn = *BA.getFunction();
2913
2914 if (std::optional<uint16_t> BADisc =
2915 STI->getPtrAuthBlockAddressDiscriminatorIfEnabled(Fn))
2916 return AArch64AuthMCExpr::create(BAE, *BADisc, AArch64PACKey::IA,
2917 /*HasAddressDiversity=*/false, OutContext);
2918
2919 return BAE;
2920}
2921
2922void AArch64AsmPrinter::emitCBPseudoExpansion(const MachineInstr *MI) {
2923 bool IsImm = false;
2924 unsigned Width = 0;
2925
2926 switch (MI->getOpcode()) {
2927 default:
2928 llvm_unreachable("This is not a CB pseudo instruction");
2929 case AArch64::CBBAssertExt:
2930 IsImm = false;
2931 Width = 8;
2932 break;
2933 case AArch64::CBHAssertExt:
2934 IsImm = false;
2935 Width = 16;
2936 break;
2937 case AArch64::CBWPrr:
2938 Width = 32;
2939 break;
2940 case AArch64::CBXPrr:
2941 Width = 64;
2942 break;
2943 case AArch64::CBWPri:
2944 IsImm = true;
2945 Width = 32;
2946 break;
2947 case AArch64::CBXPri:
2948 IsImm = true;
2949 Width = 64;
2950 break;
2951 }
2952
2954 static_cast<AArch64CC::CondCode>(MI->getOperand(0).getImm());
2955 bool NeedsRegSwap = false;
2956 bool NeedsImmDec = false;
2957 bool NeedsImmInc = false;
2958
2959#define GET_CB_OPC(IsImm, Width, ImmCond, RegCond) \
2960 (IsImm \
2961 ? (Width == 32 ? AArch64::CB##ImmCond##Wri : AArch64::CB##ImmCond##Xri) \
2962 : (Width == 8 \
2963 ? AArch64::CBB##RegCond##Wrr \
2964 : (Width == 16 ? AArch64::CBH##RegCond##Wrr \
2965 : (Width == 32 ? AArch64::CB##RegCond##Wrr \
2966 : AArch64::CB##RegCond##Xrr))))
2967 unsigned MCOpC;
2968
2969 // Decide if we need to either swap register operands or increment/decrement
2970 // immediate operands
2971 switch (CC) {
2972 default:
2973 llvm_unreachable("Invalid CB condition code");
2974 case AArch64CC::EQ:
2975 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ EQ, /* Reg-Reg */ EQ);
2976 break;
2977 case AArch64CC::NE:
2978 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ NE, /* Reg-Reg */ NE);
2979 break;
2980 case AArch64CC::HS:
2981 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ HI, /* Reg-Reg */ HS);
2982 NeedsImmDec = IsImm;
2983 break;
2984 case AArch64CC::LO:
2985 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LO, /* Reg-Reg */ HI);
2986 NeedsRegSwap = !IsImm;
2987 break;
2988 case AArch64CC::HI:
2989 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ HI, /* Reg-Reg */ HI);
2990 break;
2991 case AArch64CC::LS:
2992 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LO, /* Reg-Reg */ HS);
2993 NeedsRegSwap = !IsImm;
2994 NeedsImmInc = IsImm;
2995 break;
2996 case AArch64CC::GE:
2997 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ GT, /* Reg-Reg */ GE);
2998 NeedsImmDec = IsImm;
2999 break;
3000 case AArch64CC::LT:
3001 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LT, /* Reg-Reg */ GT);
3002 NeedsRegSwap = !IsImm;
3003 break;
3004 case AArch64CC::GT:
3005 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ GT, /* Reg-Reg */ GT);
3006 break;
3007 case AArch64CC::LE:
3008 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LT, /* Reg-Reg */ GE);
3009 NeedsRegSwap = !IsImm;
3010 NeedsImmInc = IsImm;
3011 break;
3012 }
3013#undef GET_CB_OPC
3014
3015 MCInst Inst;
3016 Inst.setOpcode(MCOpC);
3017
3018 MCOperand Lhs, Rhs, Trgt;
3019 lowerOperand(MI->getOperand(1), Lhs);
3020 lowerOperand(MI->getOperand(2), Rhs);
3021 lowerOperand(MI->getOperand(3), Trgt);
3022
3023 // Now swap, increment or decrement
3024 if (NeedsRegSwap) {
3025 assert(Lhs.isReg() && "Expected register operand for CB");
3026 assert(Rhs.isReg() && "Expected register operand for CB");
3027 Inst.addOperand(Rhs);
3028 Inst.addOperand(Lhs);
3029 } else if (NeedsImmDec) {
3030 Rhs.setImm(Rhs.getImm() - 1);
3031 Inst.addOperand(Lhs);
3032 Inst.addOperand(Rhs);
3033 } else if (NeedsImmInc) {
3034 Rhs.setImm(Rhs.getImm() + 1);
3035 Inst.addOperand(Lhs);
3036 Inst.addOperand(Rhs);
3037 } else {
3038 Inst.addOperand(Lhs);
3039 Inst.addOperand(Rhs);
3040 }
3041
3042 assert((!IsImm || (Rhs.getImm() >= 0 && Rhs.getImm() < 64)) &&
3043 "CB immediate operand out-of-bounds");
3044
3045 Inst.addOperand(Trgt);
3046 EmitToStreamer(*OutStreamer, Inst);
3047}
3048
3049// Simple pseudo-instructions have their lowering (with expansion to real
3050// instructions) auto-generated.
3051#include "AArch64GenMCPseudoLowering.inc"
3052
3053void AArch64AsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
3054 S.emitInstruction(Inst, *STI);
3055#ifndef NDEBUG
3056 ++InstsEmitted;
3057#endif
3058}
3059
3060void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
3061 AArch64_MC::verifyInstructionPredicates(MI->getOpcode(), STI->getFeatureBits());
3062
3063#ifndef NDEBUG
3064 InstsEmitted = 0;
3065 llvm::scope_exit CheckMISize([&]() {
3066 assert(STI->getInstrInfo()->getInstSizeInBytes(*MI) >= InstsEmitted * 4);
3067 });
3068#endif
3069
3070 // Do any auto-generated pseudo lowerings.
3071 if (MCInst OutInst; lowerPseudoInstExpansion(MI, OutInst)) {
3072 EmitToStreamer(*OutStreamer, OutInst);
3073 return;
3074 }
3075
3076 if (MI->getOpcode() == AArch64::ADRP) {
3077 for (auto &Opd : MI->operands()) {
3078 if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
3079 "swift_async_extendedFramePointerFlags") {
3080 ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
3081 }
3082 }
3083 }
3084
3085 if (AArch64FI->getLOHRelated().count(MI)) {
3086 // Generate a label for LOH related instruction
3087 MCSymbol *LOHLabel = createTempSymbol("loh");
3088 // Associate the instruction with the label
3089 LOHInstToLabel[MI] = LOHLabel;
3090 OutStreamer->emitLabel(LOHLabel);
3091 }
3092
3093 AArch64TargetStreamer *TS =
3094 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
3095 // Do any manual lowerings.
3096 switch (MI->getOpcode()) {
3097 default:
3099 "Unhandled tail call instruction");
3100 break;
3101 case AArch64::HINT: {
3102 // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
3103 // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
3104 // non-empty. If MI is the initial BTI, place the
3105 // __patchable_function_entries label after BTI.
3106 if (CurrentPatchableFunctionEntrySym &&
3107 CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
3108 MI == &MF->front().front()) {
3109 int64_t Imm = MI->getOperand(0).getImm();
3110 if ((Imm & 32) && (Imm & 6)) {
3111 MCInst Inst;
3112 MCInstLowering.Lower(MI, Inst);
3113 EmitToStreamer(*OutStreamer, Inst);
3114 CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
3115 OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
3116 return;
3117 }
3118 }
3119 break;
3120 }
3121 case AArch64::MOVMCSym: {
3122 Register DestReg = MI->getOperand(0).getReg();
3123 const MachineOperand &MO_Sym = MI->getOperand(1);
3124 MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
3125 MCOperand Hi_MCSym, Lo_MCSym;
3126
3127 Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
3128 Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);
3129
3130 MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
3131 MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);
3132
3133 MCInst MovZ;
3134 MovZ.setOpcode(AArch64::MOVZXi);
3135 MovZ.addOperand(MCOperand::createReg(DestReg));
3136 MovZ.addOperand(Hi_MCSym);
3138 EmitToStreamer(*OutStreamer, MovZ);
3139
3140 MCInst MovK;
3141 MovK.setOpcode(AArch64::MOVKXi);
3142 MovK.addOperand(MCOperand::createReg(DestReg));
3143 MovK.addOperand(MCOperand::createReg(DestReg));
3144 MovK.addOperand(Lo_MCSym);
3146 EmitToStreamer(*OutStreamer, MovK);
3147 return;
3148 }
3149 case AArch64::MOVIv2d_ns:
3150 // It is generally beneficial to rewrite "fmov s0, wzr" to "movi d0, #0".
3151 // as movi is more efficient across all cores. Newer cores can eliminate
3152 // fmovs early and there is no difference with movi, but this not true for
3153 // all implementations.
3154 //
3155 // The floating-point version doesn't quite work in rare cases on older
3156 // CPUs, so on those targets we lower this instruction to movi.16b instead.
3157 if (STI->hasZeroCycleZeroingFPWorkaround() &&
3158 MI->getOperand(1).getImm() == 0) {
3159 MCInst TmpInst;
3160 TmpInst.setOpcode(AArch64::MOVIv16b_ns);
3161 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3162 TmpInst.addOperand(MCOperand::createImm(0));
3163 EmitToStreamer(*OutStreamer, TmpInst);
3164 return;
3165 }
3166 break;
3167
3168 case AArch64::DBG_VALUE:
3169 case AArch64::DBG_VALUE_LIST:
3170 if (isVerbose() && OutStreamer->hasRawTextSupport()) {
3171 SmallString<128> TmpStr;
3172 raw_svector_ostream OS(TmpStr);
3173 PrintDebugValueComment(MI, OS);
3174 OutStreamer->emitRawText(StringRef(OS.str()));
3175 }
3176 return;
3177
3178 case AArch64::EMITBKEY: {
3179 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
3180 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
3181 ExceptionHandlingType != ExceptionHandling::ARM)
3182 return;
3183
3184 if (getFunctionCFISectionType(*MF) == CFISection::None)
3185 return;
3186
3187 OutStreamer->emitCFIBKeyFrame();
3188 return;
3189 }
3190
3191 case AArch64::EMITMTETAGGED: {
3192 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
3193 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
3194 ExceptionHandlingType != ExceptionHandling::ARM)
3195 return;
3196
3197 if (getFunctionCFISectionType(*MF) != CFISection::None)
3198 OutStreamer->emitCFIMTETaggedFrame();
3199 return;
3200 }
3201
3202 case AArch64::AUTx16x17:
3203 emitPtrauthAuthResign(
3204 AArch64::X16, (AArch64PACKey::ID)MI->getOperand(0).getImm(),
3205 MI->getOperand(1).getImm(), &MI->getOperand(2), AArch64::X17,
3206 std::nullopt, 0, 0, MI->getDeactivationSymbol());
3207 return;
3208
3209 case AArch64::AUTxMxN:
3210 emitPtrauthAuthResign(MI->getOperand(0).getReg(),
3211 (AArch64PACKey::ID)MI->getOperand(3).getImm(),
3212 MI->getOperand(4).getImm(), &MI->getOperand(5),
3213 MI->getOperand(1).getReg(), std::nullopt, 0, 0,
3214 MI->getDeactivationSymbol());
3215 return;
3216
3217 case AArch64::AUTPAC:
3218 emitPtrauthAuthResign(
3219 AArch64::X16, (AArch64PACKey::ID)MI->getOperand(0).getImm(),
3220 MI->getOperand(1).getImm(), &MI->getOperand(2), AArch64::X17,
3221 (AArch64PACKey::ID)MI->getOperand(3).getImm(),
3222 MI->getOperand(4).getImm(), MI->getOperand(5).getReg(),
3223 MI->getDeactivationSymbol());
3224 return;
3225
3226 case AArch64::PAC:
3227 emitPtrauthSign(MI);
3228 return;
3229
3230 case AArch64::LOADauthptrstatic:
3231 LowerLOADauthptrstatic(*MI);
3232 return;
3233
3234 case AArch64::LOADgotPAC:
3235 case AArch64::MOVaddrPAC:
3236 LowerMOVaddrPAC(*MI);
3237 return;
3238
3239 case AArch64::LOADgotAUTH:
3240 LowerLOADgotAUTH(*MI);
3241 return;
3242
3243 case AArch64::BRA:
3244 case AArch64::BLRA:
3245 emitPtrauthBranch(MI);
3246 return;
3247
3248 // Tail calls use pseudo instructions so they have the proper code-gen
3249 // attributes (isCall, isReturn, etc.). We lower them to the real
3250 // instruction here.
3251 case AArch64::AUTH_TCRETURN:
3252 case AArch64::AUTH_TCRETURN_BTI: {
3253 Register Callee = MI->getOperand(0).getReg();
3254 const auto Key = (AArch64PACKey::ID)MI->getOperand(2).getImm();
3255 const uint64_t Disc = MI->getOperand(3).getImm();
3256
3257 Register AddrDisc = MI->getOperand(4).getReg();
3258
3259 Register ScratchReg = Callee == AArch64::X16 ? AArch64::X17 : AArch64::X16;
3260
3261 emitPtrauthTailCallHardening(MI);
3262
3263 // See the comments in emitPtrauthBranch.
3264 if (Callee == AddrDisc)
3265 report_fatal_error("Call target is signed with its own value");
3266
3267 // After isX16X17Safer predicate was introduced, emitPtrauthDiscriminator is
3268 // no longer restricted to only reusing AddrDisc when it is X16 or X17
3269 // (which are implicit-def'ed by AUTH_TCRETURN pseudos), thus impose this
3270 // restriction manually not to clobber an unexpected register.
3271 bool AddrDiscIsImplicitDef =
3272 AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17;
3273 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, ScratchReg,
3274 AddrDiscIsImplicitDef);
3275 emitBLRA(/*IsCall*/ false, Key, Callee, DiscReg);
3276 return;
3277 }
3278
3279 case AArch64::TCRETURNri:
3280 case AArch64::TCRETURNrix16x17:
3281 case AArch64::TCRETURNrix17:
3282 case AArch64::TCRETURNrinotx16:
3283 case AArch64::TCRETURNriALL: {
3284 emitPtrauthTailCallHardening(MI);
3285
3286 recordIfImportCall(MI);
3287 MCInst TmpInst;
3288 TmpInst.setOpcode(AArch64::BR);
3289 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3290 EmitToStreamer(*OutStreamer, TmpInst);
3291 return;
3292 }
3293 case AArch64::TCRETURNdi: {
3294 emitPtrauthTailCallHardening(MI);
3295
3296 MCOperand Dest;
3297 MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
3298 recordIfImportCall(MI);
3299 MCInst TmpInst;
3300 TmpInst.setOpcode(AArch64::B);
3301 TmpInst.addOperand(Dest);
3302 EmitToStreamer(*OutStreamer, TmpInst);
3303 return;
3304 }
3305 case AArch64::SpeculationBarrierISBDSBEndBB: {
3306 // Print DSB SYS + ISB
3307 MCInst TmpInstDSB;
3308 TmpInstDSB.setOpcode(AArch64::DSB);
3309 TmpInstDSB.addOperand(MCOperand::createImm(0xf));
3310 EmitToStreamer(*OutStreamer, TmpInstDSB);
3311 MCInst TmpInstISB;
3312 TmpInstISB.setOpcode(AArch64::ISB);
3313 TmpInstISB.addOperand(MCOperand::createImm(0xf));
3314 EmitToStreamer(*OutStreamer, TmpInstISB);
3315 return;
3316 }
3317 case AArch64::SpeculationBarrierSBEndBB: {
3318 // Print SB
3319 MCInst TmpInstSB;
3320 TmpInstSB.setOpcode(AArch64::SB);
3321 EmitToStreamer(*OutStreamer, TmpInstSB);
3322 return;
3323 }
3324 case AArch64::TLSDESC_AUTH_CALLSEQ: {
3325 /// lower this to:
3326 /// adrp x0, :tlsdesc_auth:var
3327 /// ldr x16, [x0, #:tlsdesc_auth_lo12:var]
3328 /// add x0, x0, #:tlsdesc_auth_lo12:var
3329 /// blraa x16, x0
3330 /// (TPIDR_EL0 offset now in x0)
3331 const MachineOperand &MO_Sym = MI->getOperand(0);
3332 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3333 MCOperand SymTLSDescLo12, SymTLSDesc;
3334 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3335 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3336 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3337 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3338
3339 MCInst Adrp;
3340 Adrp.setOpcode(AArch64::ADRP);
3341 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3342 Adrp.addOperand(SymTLSDesc);
3343 EmitToStreamer(*OutStreamer, Adrp);
3344
3345 MCInst Ldr;
3346 Ldr.setOpcode(AArch64::LDRXui);
3347 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3348 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3349 Ldr.addOperand(SymTLSDescLo12);
3351 EmitToStreamer(*OutStreamer, Ldr);
3352
3353 MCInst Add;
3354 Add.setOpcode(AArch64::ADDXri);
3355 Add.addOperand(MCOperand::createReg(AArch64::X0));
3356 Add.addOperand(MCOperand::createReg(AArch64::X0));
3357 Add.addOperand(SymTLSDescLo12);
3359 EmitToStreamer(*OutStreamer, Add);
3360
3361 // Authenticated TLSDESC accesses are not relaxed.
3362 // Thus, do not emit .tlsdesccall for AUTH TLSDESC.
3363
3364 MCInst Blraa;
3365 Blraa.setOpcode(AArch64::BLRAA);
3366 Blraa.addOperand(MCOperand::createReg(AArch64::X16));
3367 Blraa.addOperand(MCOperand::createReg(AArch64::X0));
3368 EmitToStreamer(*OutStreamer, Blraa);
3369
3370 return;
3371 }
3372 case AArch64::TLSDESC_CALLSEQ: {
3373 /// lower this to:
3374 /// adrp x0, :tlsdesc:var
3375 /// ldr x1, [x0, #:tlsdesc_lo12:var]
3376 /// add x0, x0, #:tlsdesc_lo12:var
3377 /// .tlsdesccall var
3378 /// blr x1
3379 /// (TPIDR_EL0 offset now in x0)
3380 const MachineOperand &MO_Sym = MI->getOperand(0);
3381 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3382 MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
3383 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3384 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3385 MCInstLowering.lowerOperand(MO_Sym, Sym);
3386 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3387 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3388
3389 MCInst Adrp;
3390 Adrp.setOpcode(AArch64::ADRP);
3391 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3392 Adrp.addOperand(SymTLSDesc);
3393 EmitToStreamer(*OutStreamer, Adrp);
3394
3395 MCInst Ldr;
3396 if (STI->isTargetILP32()) {
3397 Ldr.setOpcode(AArch64::LDRWui);
3398 Ldr.addOperand(MCOperand::createReg(AArch64::W1));
3399 } else {
3400 Ldr.setOpcode(AArch64::LDRXui);
3401 Ldr.addOperand(MCOperand::createReg(AArch64::X1));
3402 }
3403 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3404 Ldr.addOperand(SymTLSDescLo12);
3406 EmitToStreamer(*OutStreamer, Ldr);
3407
3408 MCInst Add;
3409 if (STI->isTargetILP32()) {
3410 Add.setOpcode(AArch64::ADDWri);
3411 Add.addOperand(MCOperand::createReg(AArch64::W0));
3412 Add.addOperand(MCOperand::createReg(AArch64::W0));
3413 } else {
3414 Add.setOpcode(AArch64::ADDXri);
3415 Add.addOperand(MCOperand::createReg(AArch64::X0));
3416 Add.addOperand(MCOperand::createReg(AArch64::X0));
3417 }
3418 Add.addOperand(SymTLSDescLo12);
3420 EmitToStreamer(*OutStreamer, Add);
3421
3422 // Emit a relocation-annotation. This expands to no code, but requests
3423 // the following instruction gets an R_AARCH64_TLSDESC_CALL.
3424 MCInst TLSDescCall;
3425 TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
3426 TLSDescCall.addOperand(Sym);
3427 EmitToStreamer(*OutStreamer, TLSDescCall);
3428#ifndef NDEBUG
3429 --InstsEmitted; // no code emitted
3430#endif
3431
3432 MCInst Blr;
3433 Blr.setOpcode(AArch64::BLR);
3434 Blr.addOperand(MCOperand::createReg(AArch64::X1));
3435 EmitToStreamer(*OutStreamer, Blr);
3436
3437 return;
3438 }
3439
3440 case AArch64::JumpTableDest32:
3441 case AArch64::JumpTableDest16:
3442 case AArch64::JumpTableDest8:
3443 LowerJumpTableDest(*OutStreamer, *MI);
3444 return;
3445
3446 case AArch64::BR_JumpTable:
3447 LowerHardenedBRJumpTable(*MI);
3448 return;
3449
3450 case AArch64::FMOVH0:
3451 case AArch64::FMOVS0:
3452 case AArch64::FMOVD0:
3453 emitFMov0(*MI);
3454 return;
3455
3456 case AArch64::MOPSMemoryCopyPseudo:
3457 case AArch64::MOPSMemoryMovePseudo:
3458 case AArch64::MOPSMemorySetPseudo:
3459 case AArch64::MOPSMemorySetTaggingPseudo:
3460 LowerMOPS(*OutStreamer, *MI);
3461 return;
3462
3463 case TargetOpcode::STACKMAP:
3464 return LowerSTACKMAP(*OutStreamer, SM, *MI);
3465
3466 case TargetOpcode::PATCHPOINT:
3467 return LowerPATCHPOINT(*OutStreamer, SM, *MI);
3468
3469 case TargetOpcode::STATEPOINT:
3470 return LowerSTATEPOINT(*OutStreamer, SM, *MI);
3471
3472 case TargetOpcode::FAULTING_OP:
3473 return LowerFAULTING_OP(*MI);
3474
3475 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
3476 LowerPATCHABLE_FUNCTION_ENTER(*MI);
3477 return;
3478
3479 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
3480 LowerPATCHABLE_FUNCTION_EXIT(*MI);
3481 return;
3482
3483 case TargetOpcode::PATCHABLE_TAIL_CALL:
3484 LowerPATCHABLE_TAIL_CALL(*MI);
3485 return;
3486 case TargetOpcode::PATCHABLE_EVENT_CALL:
3487 return LowerPATCHABLE_EVENT_CALL(*MI, false);
3488 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
3489 return LowerPATCHABLE_EVENT_CALL(*MI, true);
3490
3491 case AArch64::KCFI_CHECK:
3492 LowerKCFI_CHECK(*MI);
3493 return;
3494
3495 case AArch64::HWASAN_CHECK_MEMACCESS:
3496 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
3497 case AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW:
3498 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW:
3499 LowerHWASAN_CHECK_MEMACCESS(*MI);
3500 return;
3501
3502 case AArch64::SEH_StackAlloc:
3503 TS->emitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
3504 return;
3505
3506 case AArch64::SEH_SaveFPLR:
3507 TS->emitARM64WinCFISaveFPLR(MI->getOperand(0).getImm());
3508 return;
3509
3510 case AArch64::SEH_SaveFPLR_X:
3511 assert(MI->getOperand(0).getImm() < 0 &&
3512 "Pre increment SEH opcode must have a negative offset");
3513 TS->emitARM64WinCFISaveFPLRX(-MI->getOperand(0).getImm());
3514 return;
3515
3516 case AArch64::SEH_SaveReg:
3517 TS->emitARM64WinCFISaveReg(MI->getOperand(0).getImm(),
3518 MI->getOperand(1).getImm());
3519 return;
3520
3521 case AArch64::SEH_SaveReg_X:
3522 assert(MI->getOperand(1).getImm() < 0 &&
3523 "Pre increment SEH opcode must have a negative offset");
3524 TS->emitARM64WinCFISaveRegX(MI->getOperand(0).getImm(),
3525 -MI->getOperand(1).getImm());
3526 return;
3527
3528 case AArch64::SEH_SaveRegP:
3529 if (MI->getOperand(1).getImm() == 30 && MI->getOperand(0).getImm() >= 19 &&
3530 MI->getOperand(0).getImm() <= 28) {
3531 assert((MI->getOperand(0).getImm() - 19) % 2 == 0 &&
3532 "Register paired with LR must be odd");
3533 TS->emitARM64WinCFISaveLRPair(MI->getOperand(0).getImm(),
3534 MI->getOperand(2).getImm());
3535 return;
3536 }
3537 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3538 "Non-consecutive registers not allowed for save_regp");
3539 TS->emitARM64WinCFISaveRegP(MI->getOperand(0).getImm(),
3540 MI->getOperand(2).getImm());
3541 return;
3542
3543 case AArch64::SEH_SaveRegP_X:
3544 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3545 "Non-consecutive registers not allowed for save_regp_x");
3546 assert(MI->getOperand(2).getImm() < 0 &&
3547 "Pre increment SEH opcode must have a negative offset");
3548 TS->emitARM64WinCFISaveRegPX(MI->getOperand(0).getImm(),
3549 -MI->getOperand(2).getImm());
3550 return;
3551
3552 case AArch64::SEH_SaveFReg:
3553 TS->emitARM64WinCFISaveFReg(MI->getOperand(0).getImm(),
3554 MI->getOperand(1).getImm());
3555 return;
3556
3557 case AArch64::SEH_SaveFReg_X:
3558 assert(MI->getOperand(1).getImm() < 0 &&
3559 "Pre increment SEH opcode must have a negative offset");
3560 TS->emitARM64WinCFISaveFRegX(MI->getOperand(0).getImm(),
3561 -MI->getOperand(1).getImm());
3562 return;
3563
3564 case AArch64::SEH_SaveFRegP:
3565 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3566 "Non-consecutive registers not allowed for save_regp");
3567 TS->emitARM64WinCFISaveFRegP(MI->getOperand(0).getImm(),
3568 MI->getOperand(2).getImm());
3569 return;
3570
3571 case AArch64::SEH_SaveFRegP_X:
3572 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3573 "Non-consecutive registers not allowed for save_regp_x");
3574 assert(MI->getOperand(2).getImm() < 0 &&
3575 "Pre increment SEH opcode must have a negative offset");
3576 TS->emitARM64WinCFISaveFRegPX(MI->getOperand(0).getImm(),
3577 -MI->getOperand(2).getImm());
3578 return;
3579
3580 case AArch64::SEH_SetFP:
3582 return;
3583
3584 case AArch64::SEH_AddFP:
3585 TS->emitARM64WinCFIAddFP(MI->getOperand(0).getImm());
3586 return;
3587
3588 case AArch64::SEH_Nop:
3589 TS->emitARM64WinCFINop();
3590 return;
3591
3592 case AArch64::SEH_PrologEnd:
3594 return;
3595
3596 case AArch64::SEH_EpilogStart:
3598 return;
3599
3600 case AArch64::SEH_EpilogEnd:
3602 return;
3603
3604 case AArch64::SEH_PACSignLR:
3606 return;
3607
3608 case AArch64::SEH_SaveAnyRegI:
3609 assert(MI->getOperand(1).getImm() <= 1008 &&
3610 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3611 TS->emitARM64WinCFISaveAnyRegI(MI->getOperand(0).getImm(),
3612 MI->getOperand(1).getImm());
3613 return;
3614
3615 case AArch64::SEH_SaveAnyRegIP:
3616 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3617 "Non-consecutive registers not allowed for save_any_reg");
3618 assert(MI->getOperand(2).getImm() <= 1008 &&
3619 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3620 TS->emitARM64WinCFISaveAnyRegIP(MI->getOperand(0).getImm(),
3621 MI->getOperand(2).getImm());
3622 return;
3623
3624 case AArch64::SEH_SaveAnyRegQP:
3625 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3626 "Non-consecutive registers not allowed for save_any_reg");
3627 assert(MI->getOperand(2).getImm() >= 0 &&
3628 "SaveAnyRegQP SEH opcode offset must be non-negative");
3629 assert(MI->getOperand(2).getImm() <= 1008 &&
3630 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3631 TS->emitARM64WinCFISaveAnyRegQP(MI->getOperand(0).getImm(),
3632 MI->getOperand(2).getImm());
3633 return;
3634
3635 case AArch64::SEH_SaveAnyRegQPX:
3636 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3637 "Non-consecutive registers not allowed for save_any_reg");
3638 assert(MI->getOperand(2).getImm() < 0 &&
3639 "SaveAnyRegQPX SEH opcode offset must be negative");
3640 assert(MI->getOperand(2).getImm() >= -1008 &&
3641 "SaveAnyRegQPX SEH opcode offset must fit into 6 bits");
3642 TS->emitARM64WinCFISaveAnyRegQPX(MI->getOperand(0).getImm(),
3643 -MI->getOperand(2).getImm());
3644 return;
3645
3646 case AArch64::SEH_AllocZ:
3647 assert(MI->getOperand(0).getImm() >= 0 &&
3648 "AllocZ SEH opcode offset must be non-negative");
3649 assert(MI->getOperand(0).getImm() <= 255 &&
3650 "AllocZ SEH opcode offset must fit into 8 bits");
3651 TS->emitARM64WinCFIAllocZ(MI->getOperand(0).getImm());
3652 return;
3653
3654 case AArch64::SEH_SaveZReg:
3655 assert(MI->getOperand(1).getImm() >= 0 &&
3656 "SaveZReg SEH opcode offset must be non-negative");
3657 assert(MI->getOperand(1).getImm() <= 255 &&
3658 "SaveZReg SEH opcode offset must fit into 8 bits");
3659 TS->emitARM64WinCFISaveZReg(MI->getOperand(0).getImm(),
3660 MI->getOperand(1).getImm());
3661 return;
3662
3663 case AArch64::SEH_SavePReg:
3664 assert(MI->getOperand(1).getImm() >= 0 &&
3665 "SavePReg SEH opcode offset must be non-negative");
3666 assert(MI->getOperand(1).getImm() <= 255 &&
3667 "SavePReg SEH opcode offset must fit into 8 bits");
3668 TS->emitARM64WinCFISavePReg(MI->getOperand(0).getImm(),
3669 MI->getOperand(1).getImm());
3670 return;
3671
3672 case AArch64::BLR:
3673 case AArch64::BR: {
3674 recordIfImportCall(MI);
3675 MCInst TmpInst;
3676 MCInstLowering.Lower(MI, TmpInst);
3677 EmitToStreamer(*OutStreamer, TmpInst);
3678 return;
3679 }
3680 case AArch64::CBWPri:
3681 case AArch64::CBXPri:
3682 case AArch64::CBBAssertExt:
3683 case AArch64::CBHAssertExt:
3684 case AArch64::CBWPrr:
3685 case AArch64::CBXPrr:
3686 emitCBPseudoExpansion(MI);
3687 return;
3688 }
3689
3690 if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol()))
3691 return;
3692
3693 // Finally, do the automated lowerings for everything else.
3694 MCInst TmpInst;
3695 MCInstLowering.Lower(MI, TmpInst);
3696 EmitToStreamer(*OutStreamer, TmpInst);
3697}
3698
3699void AArch64AsmPrinter::recordIfImportCall(
3700 const llvm::MachineInstr *BranchInst) {
3701 if (!EnableImportCallOptimization)
3702 return;
3703
3704 auto [GV, OpFlags] = BranchInst->getMF()->tryGetCalledGlobal(BranchInst);
3705 if (GV && GV->hasDLLImportStorageClass()) {
3706 auto *CallSiteSymbol = MMI->getContext().createNamedTempSymbol("impcall");
3707 OutStreamer->emitLabel(CallSiteSymbol);
3708
3709 auto *CalledSymbol = MCInstLowering.GetGlobalValueSymbol(GV, OpFlags);
3710 SectionToImportedFunctionCalls[OutStreamer->getCurrentSectionOnly()]
3711 .push_back({CallSiteSymbol, CalledSymbol});
3712 }
3713}
3714
3715void AArch64AsmPrinter::emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
3716 MCSymbol *LazyPointer) {
3717 // _ifunc:
3718 // adrp x16, lazy_pointer@GOTPAGE
3719 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3720 // ldr x16, [x16]
3721 // br x16
3722
3723 {
3724 MCInst Adrp;
3725 Adrp.setOpcode(AArch64::ADRP);
3726 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3727 MCOperand SymPage;
3728 MCInstLowering.lowerOperand(
3731 SymPage);
3732 Adrp.addOperand(SymPage);
3733 EmitToStreamer(Adrp);
3734 }
3735
3736 {
3737 MCInst Ldr;
3738 Ldr.setOpcode(AArch64::LDRXui);
3739 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3740 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3741 MCOperand SymPageOff;
3742 MCInstLowering.lowerOperand(
3745 SymPageOff);
3746 Ldr.addOperand(SymPageOff);
3748 EmitToStreamer(Ldr);
3749 }
3750
3751 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
3752 .addReg(AArch64::X16)
3753 .addReg(AArch64::X16)
3754 .addImm(0));
3755
3756 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3757 : AArch64::BR)
3758 .addReg(AArch64::X16));
3759}
3760
3761void AArch64AsmPrinter::emitMachOIFuncStubHelperBody(Module &M,
3762 const GlobalIFunc &GI,
3763 MCSymbol *LazyPointer) {
3764 // These stub helpers are only ever called once, so here we're optimizing for
3765 // minimum size by using the pre-indexed store variants, which saves a few
3766 // bytes of instructions to bump & restore sp.
3767
3768 // _ifunc.stub_helper:
3769 // stp fp, lr, [sp, #-16]!
3770 // mov fp, sp
3771 // stp x1, x0, [sp, #-16]!
3772 // stp x3, x2, [sp, #-16]!
3773 // stp x5, x4, [sp, #-16]!
3774 // stp x7, x6, [sp, #-16]!
3775 // stp d1, d0, [sp, #-16]!
3776 // stp d3, d2, [sp, #-16]!
3777 // stp d5, d4, [sp, #-16]!
3778 // stp d7, d6, [sp, #-16]!
3779 // bl _resolver
3780 // adrp x16, lazy_pointer@GOTPAGE
3781 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3782 // str x0, [x16]
3783 // mov x16, x0
3784 // ldp d7, d6, [sp], #16
3785 // ldp d5, d4, [sp], #16
3786 // ldp d3, d2, [sp], #16
3787 // ldp d1, d0, [sp], #16
3788 // ldp x7, x6, [sp], #16
3789 // ldp x5, x4, [sp], #16
3790 // ldp x3, x2, [sp], #16
3791 // ldp x1, x0, [sp], #16
3792 // ldp fp, lr, [sp], #16
3793 // br x16
3794
3795 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3796 .addReg(AArch64::SP)
3797 .addReg(AArch64::FP)
3798 .addReg(AArch64::LR)
3799 .addReg(AArch64::SP)
3800 .addImm(-2));
3801
3802 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3803 .addReg(AArch64::FP)
3804 .addReg(AArch64::SP)
3805 .addImm(0)
3806 .addImm(0));
3807
3808 for (int I = 0; I != 4; ++I)
3809 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3810 .addReg(AArch64::SP)
3811 .addReg(AArch64::X1 + 2 * I)
3812 .addReg(AArch64::X0 + 2 * I)
3813 .addReg(AArch64::SP)
3814 .addImm(-2));
3815
3816 for (int I = 0; I != 4; ++I)
3817 EmitToStreamer(MCInstBuilder(AArch64::STPDpre)
3818 .addReg(AArch64::SP)
3819 .addReg(AArch64::D1 + 2 * I)
3820 .addReg(AArch64::D0 + 2 * I)
3821 .addReg(AArch64::SP)
3822 .addImm(-2));
3823
3824 EmitToStreamer(
3825 MCInstBuilder(AArch64::BL)
3827
3828 {
3829 MCInst Adrp;
3830 Adrp.setOpcode(AArch64::ADRP);
3831 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3832 MCOperand SymPage;
3833 MCInstLowering.lowerOperand(
3834 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3836 SymPage);
3837 Adrp.addOperand(SymPage);
3838 EmitToStreamer(Adrp);
3839 }
3840
3841 {
3842 MCInst Ldr;
3843 Ldr.setOpcode(AArch64::LDRXui);
3844 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3845 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3846 MCOperand SymPageOff;
3847 MCInstLowering.lowerOperand(
3848 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3850 SymPageOff);
3851 Ldr.addOperand(SymPageOff);
3853 EmitToStreamer(Ldr);
3854 }
3855
3856 EmitToStreamer(MCInstBuilder(AArch64::STRXui)
3857 .addReg(AArch64::X0)
3858 .addReg(AArch64::X16)
3859 .addImm(0));
3860
3861 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3862 .addReg(AArch64::X16)
3863 .addReg(AArch64::X0)
3864 .addImm(0)
3865 .addImm(0));
3866
3867 for (int I = 3; I != -1; --I)
3868 EmitToStreamer(MCInstBuilder(AArch64::LDPDpost)
3869 .addReg(AArch64::SP)
3870 .addReg(AArch64::D1 + 2 * I)
3871 .addReg(AArch64::D0 + 2 * I)
3872 .addReg(AArch64::SP)
3873 .addImm(2));
3874
3875 for (int I = 3; I != -1; --I)
3876 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3877 .addReg(AArch64::SP)
3878 .addReg(AArch64::X1 + 2 * I)
3879 .addReg(AArch64::X0 + 2 * I)
3880 .addReg(AArch64::SP)
3881 .addImm(2));
3882
3883 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3884 .addReg(AArch64::SP)
3885 .addReg(AArch64::FP)
3886 .addReg(AArch64::LR)
3887 .addReg(AArch64::SP)
3888 .addImm(2));
3889
3890 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3891 : AArch64::BR)
3892 .addReg(AArch64::X16));
3893}
3894
3895const MCExpr *AArch64AsmPrinter::lowerConstant(const Constant *CV,
3896 const Constant *BaseCV,
3897 uint64_t Offset) {
3898 if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
3899 return MCSymbolRefExpr::create(MCInstLowering.GetGlobalValueSymbol(GV, 0),
3900 OutContext);
3901 }
3902
3903 return AsmPrinter::lowerConstant(CV, BaseCV, Offset);
3904}
3905
// Pass identification: the address of ID uniquely identifies this pass.
char AArch64AsmPrinter::ID = 0;

INITIALIZE_PASS(AArch64AsmPrinter, "aarch64-asm-printer",
                "AArch64 Assembly Printer", false, false)
3910
3911// Force static initialization.
3912extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
3913LLVMInitializeAArch64AsmPrinter() {
3919}
static cl::opt< PtrauthCheckMode > PtrauthAuthChecks("aarch64-ptrauth-auth-checks", cl::Hidden, cl::values(clEnumValN(Unchecked, "none", "don't test for failure"), clEnumValN(Poison, "poison", "poison on failure"), clEnumValN(Trap, "trap", "trap on failure")), cl::desc("Check pointer authentication auth/resign failures"), cl::init(Default))
PtrauthCheckMode
@ Unchecked
#define GET_CB_OPC(IsImm, Width, ImmCond, RegCond)
static void emitAuthenticatedPointer(MCStreamer &OutStreamer, MCSymbol *StubLabel, const MCExpr *StubAuthPtrRef)
static bool targetSupportsIRelativeRelocation(const Triple &TT)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_ABI
Definition Compiler.h:213
#define LLVM_EXTERNAL_VISIBILITY
Definition Compiler.h:132
This file defines the DenseMap class.
@ Default
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
print mir2vec MIR2Vec Vocabulary Printer Pass
Definition MIR2Vec.cpp:593
Machine Check Debug Module
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
#define P(N)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:487
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool printOperand(raw_ostream &OS, const SelectionDAG *G, const SDValue Value)
This file defines the SmallString class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static bool printAsmMRegister(const X86AsmPrinter &P, const MachineOperand &MO, char Mode, raw_ostream &O)
static const AArch64AuthMCExpr * create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key, bool HasAddressDiversity, MCContext &Ctx, SMLoc Loc=SMLoc())
const SetOfInstructions & getLOHRelated() const
unsigned getJumpTableEntrySize(int Idx) const
MCSymbol * getJumpTableEntryPCRelSymbol(int Idx) const
static bool shouldSignReturnAddress(SignReturnAddress Condition, bool IsLRSpilled)
std::optional< std::string > getOutliningStyle() const
const MILOHContainer & getLOHContainer() const
void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym)
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
static bool isTailCallReturnInst(const MachineInstr &MI)
Returns true if MI is one of the TCRETURN* instructions.
AArch64MCInstLower - This class is used to lower an MachineInstr into an MCInst.
MCSymbol * GetGlobalValueSymbol(const GlobalValue *GV, unsigned TargetFlags) const
void Lower(const MachineInstr *MI, MCInst &OutMI) const
bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const
virtual void emitARM64WinCFISaveRegP(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveRegPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegQP(unsigned Reg, int Offset)
virtual void emitAttributesSubsection(StringRef VendorName, AArch64BuildAttributes::SubsectionOptional IsOptional, AArch64BuildAttributes::SubsectionType ParameterType)
Build attributes implementation.
virtual void emitARM64WinCFISavePReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegI(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFRegPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveRegX(unsigned Reg, int Offset)
virtual void emitARM64WinCFIAllocStack(unsigned Size)
virtual void emitARM64WinCFISaveFPLRX(int Offset)
virtual void emitARM64WinCFIAllocZ(int Offset)
virtual void emitDirectiveVariantPCS(MCSymbol *Symbol)
Callback used to implement the .variant_pcs directive.
virtual void emitARM64WinCFIAddFP(unsigned Size)
virtual void emitARM64WinCFISaveFPLR(int Offset)
virtual void emitARM64WinCFISaveFRegP(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegQPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFRegX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveZReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveLRPair(unsigned Reg, int Offset)
virtual void emitAttribute(StringRef VendorName, unsigned Tag, unsigned Value, std::string String)
virtual void emitARM64WinCFISaveAnyRegIP(unsigned Reg, int Offset)
void setPreservesAll()
Set by analyses that do not transform their input at all.
const T & front() const
front - Get the first element.
Definition ArrayRef.h:145
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
This class is intended to be used as a driving class for all asm writers.
Definition AsmPrinter.h:91
virtual void emitGlobalAlias(const Module &M, const GlobalAlias &GA)
virtual MCSymbol * GetCPISymbol(unsigned CPID) const
Return the symbol for the specified constant pool entry.
virtual const MCExpr * lowerConstant(const Constant *CV, const Constant *BaseCV=nullptr, uint64_t Offset=0)
Lower the specified LLVM Constant to an MCExpr.
void getAnalysisUsage(AnalysisUsage &AU) const override
Record analysis usage.
virtual void emitXXStructor(const DataLayout &DL, const Constant *CV)
Targets can override this to change how global constants that are part of a C++ static/global constru...
Definition AsmPrinter.h:636
virtual void emitFunctionEntryLabel()
EmitFunctionEntryLabel - Emit the label that is the entrypoint for the function.
virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, const char *ExtraCode, raw_ostream &OS)
Print the specified operand of MI, an INLINEASM instruction, using the specified assembler variant.
virtual const MCExpr * lowerBlockAddressConstant(const BlockAddress &BA)
Lower the specified BlockAddress to an MCExpr.
Function * getFunction() const
Definition Constants.h:940
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1065
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1068
Constant * getDeactivationSymbol() const
Definition Constants.h:1087
bool hasAddressDiscriminator() const
Whether there is any non-null address discriminator.
Definition Constants.h:1083
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1071
void recordFaultingOp(FaultKind FaultTy, const MCSymbol *FaultingLabel, const MCSymbol *HandlerLabel)
Definition FaultMaps.cpp:28
void serializeToFaultMapSection()
Definition FaultMaps.cpp:45
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:730
const Constant * getAliasee() const
Definition GlobalAlias.h:87
const Constant * getResolver() const
Definition GlobalIFunc.h:73
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasLocalLinkage() const
bool hasExternalWeakLinkage() const
Type * getValueType() const
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
static const MCBinaryExpr * createLShr(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:423
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:343
static const MCBinaryExpr * createSub(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:428
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition MCExpr.cpp:212
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
MCSectionELF * getELFSection(const Twine &Section, unsigned Type, unsigned Flags)
Definition MCContext.h:553
LLVM_ABI MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
LLVM_ABI MCSymbol * createLinkerPrivateSymbol(const Twine &Name)
Base class for the full range of assembler expressions which are needed for parsing.
Definition MCExpr.h:34
LLVM_ABI bool evaluateAsRelocatable(MCValue &Res, const MCAssembler *Asm) const
Try to evaluate the expression to a relocatable value, i.e.
Definition MCExpr.cpp:450
void addOperand(const MCOperand Op)
Definition MCInst.h:215
void setOpcode(unsigned Op)
Definition MCInst.h:201
MCSection * getDataSection() const
void setImm(int64_t Val)
Definition MCInst.h:89
static MCOperand createExpr(const MCExpr *Val)
Definition MCInst.h:166
int64_t getImm() const
Definition MCInst.h:84
static MCOperand createReg(MCRegister Reg)
Definition MCInst.h:138
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
bool isReg() const
Definition MCInst.h:65
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
static constexpr unsigned NonUniqueID
Definition MCSection.h:522
static const MCSpecifierExpr * create(const MCExpr *Expr, Spec S, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.cpp:743
Streaming machine code generation interface.
Definition MCStreamer.h:220
virtual void emitCFIBKeyFrame()
virtual bool popSection()
Restore the current and previous section from the section stack.
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
virtual void emitRelocDirective(const MCExpr &Offset, StringRef Name, const MCExpr *Expr, SMLoc Loc={})
Record a relocation described by the .reloc directive.
virtual bool hasRawTextSupport() const
Return true if this asm streamer supports emitting unformatted text to the .s file with EmitRawText.
Definition MCStreamer.h:368
MCContext & getContext() const
Definition MCStreamer.h:314
virtual void AddComment(const Twine &T, bool EOL=true)
Add a textual comment.
Definition MCStreamer.h:387
virtual void emitCFIMTETaggedFrame()
void emitValue(const MCExpr *Value, unsigned Size, SMLoc Loc=SMLoc())
virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc=SMLoc())
Emit a label for Symbol into the current section.
MCTargetStreamer * getTargetStreamer()
Definition MCStreamer.h:324
void pushSection()
Save the current and previous section on the section stack.
Definition MCStreamer.h:443
virtual void switchSection(MCSection *Section, uint32_t Subsec=0)
Set the current section where code is being emitted to Section.
MCSection * getCurrentSectionOnly() const
Definition MCStreamer.h:421
void emitRawText(const Twine &String)
If this file is backed by a assembly streamer, this dumps the specified string in the output ....
const FeatureBitset & getFeatureBits() const
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:214
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
LLVM_ABI void print(raw_ostream &OS, const MCAsmInfo *MAI) const
print - Print the value to the stream OS.
Definition MCSymbol.cpp:59
StringRef getName() const
getName - Get the symbol name.
Definition MCSymbol.h:188
const MCSymbol * getAddSym() const
Definition MCValue.h:49
int64_t getConstant() const
Definition MCValue.h:44
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
CalledGlobalInfo tryGetCalledGlobal(const MachineInstr *MI) const
Tries to get the global and target flags for a call site, if the instruction is a call to a global.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MCContext & getContext() const
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineBasicBlock & front() const
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
mop_range operands()
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const MachineOperand & getOperand(unsigned i) const
const std::vector< MachineJumpTableEntry > & getJumpTables() const
unsigned getSubReg() const
static MachineOperand CreateMCSymbol(MCSymbol *Sym, unsigned TargetFlags=0)
const GlobalValue * getGlobal() const
static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
const BlockAddress * getBlockAddress() const
void setOffset(int64_t Offset)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
@ MO_Immediate
Immediate operand.
@ MO_GlobalAddress
Address of a global value.
@ MO_BlockAddress
Address of a basic block.
@ MO_Register
Register operand.
@ MO_ExternalSymbol
Name of external global symbol.
int64_t getOffset() const
Return the offset from the symbol in this operand.
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
static SectionKind getMetadata()
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void push_back(const T &Elt)
LLVM_ABI void recordStatepoint(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a statepoint instruction.
LLVM_ABI void recordPatchPoint(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a patchpoint instruction.
LLVM_ABI void recordStackMap(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a stackmap instruction.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:140
virtual MCSection * getSectionForJumpTable(const Function &F, const TargetMachine &TM) const
Primary interface to the complete machine description for the target machine.
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
bool regsOverlap(Register RegA, Register RegB) const
Returns true if the two registers are equal or alias each other.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition Type.h:258
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1106
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
StringRef getVendorName(unsigned const Vendor)
@ MO_NC
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow.
@ MO_G1
MO_G1 - A symbol operand with this flag (granule 1) represents the bits 16-31 of a 64-bit address,...
@ MO_S
MO_S - Indicates that the bits of the symbol operand represented by MO_G0 etc are signed.
@ MO_PAGEOFF
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page.
@ MO_GOT
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
@ MO_G0
MO_G0 - A symbol operand with this flag (granule 0) represents the bits 0-15 of a 64-bit address,...
@ MO_PAGE
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
@ MO_TLS
MO_TLS - Indicates that the operand being accessed is some kind of thread-local symbol.
AuthCheckMethod
Variants of check performed on an authenticated pointer.
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ SectionSize
Definition COFF.h:61
SymbolStorageClass
Storage class tells where and what the symbol represents.
Definition COFF.h:218
@ IMAGE_SYM_CLASS_EXTERNAL
External symbol.
Definition COFF.h:224
@ IMAGE_SYM_CLASS_STATIC
Static.
Definition COFF.h:225
@ IMAGE_SYM_DTYPE_FUNCTION
A function that returns a base type.
Definition COFF.h:276
@ SCT_COMPLEX_TYPE_SHIFT
Type is formed as (base + (derived << SCT_COMPLEX_TYPE_SHIFT))
Definition COFF.h:280
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ SHF_ALLOC
Definition ELF.h:1248
@ SHF_GROUP
Definition ELF.h:1270
@ SHF_EXECINSTR
Definition ELF.h:1251
@ GNU_PROPERTY_AARCH64_FEATURE_1_BTI
Definition ELF.h:1858
@ GNU_PROPERTY_AARCH64_FEATURE_1_PAC
Definition ELF.h:1859
@ GNU_PROPERTY_AARCH64_FEATURE_1_GCS
Definition ELF.h:1860
@ SHT_PROGBITS
Definition ELF.h:1147
@ S_REGULAR
S_REGULAR - Regular section.
Definition MachO.h:127
void emitInstruction(MCObjectStreamer &, const MCInst &Inst, const MCSubtargetInfo &STI)
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:682
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
bool empty() const
Definition BasicBlock.h:101
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI std::optional< std::string > getArm64ECMangledFunctionName(StringRef Name)
Returns the ARM64EC mangled function name unless the input is already mangled.
Definition Mangler.cpp:292
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1667
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
scope_exit(Callable) -> scope_exit< Callable >
static unsigned getXPACOpcodeForKey(AArch64PACKey::ID K)
Return XPAC opcode to be used for a ptrauth strip using the given key.
ExceptionHandling
Definition CodeGen.h:53
Target & getTheAArch64beTarget()
std::string utostr(uint64_t X, bool isNeg=false)
static unsigned getBranchOpcodeForKey(bool IsCall, AArch64PACKey::ID K, bool Zero)
Return B(L)RA opcode to be used for an authenticated branch or call using the given key,...
Target & getTheAArch64leTarget()
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
Target & getTheAArch64_32Target()
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
Target & getTheARM64_32Target()
static MCRegister getXRegFromWReg(MCRegister Reg)
@ Add
Sum of integers.
Target & getTheARM64Target()
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static MCRegister getXRegFromXRegTuple(MCRegister RegTuple)
static unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return PAC opcode to be used for a ptrauth sign using the given key, or its PAC*Z variant that doesn'...
static MCRegister getWRegFromXReg(MCRegister Reg)
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1915
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
static unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return AUT opcode to be used for a ptrauth auth using the given key, or its AUT*Z variant that doesn'...
@ MCSA_Weak
.weak
@ MCSA_WeakAntiDep
.weak_anti_dep (COFF)
@ MCSA_ELF_TypeFunction
.type _foo, STT_FUNC # aka @function
@ MCSA_Hidden
.hidden (ELF)
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:870
#define EQ(a, b)
Definition regexec.c:65
RegisterAsmPrinter - Helper template for registering a target specific assembly printer,...