1//===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains a printer that converts from our internal representation
10// of machine-dependent LLVM code to the AArch64 assembly language.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AArch64.h"
15#include "AArch64MCInstLower.h"
17#include "AArch64RegisterInfo.h"
18#include "AArch64Subtarget.h"
27#include "llvm/ADT/DenseMap.h"
28#include "llvm/ADT/ScopeExit.h"
31#include "llvm/ADT/StringRef.h"
32#include "llvm/ADT/Twine.h"
46#include "llvm/IR/DataLayout.h"
48#include "llvm/IR/Mangler.h"
49#include "llvm/IR/Module.h"
50#include "llvm/MC/MCAsmInfo.h"
51#include "llvm/MC/MCContext.h"
52#include "llvm/MC/MCExpr.h"
53#include "llvm/MC/MCInst.h"
57#include "llvm/MC/MCStreamer.h"
58#include "llvm/MC/MCSymbol.h"
59#include "llvm/MC/MCValue.h"
69#include <cassert>
70#include <cstdint>
71#include <map>
72#include <memory>
73
74using namespace llvm;
75
78 "aarch64-ptrauth-auth-checks", cl::Hidden,
79 cl::values(clEnumValN(Unchecked, "none", "don't test for failure"),
80 clEnumValN(Poison, "poison", "poison on failure"),
81 clEnumValN(Trap, "trap", "trap on failure")),
82 cl::desc("Check pointer authentication auth/resign failures"),
84
85#define DEBUG_TYPE "asm-printer"
86
87namespace {
88
89class AArch64AsmPrinter : public AsmPrinter {
90 AArch64MCInstLower MCInstLowering;
91 FaultMaps FM;
92 const AArch64Subtarget *STI;
93 bool ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = false;
94#ifndef NDEBUG
95 unsigned InstsEmitted;
96#endif
97 bool EnableImportCallOptimization = false;
98 DenseMap<MCSection *, std::vector<std::pair<MCSymbol *, MCSymbol *>>>
99 SectionToImportedFunctionCalls;
100 unsigned PAuthIFuncNextUniqueID = 1;
101
102public:
103 static char ID;
104
105 AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
106 : AsmPrinter(TM, std::move(Streamer), ID),
107 MCInstLowering(OutContext, *this), FM(*this) {}
108
109 StringRef getPassName() const override { return "AArch64 Assembly Printer"; }
110
111 /// Wrapper for MCInstLowering.lowerOperand() for the
112 /// tblgen'erated pseudo lowering.
113 bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
114 return MCInstLowering.lowerOperand(MO, MCOp);
115 }
116
117 const MCExpr *lowerConstantPtrAuth(const ConstantPtrAuth &CPA) override;
118
119 const MCExpr *lowerBlockAddressConstant(const BlockAddress &BA) override;
120
121 void emitStartOfAsmFile(Module &M) override;
122 void emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
123 ArrayRef<unsigned> JumpTableIndices) override;
124 std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
125 codeview::JumpTableEntrySize>
126 getCodeViewJumpTableInfo(int JTI, const MachineInstr *BranchInstr,
127 const MCSymbol *BranchLabel) const override;
128
129 void emitFunctionEntryLabel() override;
130
131 void emitXXStructor(const DataLayout &DL, const Constant *CV) override;
132
133 void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);
134
135 void LowerHardenedBRJumpTable(const MachineInstr &MI);
136
137 void LowerMOPS(MCStreamer &OutStreamer, const MachineInstr &MI);
138
139 void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
140 const MachineInstr &MI);
141 void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
142 const MachineInstr &MI);
143 void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
144 const MachineInstr &MI);
145 void LowerFAULTING_OP(const MachineInstr &MI);
146
147 void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
148 void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
149 void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);
150 void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, bool Typed);
151
152 typedef std::tuple<unsigned, bool, uint32_t, bool, uint64_t>
153 HwasanMemaccessTuple;
154 std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
155 void LowerKCFI_CHECK(const MachineInstr &MI);
156 void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
157 void emitHwasanMemaccessSymbols(Module &M);
158
159 void emitSled(const MachineInstr &MI, SledKind Kind);
160
161 // Returns whether Reg may be used to store sensitive temporary values when
162 // expanding PtrAuth pseudos. Some OSes may take extra care to protect a
163 // small subset of GPRs on context switches; in that case, use those registers.
164 //
165 // If there are no preferred registers, returns true for any Reg.
166 bool isPtrauthRegSafe(Register Reg) const {
167 if (STI->isX16X17Safer())
168 return Reg == AArch64::X16 || Reg == AArch64::X17;
169
170 return true;
171 }
172
173 // Emit the sequence for BRA/BLRA (authenticate + branch/call).
174 void emitPtrauthBranch(const MachineInstr *MI);
175
176 void emitPtrauthCheckAuthenticatedValue(Register TestedReg,
177 Register ScratchReg,
180 const MCSymbol *OnFailure = nullptr);
181
182 // Check authenticated LR before tail calling.
183 void emitPtrauthTailCallHardening(const MachineInstr *TC);
184
185 // Emit the sequence for AUT or AUTPAC.
186 void emitPtrauthAuthResign(Register AUTVal, AArch64PACKey::ID AUTKey,
187 uint64_t AUTDisc,
188 const MachineOperand *AUTAddrDisc,
189 Register Scratch,
190 std::optional<AArch64PACKey::ID> PACKey,
191 uint64_t PACDisc, Register PACAddrDisc, Value *DS);
192
193 // Emit R_AARCH64_PATCHINST, the deactivation symbol relocation. Returns true
194 // if no instruction should be emitted because the deactivation symbol is
195 // defined in the current module and this function has emitted a NOP in its place.
196 bool emitDeactivationSymbolRelocation(Value *DS);
197
198 // Emit the sequence for PAC.
199 void emitPtrauthSign(const MachineInstr *MI);
200
201 // Emit the sequence to compute the discriminator.
202 //
203 // The Scratch register passed to this function must be safe, as returned by
204 // isPtrauthRegSafe(ScratchReg).
205 //
206 // The returned register is either ScratchReg, AddrDisc, or XZR. Furthermore,
207 // it is guaranteed to be safe (or XZR), with the only exception of
208 // passing-through an *unmodified* unsafe AddrDisc register.
209 //
210 // If the expanded pseudo is allowed to clobber AddrDisc register, setting
211 // MayClobberAddrDisc may save one MOV instruction, provided
212 // isPtrauthRegSafe(AddrDisc) is true:
213 //
214 // mov x17, x16
215 // movk x17, #1234, lsl #48
216 // ; x16 is not used anymore
217 //
218 // can be replaced by
219 //
220 // movk x16, #1234, lsl #48
221 Register emitPtrauthDiscriminator(uint64_t Disc, Register AddrDisc,
222 Register ScratchReg,
223 bool MayClobberAddrDisc = false);
224
225 // Emit the sequence for LOADauthptrstatic
226 void LowerLOADauthptrstatic(const MachineInstr &MI);
227
228 // Emit the sequence for LOADgotPAC/MOVaddrPAC (either GOT adrp-ldr or
229 // adrp-add followed by PAC sign)
230 void LowerMOVaddrPAC(const MachineInstr &MI);
231
232 // Emit the sequence for LOADgotAUTH (load signed pointer from signed ELF GOT
233 // and authenticate it with, if FPAC bit is not set, check+trap sequence after
234 // authenticating)
235 void LowerLOADgotAUTH(const MachineInstr &MI);
236
237 void emitAddImm(MCRegister Val, int64_t Addend, MCRegister Tmp);
238 void emitAddress(MCRegister Reg, const MCExpr *Expr, MCRegister Tmp,
239 bool DSOLocal, const MCSubtargetInfo &STI);
240
241 const MCExpr *emitPAuthRelocationAsIRelative(
242 const MCExpr *Target, uint64_t Disc, AArch64PACKey::ID KeyID,
243 bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr);
244
245 /// tblgen'erated driver function for lowering simple MI->MC
246 /// pseudo instructions.
247 bool lowerPseudoInstExpansion(const MachineInstr *MI, MCInst &Inst);
248
249 // Emit Build Attributes
250 void emitAttributes(unsigned Flags, uint64_t PAuthABIPlatform,
251 uint64_t PAuthABIVersion, AArch64TargetStreamer *TS);
252
253 // Emit expansion of Compare-and-branch pseudo instructions
254 void emitCBPseudoExpansion(const MachineInstr *MI);
255
256 void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
257 void EmitToStreamer(const MCInst &Inst) {
258 EmitToStreamer(*OutStreamer, Inst);
259 }
260
261 void emitInstruction(const MachineInstr *MI) override;
262
263 void emitFunctionHeaderComment() override;
264
265 void getAnalysisUsage(AnalysisUsage &AU) const override {
266 AsmPrinter::getAnalysisUsage(AU);
267 AU.setPreservesAll();
268 }
269
270 bool runOnMachineFunction(MachineFunction &MF) override {
271 if (auto *PSIW = getAnalysisIfAvailable<ProfileSummaryInfoWrapperPass>())
272 PSI = &PSIW->getPSI();
273 if (auto *SDPIW =
274 getAnalysisIfAvailable<StaticDataProfileInfoWrapperPass>())
275 SDPI = &SDPIW->getStaticDataProfileInfo();
276
277 AArch64FI = MF.getInfo<AArch64FunctionInfo>();
278 STI = &MF.getSubtarget<AArch64Subtarget>();
279
280 SetupMachineFunction(MF);
281
282 if (STI->isTargetCOFF()) {
283 bool Local = MF.getFunction().hasLocalLinkage();
284 COFF::SymbolStorageClass Scl =
285 Local ? COFF::IMAGE_SYM_CLASS_STATIC : COFF::IMAGE_SYM_CLASS_EXTERNAL;
286 int Type =
287 COFF::IMAGE_SYM_DTYPE_FUNCTION << COFF::SCT_COMPLEX_TYPE_SHIFT;
288
289 OutStreamer->beginCOFFSymbolDef(CurrentFnSym);
290 OutStreamer->emitCOFFSymbolStorageClass(Scl);
291 OutStreamer->emitCOFFSymbolType(Type);
292 OutStreamer->endCOFFSymbolDef();
293 }
294
295 // Emit the rest of the function body.
296 emitFunctionBody();
297
298 // Emit the XRay table for this function.
299 emitXRayTable();
300
301 // We didn't modify anything.
302 return false;
303 }
304
305 const MCExpr *lowerConstant(const Constant *CV,
306 const Constant *BaseCV = nullptr,
307 uint64_t Offset = 0) override;
308
309private:
310 void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
311 bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
312 bool printAsmRegInClass(const MachineOperand &MO,
313 const TargetRegisterClass *RC, unsigned AltName,
314 raw_ostream &O);
315
316 bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
317 const char *ExtraCode, raw_ostream &O) override;
318 bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
319 const char *ExtraCode, raw_ostream &O) override;
320
321 void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
322
323 void emitFunctionBodyEnd() override;
324 void emitGlobalAlias(const Module &M, const GlobalAlias &GA) override;
325
326 MCSymbol *GetCPISymbol(unsigned CPID) const override;
327 void emitEndOfAsmFile(Module &M) override;
328
329 AArch64FunctionInfo *AArch64FI = nullptr;
330
331 /// Emit the LOHs contained in AArch64FI.
332 void emitLOHs();
333
334 void emitMovXReg(Register Dest, Register Src);
335 void emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift);
336 void emitMOVK(Register Dest, uint64_t Imm, unsigned Shift);
337
338 void emitAUT(AArch64PACKey::ID Key, Register Pointer, Register Disc);
339 void emitPAC(AArch64PACKey::ID Key, Register Pointer, Register Disc);
340 void emitBLRA(bool IsCall, AArch64PACKey::ID Key, Register Target,
341 Register Disc);
342
343 /// Emit instruction to set float register to zero.
344 void emitFMov0(const MachineInstr &MI);
345 void emitFMov0AsFMov(const MachineInstr &MI, Register DestReg);
346
347 using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;
348
349 MInstToMCSymbol LOHInstToLabel;
350
351 bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const override {
352 return ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags;
353 }
354
355 const MCSubtargetInfo *getIFuncMCSubtargetInfo() const override {
356 assert(STI);
357 return STI;
358 }
359 void emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
360 MCSymbol *LazyPointer) override;
361 void emitMachOIFuncStubHelperBody(Module &M, const GlobalIFunc &GI,
362 MCSymbol *LazyPointer) override;
363
364 /// Checks if this instruction is part of a sequence that is eligible for import
365 /// call optimization and, if so, records it to be emitted in the import call
366 /// section.
367 void recordIfImportCall(const MachineInstr *BranchInst);
368};
369
370} // end anonymous namespace
371
372void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
373 const Triple &TT = TM.getTargetTriple();
374
375 if (TT.isOSBinFormatCOFF()) {
376 emitCOFFFeatureSymbol(M);
377 emitCOFFReplaceableFunctionData(M);
378
379 if (M.getModuleFlag("import-call-optimization"))
380 EnableImportCallOptimization = true;
381 }
382
383 if (!TT.isOSBinFormatELF())
384 return;
385
386 // For emitting build attributes and .note.gnu.property section
387 auto *TS =
388 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
389 // Assemble feature flags that may require creation of build attributes and a
390 // note section.
391 unsigned BAFlags = 0;
392 unsigned GNUFlags = 0;
393 if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
394 M.getModuleFlag("branch-target-enforcement"))) {
395 if (!BTE->isZero()) {
396 BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_BTI_Flag;
397 GNUFlags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
398 }
399 }
400
401 if (const auto *GCS = mdconst::extract_or_null<ConstantInt>(
402 M.getModuleFlag("guarded-control-stack"))) {
403 if (!GCS->isZero()) {
404 BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_GCS_Flag;
405 GNUFlags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_GCS;
406 }
407 }
408
409 if (const auto *Sign = mdconst::extract_or_null<ConstantInt>(
410 M.getModuleFlag("sign-return-address"))) {
411 if (!Sign->isZero()) {
412 BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_PAC_Flag;
413 GNUFlags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_PAC;
414 }
415 }
416
417 uint64_t PAuthABIPlatform = -1;
418 if (const auto *PAP = mdconst::extract_or_null<ConstantInt>(
419 M.getModuleFlag("aarch64-elf-pauthabi-platform"))) {
420 PAuthABIPlatform = PAP->getZExtValue();
421 }
422
423 uint64_t PAuthABIVersion = -1;
424 if (const auto *PAV = mdconst::extract_or_null<ConstantInt>(
425 M.getModuleFlag("aarch64-elf-pauthabi-version"))) {
426 PAuthABIVersion = PAV->getZExtValue();
427 }
428
429 // Emit AArch64 Build Attributes
430 emitAttributes(BAFlags, PAuthABIPlatform, PAuthABIVersion, TS);
431 // Emit a .note.gnu.property section with the flags.
432 TS->emitNoteSection(GNUFlags, PAuthABIPlatform, PAuthABIVersion);
433}
434
435void AArch64AsmPrinter::emitFunctionHeaderComment() {
436 const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
437 std::optional<std::string> OutlinerString = FI->getOutliningStyle();
438 if (OutlinerString != std::nullopt)
439 OutStreamer->getCommentOS() << ' ' << OutlinerString;
440}
441
442void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI)
443{
444 const Function &F = MF->getFunction();
445 if (F.hasFnAttribute("patchable-function-entry")) {
446 unsigned Num;
447 if (F.getFnAttribute("patchable-function-entry")
448 .getValueAsString()
449 .getAsInteger(10, Num))
450 return;
451 emitNops(Num);
452 return;
453 }
454
455 emitSled(MI, SledKind::FUNCTION_ENTER);
456}
457
458void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI) {
459 emitSled(MI, SledKind::FUNCTION_EXIT);
460}
461
462void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI) {
463 emitSled(MI, SledKind::TAIL_CALL);
464}
465
466void AArch64AsmPrinter::emitSled(const MachineInstr &MI, SledKind Kind) {
467 static const int8_t NoopsInSledCount = 7;
468 // We want to emit the following pattern:
469 //
470 // .Lxray_sled_N:
471 // ALIGN
472 // B #32
473 // ; 7 NOP instructions (28 bytes)
474 // .tmpN
475 //
476 // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
477 // over the full 32 bytes (8 instructions) with the following pattern:
478 //
479 // STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
480 // LDR W17, #12 ; W17 := function ID
481 // LDR X16,#12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
482 // BLR X16 ; call the tracing trampoline
483 // ;DATA: 32 bits of function ID
484 // ;DATA: lower 32 bits of the address of the trampoline
485 // ;DATA: higher 32 bits of the address of the trampoline
486 // LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
487 //
488 OutStreamer->emitCodeAlignment(Align(4), &getSubtargetInfo());
489 auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
490 OutStreamer->emitLabel(CurSled);
491 auto Target = OutContext.createTempSymbol();
492
493 // Emit "B #32" instruction, which jumps over the next 28 bytes.
494 // The operand has to be the number of 4-byte instructions to jump over,
495 // including the current instruction.
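  // (Illustration: an immediate of 8 therefore means 8 * 4 = 32 bytes, i.e.
  // this branch plus the 7 NOPs emitted below.)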
496 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));
497
498 for (int8_t I = 0; I < NoopsInSledCount; I++)
499 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::NOP));
500
501 OutStreamer->emitLabel(Target);
502 recordSled(CurSled, MI, Kind, 2);
503}
504
505void AArch64AsmPrinter::emitAttributes(unsigned Flags,
506 uint64_t PAuthABIPlatform,
507 uint64_t PAuthABIVersion,
508 AArch64TargetStreamer *TS) {
509
510 PAuthABIPlatform = (uint64_t(-1) == PAuthABIPlatform) ? 0 : PAuthABIPlatform;
511 PAuthABIVersion = (uint64_t(-1) == PAuthABIVersion) ? 0 : PAuthABIVersion;
512
513 if (PAuthABIPlatform || PAuthABIVersion) {
514 TS->emitAttributesSubsection(
515 AArch64BuildAttributes::getVendorName(
516 AArch64BuildAttributes::AEABI_PAUTHABI),
517 AArch64BuildAttributes::SubsectionOptional::REQUIRED,
518 AArch64BuildAttributes::SubsectionType::ULEB128);
519 TS->emitAttribute(AArch64BuildAttributes::getVendorName(
520 AArch64BuildAttributes::AEABI_PAUTHABI),
521 AArch64BuildAttributes::TAG_PAUTH_PLATFORM,
522 PAuthABIPlatform, "");
523 TS->emitAttribute(AArch64BuildAttributes::getVendorName(
524 AArch64BuildAttributes::AEABI_PAUTHABI),
525 AArch64BuildAttributes::TAG_PAUTH_SCHEMA, PAuthABIVersion,
526 "");
527 }
528
529 unsigned BTIValue =
530 (Flags & AArch64BuildAttributes::FeatureAndBitsFlag::Feature_BTI_Flag) ? 1 : 0;
531 unsigned PACValue =
532 (Flags & AArch64BuildAttributes::FeatureAndBitsFlag::Feature_PAC_Flag) ? 1 : 0;
533 unsigned GCSValue =
534 (Flags & AArch64BuildAttributes::FeatureAndBitsFlag::Feature_GCS_Flag) ? 1 : 0;
535
536 if (BTIValue || PACValue || GCSValue) {
537 TS->emitAttributesSubsection(
538 AArch64BuildAttributes::getVendorName(
539 AArch64BuildAttributes::AEABI_FEATURE_AND_BITS),
540 AArch64BuildAttributes::SubsectionOptional::OPTIONAL,
541 AArch64BuildAttributes::SubsectionType::ULEB128);
542 TS->emitAttribute(AArch64BuildAttributes::getVendorName(
543 AArch64BuildAttributes::AEABI_FEATURE_AND_BITS),
544 AArch64BuildAttributes::TAG_FEATURE_BTI, BTIValue, "");
545 TS->emitAttribute(AArch64BuildAttributes::getVendorName(
546 AArch64BuildAttributes::AEABI_FEATURE_AND_BITS),
547 AArch64BuildAttributes::TAG_FEATURE_PAC, PACValue, "");
548 TS->emitAttribute(AArch64BuildAttributes::getVendorName(
549 AArch64BuildAttributes::AEABI_FEATURE_AND_BITS),
550 AArch64BuildAttributes::TAG_FEATURE_GCS, GCSValue, "");
551 }
552}
553
554// Emit the following code for Intrinsic::{xray_customevent,xray_typedevent}
555// (built-in functions __xray_customevent/__xray_typedevent).
556//
557// .Lxray_event_sled_N:
558// b 1f
559// save x0 and x1 (and also x2 for TYPED_EVENT_CALL)
560// set up x0 and x1 (and also x2 for TYPED_EVENT_CALL)
561// bl __xray_CustomEvent or __xray_TypedEvent
562// restore x0 and x1 (and also x2 for TYPED_EVENT_CALL)
563// 1:
564//
565// There are 6 instructions for EVENT_CALL and 9 for TYPED_EVENT_CALL.
566//
567// Then record a sled of kind CUSTOM_EVENT or TYPED_EVENT.
568// After patching, b .+N will become a nop.
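// (For reference: with the instruction counts above, the initial "b" skips
// 6 * 4 = 24 bytes for the custom event sled and 9 * 4 = 36 bytes for the
// typed one, matching the branch immediates used below.)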
569void AArch64AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
570 bool Typed) {
571 auto &O = *OutStreamer;
572 MCSymbol *CurSled = OutContext.createTempSymbol("xray_sled_", true);
573 O.emitLabel(CurSled);
574 bool MachO = TM.getTargetTriple().isOSBinFormatMachO();
575 auto *Sym = MCSymbolRefExpr::create(
576 OutContext.getOrCreateSymbol(
577 Twine(MachO ? "_" : "") +
578 (Typed ? "__xray_TypedEvent" : "__xray_CustomEvent")),
579 OutContext);
580 if (Typed) {
581 O.AddComment("Begin XRay typed event");
582 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(9));
583 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
584 .addReg(AArch64::SP)
585 .addReg(AArch64::X0)
586 .addReg(AArch64::X1)
587 .addReg(AArch64::SP)
588 .addImm(-4));
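    // (Note: the STP pre-index immediate above is scaled by the 8-byte
    // register size, so #-4 moves SP down by 32 bytes: X0 and X1 at the
    // bottom, plus the slot at offset 16 used for X2 just below.)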
589 EmitToStreamer(O, MCInstBuilder(AArch64::STRXui)
590 .addReg(AArch64::X2)
591 .addReg(AArch64::SP)
592 .addImm(2));
593 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
594 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
595 emitMovXReg(AArch64::X2, MI.getOperand(2).getReg());
596 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
597 EmitToStreamer(O, MCInstBuilder(AArch64::LDRXui)
598 .addReg(AArch64::X2)
599 .addReg(AArch64::SP)
600 .addImm(2));
601 O.AddComment("End XRay typed event");
602 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
603 .addReg(AArch64::SP)
604 .addReg(AArch64::X0)
605 .addReg(AArch64::X1)
606 .addReg(AArch64::SP)
607 .addImm(4));
608
609 recordSled(CurSled, MI, SledKind::TYPED_EVENT, 2);
610 } else {
611 O.AddComment("Begin XRay custom event");
612 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(6));
613 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
614 .addReg(AArch64::SP)
615 .addReg(AArch64::X0)
616 .addReg(AArch64::X1)
617 .addReg(AArch64::SP)
618 .addImm(-2));
619 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
620 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
621 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
622 O.AddComment("End XRay custom event");
623 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
624 .addReg(AArch64::SP)
625 .addReg(AArch64::X0)
626 .addReg(AArch64::X1)
627 .addReg(AArch64::SP)
628 .addImm(2));
629
630 recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 2);
631 }
632}
633
634void AArch64AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
635 Register AddrReg = MI.getOperand(0).getReg();
636 assert(std::next(MI.getIterator())->isCall() &&
637 "KCFI_CHECK not followed by a call instruction");
638 assert(std::next(MI.getIterator())->getOperand(0).getReg() == AddrReg &&
639 "KCFI_CHECK call target doesn't match call operand");
640
641 // Default to using the intra-procedure-call temporary registers for
642 // comparing the hashes.
643 unsigned ScratchRegs[] = {AArch64::W16, AArch64::W17};
644 if (AddrReg == AArch64::XZR) {
645 // Checking XZR makes no sense. Instead of emitting a load, zero
646 // ScratchRegs[0] and use it for the ESR AddrIndex below.
647 AddrReg = getXRegFromWReg(ScratchRegs[0]);
648 emitMovXReg(AddrReg, AArch64::XZR);
649 } else {
650 // If one of the scratch registers is used for the call target (e.g.
651 // with AArch64::TCRETURNriBTI), we can clobber another caller-saved
652 // temporary register instead (in this case, AArch64::W9) as the check
653 // is immediately followed by the call instruction.
654 for (auto &Reg : ScratchRegs) {
655 if (Reg == getWRegFromXReg(AddrReg)) {
656 Reg = AArch64::W9;
657 break;
658 }
659 }
660 assert(ScratchRegs[0] != AddrReg && ScratchRegs[1] != AddrReg &&
661 "Invalid scratch registers for KCFI_CHECK");
662
663 // Adjust the offset for patchable-function-prefix. This assumes that
664 // patchable-function-prefix is the same for all functions.
665 int64_t PrefixNops = 0;
666 (void)MI.getMF()
667 ->getFunction()
668 .getFnAttribute("patchable-function-prefix")
669 .getValueAsString()
670 .getAsInteger(10, PrefixNops);
671
672 // Load the target function type hash.
673 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDURWi)
674 .addReg(ScratchRegs[0])
675 .addReg(AddrReg)
676 .addImm(-(PrefixNops * 4 + 4)));
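    // (I.e. the expected hash word immediately precedes any
    // patchable-function-prefix NOPs, which in turn precede the entry the
    // call targets.)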
677 }
678
679 // Load the expected type hash.
680 const int64_t Type = MI.getOperand(1).getImm();
681 emitMOVK(ScratchRegs[1], Type & 0xFFFF, 0);
682 emitMOVK(ScratchRegs[1], (Type >> 16) & 0xFFFF, 16);
683
684 // Compare the hashes and trap if there's a mismatch.
685 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSWrs)
686 .addReg(AArch64::WZR)
687 .addReg(ScratchRegs[0])
688 .addReg(ScratchRegs[1])
689 .addImm(0));
690
691 MCSymbol *Pass = OutContext.createTempSymbol();
692 EmitToStreamer(*OutStreamer,
693 MCInstBuilder(AArch64::Bcc)
694 .addImm(AArch64CC::EQ)
695 .addExpr(MCSymbolRefExpr::create(Pass, OutContext)));
696
697 // The base ESR is 0x8000 and the register information is encoded in bits
698 // 0-9 as follows:
699 // - 0-4: n, where the register Xn contains the target address
700 // - 5-9: m, where the register Wm contains the expected type hash
701 // Where n, m are in [0, 30].
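  // For example, a target address in X1 checked against a hash in W17 would
  // encode as 0x8000 | (17 << 5) | 1 = 0x8221.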
702 unsigned TypeIndex = ScratchRegs[1] - AArch64::W0;
703 unsigned AddrIndex;
704 switch (AddrReg) {
705 default:
706 AddrIndex = AddrReg - AArch64::X0;
707 break;
708 case AArch64::FP:
709 AddrIndex = 29;
710 break;
711 case AArch64::LR:
712 AddrIndex = 30;
713 break;
714 }
715
716 assert(AddrIndex < 31 && TypeIndex < 31);
717
718 unsigned ESR = 0x8000 | ((TypeIndex & 31) << 5) | (AddrIndex & 31);
719 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BRK).addImm(ESR));
720 OutStreamer->emitLabel(Pass);
721}
722
723void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
724 Register Reg = MI.getOperand(0).getReg();
725
726 // The HWASan pass won't emit a CHECK_MEMACCESS intrinsic with a pointer
727 // statically known to be zero. However, conceivably, the HWASan pass may
728 // encounter a "cannot currently statically prove to be null" pointer (and is
729 // therefore unable to omit the intrinsic) that later optimization passes
730 // convert into a statically known-null pointer.
731 if (Reg == AArch64::XZR)
732 return;
733
734 bool IsShort =
735 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES) ||
736 (MI.getOpcode() ==
737 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
738 uint32_t AccessInfo = MI.getOperand(1).getImm();
739 bool IsFixedShadow =
740 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW) ||
741 (MI.getOpcode() ==
742 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
743 uint64_t FixedShadowOffset = IsFixedShadow ? MI.getOperand(2).getImm() : 0;
744
745 MCSymbol *&Sym = HwasanMemaccessSymbols[HwasanMemaccessTuple(
746 Reg, IsShort, AccessInfo, IsFixedShadow, FixedShadowOffset)];
747 if (!Sym) {
748 // FIXME: Make this work on non-ELF.
749 if (!TM.getTargetTriple().isOSBinFormatELF())
750 report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");
751
752 std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
753 utostr(AccessInfo);
754 if (IsFixedShadow)
755 SymName += "_fixed_" + utostr(FixedShadowOffset);
756 if (IsShort)
757 SymName += "_short_v2";
758 Sym = OutContext.getOrCreateSymbol(SymName);
759 }
760
761 EmitToStreamer(*OutStreamer,
762 MCInstBuilder(AArch64::BL)
763 .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
764}
765
766void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
767 if (HwasanMemaccessSymbols.empty())
768 return;
769
770 const Triple &TT = TM.getTargetTriple();
771 assert(TT.isOSBinFormatELF());
772 AArch64Subtarget STI(TT, TM.getTargetCPU(), TM.getTargetCPU(),
773 TM.getTargetFeatureString(), TM, true);
774 this->STI = &STI;
775
776 MCSymbol *HwasanTagMismatchV1Sym =
777 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");
778 MCSymbol *HwasanTagMismatchV2Sym =
779 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");
780
781 const MCSymbolRefExpr *HwasanTagMismatchV1Ref =
782 MCSymbolRefExpr::create(HwasanTagMismatchV1Sym, OutContext);
783 const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
784 MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);
785
786 for (auto &P : HwasanMemaccessSymbols) {
787 unsigned Reg = std::get<0>(P.first);
788 bool IsShort = std::get<1>(P.first);
789 uint32_t AccessInfo = std::get<2>(P.first);
790 bool IsFixedShadow = std::get<3>(P.first);
791 uint64_t FixedShadowOffset = std::get<4>(P.first);
792 const MCSymbolRefExpr *HwasanTagMismatchRef =
793 IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
794 MCSymbol *Sym = P.second;
795
796 bool HasMatchAllTag =
797 (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
798 uint8_t MatchAllTag =
799 (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
800 unsigned Size =
801 1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
802 bool CompileKernel =
803 (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;
804
805 OutStreamer->switchSection(OutContext.getELFSection(
806 ".text.hot", ELF::SHT_PROGBITS,
807 ELF::SHF_EXECINSTR | ELF::SHF_ALLOC | ELF::SHF_GROUP, 0, Sym->getName(),
808 /*IsComdat=*/true));
809
810 OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
811 OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
812 OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
813 OutStreamer->emitLabel(Sym);
814
815 EmitToStreamer(MCInstBuilder(AArch64::SBFMXri)
816 .addReg(AArch64::X16)
817 .addReg(Reg)
818 .addImm(4)
819 .addImm(55));
820
821 if (IsFixedShadow) {
822 // AArch64 makes it difficult to embed large constants in the code.
823 // Fortuitously, kShadowBaseAlignment == 32, so we use the 32-bit
824 // left-shift option in the MOV instruction. Combined with the 16-bit
825 // immediate, this is enough to represent any offset up to 2**48.
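      // (For instance, a hypothetical fixed shadow base of 0x100000000000
      // has FixedShadowOffset >> 32 == 0x1000, so the MOVZ below becomes
      // "x17 := 0x1000 << 32".)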
826 emitMOVZ(AArch64::X17, FixedShadowOffset >> 32, 32);
827 EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
828 .addReg(AArch64::W16)
829 .addReg(AArch64::X17)
830 .addReg(AArch64::X16)
831 .addImm(0)
832 .addImm(0));
833 } else {
834 EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
835 .addReg(AArch64::W16)
836 .addReg(IsShort ? AArch64::X20 : AArch64::X9)
837 .addReg(AArch64::X16)
838 .addImm(0)
839 .addImm(0));
840 }
841
842 EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
843 .addReg(AArch64::XZR)
844 .addReg(AArch64::X16)
845 .addReg(Reg)
846 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 56)));
847 MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
848 EmitToStreamer(MCInstBuilder(AArch64::Bcc)
849 .addImm(AArch64CC::NE)
850 .addExpr(MCSymbolRefExpr::create(
851 HandleMismatchOrPartialSym, OutContext)));
852 MCSymbol *ReturnSym = OutContext.createTempSymbol();
853 OutStreamer->emitLabel(ReturnSym);
854 EmitToStreamer(MCInstBuilder(AArch64::RET).addReg(AArch64::LR));
855 OutStreamer->emitLabel(HandleMismatchOrPartialSym);
856
857 if (HasMatchAllTag) {
858 EmitToStreamer(MCInstBuilder(AArch64::UBFMXri)
859 .addReg(AArch64::X17)
860 .addReg(Reg)
861 .addImm(56)
862 .addImm(63));
863 EmitToStreamer(MCInstBuilder(AArch64::SUBSXri)
864 .addReg(AArch64::XZR)
865 .addReg(AArch64::X17)
866 .addImm(MatchAllTag)
867 .addImm(0));
868 EmitToStreamer(
869 MCInstBuilder(AArch64::Bcc)
870 .addImm(AArch64CC::EQ)
871 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
872 }
873
874 if (IsShort) {
875 EmitToStreamer(MCInstBuilder(AArch64::SUBSWri)
876 .addReg(AArch64::WZR)
877 .addReg(AArch64::W16)
878 .addImm(15)
879 .addImm(0));
880 MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
881 EmitToStreamer(
882 MCInstBuilder(AArch64::Bcc)
883 .addImm(AArch64CC::HI)
884 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));
885
886 EmitToStreamer(MCInstBuilder(AArch64::ANDXri)
887 .addReg(AArch64::X17)
888 .addReg(Reg)
889 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
890 if (Size != 1)
891 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
892 .addReg(AArch64::X17)
893 .addReg(AArch64::X17)
894 .addImm(Size - 1)
895 .addImm(0));
896 EmitToStreamer(MCInstBuilder(AArch64::SUBSWrs)
897 .addReg(AArch64::WZR)
898 .addReg(AArch64::W16)
899 .addReg(AArch64::W17)
900 .addImm(0));
901 EmitToStreamer(
902 MCInstBuilder(AArch64::Bcc)
903 .addImm(AArch64CC::LS)
904 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));
905
906 EmitToStreamer(MCInstBuilder(AArch64::ORRXri)
907 .addReg(AArch64::X16)
908 .addReg(Reg)
909 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
910 EmitToStreamer(MCInstBuilder(AArch64::LDRBBui)
911 .addReg(AArch64::W16)
912 .addReg(AArch64::X16)
913 .addImm(0));
914 EmitToStreamer(
915 MCInstBuilder(AArch64::SUBSXrs)
916 .addReg(AArch64::XZR)
917 .addReg(AArch64::X16)
918 .addReg(Reg)
919 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 56)));
920 EmitToStreamer(
921 MCInstBuilder(AArch64::Bcc)
922 .addImm(AArch64CC::EQ)
923 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
924
925 OutStreamer->emitLabel(HandleMismatchSym);
926 }
927
928 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
929 .addReg(AArch64::SP)
930 .addReg(AArch64::X0)
931 .addReg(AArch64::X1)
932 .addReg(AArch64::SP)
933 .addImm(-32));
934 EmitToStreamer(MCInstBuilder(AArch64::STPXi)
935 .addReg(AArch64::FP)
936 .addReg(AArch64::LR)
937 .addReg(AArch64::SP)
938 .addImm(29));
939
940 if (Reg != AArch64::X0)
941 emitMovXReg(AArch64::X0, Reg);
942 emitMOVZ(AArch64::X1, AccessInfo & HWASanAccessInfo::RuntimeMask, 0);
943
944 if (CompileKernel) {
945 // The Linux kernel's dynamic loader doesn't support GOT relative
946 // relocations, but it doesn't support late binding either, so just call
947 // the function directly.
948 EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef));
949 } else {
950 // Intentionally load the GOT entry and branch to it, rather than possibly
951 // late binding the function, which may clobber the registers before we
952 // have a chance to save them.
953 EmitToStreamer(MCInstBuilder(AArch64::ADRP)
954 .addReg(AArch64::X16)
955 .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
956 AArch64::S_GOT_PAGE,
957 OutContext)));
958 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
959 .addReg(AArch64::X16)
960 .addReg(AArch64::X16)
961 .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
962 AArch64::S_GOT_LO12,
963 OutContext)));
964 EmitToStreamer(MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
965 }
966 }
967 this->STI = nullptr;
968}
969
970static void emitAuthenticatedPointer(MCStreamer &OutStreamer,
971 MCSymbol *StubLabel,
972 const MCExpr *StubAuthPtrRef) {
973 // sym$auth_ptr$key$disc:
974 OutStreamer.emitLabel(StubLabel);
975 OutStreamer.emitValue(StubAuthPtrRef, /*size=*/8);
976}
977
978void AArch64AsmPrinter::emitEndOfAsmFile(Module &M) {
979 emitHwasanMemaccessSymbols(M);
980
981 const Triple &TT = TM.getTargetTriple();
982 if (TT.isOSBinFormatMachO()) {
983 // Output authenticated pointers as indirect symbols, if we have any.
984 MachineModuleInfoMachO &MMIMacho =
985 MMI->getObjFileInfo<MachineModuleInfoMachO>();
986
987 auto Stubs = MMIMacho.getAuthGVStubList();
988
989 if (!Stubs.empty()) {
990 // Switch to the "__auth_ptr" section.
991 OutStreamer->switchSection(
992 OutContext.getMachOSection("__DATA", "__auth_ptr", MachO::S_REGULAR,
993 SectionKind::getMetadata()));
994 emitAlignment(Align(8));
995
996 for (const auto &Stub : Stubs)
997 emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
998
999 OutStreamer->addBlankLine();
1000 }
1001
1002 // Funny Darwin hack: This flag tells the linker that no global symbols
1003 // contain code that falls through to other global symbols (e.g. the obvious
1004 // implementation of multiple entry points). If this doesn't occur, the
1005 // linker can safely perform dead code stripping. Since LLVM never
1006 // generates code that does this, it is always safe to set.
1007 OutStreamer->emitSubsectionsViaSymbols();
1008 }
1009
1010 if (TT.isOSBinFormatELF()) {
1011 // Output authenticated pointers as indirect symbols, if we have any.
1012 MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
1013
1014 auto Stubs = MMIELF.getAuthGVStubList();
1015
1016 if (!Stubs.empty()) {
1017 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
1018 OutStreamer->switchSection(TLOF.getDataSection());
1019 emitAlignment(Align(8));
1020
1021 for (const auto &Stub : Stubs)
1022 emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
1023
1024 OutStreamer->addBlankLine();
1025 }
1026
1027 // With signed ELF GOT enabled, the linker looks at the symbol type to
1028 // choose between keys IA (for STT_FUNC) and DA (for other types). Symbols
1029 // for functions not defined in the module have STT_NOTYPE type by default.
1030 // This makes the linker emit a signing schema with the DA key (instead of IA)
1031 // for the corresponding R_AARCH64_AUTH_GLOB_DAT dynamic reloc. To avoid that,
1032 // force all function symbols used in the module to have STT_FUNC type. See
1033 // https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#default-signing-schema
1034 const auto *PtrAuthELFGOTFlag = mdconst::extract_or_null<ConstantInt>(
1035 M.getModuleFlag("ptrauth-elf-got"));
1036 if (PtrAuthELFGOTFlag && PtrAuthELFGOTFlag->getZExtValue() == 1)
1037 for (const GlobalValue &GV : M.global_values())
1038 if (!GV.use_empty() && isa<Function>(GV) &&
1039 !GV.getName().starts_with("llvm."))
1040 OutStreamer->emitSymbolAttribute(getSymbol(&GV),
1041 MCSA_ELF_TypeFunction);
1042 }
1043
1044 // Emit stack and fault map information.
1045 FM.serializeToFaultMapSection();
1046
1047 // If import call optimization is enabled, emit the appropriate section.
1048 // We do this whether or not we recorded any import calls.
1049 if (EnableImportCallOptimization && TT.isOSBinFormatCOFF()) {
1050 OutStreamer->switchSection(getObjFileLowering().getImportCallSection());
1051
1052 // Section always starts with some magic.
1053 constexpr char ImpCallMagic[12] = "Imp_Call_V1";
1054 OutStreamer->emitBytes(StringRef{ImpCallMagic, sizeof(ImpCallMagic)});
1055
1056 // Layout of this section is:
1057 // Per section that contains calls to imported functions:
1058 // uint32_t SectionSize: Size in bytes for information in this section.
1059 // uint32_t Section Number
1060 // Per call to imported function in section:
1061 // uint32_t Kind: the kind of imported function.
1062 // uint32_t BranchOffset: the offset of the branch instruction in its
1063 // parent section.
1064 // uint32_t TargetSymbolId: the symbol id of the called function.
1065 for (auto &[Section, CallsToImportedFuncs] :
1066 SectionToImportedFunctionCalls) {
1067 unsigned SectionSize =
1068 sizeof(uint32_t) * (2 + 3 * CallsToImportedFuncs.size());
1069 OutStreamer->emitInt32(SectionSize);
1070 OutStreamer->emitCOFFSecNumber(Section->getBeginSymbol());
1071 for (auto &[CallsiteSymbol, CalledSymbol] : CallsToImportedFuncs) {
1072 // Kind is always IMAGE_REL_ARM64_DYNAMIC_IMPORT_CALL (0x13).
1073 OutStreamer->emitInt32(0x13);
1074 OutStreamer->emitCOFFSecOffset(CallsiteSymbol);
1075 OutStreamer->emitCOFFSymbolIndex(CalledSymbol);
1076 }
1077 }
1078 }
1079}
1080
1081void AArch64AsmPrinter::emitLOHs() {
1082 SmallVector<MCSymbol *, 3> MCArgs;
1083
1084 for (const auto &D : AArch64FI->getLOHContainer()) {
1085 for (const MachineInstr *MI : D.getArgs()) {
1086 MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
1087 assert(LabelIt != LOHInstToLabel.end() &&
1088 "Label hasn't been inserted for LOH related instruction");
1089 MCArgs.push_back(LabelIt->second);
1090 }
1091 OutStreamer->emitLOHDirective(D.getKind(), MCArgs);
1092 MCArgs.clear();
1093 }
1094}
1095
1096void AArch64AsmPrinter::emitFunctionBodyEnd() {
1097 if (!AArch64FI->getLOHRelated().empty())
1098 emitLOHs();
1099}
1100
1101/// GetCPISymbol - Return the symbol for the specified constant pool entry.
1102MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
1103 // Darwin uses a linker-private symbol name for constant-pools (to
1104 // avoid addends on the relocation?), ELF has no such concept and
1105 // uses a normal private symbol.
1106 if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
1107 return OutContext.getOrCreateSymbol(
1108 Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
1109 Twine(getFunctionNumber()) + "_" + Twine(CPID));
1110
1111 return AsmPrinter::GetCPISymbol(CPID);
1112}
1113
1114void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
1115 raw_ostream &O) {
1116 const MachineOperand &MO = MI->getOperand(OpNum);
1117 switch (MO.getType()) {
1118 default:
1119 llvm_unreachable("<unknown operand type>");
1120 case MachineOperand::MO_Register: {
1121 Register Reg = MO.getReg();
1122 assert(Reg.isPhysical());
1123 assert(!MO.getSubReg() && "Subregs should be eliminated!");
1124 O << AArch64InstPrinter::getRegisterName(Reg);
1125 break;
1126 }
1127 case MachineOperand::MO_Immediate: {
1128 O << MO.getImm();
1129 break;
1130 }
1131 case MachineOperand::MO_GlobalAddress: {
1132 PrintSymbolOperand(MO, O);
1133 break;
1134 }
1135 case MachineOperand::MO_BlockAddress: {
1136 MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
1137 Sym->print(O, MAI);
1138 break;
1139 }
1140 }
1141}
1142
1143bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
1144 raw_ostream &O) {
1145 Register Reg = MO.getReg();
1146 switch (Mode) {
1147 default:
1148 return true; // Unknown mode.
1149 case 'w':
1150 Reg = getWRegFromXReg(Reg);
1151 break;
1152 case 'x':
1153 Reg = getXRegFromWReg(Reg);
1154 break;
1155 case 't':
1156 Reg = getXRegFromXRegTuple(Reg);
1157 break;
1158 }
1159
1160 O << AArch64InstPrinter::getRegisterName(Reg);
1161 return false;
1162}
1163
1164 // Prints the register in MO, remapped into register class RC via its
1165 // encoding offset within that class. This should not be used for cross-class
1166 // printing.
1167bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
1168 const TargetRegisterClass *RC,
1169 unsigned AltName, raw_ostream &O) {
1170 assert(MO.isReg() && "Should only get here with a register!");
1171 const TargetRegisterInfo *RI = STI->getRegisterInfo();
1172 Register Reg = MO.getReg();
1173 MCRegister RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
1174 if (!RI->regsOverlap(RegToPrint, Reg))
1175 return true;
1176 O << AArch64InstPrinter::getRegisterName(RegToPrint, AltName);
1177 return false;
1178}
1179
1180bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
1181 const char *ExtraCode, raw_ostream &O) {
1182 const MachineOperand &MO = MI->getOperand(OpNum);
1183
1184 // First try the generic code, which knows about modifiers like 'c' and 'n'.
1185 if (!AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O))
1186 return false;
1187
1188 // Does this asm operand have a single letter operand modifier?
1189 if (ExtraCode && ExtraCode[0]) {
1190 if (ExtraCode[1] != 0)
1191 return true; // Unknown modifier.
1192
1193 switch (ExtraCode[0]) {
1194 default:
1195 return true; // Unknown modifier.
1196 case 'w': // Print W register
1197 case 'x': // Print X register
1198 if (MO.isReg())
1199 return printAsmMRegister(MO, ExtraCode[0], O);
1200 if (MO.isImm() && MO.getImm() == 0) {
1201 unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
1202 O << AArch64InstPrinter::getRegisterName(Reg);
1203 return false;
1204 }
1205 printOperand(MI, OpNum, O);
1206 return false;
1207 case 'b': // Print B register.
1208 case 'h': // Print H register.
1209 case 's': // Print S register.
1210 case 'd': // Print D register.
1211 case 'q': // Print Q register.
1212 case 'z': // Print Z register.
1213 if (MO.isReg()) {
1214 const TargetRegisterClass *RC;
1215 switch (ExtraCode[0]) {
1216 case 'b':
1217 RC = &AArch64::FPR8RegClass;
1218 break;
1219 case 'h':
1220 RC = &AArch64::FPR16RegClass;
1221 break;
1222 case 's':
1223 RC = &AArch64::FPR32RegClass;
1224 break;
1225 case 'd':
1226 RC = &AArch64::FPR64RegClass;
1227 break;
1228 case 'q':
1229 RC = &AArch64::FPR128RegClass;
1230 break;
1231 case 'z':
1232 RC = &AArch64::ZPRRegClass;
1233 break;
1234 default:
1235 return true;
1236 }
1237 return printAsmRegInClass(MO, RC, AArch64::NoRegAltName, O);
1238 }
1239 printOperand(MI, OpNum, O);
1240 return false;
1241 }
1242 }
1243
1244 // According to ARM, we should emit x and v registers unless we have a
1245 // modifier.
1246 if (MO.isReg()) {
1247 Register Reg = MO.getReg();
1248
1249 // If this is a w or x register, print an x register.
1250 if (AArch64::GPR32allRegClass.contains(Reg) ||
1251 AArch64::GPR64allRegClass.contains(Reg))
1252 return printAsmMRegister(MO, 'x', O);
1253
1254 // If this is an x register tuple, print an x register.
1255 if (AArch64::GPR64x8ClassRegClass.contains(Reg))
1256 return printAsmMRegister(MO, 't', O);
1257
1258 unsigned AltName = AArch64::NoRegAltName;
1259 const TargetRegisterClass *RegClass;
1260 if (AArch64::ZPRRegClass.contains(Reg)) {
1261 RegClass = &AArch64::ZPRRegClass;
1262 } else if (AArch64::PPRRegClass.contains(Reg)) {
1263 RegClass = &AArch64::PPRRegClass;
1264 } else if (AArch64::PNRRegClass.contains(Reg)) {
1265 RegClass = &AArch64::PNRRegClass;
1266 } else {
1267 RegClass = &AArch64::FPR128RegClass;
1268 AltName = AArch64::vreg;
1269 }
1270
1271 // If this is a b, h, s, d, or q register, print it as a v register.
1272 return printAsmRegInClass(MO, RegClass, AltName, O);
1273 }
1274
1275 printOperand(MI, OpNum, O);
1276 return false;
1277}
1278
1279bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
1280 unsigned OpNum,
1281 const char *ExtraCode,
1282 raw_ostream &O) {
1283 if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
1284 return true; // Unknown modifier.
1285
1286 const MachineOperand &MO = MI->getOperand(OpNum);
1287 assert(MO.isReg() && "unexpected inline asm memory operand");
1288 O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
1289 return false;
1290}
1291
1292void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
1293 raw_ostream &OS) {
1294 unsigned NOps = MI->getNumOperands();
1295 assert(NOps == 4);
1296 OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
1297 // cast away const; the DI* debug-info classes do not take const operands for some reason.
1298 OS << MI->getDebugVariable()->getName();
1299 OS << " <- ";
1300 // Frame address. Currently handles register +- offset only.
1301 assert(MI->isIndirectDebugValue());
1302 OS << '[';
1303 for (unsigned I = 0, E = llvm::size(MI->debug_operands()); I < E; ++I) {
1304 if (I != 0)
1305 OS << ", ";
1306 printOperand(MI, I, OS);
1307 }
1308 OS << ']';
1309 OS << "+";
1310 printOperand(MI, NOps - 2, OS);
1311}
1312}
1313void AArch64AsmPrinter::emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
1314 ArrayRef<unsigned> JumpTableIndices) {
1315 // Fast return if there is nothing to emit to avoid creating empty sections.
1316 if (JumpTableIndices.empty())
1317 return;
1318 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
1319 const auto &F = MF->getFunction();
1320 const std::vector<MachineJumpTableEntry> &JT = MJTI.getJumpTables();
1321
1322 MCSection *ReadOnlySec = nullptr;
1323 if (TM.Options.EnableStaticDataPartitioning) {
1324 ReadOnlySec =
1325 TLOF.getSectionForJumpTable(F, TM, &JT[JumpTableIndices.front()]);
1326 } else {
1327 ReadOnlySec = TLOF.getSectionForJumpTable(F, TM);
1328 }
1329 OutStreamer->switchSection(ReadOnlySec);
1330
1331 auto AFI = MF->getInfo<AArch64FunctionInfo>();
1332 for (unsigned JTI : JumpTableIndices) {
1333 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
1334
1335 // If this jump table was deleted, ignore it.
1336 if (JTBBs.empty()) continue;
1337
1338 unsigned Size = AFI->getJumpTableEntrySize(JTI);
1339 emitAlignment(Align(Size));
1340 OutStreamer->emitLabel(GetJTISymbol(JTI));
1341
1342 const MCSymbol *BaseSym = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1343 const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);
1344
1345 for (auto *JTBB : JTBBs) {
1346 const MCExpr *Value =
1347 MCSymbolRefExpr::create(JTBB->getSymbol(), OutContext);
1348
1349 // Each entry is:
1350 // .byte/.hword (LBB - Lbase)>>2
1351 // or plain:
1352 // .word LBB - Lbase
1353 Value = MCBinaryExpr::createSub(Value, Base, OutContext);
1354 if (Size != 4)
1355 Value = MCBinaryExpr::createLShr(
1356 Value, MCConstantExpr::create(2, OutContext), OutContext);
1357
1358 OutStreamer->emitValue(Value, Size);
1359 }
1360 }
1361}
1362
1363std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
1364 codeview::JumpTableEntrySize>
1365 AArch64AsmPrinter::getCodeViewJumpTableInfo(int JTI,
1366 const MachineInstr *BranchInstr,
1367 const MCSymbol *BranchLabel) const {
1368 const auto AFI = MF->getInfo<AArch64FunctionInfo>();
1369 const auto Base = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1370 codeview::JumpTableEntrySize EntrySize;
1371 switch (AFI->getJumpTableEntrySize(JTI)) {
1372 case 1:
1373 EntrySize = codeview::JumpTableEntrySize::UInt8ShiftLeft;
1374 break;
1375 case 2:
1376 EntrySize = codeview::JumpTableEntrySize::UInt16ShiftLeft;
1377 break;
1378 case 4:
1379 EntrySize = codeview::JumpTableEntrySize::Int32;
1380 break;
1381 default:
1382 llvm_unreachable("Unexpected jump table entry size");
1383 }
1384 return std::make_tuple(Base, 0, BranchLabel, EntrySize);
1385}
1386
1387void AArch64AsmPrinter::emitFunctionEntryLabel() {
1388 const Triple &TT = TM.getTargetTriple();
1389 if (TT.isOSBinFormatELF() &&
1390 (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
1391 MF->getFunction().getCallingConv() ==
1392 CallingConv::AArch64_SVE_VectorCall ||
1393 MF->getInfo<AArch64FunctionInfo>()->isSVECC())) {
1394 auto *TS =
1395 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
1396 TS->emitDirectiveVariantPCS(CurrentFnSym);
1397 }
1398
1399 AsmPrinter::emitFunctionEntryLabel();
1400
1401 if (TT.isWindowsArm64EC() && !MF->getFunction().hasLocalLinkage()) {
1402 // For ARM64EC targets, a function definition's name is mangled differently
1403 // from the normal symbol, emit required aliases here.
1404 auto emitFunctionAlias = [&](MCSymbol *Src, MCSymbol *Dst) {
1405 OutStreamer->emitSymbolAttribute(Src, MCSA_WeakAntiDep);
1406 OutStreamer->emitAssignment(
1407 Src, MCSymbolRefExpr::create(Dst, MMI->getContext()));
1408 };
1409
1410 auto getSymbolFromMetadata = [&](StringRef Name) {
1411 MCSymbol *Sym = nullptr;
1412 if (MDNode *Node = MF->getFunction().getMetadata(Name)) {
1413 StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
1414 Sym = MMI->getContext().getOrCreateSymbol(NameStr);
1415 }
1416 return Sym;
1417 };
1418
1419 SmallVector<MDNode *> UnmangledNames;
1420 MF->getFunction().getMetadata("arm64ec_unmangled_name", UnmangledNames);
1421 for (MDNode *Node : UnmangledNames) {
1422 StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
1423 MCSymbol *UnmangledSym = MMI->getContext().getOrCreateSymbol(NameStr);
1424 if (std::optional<std::string> MangledName =
1425 getArm64ECMangledFunctionName(UnmangledSym->getName())) {
1426 MCSymbol *ECMangledSym =
1427 MMI->getContext().getOrCreateSymbol(*MangledName);
1428 emitFunctionAlias(UnmangledSym, ECMangledSym);
1429 }
1430 }
1431 if (MCSymbol *ECMangledSym =
1432 getSymbolFromMetadata("arm64ec_ecmangled_name"))
1433 emitFunctionAlias(ECMangledSym, CurrentFnSym);
1434 }
1435}
1436
1437void AArch64AsmPrinter::emitXXStructor(const DataLayout &DL,
1438 const Constant *CV) {
1439 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(CV))
1440 if (CPA->hasAddressDiscriminator() &&
1441 !CPA->hasSpecialAddressDiscriminator(
1444 "unexpected address discrimination value for ctors/dtors entry, only "
1445 "'ptr inttoptr (i64 1 to ptr)' is allowed");
1446 // If we have signed pointers in xxstructors list, they'll be lowered to @AUTH
1447 // MCExpr's via AArch64AsmPrinter::lowerConstantPtrAuth. It does not look at
1448 // the actual address discrimination value and only checks
1449 // hasAddressDiscriminator(), so it is OK to leave the special address
1450 // discrimination value here.
1451 AsmPrinter::emitXXStructor(DL, CV);
1452}
1453
1454void AArch64AsmPrinter::emitGlobalAlias(const Module &M,
1455 const GlobalAlias &GA) {
1456 if (auto F = dyn_cast_or_null<Function>(GA.getAliasee())) {
1457 // Global aliases must point to a definition, but unmangled patchable
1458 // symbols are special and need to point to an undefined symbol with "EXP+"
1459 // prefix. Such an undefined symbol is resolved by the linker, which creates an
1460 // x86 thunk that jumps back to the actual EC target.
1461 if (MDNode *Node = F->getMetadata("arm64ec_exp_name")) {
1462 StringRef ExpStr = cast<MDString>(Node->getOperand(0))->getString();
1463 MCSymbol *ExpSym = MMI->getContext().getOrCreateSymbol(ExpStr);
1464 MCSymbol *Sym = MMI->getContext().getOrCreateSymbol(GA.getName());
1465
1466 OutStreamer->beginCOFFSymbolDef(ExpSym);
1467 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1468 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1469 << COFF::SCT_COMPLEX_TYPE_SHIFT);
1470 OutStreamer->endCOFFSymbolDef();
1471
1472 OutStreamer->beginCOFFSymbolDef(Sym);
1473 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1474 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1475 << COFF::SCT_COMPLEX_TYPE_SHIFT);
1476 OutStreamer->endCOFFSymbolDef();
1477 OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
1478 OutStreamer->emitAssignment(
1479 Sym, MCSymbolRefExpr::create(ExpSym, MMI->getContext()));
1480 return;
1481 }
1482 }
1483 AsmPrinter::emitGlobalAlias(M, GA);
1484}
1485
1486/// Small jump tables contain an unsigned byte or half, representing the offset
1487/// from the lowest-addressed possible destination to the desired basic
1488/// block. Since all instructions are 4-byte aligned, this is further compressed
1489/// by counting in instructions rather than bytes (i.e. divided by 4). So, to
1490/// materialize the correct destination we need:
1491///
1492/// adr xDest, .LBB0_0
1493/// ldrb wScratch, [xTable, xEntry] (with "lsl #1" for ldrh).
1494/// add xDest, xDest, xScratch (with "lsl #2" for smaller entries)
1495void AArch64AsmPrinter::LowerJumpTableDest(llvm::MCStreamer &OutStreamer,
1496 const llvm::MachineInstr &MI) {
1497 Register DestReg = MI.getOperand(0).getReg();
1498 Register ScratchReg = MI.getOperand(1).getReg();
1499 Register ScratchRegW =
1500 STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
1501 Register TableReg = MI.getOperand(2).getReg();
1502 Register EntryReg = MI.getOperand(3).getReg();
1503 int JTIdx = MI.getOperand(4).getIndex();
1504 int Size = AArch64FI->getJumpTableEntrySize(JTIdx);
1505
1506 // This has to be first because the compression pass bases its reachability
1507 // calculations on the start of the JumpTableDest instruction.
1508 auto Label =
1509 MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);
1510
1511 // If we don't already have a symbol to use as the base, use the ADR
1512 // instruction itself.
1513 if (!Label) {
1514 Label = MF->getContext().createTempSymbol();
1515 AArch64FI->setJumpTableEntryInfo(JTIdx, Size, Label);
1516 OutStreamer.emitLabel(Label);
1517 }
1518
1519 auto LabelExpr = MCSymbolRefExpr::create(Label, MF->getContext());
1520 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
1521 .addReg(DestReg)
1522 .addExpr(LabelExpr));
1523
1524 // Load the number of instruction-steps to offset from the label.
1525 unsigned LdrOpcode;
1526 switch (Size) {
1527 case 1: LdrOpcode = AArch64::LDRBBroX; break;
1528 case 2: LdrOpcode = AArch64::LDRHHroX; break;
1529 case 4: LdrOpcode = AArch64::LDRSWroX; break;
1530 default:
1531 llvm_unreachable("Unknown jump table size");
1532 }
1533
1534 EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
1535 .addReg(Size == 4 ? ScratchReg : ScratchRegW)
1536 .addReg(TableReg)
1537 .addReg(EntryReg)
1538 .addImm(0)
1539 .addImm(Size == 1 ? 0 : 1));
1540
1541 // Add to the already materialized base label address, multiplying by 4 if
1542 // compressed.
1543 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1544 .addReg(DestReg)
1545 .addReg(DestReg)
1546 .addReg(ScratchReg)
1547 .addImm(Size == 4 ? 0 : 2));
1548}
1549
1550void AArch64AsmPrinter::LowerHardenedBRJumpTable(const MachineInstr &MI) {
1551 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
1552 assert(MJTI && "Can't lower jump-table dispatch without JTI");
1553
1554 const std::vector<MachineJumpTableEntry> &JTs = MJTI->getJumpTables();
1555 assert(!JTs.empty() && "Invalid JT index for jump-table dispatch");
1556
1557 // Emit:
1558 // mov x17, #<size of table> ; depending on table size, with MOVKs
1559 // cmp x16, x17 ; or #imm if table size fits in 12-bit
1560 // csel x16, x16, xzr, ls ; check for index overflow
1561 //
1562 // adrp x17, Ltable@PAGE ; materialize table address
1563 // add x17, Ltable@PAGEOFF
1564 // ldrsw x16, [x17, x16, lsl #2] ; load table entry
1565 //
1566 // Lanchor:
1567 // adr x17, Lanchor ; compute target address
1568 // add x16, x17, x16
1569 // br x16 ; branch to target
1570
1571 MachineOperand JTOp = MI.getOperand(0);
1572
1573 unsigned JTI = JTOp.getIndex();
1574 assert(!AArch64FI->getJumpTableEntryPCRelSymbol(JTI) &&
1575 "unsupported compressed jump table");
1576
1577 const uint64_t NumTableEntries = JTs[JTI].MBBs.size();
1578
1579 // cmp only supports a 12-bit immediate. If we need more, materialize the
1580 // immediate, using x17 as a scratch register.
1581 uint64_t MaxTableEntry = NumTableEntries - 1;
1582 if (isUInt<12>(MaxTableEntry)) {
1583 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXri)
1584 .addReg(AArch64::XZR)
1585 .addReg(AArch64::X16)
1586 .addImm(MaxTableEntry)
1587 .addImm(0));
1588 } else {
1589 emitMOVZ(AArch64::X17, static_cast<uint16_t>(MaxTableEntry), 0);
1590 // It's sad that we have to manually materialize instructions, but we can't
1591 // trivially reuse the main pseudo expansion logic.
1592 // A MOVK sequence is easy enough to generate and handles the general case.
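  // (For example, a hypothetical MaxTableEntry of 0x12345 expands to
  // MOVZ x17, #0x2345 followed by MOVK x17, #0x1, LSL #16.)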
1593 for (int Offset = 16; Offset < 64; Offset += 16) {
1594 if ((MaxTableEntry >> Offset) == 0)
1595 break;
1596 emitMOVK(AArch64::X17, static_cast<uint16_t>(MaxTableEntry >> Offset),
1597 Offset);
1598 }
1599 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXrs)
1600 .addReg(AArch64::XZR)
1601 .addReg(AArch64::X16)
1602 .addReg(AArch64::X17)
1603 .addImm(0));
1604 }
1605
1606 // This picks entry #0 on failure.
1607 // We might want to trap instead.
1608 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::CSELXr)
1609 .addReg(AArch64::X16)
1610 .addReg(AArch64::X16)
1611 .addReg(AArch64::XZR)
1612 .addImm(AArch64CC::LS));
1613
1614 // Prepare the @PAGE/@PAGEOFF low/high operands.
1615 MachineOperand JTMOHi(JTOp), JTMOLo(JTOp);
1616 MCOperand JTMCHi, JTMCLo;
1617
1618 JTMOHi.setTargetFlags(AArch64II::MO_PAGE);
1619 JTMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
1620
1621 MCInstLowering.lowerOperand(JTMOHi, JTMCHi);
1622 MCInstLowering.lowerOperand(JTMOLo, JTMCLo);
1623
1624 EmitToStreamer(
1625 *OutStreamer,
1626 MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(JTMCHi));
1627
1628 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXri)
1629 .addReg(AArch64::X17)
1630 .addReg(AArch64::X17)
1631 .addOperand(JTMCLo)
1632 .addImm(0));
1633
1634 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRSWroX)
1635 .addReg(AArch64::X16)
1636 .addReg(AArch64::X17)
1637 .addReg(AArch64::X16)
1638 .addImm(0)
1639 .addImm(1));
1640
1641 MCSymbol *AdrLabel = MF->getContext().createTempSymbol();
1642 const auto *AdrLabelE = MCSymbolRefExpr::create(AdrLabel, MF->getContext());
1643 AArch64FI->setJumpTableEntryInfo(JTI, 4, AdrLabel);
1644
1645 OutStreamer->emitLabel(AdrLabel);
1646 EmitToStreamer(
1647 *OutStreamer,
1648 MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addExpr(AdrLabelE));
1649
1650 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1651 .addReg(AArch64::X16)
1652 .addReg(AArch64::X17)
1653 .addReg(AArch64::X16)
1654 .addImm(0));
1655
1656 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
1657}
1658
1659void AArch64AsmPrinter::LowerMOPS(llvm::MCStreamer &OutStreamer,
1660 const llvm::MachineInstr &MI) {
1661 unsigned Opcode = MI.getOpcode();
1662 assert(STI->hasMOPS());
1663 assert(STI->hasMTE() || Opcode != AArch64::MOPSMemorySetTaggingPseudo);
1664
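 // Each MOPS pseudo expands to the architectural prologue/main/epilogue
 // triple with the same operands; roughly, for MOPSMemoryCopyPseudo:
 //   cpyfp [xd]!, [xs]!, xn!
 //   cpyfm [xd]!, [xs]!, xn!
 //   cpyfe [xd]!, [xs]!, xn!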
1665 const auto Ops = [Opcode]() -> std::array<unsigned, 3> {
1666 if (Opcode == AArch64::MOPSMemoryCopyPseudo)
1667 return {AArch64::CPYFP, AArch64::CPYFM, AArch64::CPYFE};
1668 if (Opcode == AArch64::MOPSMemoryMovePseudo)
1669 return {AArch64::CPYP, AArch64::CPYM, AArch64::CPYE};
1670 if (Opcode == AArch64::MOPSMemorySetPseudo)
1671 return {AArch64::SETP, AArch64::SETM, AArch64::SETE};
1672 if (Opcode == AArch64::MOPSMemorySetTaggingPseudo)
1673 return {AArch64::SETGP, AArch64::SETGM, AArch64::MOPSSETGE};
1674 llvm_unreachable("Unhandled memory operation pseudo");
1675 }();
1676 const bool IsSet = Opcode == AArch64::MOPSMemorySetPseudo ||
1677 Opcode == AArch64::MOPSMemorySetTaggingPseudo;
1678
1679 for (auto Op : Ops) {
1680 int i = 0;
1681 auto MCIB = MCInstBuilder(Op);
1682 // Destination registers
1683 MCIB.addReg(MI.getOperand(i++).getReg());
1684 MCIB.addReg(MI.getOperand(i++).getReg());
1685 if (!IsSet)
1686 MCIB.addReg(MI.getOperand(i++).getReg());
1687 // Input registers
1688 MCIB.addReg(MI.getOperand(i++).getReg());
1689 MCIB.addReg(MI.getOperand(i++).getReg());
1690 MCIB.addReg(MI.getOperand(i++).getReg());
1691
1692 EmitToStreamer(OutStreamer, MCIB);
1693 }
1694}
1695
1696void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
1697 const MachineInstr &MI) {
1698 unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();
1699
1700 auto &Ctx = OutStreamer.getContext();
1701 MCSymbol *MILabel = Ctx.createTempSymbol();
1702 OutStreamer.emitLabel(MILabel);
1703
1704 SM.recordStackMap(*MILabel, MI);
1705 assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1706
1707 // Scan ahead to trim the shadow.
1708 const MachineBasicBlock &MBB = *MI.getParent();
1709 MachineBasicBlock::const_iterator MII(MI);
1710 ++MII;
1711 while (NumNOPBytes > 0) {
1712 if (MII == MBB.end() || MII->isCall() ||
1713 MII->getOpcode() == AArch64::DBG_VALUE ||
1714 MII->getOpcode() == TargetOpcode::PATCHPOINT ||
1715 MII->getOpcode() == TargetOpcode::STACKMAP)
1716 break;
1717 ++MII;
1718 NumNOPBytes -= 4;
1719 }
1720
1721 // Emit nops.
1722 for (unsigned i = 0; i < NumNOPBytes; i += 4)
1723 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1724}
1725
1726// Lower a patchpoint of the form:
1727// [<def>], <id>, <numBytes>, <target>, <numArgs>
1728void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1729 const MachineInstr &MI) {
1730 auto &Ctx = OutStreamer.getContext();
1731 MCSymbol *MILabel = Ctx.createTempSymbol();
1732 OutStreamer.emitLabel(MILabel);
1733 SM.recordPatchPoint(*MILabel, MI);
1734
1735 PatchPointOpers Opers(&MI);
1736
1737 int64_t CallTarget = Opers.getCallTarget().getImm();
1738 unsigned EncodedBytes = 0;
1739 if (CallTarget) {
1740 assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
1741 "High 16 bits of call target should be zero.");
1742 Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
1743 EncodedBytes = 16;
1744 // Materialize the jump address:
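 // For a 48-bit call target this expands to (xScratch being the scratch
 // register selected above):
 //   movz xScratch, #((target >> 32) & 0xffff), lsl #32
 //   movk xScratch, #((target >> 16) & 0xffff), lsl #16
 //   movk xScratch, #(target & 0xffff)
 //   blr  xScratch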
1745 emitMOVZ(ScratchReg, (CallTarget >> 32) & 0xFFFF, 32);
1746 emitMOVK(ScratchReg, (CallTarget >> 16) & 0xFFFF, 16);
1747 emitMOVK(ScratchReg, CallTarget & 0xFFFF, 0);
1748 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
1749 }
1750 // Emit padding.
1751 unsigned NumBytes = Opers.getNumPatchBytes();
1752 assert(NumBytes >= EncodedBytes &&
1753 "Patchpoint can't request size less than the length of a call.");
1754 assert((NumBytes - EncodedBytes) % 4 == 0 &&
1755 "Invalid number of NOP bytes requested!");
1756 for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
1757 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1758}
1759
1760void AArch64AsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1761 const MachineInstr &MI) {
1762 StatepointOpers SOpers(&MI);
1763 if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
1764 assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1765 for (unsigned i = 0; i < PatchBytes; i += 4)
1766 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1767 } else {
1768 // Lower call target and choose correct opcode
1769 const MachineOperand &CallTarget = SOpers.getCallTarget();
1770 MCOperand CallTargetMCOp;
1771 unsigned CallOpcode;
1772 switch (CallTarget.getType()) {
1773 case MachineOperand::MO_GlobalAddress:
1774 case MachineOperand::MO_ExternalSymbol:
1775 MCInstLowering.lowerOperand(CallTarget, CallTargetMCOp);
1776 CallOpcode = AArch64::BL;
1777 break;
1778 case MachineOperand::MO_Immediate:
1779 CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
1780 CallOpcode = AArch64::BL;
1781 break;
1782 case MachineOperand::MO_Register:
1783 CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
1784 CallOpcode = AArch64::BLR;
1785 break;
1786 default:
1787 llvm_unreachable("Unsupported operand type in statepoint call target");
1788 break;
1789 }
1790
1791 EmitToStreamer(OutStreamer,
1792 MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
1793 }
1794
1795 auto &Ctx = OutStreamer.getContext();
1796 MCSymbol *MILabel = Ctx.createTempSymbol();
1797 OutStreamer.emitLabel(MILabel);
1798 SM.recordStatepoint(*MILabel, MI);
1799}
1800
1801void AArch64AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI) {
1802 // FAULTING_LOAD_OP <def>, <faulting type>, <MBB handler>,
1803 // <opcode>, <operands>
1804
1805 Register DefRegister = FaultingMI.getOperand(0).getReg();
1806 FaultMaps::FaultKind FK =
1807 static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
1808 MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
1809 unsigned Opcode = FaultingMI.getOperand(3).getImm();
1810 unsigned OperandsBeginIdx = 4;
1811
1812 auto &Ctx = OutStreamer->getContext();
1813 MCSymbol *FaultingLabel = Ctx.createTempSymbol();
1814 OutStreamer->emitLabel(FaultingLabel);
1815
1816 assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
1817 FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);
1818
1819 MCInst MI;
1820 MI.setOpcode(Opcode);
1821
1822 if (DefRegister != (Register)0)
1823 MI.addOperand(MCOperand::createReg(DefRegister));
1824
1825 for (const MachineOperand &MO :
1826 llvm::drop_begin(FaultingMI.operands(), OperandsBeginIdx)) {
1827 MCOperand Dest;
1828 lowerOperand(MO, Dest);
1829 MI.addOperand(Dest);
1830 }
1831
1832 OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
1833 EmitToStreamer(MI);
1834}
1835
1836void AArch64AsmPrinter::emitMovXReg(Register Dest, Register Src) {
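 // "mov Xd, Xs" is an alias of "orr Xd, xzr, Xs, lsl #0", which is the form
 // emitted here.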
1837 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ORRXrs)
1838 .addReg(Dest)
1839 .addReg(AArch64::XZR)
1840 .addReg(Src)
1841 .addImm(0));
1842}
1843
1844void AArch64AsmPrinter::emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift) {
1845 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1846 EmitToStreamer(*OutStreamer,
1847 MCInstBuilder(Is64Bit ? AArch64::MOVZXi : AArch64::MOVZWi)
1848 .addReg(Dest)
1849 .addImm(Imm)
1850 .addImm(Shift));
1851}
1852
1853void AArch64AsmPrinter::emitMOVK(Register Dest, uint64_t Imm, unsigned Shift) {
1854 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1855 EmitToStreamer(*OutStreamer,
1856 MCInstBuilder(Is64Bit ? AArch64::MOVKXi : AArch64::MOVKWi)
1857 .addReg(Dest)
1858 .addReg(Dest)
1859 .addImm(Imm)
1860 .addImm(Shift));
1861}
1862
1863void AArch64AsmPrinter::emitAUT(AArch64PACKey::ID Key, Register Pointer,
1864 Register Disc) {
1865 bool IsZeroDisc = Disc == AArch64::XZR;
1866 unsigned Opcode = getAUTOpcodeForKey(Key, IsZeroDisc);
1867
1868 // autiza x16 ; if IsZeroDisc
1869 // autia x16, x17 ; if !IsZeroDisc
1870 MCInst AUTInst;
1871 AUTInst.setOpcode(Opcode);
1872 AUTInst.addOperand(MCOperand::createReg(Pointer));
1873 AUTInst.addOperand(MCOperand::createReg(Pointer));
1874 if (!IsZeroDisc)
1875 AUTInst.addOperand(MCOperand::createReg(Disc));
1876
1877 EmitToStreamer(AUTInst);
1878}
1879
1880void AArch64AsmPrinter::emitPAC(AArch64PACKey::ID Key, Register Pointer,
1881 Register Disc) {
1882 bool IsZeroDisc = Disc == AArch64::XZR;
1883 unsigned Opcode = getPACOpcodeForKey(Key, IsZeroDisc);
1884
1885 // paciza x16 ; if IsZeroDisc
1886 // pacia x16, x17 ; if !IsZeroDisc
1887 MCInst PACInst;
1888 PACInst.setOpcode(Opcode);
1889 PACInst.addOperand(MCOperand::createReg(Pointer));
1890 PACInst.addOperand(MCOperand::createReg(Pointer));
1891 if (!IsZeroDisc)
1892 PACInst.addOperand(MCOperand::createReg(Disc));
1893
1894 EmitToStreamer(PACInst);
1895}
1896
1897void AArch64AsmPrinter::emitBLRA(bool IsCall, AArch64PACKey::ID Key,
1898 Register Target, Register Disc) {
1899 bool IsZeroDisc = Disc == AArch64::XZR;
1900 unsigned Opcode = getBranchOpcodeForKey(IsCall, Key, IsZeroDisc);
1901
1902 // blraaz x16 ; if IsZeroDisc
1903 // blraa x16, x17 ; if !IsZeroDisc
1904 MCInst Inst;
1905 Inst.setOpcode(Opcode);
1906 Inst.addOperand(MCOperand::createReg(Target));
1907 if (!IsZeroDisc)
1908 Inst.addOperand(MCOperand::createReg(Disc));
1909 EmitToStreamer(Inst);
1910}
1911
1912void AArch64AsmPrinter::emitFMov0(const MachineInstr &MI) {
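 // Zero the FP/SIMD destination register. When NEON is available and the
 // zero-cycle-zeroing FP workaround is not needed, prefer a MOVI form that
 // benefits from zero-cycle zeroing:
 //   movi d0, #0 / movi v0.2d, #0   ; zero-cycle zeroing of FPR64/FPR128
 //   fmov s0, wzr / fmov d0, xzr    ; otherwise (see emitFMov0AsFMov)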
1913 Register DestReg = MI.getOperand(0).getReg();
1914 if (!STI->hasZeroCycleZeroingFPWorkaround() && STI->isNeonAvailable()) {
1915 if (STI->hasZeroCycleZeroingFPR64()) {
1916 // Convert H/S register to corresponding D register
1917 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
1918 if (AArch64::FPR16RegClass.contains(DestReg))
1919 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
1920 &AArch64::FPR64RegClass);
1921 else if (AArch64::FPR32RegClass.contains(DestReg))
1922 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
1923 &AArch64::FPR64RegClass);
1924 else
1925 assert(AArch64::FPR64RegClass.contains(DestReg));
1926
1927 MCInst MOVI;
1928 MOVI.setOpcode(AArch64::MOVID);
1929 MOVI.addOperand(MCOperand::createReg(DestReg));
1930 MOVI.addOperand(MCOperand::createImm(0));
1931 EmitToStreamer(*OutStreamer, MOVI);
1932 } else if (STI->hasZeroCycleZeroingFPR128()) {
1933 // Convert H/S/D register to corresponding Q register
1934 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
1935 if (AArch64::FPR16RegClass.contains(DestReg)) {
1936 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
1937 &AArch64::FPR128RegClass);
1938 } else if (AArch64::FPR32RegClass.contains(DestReg)) {
1939 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
1940 &AArch64::FPR128RegClass);
1941 } else {
1942 assert(AArch64::FPR64RegClass.contains(DestReg));
1943 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::dsub,
1944 &AArch64::FPR128RegClass);
1945 }
1946
1947 MCInst MOVI;
1948 MOVI.setOpcode(AArch64::MOVIv2d_ns);
1949 MOVI.addOperand(MCOperand::createReg(DestReg));
1950 MOVI.addOperand(MCOperand::createImm(0));
1951 EmitToStreamer(*OutStreamer, MOVI);
1952 } else {
1953 emitFMov0AsFMov(MI, DestReg);
1954 }
1955 } else {
1956 emitFMov0AsFMov(MI, DestReg);
1957 }
1958}
1959
1960void AArch64AsmPrinter::emitFMov0AsFMov(const MachineInstr &MI,
1961 Register DestReg) {
1962 MCInst FMov;
1963 switch (MI.getOpcode()) {
1964 default:
1965 llvm_unreachable("Unexpected opcode");
1966 case AArch64::FMOVH0:
1967 FMov.setOpcode(STI->hasFullFP16() ? AArch64::FMOVWHr : AArch64::FMOVWSr);
1968 if (!STI->hasFullFP16())
1969 DestReg = (AArch64::S0 + (DestReg - AArch64::H0));
1970 FMov.addOperand(MCOperand::createReg(DestReg));
1971 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1972 break;
1973 case AArch64::FMOVS0:
1974 FMov.setOpcode(AArch64::FMOVWSr);
1975 FMov.addOperand(MCOperand::createReg(DestReg));
1976 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1977 break;
1978 case AArch64::FMOVD0:
1979 FMov.setOpcode(AArch64::FMOVXDr);
1980 FMov.addOperand(MCOperand::createReg(DestReg));
1981 FMov.addOperand(MCOperand::createReg(AArch64::XZR));
1982 break;
1983 }
1984 EmitToStreamer(*OutStreamer, FMov);
1985}
1986
1987Register AArch64AsmPrinter::emitPtrauthDiscriminator(uint64_t Disc,
1988 Register AddrDisc,
1989 Register ScratchReg,
1990 bool MayClobberAddrDisc) {
1991 assert(isPtrauthRegSafe(ScratchReg) &&
1992 "Safe scratch register must be provided by the caller");
1993 assert(isUInt<16>(Disc) && "Constant discriminator is too wide");
1994
1995 // So far we've used NoRegister in pseudos. Now we need real encodings.
1996 if (AddrDisc == AArch64::NoRegister)
1997 AddrDisc = AArch64::XZR;
1998
1999 // If there is no constant discriminator, there's no blend involved:
2000 // just use the address discriminator register as-is (XZR or not).
2001 if (!Disc)
2002 return AddrDisc;
2003
2004 // If there's only a constant discriminator, MOV it into the scratch register.
2005 if (AddrDisc == AArch64::XZR) {
2006 emitMOVZ(ScratchReg, Disc, 0);
2007 return ScratchReg;
2008 }
2009
2010 // If there are both, emit a blend into the scratch register.
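 // For example, with Disc == 0x1234, AddrDisc == x8, and x17 as the scratch
 // register, this emits:
 //   mov  x17, x8
 //   movk x17, #0x1234, lsl #48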
2011
2012 // Check if we can save one MOV instruction.
2013 if (MayClobberAddrDisc && isPtrauthRegSafe(AddrDisc)) {
2014 ScratchReg = AddrDisc;
2015 } else {
2016 emitMovXReg(ScratchReg, AddrDisc);
2017 assert(ScratchReg != AddrDisc &&
2018 "Forbidden to clobber AddrDisc, but have to");
2019 }
2020
2021 emitMOVK(ScratchReg, Disc, 48);
2022 return ScratchReg;
2023}
2024
2025/// Emit a code sequence to check an authenticated pointer value.
2026///
2027/// This function emits a sequence of instructions that checks if TestedReg was
2028/// authenticated successfully. On success, execution continues at the next
2029/// instruction after the sequence.
2030///
2031/// The action performed on failure depends on the OnFailure argument:
2032/// * if OnFailure is not nullptr, control is transferred to that label after
2033/// clearing the PAC field
2034 /// * otherwise, a BRK instruction is emitted to generate an error
2035void AArch64AsmPrinter::emitPtrauthCheckAuthenticatedValue(
2036 Register TestedReg, Register ScratchReg, AArch64PACKey::ID Key,
2037 AArch64PAuth::AuthCheckMethod Method, const MCSymbol *OnFailure) {
2038 // Insert a sequence to check if authentication of TestedReg succeeded,
2039 // such as:
2040 //
2041 // - checked and clearing:
2042 // ; x16 is TestedReg, x17 is ScratchReg
2043 // mov x17, x16
2044 // xpaci x17
2045 // cmp x16, x17
2046 // b.eq Lsuccess
2047 // mov x16, x17
2048 // b Lend
2049 // Lsuccess:
2050 // ; skipped if authentication failed
2051 // Lend:
2052 // ...
2053 //
2054 // - checked and trapping:
2055 // mov x17, x16
2056 // xpaci x17
2057 // cmp x16, x17
2058 // b.eq Lsuccess
2059 // brk #<0xc470 + aut key>
2060 // Lsuccess:
2061 // ...
2062 //
2063 // See the documentation on AuthCheckMethod enumeration constants for
2064 // the specific code sequences that can be used to perform the check.
2065 using AArch64PAuth::AuthCheckMethod;
2066
2067 if (Method == AuthCheckMethod::None)
2068 return;
2069 if (Method == AuthCheckMethod::DummyLoad) {
2070 EmitToStreamer(MCInstBuilder(AArch64::LDRWui)
2071 .addReg(getWRegFromXReg(ScratchReg))
2072 .addReg(TestedReg)
2073 .addImm(0));
2074 assert(!OnFailure && "DummyLoad always traps on error");
2075 return;
2076 }
2077
2078 MCSymbol *SuccessSym = createTempSymbol("auth_success_");
2079 if (Method == AuthCheckMethod::XPAC || Method == AuthCheckMethod::XPACHint) {
2080 // mov Xscratch, Xtested
2081 emitMovXReg(ScratchReg, TestedReg);
2082
2083 if (Method == AuthCheckMethod::XPAC) {
2084 // xpac(i|d) Xscratch
2085 unsigned XPACOpc = getXPACOpcodeForKey(Key);
2086 EmitToStreamer(
2087 MCInstBuilder(XPACOpc).addReg(ScratchReg).addReg(ScratchReg));
2088 } else {
2089 // xpaclri
2090
2091 // Note that this method applies XPAC to TestedReg instead of ScratchReg.
2092 assert(TestedReg == AArch64::LR &&
2093 "XPACHint mode is only compatible with checking the LR register");
2095 "XPACHint mode is only compatible with I-keys");
2096 EmitToStreamer(MCInstBuilder(AArch64::XPACLRI));
2097 }
2098
2099 // cmp Xtested, Xscratch
2100 EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
2101 .addReg(AArch64::XZR)
2102 .addReg(TestedReg)
2103 .addReg(ScratchReg)
2104 .addImm(0));
2105
2106 // b.eq Lsuccess
2107 EmitToStreamer(
2108 MCInstBuilder(AArch64::Bcc)
2109 .addImm(AArch64CC::EQ)
2110 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
2111 } else if (Method == AuthCheckMethod::HighBitsNoTBI) {
2112 // eor Xscratch, Xtested, Xtested, lsl #1
2113 EmitToStreamer(MCInstBuilder(AArch64::EORXrs)
2114 .addReg(ScratchReg)
2115 .addReg(TestedReg)
2116 .addReg(TestedReg)
2117 .addImm(1));
2118 // tbz Xscratch, #62, Lsuccess
2119 EmitToStreamer(
2120 MCInstBuilder(AArch64::TBZX)
2121 .addReg(ScratchReg)
2122 .addImm(62)
2123 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
2124 } else {
2125 llvm_unreachable("Unsupported check method");
2126 }
2127
2128 if (!OnFailure) {
2129 // Trapping sequences do a 'brk'.
2130 // brk #<0xc470 + aut key>
2131 EmitToStreamer(MCInstBuilder(AArch64::BRK).addImm(0xc470 | Key));
2132 } else {
2133 // Non-trapping checked sequences return the stripped result in TestedReg,
2134 // skipping over success-only code (such as re-signing the pointer) by
2135 // jumping to OnFailure label.
2136 // Note that this can introduce an authentication oracle (such as based on
2137 // the high bits of the re-signed value).
2138
2139 // FIXME: The XPAC method can be optimized by applying XPAC to TestedReg
2140 // instead of ScratchReg, thus eliminating one `mov` instruction.
2141 // Both XPAC and XPACHint can be further optimized by not using a
2142 // conditional branch jumping over an unconditional one.
2143
2144 switch (Method) {
2145 case AuthCheckMethod::XPACHint:
2146 // LR is already XPAC-ed at this point.
2147 break;
2148 case AuthCheckMethod::XPAC:
2149 // mov Xtested, Xscratch
2150 emitMovXReg(TestedReg, ScratchReg);
2151 break;
2152 default:
2153 // If Xtested was not XPAC-ed so far, emit XPAC here.
2154 // xpac(i|d) Xtested
2155 unsigned XPACOpc = getXPACOpcodeForKey(Key);
2156 EmitToStreamer(
2157 MCInstBuilder(XPACOpc).addReg(TestedReg).addReg(TestedReg));
2158 }
2159
2160 // b Lend
2161 const auto *OnFailureExpr = MCSymbolRefExpr::create(OnFailure, OutContext);
2162 EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(OnFailureExpr));
2163 }
2164
2165 // If the auth check succeeds, we can continue.
2166 // Lsuccess:
2167 OutStreamer->emitLabel(SuccessSym);
2168}
2169
2170 // With Pointer Authentication, it may be necessary to explicitly check the
2171// authenticated value in LR before performing a tail call.
2172// Otherwise, the callee may re-sign the invalid return address,
2173// introducing a signing oracle.
2174void AArch64AsmPrinter::emitPtrauthTailCallHardening(const MachineInstr *TC) {
2175 if (!AArch64FI->shouldSignReturnAddress(*MF))
2176 return;
2177
2178 auto LRCheckMethod = STI->getAuthenticatedLRCheckMethod(*MF);
2179 if (LRCheckMethod == AArch64PAuth::AuthCheckMethod::None)
2180 return;
2181
2182 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
2183 Register ScratchReg =
2184 TC->readsRegister(AArch64::X16, TRI) ? AArch64::X17 : AArch64::X16;
2185 assert(!TC->readsRegister(ScratchReg, TRI) &&
2186 "Neither x16 nor x17 is available as a scratch register");
2187 AArch64PACKey::ID Key =
2188 AArch64FI->shouldSignWithBKey() ? AArch64PACKey::IB : AArch64PACKey::IA;
2189 emitPtrauthCheckAuthenticatedValue(AArch64::LR, ScratchReg, Key,
2190 LRCheckMethod);
2191}
2192
2193bool AArch64AsmPrinter::emitDeactivationSymbolRelocation(Value *DS) {
2194 if (!DS)
2195 return false;
2196
2197 if (isa<GlobalAlias>(DS)) {
2198 // Just emit the nop directly.
2199 EmitToStreamer(MCInstBuilder(AArch64::NOP));
2200 return true;
2201 }
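 // Otherwise, emit a label at the current position and an
 // R_AARCH64_PATCHINST relocation against the deactivation symbol, marking
 // the following instruction as patchable (e.g. into a NOP) when the
 // deactivation symbol is in effect.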
2202 MCSymbol *Dot = OutContext.createTempSymbol();
2203 OutStreamer->emitLabel(Dot);
2204 const MCExpr *DeactDotExpr = MCSymbolRefExpr::create(Dot, OutContext);
2205
2206 const MCExpr *DSExpr = MCSymbolRefExpr::create(
2207 OutContext.getOrCreateSymbol(DS->getName()), OutContext);
2208 OutStreamer->emitRelocDirective(*DeactDotExpr, "R_AARCH64_PATCHINST", DSExpr,
2209 SMLoc());
2210 return false;
2211}
2212
2213void AArch64AsmPrinter::emitPtrauthAuthResign(
2214 Register AUTVal, AArch64PACKey::ID AUTKey, uint64_t AUTDisc,
2215 const MachineOperand *AUTAddrDisc, Register Scratch,
2216 std::optional<AArch64PACKey::ID> PACKey, uint64_t PACDisc,
2217 Register PACAddrDisc, Value *DS) {
2218 const bool IsAUTPAC = PACKey.has_value();
2219
2220 // We expand AUT/AUTPAC into a sequence of the form
2221 //
2222 // ; authenticate x16
2223 // ; check pointer in x16
2224 // Lsuccess:
2225 // ; sign x16 (if AUTPAC)
2226 // Lend: ; if not trapping on failure
2227 //
2228 // with the checking sequence chosen depending on whether/how we should check
2229 // the pointer and whether we should trap on failure.
2230
2231 // By default, auth/resign sequences check for auth failures.
2232 bool ShouldCheck = true;
2233 // In the checked sequence, we only trap if explicitly requested.
2234 bool ShouldTrap = MF->getFunction().hasFnAttribute("ptrauth-auth-traps");
2235
2236 // On an FPAC CPU, you get traps whether you want them or not: there's
2237 // no point in emitting checks or traps.
2238 if (STI->hasFPAC())
2239 ShouldCheck = ShouldTrap = false;
2240
2241 // However, command-line flags can override this, for experimentation.
2242 switch (PtrauthAuthChecks) {
2243 case PtrauthCheckMode::Default:
2244 break;
2245 case PtrauthCheckMode::Unchecked:
2246 ShouldCheck = ShouldTrap = false;
2247 break;
2248 case PtrauthCheckMode::Poison:
2249 ShouldCheck = true;
2250 ShouldTrap = false;
2251 break;
2252 case PtrauthCheckMode::Trap:
2253 ShouldCheck = ShouldTrap = true;
2254 break;
2255 }
2256
2257 // Compute aut discriminator
2258 Register AUTDiscReg = emitPtrauthDiscriminator(
2259 AUTDisc, AUTAddrDisc->getReg(), Scratch, AUTAddrDisc->isKill());
2260
2261 if (!emitDeactivationSymbolRelocation(DS))
2262 emitAUT(AUTKey, AUTVal, AUTDiscReg);
2263
2264 // Unchecked or checked-but-non-trapping AUT is just an "AUT": we're done.
2265 if (!IsAUTPAC && (!ShouldCheck || !ShouldTrap))
2266 return;
2267
2268 MCSymbol *EndSym = nullptr;
2269
2270 if (ShouldCheck) {
2271 if (IsAUTPAC && !ShouldTrap)
2272 EndSym = createTempSymbol("resign_end_");
2273
2274 emitPtrauthCheckAuthenticatedValue(
2275 AUTVal, Scratch, AUTKey, AArch64PAuth::AuthCheckMethod::XPAC, EndSym);
2276 }
2277
2278 // We already emitted unchecked and checked-but-non-trapping AUTs.
2279 // That left us with trapping AUTs, and AUTPACs.
2280 // Trapping AUTs don't need PAC: we're done.
2281 if (!IsAUTPAC)
2282 return;
2283
2284 // Compute pac discriminator
2285 Register PACDiscReg = emitPtrauthDiscriminator(PACDisc, PACAddrDisc, Scratch);
2286 emitPAC(*PACKey, AUTVal, PACDiscReg);
2287
2288 // Lend:
2289 if (EndSym)
2290 OutStreamer->emitLabel(EndSym);
2291}
2292
2293void AArch64AsmPrinter::emitPtrauthSign(const MachineInstr *MI) {
2294 Register Val = MI->getOperand(1).getReg();
2295 auto Key = (AArch64PACKey::ID)MI->getOperand(2).getImm();
2296 uint64_t Disc = MI->getOperand(3).getImm();
2297 Register AddrDisc = MI->getOperand(4).getReg();
2298 bool AddrDiscKilled = MI->getOperand(4).isKill();
2299
2300 // As long as at least one of Val and AddrDisc is in GPR64noip, a scratch
2301 // register is available.
2302 Register ScratchReg = Val == AArch64::X16 ? AArch64::X17 : AArch64::X16;
2303 assert(ScratchReg != AddrDisc &&
2304 "Neither X16 nor X17 is available as a scratch register");
2305
2306 // Compute pac discriminator
2307 Register DiscReg = emitPtrauthDiscriminator(
2308 Disc, AddrDisc, ScratchReg, /*MayClobberAddrDisc=*/AddrDiscKilled);
2309
2310 if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol()))
2311 return;
2312
2313 emitPAC(Key, Val, DiscReg);
2314}
2315
2316void AArch64AsmPrinter::emitPtrauthBranch(const MachineInstr *MI) {
2317 bool IsCall = MI->getOpcode() == AArch64::BLRA;
2318 unsigned BrTarget = MI->getOperand(0).getReg();
2319
2320 auto Key = (AArch64PACKey::ID)MI->getOperand(1).getImm();
2321 uint64_t Disc = MI->getOperand(2).getImm();
2322
2323 unsigned AddrDisc = MI->getOperand(3).getReg();
2324
2325 // Make sure AddrDisc is solely used to compute the discriminator.
2326 // While hardly meaningful, it is still possible to describe an authentication
2327 // of a pointer against its own value (instead of storage address) with
2328 // intrinsics, so use report_fatal_error instead of assert.
2329 if (BrTarget == AddrDisc)
2330 report_fatal_error("Branch target is signed with its own value");
2331
2332 // If we are printing BLRA pseudo, try to save one MOV by making use of the
2333 // fact that x16 and x17 are described as clobbered by the MI instruction and
2334 // AddrDisc is not used as any other input.
2335 //
2336 // Back in the day, emitPtrauthDiscriminator was restricted to only returning
2337 // either x16 or x17, meaning the returned register is always among the
2338 // implicit-def'ed registers of BLRA pseudo. Now this property can be violated
2339 // if the isX16X17Safer predicate is false, so manually check whether
2340 // AddrDisc is x16 or x17 to avoid clobbering unexpected registers.
2341 //
2342 // Unlike BLRA, BRA pseudo is used to perform computed goto, and thus not
2343 // declared as clobbering x16/x17.
2344 //
2345 // FIXME: Make use of `killed` flags and register masks instead.
2346 bool AddrDiscIsImplicitDef =
2347 IsCall && (AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17);
2348 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17,
2349 AddrDiscIsImplicitDef);
2350 emitBLRA(IsCall, Key, BrTarget, DiscReg);
2351}
2352
2353void AArch64AsmPrinter::emitAddImm(MCRegister Reg, int64_t Addend,
2354 MCRegister Tmp) {
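 // Addends whose magnitude fits in 24 bits become one or two add/sub
 // immediates:
 //   add|sub Reg, Reg, #(abs & 0xfff)
 //   add|sub Reg, Reg, #(abs >> 12), lsl #12
 // Larger addends are first materialized into Tmp with movz/movn + movk and
 // then added as "add Reg, Reg, Tmp".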
2355 if (Addend != 0) {
2356 const uint64_t AbsOffset = (Addend > 0 ? Addend : -((uint64_t)Addend));
2357 const bool IsNeg = Addend < 0;
2358 if (isUInt<24>(AbsOffset)) {
2359 for (int BitPos = 0; BitPos != 24 && (AbsOffset >> BitPos);
2360 BitPos += 12) {
2361 EmitToStreamer(
2362 MCInstBuilder(IsNeg ? AArch64::SUBXri : AArch64::ADDXri)
2363 .addReg(Reg)
2364 .addReg(Reg)
2365 .addImm((AbsOffset >> BitPos) & 0xfff)
2366 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, BitPos)));
2367 }
2368 } else {
2369 const uint64_t UAddend = Addend;
2370 EmitToStreamer(MCInstBuilder(IsNeg ? AArch64::MOVNXi : AArch64::MOVZXi)
2371 .addReg(Tmp)
2372 .addImm((IsNeg ? ~UAddend : UAddend) & 0xffff)
2373 .addImm(/*shift=*/0));
2374 auto NeedMovk = [IsNeg, UAddend](int BitPos) -> bool {
2375 assert(BitPos == 16 || BitPos == 32 || BitPos == 48);
2376 uint64_t Shifted = UAddend >> BitPos;
2377 if (!IsNeg)
2378 return Shifted != 0;
2379 for (int I = 0; I != 64 - BitPos; I += 16)
2380 if (((Shifted >> I) & 0xffff) != 0xffff)
2381 return true;
2382 return false;
2383 };
2384 for (int BitPos = 16; BitPos != 64 && NeedMovk(BitPos); BitPos += 16)
2385 emitMOVK(Tmp, (UAddend >> BitPos) & 0xffff, BitPos);
2386
2387 EmitToStreamer(MCInstBuilder(AArch64::ADDXrs)
2388 .addReg(Reg)
2389 .addReg(Reg)
2390 .addReg(Tmp)
2391 .addImm(/*shift=*/0));
2392 }
2393 }
2394}
2395
2396void AArch64AsmPrinter::emitAddress(MCRegister Reg, const MCExpr *Expr,
2397 MCRegister Tmp, bool DSOLocal,
2398 const MCSubtargetInfo &STI) {
2399 MCValue Val;
2400 if (!Expr->evaluateAsRelocatable(Val, nullptr))
2401 report_fatal_error("emitAddress could not evaluate");
2402 if (DSOLocal) {
2403 EmitToStreamer(
2404 MCInstBuilder(AArch64::ADRP)
2405 .addReg(Reg)
2407 OutStreamer->getContext())));
2408 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2409 .addReg(Reg)
2410 .addReg(Reg)
2411 .addExpr(MCSpecifierExpr::create(
2412 Expr, AArch64::S_LO12, OutStreamer->getContext()))
2413 .addImm(0));
2414 } else {
2415 auto *SymRef =
2416 MCSymbolRefExpr::create(Val.getAddSym(), OutStreamer->getContext());
2417 EmitToStreamer(
2418 MCInstBuilder(AArch64::ADRP)
2419 .addReg(Reg)
2421 OutStreamer->getContext())));
2422 EmitToStreamer(
2423 MCInstBuilder(AArch64::LDRXui)
2424 .addReg(Reg)
2425 .addReg(Reg)
2427 OutStreamer->getContext())));
2428 emitAddImm(Reg, Val.getConstant(), Tmp);
2429 }
2430}
2431
2433 // IFUNCs are ELF-only.
2434 if (!TT.isOSBinFormatELF())
2435 return false;
2436
2437 // IFUNCs are supported on glibc, bionic, and some but not all of the BSDs.
2438 return TT.isOSGlibc() || TT.isAndroid() || TT.isOSFreeBSD() ||
2439 TT.isOSDragonFly() || TT.isOSNetBSD();
2440}
2441
2442// Emit an ifunc resolver that returns a signed pointer to the specified target,
2443// and return a FUNCINIT reference to the resolver. In the linked binary, this
2444// function becomes the target of an IRELATIVE relocation. This resolver is used
2445// to relocate signed pointers in global variable initializers in special cases
2446// where the standard R_AARCH64_AUTH_ABS64 relocation would not work.
2447//
2448// Example (signed null pointer, not address discriminated):
2449//
2450// .8byte .Lpauth_ifunc0
2451// .pushsection .text.startup,"ax",@progbits
2452// .Lpauth_ifunc0:
2453// mov x0, #0
2454// mov x1, #12345
2455// b __emupac_pacda
2456//
2457// Example (signed null pointer, address discriminated):
2458//
2459// .Ltmp:
2460// .8byte .Lpauth_ifunc0
2461// .pushsection .text.startup,"ax",@progbits
2462// .Lpauth_ifunc0:
2463// mov x0, #0
2464// adrp x1, .Ltmp
2465// add x1, x1, :lo12:.Ltmp
2466// b __emupac_pacda
2467// .popsection
2468//
2469// Example (signed pointer to symbol, not address discriminated):
2470//
2471// .Ltmp:
2472// .8byte .Lpauth_ifunc0
2473// .pushsection .text.startup,"ax",@progbits
2474// .Lpauth_ifunc0:
2475// adrp x0, symbol
2476// add x0, x0, :lo12:symbol
2477// mov x1, #12345
2478// b __emupac_pacda
2479// .popsection
2480//
2481// Example (signed null pointer, not address discriminated, with deactivation
2482// symbol ds):
2483//
2484// .8byte .Lpauth_ifunc0
2485// .pushsection .text.startup,"ax",@progbits
2486// .Lpauth_ifunc0:
2487// mov x0, #0
2488// mov x1, #12345
2489// .reloc ., R_AARCH64_PATCHINST, ds
2490// b __emupac_pacda
2491// ret
2492// .popsection
2493const MCExpr *AArch64AsmPrinter::emitPAuthRelocationAsIRelative(
2494 const MCExpr *Target, uint64_t Disc, AArch64PACKey::ID KeyID,
2495 bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr) {
2496 const Triple &TT = TM.getTargetTriple();
2497
2498 // We only emit an IRELATIVE relocation if the target supports IRELATIVE.
2500 return nullptr;
2501
2502 // For now, only the DA key is supported.
2503 if (KeyID != AArch64PACKey::DA)
2504 return nullptr;
2505
2506 AArch64Subtarget STI(TT, TM.getTargetCPU(), TM.getTargetCPU(),
2507 TM.getTargetFeatureString(), TM, true);
2508 this->STI = &STI;
2509
2510 MCSymbol *Place = OutStreamer->getContext().createTempSymbol();
2511 OutStreamer->emitLabel(Place);
2512 OutStreamer->pushSection();
2513
2514 const MCSymbolELF *Group =
2515 static_cast<MCSectionELF *>(OutStreamer->getCurrentSectionOnly())
2516 ->getGroup();
2517 unsigned Flags = ELF::SHF_ALLOC | ELF::SHF_EXECINSTR;
2518 if (Group)
2519 Flags |= ELF::SHF_GROUP;
2520 OutStreamer->switchSection(OutStreamer->getContext().getELFSection(
2521 ".text.startup", ELF::SHT_PROGBITS, Flags, 0, Group, true,
2522 Group ? MCSection::NonUniqueID : PAuthIFuncNextUniqueID++, nullptr));
2523
2524 MCSymbol *IRelativeSym =
2525 OutStreamer->getContext().createLinkerPrivateSymbol("pauth_ifunc");
2526 OutStreamer->emitLabel(IRelativeSym);
2527 if (isa<MCConstantExpr>(Target)) {
2528 OutStreamer->emitInstruction(MCInstBuilder(AArch64::MOVZXi)
2529 .addReg(AArch64::X0)
2530 .addExpr(Target)
2531 .addImm(0),
2532 STI);
2533 } else {
2534 emitAddress(AArch64::X0, Target, AArch64::X16, IsDSOLocal, STI);
2535 }
2536 if (HasAddressDiversity) {
2537 auto *PlacePlusDisc = MCBinaryExpr::createAdd(
2538 MCSymbolRefExpr::create(Place, OutStreamer->getContext()),
2539 MCConstantExpr::create(Disc, OutStreamer->getContext()),
2540 OutStreamer->getContext());
2541 emitAddress(AArch64::X1, PlacePlusDisc, AArch64::X16, /*IsDSOLocal=*/true,
2542 STI);
2543 } else {
2544 if (!isUInt<16>(Disc)) {
2545 OutContext.reportError(SMLoc(), "AArch64 PAC Discriminator '" +
2546 Twine(Disc) +
2547 "' out of range [0, 0xFFFF]");
2548 }
2549 emitMOVZ(AArch64::X1, Disc, 0);
2550 }
2551
2552 if (DSExpr) {
2553 MCSymbol *PrePACInst = OutStreamer->getContext().createTempSymbol();
2554 OutStreamer->emitLabel(PrePACInst);
2555
2556 auto *PrePACInstExpr =
2557 MCSymbolRefExpr::create(PrePACInst, OutStreamer->getContext());
2558 OutStreamer->emitRelocDirective(*PrePACInstExpr, "R_AARCH64_PATCHINST",
2559 DSExpr, SMLoc());
2560 }
2561
2562 // We don't know the subtarget because this is being emitted for a global
2563 // initializer. Because the performance of IFUNC resolvers is unimportant, we
2564 // always call the EmuPAC runtime, which will end up using the PAC instruction
2565 // if the target supports PAC.
2566 MCSymbol *EmuPAC =
2567 OutStreamer->getContext().getOrCreateSymbol("__emupac_pacda");
2568 const MCSymbolRefExpr *EmuPACRef =
2569 MCSymbolRefExpr::create(EmuPAC, OutStreamer->getContext());
2570 OutStreamer->emitInstruction(MCInstBuilder(AArch64::B).addExpr(EmuPACRef),
2571 STI);
2572
2573 // We need a RET despite the above tail call because the deactivation symbol
2574 // may replace the tail call with a NOP.
2575 if (DSExpr)
2576 OutStreamer->emitInstruction(
2577 MCInstBuilder(AArch64::RET).addReg(AArch64::LR), STI);
2578 OutStreamer->popSection();
2579
2580 return MCSymbolRefExpr::create(IRelativeSym, AArch64::S_FUNCINIT,
2581 OutStreamer->getContext());
2582}
2583
2584const MCExpr *
2585AArch64AsmPrinter::lowerConstantPtrAuth(const ConstantPtrAuth &CPA) {
2586 MCContext &Ctx = OutContext;
2587
2588 // Figure out the base symbol and the addend, if any.
2589 APInt Offset(64, 0);
2590 const Value *BaseGV = CPA.getPointer()->stripAndAccumulateConstantOffsets(
2591 getDataLayout(), Offset, /*AllowNonInbounds=*/true);
2592
2593 auto *BaseGVB = dyn_cast<GlobalValue>(BaseGV);
2594
2595 const MCExpr *Sym;
2596 if (BaseGVB) {
2597 // If there is an addend, turn that into the appropriate MCExpr.
2598 Sym = MCSymbolRefExpr::create(getSymbol(BaseGVB), Ctx);
2599 if (Offset.sgt(0))
2600 Sym = MCBinaryExpr::createAdd(
2601 Sym, MCConstantExpr::create(Offset.getSExtValue(), Ctx), Ctx);
2602 else if (Offset.slt(0))
2603 Sym = MCBinaryExpr::createSub(
2604 Sym, MCConstantExpr::create((-Offset).getSExtValue(), Ctx), Ctx);
2605 } else {
2606 Sym = MCConstantExpr::create(Offset.getSExtValue(), Ctx);
2607 }
2608
2609 const MCExpr *DSExpr = nullptr;
2610 if (auto *DS = dyn_cast<GlobalValue>(CPA.getDeactivationSymbol())) {
2611 if (isa<GlobalAlias>(DS))
2612 return Sym;
2613 DSExpr = MCSymbolRefExpr::create(getSymbol(DS), Ctx);
2614 }
2615
2616 uint64_t KeyID = CPA.getKey()->getZExtValue();
2617 // We later rely on valid KeyID value in AArch64PACKeyIDToString call from
2618 // AArch64AuthMCExpr::printImpl, so fail fast.
2619 if (KeyID > AArch64PACKey::LAST) {
2620 CPA.getContext().emitError("AArch64 PAC Key ID '" + Twine(KeyID) +
2621 "' out of range [0, " +
2622 Twine((unsigned)AArch64PACKey::LAST) + "]");
2623 KeyID = 0;
2624 }
2625
2626 uint64_t Disc = CPA.getDiscriminator()->getZExtValue();
2627
2628 // Check if we can represent this with an IRELATIVE and emit it if so.
2629 if (auto *IFuncSym = emitPAuthRelocationAsIRelative(
2630 Sym, Disc, AArch64PACKey::ID(KeyID), CPA.hasAddressDiscriminator(),
2631 BaseGVB && BaseGVB->isDSOLocal(), DSExpr))
2632 return IFuncSym;
2633
2634 if (!isUInt<16>(Disc)) {
2635 CPA.getContext().emitError("AArch64 PAC Discriminator '" + Twine(Disc) +
2636 "' out of range [0, 0xFFFF]");
2637 Disc = 0;
2638 }
2639
2640 if (DSExpr)
2641 report_fatal_error("deactivation symbols unsupported in constant "
2642 "expressions on this target");
2643
2644 // Finally build the complete @AUTH expr.
2645 return AArch64AuthMCExpr::create(Sym, Disc, AArch64PACKey::ID(KeyID),
2646 CPA.hasAddressDiscriminator(), Ctx);
2647}
2648
2649void AArch64AsmPrinter::LowerLOADauthptrstatic(const MachineInstr &MI) {
2650 unsigned DstReg = MI.getOperand(0).getReg();
2651 const MachineOperand &GAOp = MI.getOperand(1);
2652 const uint64_t KeyC = MI.getOperand(2).getImm();
2653 assert(KeyC <= AArch64PACKey::LAST &&
2654 "key is out of range [0, AArch64PACKey::LAST]");
2655 const auto Key = (AArch64PACKey::ID)KeyC;
2656 const uint64_t Disc = MI.getOperand(3).getImm();
2657 assert(isUInt<16>(Disc) &&
2658 "constant discriminator is out of range [0, 0xffff]");
2659
2660 // Emit instruction sequence like the following:
2661 // ADRP x16, symbol$auth_ptr$key$disc
2662 // LDR x16, [x16, :lo12:symbol$auth_ptr$key$disc]
2663 //
2664 // Where the $auth_ptr$ symbol is the stub slot containing the signed pointer
2665 // to symbol.
2666 MCSymbol *AuthPtrStubSym;
2667 if (TM.getTargetTriple().isOSBinFormatELF()) {
2668 const auto &TLOF =
2669 static_cast<const AArch64_ELFTargetObjectFile &>(getObjFileLowering());
2670
2671 assert(GAOp.getOffset() == 0 &&
2672 "non-zero offset for $auth_ptr$ stub slots is not supported");
2673 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2674 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2675 } else {
2676 assert(TM.getTargetTriple().isOSBinFormatMachO() &&
2677 "LOADauthptrstatic is implemented only for MachO/ELF");
2678
2679 const auto &TLOF = static_cast<const AArch64_MachoTargetObjectFile &>(
2680 getObjFileLowering());
2681
2682 assert(GAOp.getOffset() == 0 &&
2683 "non-zero offset for $auth_ptr$ stub slots is not supported");
2684 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2685 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2686 }
2687
2688 MachineOperand StubMOHi =
2689 MachineOperand::CreateMCSymbol(AuthPtrStubSym, AArch64II::MO_PAGE);
2690 MachineOperand StubMOLo = MachineOperand::CreateMCSymbol(
2691 AuthPtrStubSym, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2692 MCOperand StubMCHi, StubMCLo;
2693
2694 MCInstLowering.lowerOperand(StubMOHi, StubMCHi);
2695 MCInstLowering.lowerOperand(StubMOLo, StubMCLo);
2696
2697 EmitToStreamer(
2698 *OutStreamer,
2699 MCInstBuilder(AArch64::ADRP).addReg(DstReg).addOperand(StubMCHi));
2700
2701 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRXui)
2702 .addReg(DstReg)
2703 .addReg(DstReg)
2704 .addOperand(StubMCLo));
2705}
2706
2707void AArch64AsmPrinter::LowerMOVaddrPAC(const MachineInstr &MI) {
2708 const bool IsGOTLoad = MI.getOpcode() == AArch64::LOADgotPAC;
2709 const bool IsELFSignedGOT = MI.getParent()
2710 ->getParent()
2711 ->getInfo<AArch64FunctionInfo>()
2712 ->hasELFSignedGOT();
2713 MachineOperand GAOp = MI.getOperand(0);
2714 const uint64_t KeyC = MI.getOperand(1).getImm();
2715 assert(KeyC <= AArch64PACKey::LAST &&
2716 "key is out of range [0, AArch64PACKey::LAST]");
2717 const auto Key = (AArch64PACKey::ID)KeyC;
2718 const unsigned AddrDisc = MI.getOperand(2).getReg();
2719 const uint64_t Disc = MI.getOperand(3).getImm();
2720
2721 const int64_t Offset = GAOp.getOffset();
2722 GAOp.setOffset(0);
2723
2724 // Emit:
2725 // target materialization:
2726 // - via GOT:
2727 // - unsigned GOT:
2728 // adrp x16, :got:target
2729 // ldr x16, [x16, :got_lo12:target]
2730 // add offset to x16 if offset != 0
2731 // - ELF signed GOT:
2732 // adrp x17, :got:target
2733 // add x17, x17, :got_auth_lo12:target
2734 // ldr x16, [x17]
2735 // aut{i|d}a x16, x17
2736 // check+trap sequence (if no FPAC)
2737 // add offset to x16 if offset != 0
2738 //
2739 // - direct:
2740 // adrp x16, target
2741 // add x16, x16, :lo12:target
2742 // add offset to x16 if offset != 0
2743 //
2744 // add offset to x16:
2745 // - abs(offset) fits 24 bits:
2746 // add/sub x16, x16, #<offset>[, #lsl 12] (up to 2 instructions)
2747 // - abs(offset) does not fit 24 bits:
2748 // - offset < 0:
2749 // movn+movk sequence filling x17 register with the offset (up to 4
2750 // instructions)
2751 // add x16, x16, x17
2752 // - offset > 0:
2753 // movz+movk sequence filling x17 register with the offset (up to 4
2754 // instructions)
2755 // add x16, x16, x17
2756 //
2757 // signing:
2758 // - 0 discriminator:
2759 // paciza x16
2760 // - Non-0 discriminator, no address discriminator:
2761 // mov x17, #Disc
2762 // pacia x16, x17
2763 // - address discriminator (with potentially folded immediate discriminator):
2764 // pacia x16, xAddrDisc
2765
2766 MachineOperand GAMOHi(GAOp), GAMOLo(GAOp);
2767 MCOperand GAMCHi, GAMCLo;
2768
2769 GAMOHi.setTargetFlags(AArch64II::MO_PAGE);
2770 GAMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2771 if (IsGOTLoad) {
2772 GAMOHi.addTargetFlag(AArch64II::MO_GOT);
2773 GAMOLo.addTargetFlag(AArch64II::MO_GOT);
2774 }
2775
2776 MCInstLowering.lowerOperand(GAMOHi, GAMCHi);
2777 MCInstLowering.lowerOperand(GAMOLo, GAMCLo);
2778
2779 EmitToStreamer(
2780 MCInstBuilder(AArch64::ADRP)
2781 .addReg(IsGOTLoad && IsELFSignedGOT ? AArch64::X17 : AArch64::X16)
2782 .addOperand(GAMCHi));
2783
2784 if (IsGOTLoad) {
2785 if (IsELFSignedGOT) {
2786 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2787 .addReg(AArch64::X17)
2788 .addReg(AArch64::X17)
2789 .addOperand(GAMCLo)
2790 .addImm(0));
2791
2792 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2793 .addReg(AArch64::X16)
2794 .addReg(AArch64::X17)
2795 .addImm(0));
2796
2797 assert(GAOp.isGlobal());
2798 assert(GAOp.getGlobal()->getValueType() != nullptr);
2799
2800 bool IsFunctionTy = GAOp.getGlobal()->getValueType()->isFunctionTy();
2801 auto AuthKey = IsFunctionTy ? AArch64PACKey::IA : AArch64PACKey::DA;
2802 emitAUT(AuthKey, AArch64::X16, AArch64::X17);
2803
2804 if (!STI->hasFPAC())
2805 emitPtrauthCheckAuthenticatedValue(AArch64::X16, AArch64::X17, AuthKey,
2806 AArch64PAuth::AuthCheckMethod::XPAC);
2807 } else {
2808 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2809 .addReg(AArch64::X16)
2810 .addReg(AArch64::X16)
2811 .addOperand(GAMCLo));
2812 }
2813 } else {
2814 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2815 .addReg(AArch64::X16)
2816 .addReg(AArch64::X16)
2817 .addOperand(GAMCLo)
2818 .addImm(0));
2819 }
2820
2821 emitAddImm(AArch64::X16, Offset, AArch64::X17);
2822 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17);
2823
2824 emitPAC(Key, AArch64::X16, DiscReg);
2825}
2826
2827void AArch64AsmPrinter::LowerLOADgotAUTH(const MachineInstr &MI) {
2828 Register DstReg = MI.getOperand(0).getReg();
2829 Register AuthResultReg = STI->hasFPAC() ? DstReg : AArch64::X16;
2830 const MachineOperand &GAMO = MI.getOperand(1);
2831 assert(GAMO.getOffset() == 0);
2832
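 // Emit, roughly (the tiny code model uses a single adr instead of the
 // adrp+add pair):
 //   adrp x17, <GOT entry for symbol>
 //   add  x17, x17, :lo12:<GOT entry for symbol>
 //   ldr  xResult, [x17]
 //   aut{i|d}a xResult, x17     ; IA for functions, DA for data
 // followed, when FPAC is not available, by a check+trap sequence and a mov
 // into the destination register.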
2833 if (MI.getMF()->getTarget().getCodeModel() == CodeModel::Tiny) {
2834 MCOperand GAMC;
2835 MCInstLowering.lowerOperand(GAMO, GAMC);
2836 EmitToStreamer(
2837 MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addOperand(GAMC));
2838 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2839 .addReg(AuthResultReg)
2840 .addReg(AArch64::X17)
2841 .addImm(0));
2842 } else {
2843 MachineOperand GAHiOp(GAMO);
2844 MachineOperand GALoOp(GAMO);
2845 GAHiOp.addTargetFlag(AArch64II::MO_PAGE);
2846 GALoOp.addTargetFlag(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2847
2848 MCOperand GAMCHi, GAMCLo;
2849 MCInstLowering.lowerOperand(GAHiOp, GAMCHi);
2850 MCInstLowering.lowerOperand(GALoOp, GAMCLo);
2851
2852 EmitToStreamer(
2853 MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(GAMCHi));
2854
2855 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2856 .addReg(AArch64::X17)
2857 .addReg(AArch64::X17)
2858 .addOperand(GAMCLo)
2859 .addImm(0));
2860
2861 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2862 .addReg(AuthResultReg)
2863 .addReg(AArch64::X17)
2864 .addImm(0));
2865 }
2866
2867 assert(GAMO.isGlobal());
2868 MCSymbol *UndefWeakSym;
2869 if (GAMO.getGlobal()->hasExternalWeakLinkage()) {
2870 UndefWeakSym = createTempSymbol("undef_weak");
2871 EmitToStreamer(
2872 MCInstBuilder(AArch64::CBZX)
2873 .addReg(AuthResultReg)
2874 .addExpr(MCSymbolRefExpr::create(UndefWeakSym, OutContext)));
2875 }
2876
2877 assert(GAMO.getGlobal()->getValueType() != nullptr);
2878
2879 bool IsFunctionTy = GAMO.getGlobal()->getValueType()->isFunctionTy();
2880 auto AuthKey = IsFunctionTy ? AArch64PACKey::IA : AArch64PACKey::DA;
2881 emitAUT(AuthKey, AuthResultReg, AArch64::X17);
2882
2883 if (GAMO.getGlobal()->hasExternalWeakLinkage())
2884 OutStreamer->emitLabel(UndefWeakSym);
2885
2886 if (!STI->hasFPAC()) {
2887 emitPtrauthCheckAuthenticatedValue(AuthResultReg, AArch64::X17, AuthKey,
2888 AArch64PAuth::AuthCheckMethod::XPAC);
2889
2890 emitMovXReg(DstReg, AuthResultReg);
2891 }
2892}
2893
2894const MCExpr *
2895AArch64AsmPrinter::lowerBlockAddressConstant(const BlockAddress &BA) {
2896 const MCExpr *BAE = AsmPrinter::lowerBlockAddressConstant(BA);
2897 const Function &Fn = *BA.getFunction();
2898
2899 if (std::optional<uint16_t> BADisc =
2900 STI->getPtrAuthBlockAddressDiscriminatorIfEnabled(Fn))
2901 return AArch64AuthMCExpr::create(BAE, *BADisc, AArch64PACKey::IA,
2902 /*HasAddressDiversity=*/false, OutContext);
2903
2904 return BAE;
2905}
2906
2907void AArch64AsmPrinter::emitCBPseudoExpansion(const MachineInstr *MI) {
2908 bool IsImm = false;
2909 unsigned Width = 0;
2910
2911 switch (MI->getOpcode()) {
2912 default:
2913 llvm_unreachable("This is not a CB pseudo instruction");
2914 case AArch64::CBBAssertExt:
2915 IsImm = false;
2916 Width = 8;
2917 break;
2918 case AArch64::CBHAssertExt:
2919 IsImm = false;
2920 Width = 16;
2921 break;
2922 case AArch64::CBWPrr:
2923 Width = 32;
2924 break;
2925 case AArch64::CBXPrr:
2926 Width = 64;
2927 break;
2928 case AArch64::CBWPri:
2929 IsImm = true;
2930 Width = 32;
2931 break;
2932 case AArch64::CBXPri:
2933 IsImm = true;
2934 Width = 64;
2935 break;
2936 }
2937
2938 AArch64CC::CondCode CC =
2939 static_cast<AArch64CC::CondCode>(MI->getOperand(0).getImm());
2940 bool NeedsRegSwap = false;
2941 bool NeedsImmDec = false;
2942 bool NeedsImmInc = false;
2943
2944#define GET_CB_OPC(IsImm, Width, ImmCond, RegCond) \
2945 (IsImm \
2946 ? (Width == 32 ? AArch64::CB##ImmCond##Wri : AArch64::CB##ImmCond##Xri) \
2947 : (Width == 8 \
2948 ? AArch64::CBB##RegCond##Wrr \
2949 : (Width == 16 ? AArch64::CBH##RegCond##Wrr \
2950 : (Width == 32 ? AArch64::CB##RegCond##Wrr \
2951 : AArch64::CB##RegCond##Xrr))))
2952 unsigned MCOpC;
2953
2954 // Decide if we need to either swap register operands or increment/decrement
2955 // immediate operands
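 // For example, there is no direct LS encoding: the register form is emitted
 // as CBHS with the operands swapped (a <= b iff b >= a), and the immediate
 // form is emitted as CBLO with the immediate incremented (a <= imm iff
 // a < imm + 1).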
2956 switch (CC) {
2957 default:
2958 llvm_unreachable("Invalid CB condition code");
2959 case AArch64CC::EQ:
2960 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ EQ, /* Reg-Reg */ EQ);
2961 break;
2962 case AArch64CC::NE:
2963 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ NE, /* Reg-Reg */ NE);
2964 break;
2965 case AArch64CC::HS:
2966 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ HI, /* Reg-Reg */ HS);
2967 NeedsImmDec = IsImm;
2968 break;
2969 case AArch64CC::LO:
2970 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LO, /* Reg-Reg */ HI);
2971 NeedsRegSwap = !IsImm;
2972 break;
2973 case AArch64CC::HI:
2974 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ HI, /* Reg-Reg */ HI);
2975 break;
2976 case AArch64CC::LS:
2977 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LO, /* Reg-Reg */ HS);
2978 NeedsRegSwap = !IsImm;
2979 NeedsImmInc = IsImm;
2980 break;
2981 case AArch64CC::GE:
2982 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ GT, /* Reg-Reg */ GE);
2983 NeedsImmDec = IsImm;
2984 break;
2985 case AArch64CC::LT:
2986 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LT, /* Reg-Reg */ GT);
2987 NeedsRegSwap = !IsImm;
2988 break;
2989 case AArch64CC::GT:
2990 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ GT, /* Reg-Reg */ GT);
2991 break;
2992 case AArch64CC::LE:
2993 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LT, /* Reg-Reg */ GE);
2994 NeedsRegSwap = !IsImm;
2995 NeedsImmInc = IsImm;
2996 break;
2997 }
2998#undef GET_CB_OPC
2999
3000 MCInst Inst;
3001 Inst.setOpcode(MCOpC);
3002
3003 MCOperand Lhs, Rhs, Trgt;
3004 lowerOperand(MI->getOperand(1), Lhs);
3005 lowerOperand(MI->getOperand(2), Rhs);
3006 lowerOperand(MI->getOperand(3), Trgt);
3007
3008 // Now swap, increment or decrement
3009 if (NeedsRegSwap) {
3010 assert(Lhs.isReg() && "Expected register operand for CB");
3011 assert(Rhs.isReg() && "Expected register operand for CB");
3012 Inst.addOperand(Rhs);
3013 Inst.addOperand(Lhs);
3014 } else if (NeedsImmDec) {
3015 Rhs.setImm(Rhs.getImm() - 1);
3016 Inst.addOperand(Lhs);
3017 Inst.addOperand(Rhs);
3018 } else if (NeedsImmInc) {
3019 Rhs.setImm(Rhs.getImm() + 1);
3020 Inst.addOperand(Lhs);
3021 Inst.addOperand(Rhs);
3022 } else {
3023 Inst.addOperand(Lhs);
3024 Inst.addOperand(Rhs);
3025 }
3026
3027 assert((!IsImm || (Rhs.getImm() >= 0 && Rhs.getImm() < 64)) &&
3028 "CB immediate operand out-of-bounds");
3029
3030 Inst.addOperand(Trgt);
3031 EmitToStreamer(*OutStreamer, Inst);
3032}
3033
3034// Simple pseudo-instructions have their lowering (with expansion to real
3035// instructions) auto-generated.
3036#include "AArch64GenMCPseudoLowering.inc"
3037
3038void AArch64AsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
3039 S.emitInstruction(Inst, *STI);
3040#ifndef NDEBUG
3041 ++InstsEmitted;
3042#endif
3043}
3044
3045void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
3046 AArch64_MC::verifyInstructionPredicates(MI->getOpcode(), STI->getFeatureBits());
3047
3048#ifndef NDEBUG
3049 InstsEmitted = 0;
3050 auto CheckMISize = make_scope_exit([&]() {
3051 assert(STI->getInstrInfo()->getInstSizeInBytes(*MI) >= InstsEmitted * 4);
3052 });
3053#endif
3054
3055 // Do any auto-generated pseudo lowerings.
3056 if (MCInst OutInst; lowerPseudoInstExpansion(MI, OutInst)) {
3057 EmitToStreamer(*OutStreamer, OutInst);
3058 return;
3059 }
3060
3061 if (MI->getOpcode() == AArch64::ADRP) {
3062 for (auto &Opd : MI->operands()) {
3063 if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
3064 "swift_async_extendedFramePointerFlags") {
3065 ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
3066 }
3067 }
3068 }
3069
3070 if (AArch64FI->getLOHRelated().count(MI)) {
3071 // Generate a label for LOH related instruction
3072 MCSymbol *LOHLabel = createTempSymbol("loh");
3073 // Associate the instruction with the label
3074 LOHInstToLabel[MI] = LOHLabel;
3075 OutStreamer->emitLabel(LOHLabel);
3076 }
3077
3078 AArch64TargetStreamer *TS =
3079 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
3080 // Do any manual lowerings.
3081 switch (MI->getOpcode()) {
3082 default:
3084 "Unhandled tail call instruction");
3085 break;
3086 case AArch64::HINT: {
3087 // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
3088 // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
3089 // non-empty. If MI is the initial BTI, place the
3090 // __patchable_function_entries label after BTI.
3091 if (CurrentPatchableFunctionEntrySym &&
3092 CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
3093 MI == &MF->front().front()) {
3094 int64_t Imm = MI->getOperand(0).getImm();
3095 if ((Imm & 32) && (Imm & 6)) {
3096 MCInst Inst;
3097 MCInstLowering.Lower(MI, Inst);
3098 EmitToStreamer(*OutStreamer, Inst);
3099 CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
3100 OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
3101 return;
3102 }
3103 }
3104 break;
3105 }
3106 case AArch64::MOVMCSym: {
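 // Materialize the symbol with a movz/movk pair: movz loads the signed G1
 // half-word (bits 16-31, shifted left by 16) and movk patches in the G0
 // half-word (bits 0-15).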
3107 Register DestReg = MI->getOperand(0).getReg();
3108 const MachineOperand &MO_Sym = MI->getOperand(1);
3109 MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
3110 MCOperand Hi_MCSym, Lo_MCSym;
3111
3112 Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
3113 Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);
3114
3115 MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
3116 MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);
3117
3118 MCInst MovZ;
3119 MovZ.setOpcode(AArch64::MOVZXi);
3120 MovZ.addOperand(MCOperand::createReg(DestReg));
3121 MovZ.addOperand(Hi_MCSym);
3122 MovZ.addOperand(MCOperand::createImm(16));
3123 EmitToStreamer(*OutStreamer, MovZ);
3124
3125 MCInst MovK;
3126 MovK.setOpcode(AArch64::MOVKXi);
3127 MovK.addOperand(MCOperand::createReg(DestReg));
3128 MovK.addOperand(MCOperand::createReg(DestReg));
3129 MovK.addOperand(Lo_MCSym);
3130 MovK.addOperand(MCOperand::createImm(0));
3131 EmitToStreamer(*OutStreamer, MovK);
3132 return;
3133 }
3134 case AArch64::MOVIv2d_ns:
3135 // It is generally beneficial to rewrite "fmov s0, wzr" to "movi d0, #0",
3136 // as movi is more efficient across all cores. Newer cores can eliminate
3137 // fmovs early and there is no difference with movi, but this is not true
3138 // for all implementations.
3139 //
3140 // The floating-point version doesn't quite work in rare cases on older
3141 // CPUs, so on those targets we lower this instruction to movi.16b instead.
3142 if (STI->hasZeroCycleZeroingFPWorkaround() &&
3143 MI->getOperand(1).getImm() == 0) {
3144 MCInst TmpInst;
3145 TmpInst.setOpcode(AArch64::MOVIv16b_ns);
3146 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3147 TmpInst.addOperand(MCOperand::createImm(0));
3148 EmitToStreamer(*OutStreamer, TmpInst);
3149 return;
3150 }
3151 break;
3152
3153 case AArch64::DBG_VALUE:
3154 case AArch64::DBG_VALUE_LIST:
3155 if (isVerbose() && OutStreamer->hasRawTextSupport()) {
3156 SmallString<128> TmpStr;
3157 raw_svector_ostream OS(TmpStr);
3158 PrintDebugValueComment(MI, OS);
3159 OutStreamer->emitRawText(StringRef(OS.str()));
3160 }
3161 return;
3162
3163 case AArch64::EMITBKEY: {
3164 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
3165 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
3166 ExceptionHandlingType != ExceptionHandling::ARM)
3167 return;
3168
3169 if (getFunctionCFISectionType(*MF) == CFISection::None)
3170 return;
3171
3172 OutStreamer->emitCFIBKeyFrame();
3173 return;
3174 }
3175
3176 case AArch64::EMITMTETAGGED: {
3177 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
3178 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
3179 ExceptionHandlingType != ExceptionHandling::ARM)
3180 return;
3181
3182 if (getFunctionCFISectionType(*MF) != CFISection::None)
3183 OutStreamer->emitCFIMTETaggedFrame();
3184 return;
3185 }
3186
3187 case AArch64::AUTx16x17:
3188 emitPtrauthAuthResign(
3189 AArch64::X16, (AArch64PACKey::ID)MI->getOperand(0).getImm(),
3190 MI->getOperand(1).getImm(), &MI->getOperand(2), AArch64::X17,
3191 std::nullopt, 0, 0, MI->getDeactivationSymbol());
3192 return;
3193
3194 case AArch64::AUTxMxN:
3195 emitPtrauthAuthResign(MI->getOperand(0).getReg(),
3196 (AArch64PACKey::ID)MI->getOperand(3).getImm(),
3197 MI->getOperand(4).getImm(), &MI->getOperand(5),
3198 MI->getOperand(1).getReg(), std::nullopt, 0, 0,
3199 MI->getDeactivationSymbol());
3200 return;
3201
3202 case AArch64::AUTPAC:
3203 emitPtrauthAuthResign(
3204 AArch64::X16, (AArch64PACKey::ID)MI->getOperand(0).getImm(),
3205 MI->getOperand(1).getImm(), &MI->getOperand(2), AArch64::X17,
3206 (AArch64PACKey::ID)MI->getOperand(3).getImm(),
3207 MI->getOperand(4).getImm(), MI->getOperand(5).getReg(),
3208 MI->getDeactivationSymbol());
3209 return;
3210
3211 case AArch64::PAC:
3212 emitPtrauthSign(MI);
3213 return;
3214
3215 case AArch64::LOADauthptrstatic:
3216 LowerLOADauthptrstatic(*MI);
3217 return;
3218
3219 case AArch64::LOADgotPAC:
3220 case AArch64::MOVaddrPAC:
3221 LowerMOVaddrPAC(*MI);
3222 return;
3223
3224 case AArch64::LOADgotAUTH:
3225 LowerLOADgotAUTH(*MI);
3226 return;
3227
3228 case AArch64::BRA:
3229 case AArch64::BLRA:
3230 emitPtrauthBranch(MI);
3231 return;
3232
3233 // Tail calls use pseudo instructions so they have the proper code-gen
3234 // attributes (isCall, isReturn, etc.). We lower them to the real
3235 // instruction here.
3236 case AArch64::AUTH_TCRETURN:
3237 case AArch64::AUTH_TCRETURN_BTI: {
3238 Register Callee = MI->getOperand(0).getReg();
3239 const auto Key = (AArch64PACKey::ID)MI->getOperand(2).getImm();
3240 const uint64_t Disc = MI->getOperand(3).getImm();
3241
3242 Register AddrDisc = MI->getOperand(4).getReg();
3243
3244 Register ScratchReg = Callee == AArch64::X16 ? AArch64::X17 : AArch64::X16;
3245
3246 emitPtrauthTailCallHardening(MI);
3247
3248 // See the comments in emitPtrauthBranch.
3249 if (Callee == AddrDisc)
3250 report_fatal_error("Call target is signed with its own value");
3251
3252 // After isX16X17Safer predicate was introduced, emitPtrauthDiscriminator is
3253 // no longer restricted to only reusing AddrDisc when it is X16 or X17
3254 // (which are implicit-def'ed by AUTH_TCRETURN pseudos), so impose this
3255 // restriction manually to avoid clobbering an unexpected register.
3256 bool AddrDiscIsImplicitDef =
3257 AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17;
3258 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, ScratchReg,
3259 AddrDiscIsImplicitDef);
3260 emitBLRA(/*IsCall*/ false, Key, Callee, DiscReg);
3261 return;
3262 }
3263
3264 case AArch64::TCRETURNri:
3265 case AArch64::TCRETURNrix16x17:
3266 case AArch64::TCRETURNrix17:
3267 case AArch64::TCRETURNrinotx16:
3268 case AArch64::TCRETURNriALL: {
3269 emitPtrauthTailCallHardening(MI);
3270
3271 recordIfImportCall(MI);
3272 MCInst TmpInst;
3273 TmpInst.setOpcode(AArch64::BR);
3274 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3275 EmitToStreamer(*OutStreamer, TmpInst);
3276 return;
3277 }
3278 case AArch64::TCRETURNdi: {
3279 emitPtrauthTailCallHardening(MI);
3280
3281 MCOperand Dest;
3282 MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
3283 recordIfImportCall(MI);
3284 MCInst TmpInst;
3285 TmpInst.setOpcode(AArch64::B);
3286 TmpInst.addOperand(Dest);
3287 EmitToStreamer(*OutStreamer, TmpInst);
3288 return;
3289 }
3290 case AArch64::SpeculationBarrierISBDSBEndBB: {
3291 // Print DSB SYS + ISB
3292 MCInst TmpInstDSB;
3293 TmpInstDSB.setOpcode(AArch64::DSB);
3294 TmpInstDSB.addOperand(MCOperand::createImm(0xf));
3295 EmitToStreamer(*OutStreamer, TmpInstDSB);
3296 MCInst TmpInstISB;
3297 TmpInstISB.setOpcode(AArch64::ISB);
3298 TmpInstISB.addOperand(MCOperand::createImm(0xf));
3299 EmitToStreamer(*OutStreamer, TmpInstISB);
3300 return;
3301 }
3302 case AArch64::SpeculationBarrierSBEndBB: {
3303 // Print SB
3304 MCInst TmpInstSB;
3305 TmpInstSB.setOpcode(AArch64::SB);
3306 EmitToStreamer(*OutStreamer, TmpInstSB);
3307 return;
3308 }
3309 case AArch64::TLSDESC_AUTH_CALLSEQ: {
3310 /// lower this to:
3311 /// adrp x0, :tlsdesc_auth:var
3312 /// ldr x16, [x0, #:tlsdesc_auth_lo12:var]
3313 /// add x0, x0, #:tlsdesc_auth_lo12:var
3314 /// blraa x16, x0
3315 /// (TPIDR_EL0 offset now in x0)
3316 const MachineOperand &MO_Sym = MI->getOperand(0);
3317 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3318 MCOperand SymTLSDescLo12, SymTLSDesc;
3319 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3320 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3321 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3322 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3323
3324 MCInst Adrp;
3325 Adrp.setOpcode(AArch64::ADRP);
3326 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3327 Adrp.addOperand(SymTLSDesc);
3328 EmitToStreamer(*OutStreamer, Adrp);
3329
3330 MCInst Ldr;
3331 Ldr.setOpcode(AArch64::LDRXui);
3332 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3333 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3334 Ldr.addOperand(SymTLSDescLo12);
3335 Ldr.addOperand(MCOperand::createImm(0));
3336 EmitToStreamer(*OutStreamer, Ldr);
3337
3338 MCInst Add;
3339 Add.setOpcode(AArch64::ADDXri);
3340 Add.addOperand(MCOperand::createReg(AArch64::X0));
3341 Add.addOperand(MCOperand::createReg(AArch64::X0));
3342 Add.addOperand(SymTLSDescLo12);
3343 Add.addOperand(MCOperand::createImm(AArch64_AM::getShiftValue(0)));
3344 EmitToStreamer(*OutStreamer, Add);
3345
3346 // Authenticated TLSDESC accesses are not relaxed.
3347 // Thus, do not emit .tlsdesccall for AUTH TLSDESC.
3348
3349 MCInst Blraa;
3350 Blraa.setOpcode(AArch64::BLRAA);
3351 Blraa.addOperand(MCOperand::createReg(AArch64::X16));
3352 Blraa.addOperand(MCOperand::createReg(AArch64::X0));
3353 EmitToStreamer(*OutStreamer, Blraa);
3354
3355 return;
3356 }
3357 case AArch64::TLSDESC_CALLSEQ: {
3358 /// lower this to:
3359 /// adrp x0, :tlsdesc:var
3360 /// ldr x1, [x0, #:tlsdesc_lo12:var]
3361 /// add x0, x0, #:tlsdesc_lo12:var
3362 /// .tlsdesccall var
3363 /// blr x1
3364 /// (TPIDR_EL0 offset now in x0)
3365 const MachineOperand &MO_Sym = MI->getOperand(0);
3366 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3367 MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
3368 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3369 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3370 MCInstLowering.lowerOperand(MO_Sym, Sym);
3371 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3372 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3373
3374 MCInst Adrp;
3375 Adrp.setOpcode(AArch64::ADRP);
3376 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3377 Adrp.addOperand(SymTLSDesc);
3378 EmitToStreamer(*OutStreamer, Adrp);
3379
3380 MCInst Ldr;
3381 if (STI->isTargetILP32()) {
3382 Ldr.setOpcode(AArch64::LDRWui);
3383 Ldr.addOperand(MCOperand::createReg(AArch64::W1));
3384 } else {
3385 Ldr.setOpcode(AArch64::LDRXui);
3386 Ldr.addOperand(MCOperand::createReg(AArch64::X1));
3387 }
3388 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3389 Ldr.addOperand(SymTLSDescLo12);
3390 Ldr.addOperand(MCOperand::createImm(0));
3391 EmitToStreamer(*OutStreamer, Ldr);
3392
3393 MCInst Add;
3394 if (STI->isTargetILP32()) {
3395 Add.setOpcode(AArch64::ADDWri);
3396 Add.addOperand(MCOperand::createReg(AArch64::W0));
3397 Add.addOperand(MCOperand::createReg(AArch64::W0));
3398 } else {
3399 Add.setOpcode(AArch64::ADDXri);
3400 Add.addOperand(MCOperand::createReg(AArch64::X0));
3401 Add.addOperand(MCOperand::createReg(AArch64::X0));
3402 }
3403 Add.addOperand(SymTLSDescLo12);
3404 Add.addOperand(MCOperand::createImm(AArch64_AM::getShiftValue(0)));
3405 EmitToStreamer(*OutStreamer, Add);
3406
3407 // Emit a relocation annotation. This expands to no code, but requests that
3408 // the following instruction get an R_AARCH64_TLSDESC_CALL relocation.
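// (Unlike the authenticated sequence above, this annotated, fixed-form
// sequence is one the linker can recognize and potentially relax to an
// initial-exec or local-exec TLS access.)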
3409 MCInst TLSDescCall;
3410 TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
3411 TLSDescCall.addOperand(Sym);
3412 EmitToStreamer(*OutStreamer, TLSDescCall);
3413#ifndef NDEBUG
3414 --InstsEmitted; // no code emitted
3415#endif
3416
3417 MCInst Blr;
3418 Blr.setOpcode(AArch64::BLR);
3419 Blr.addOperand(MCOperand::createReg(AArch64::X1));
3420 EmitToStreamer(*OutStreamer, Blr);
3421
3422 return;
3423 }
3424
3425 case AArch64::JumpTableDest32:
3426 case AArch64::JumpTableDest16:
3427 case AArch64::JumpTableDest8:
3428 LowerJumpTableDest(*OutStreamer, *MI);
3429 return;
3430
3431 case AArch64::BR_JumpTable:
3432 LowerHardenedBRJumpTable(*MI);
3433 return;
3434
3435 case AArch64::FMOVH0:
3436 case AArch64::FMOVS0:
3437 case AArch64::FMOVD0:
3438 emitFMov0(*MI);
3439 return;
3440
3441 case AArch64::MOPSMemoryCopyPseudo:
3442 case AArch64::MOPSMemoryMovePseudo:
3443 case AArch64::MOPSMemorySetPseudo:
3444 case AArch64::MOPSMemorySetTaggingPseudo:
3445 LowerMOPS(*OutStreamer, *MI);
3446 return;
3447
3448 case TargetOpcode::STACKMAP:
3449 return LowerSTACKMAP(*OutStreamer, SM, *MI);
3450
3451 case TargetOpcode::PATCHPOINT:
3452 return LowerPATCHPOINT(*OutStreamer, SM, *MI);
3453
3454 case TargetOpcode::STATEPOINT:
3455 return LowerSTATEPOINT(*OutStreamer, SM, *MI);
3456
3457 case TargetOpcode::FAULTING_OP:
3458 return LowerFAULTING_OP(*MI);
3459
3460 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
3461 LowerPATCHABLE_FUNCTION_ENTER(*MI);
3462 return;
3463
3464 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
3465 LowerPATCHABLE_FUNCTION_EXIT(*MI);
3466 return;
3467
3468 case TargetOpcode::PATCHABLE_TAIL_CALL:
3469 LowerPATCHABLE_TAIL_CALL(*MI);
3470 return;
3471 case TargetOpcode::PATCHABLE_EVENT_CALL:
3472 return LowerPATCHABLE_EVENT_CALL(*MI, false);
3473 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
3474 return LowerPATCHABLE_EVENT_CALL(*MI, true);
3475
3476 case AArch64::KCFI_CHECK:
3477 LowerKCFI_CHECK(*MI);
3478 return;
3479
3480 case AArch64::HWASAN_CHECK_MEMACCESS:
3481 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
3482 case AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW:
3483 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW:
3484 LowerHWASAN_CHECK_MEMACCESS(*MI);
3485 return;
3486
3487 case AArch64::SEH_StackAlloc:
3488 TS->emitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
3489 return;
3490
3491 case AArch64::SEH_SaveFPLR:
3492 TS->emitARM64WinCFISaveFPLR(MI->getOperand(0).getImm());
3493 return;
3494
3495 case AArch64::SEH_SaveFPLR_X:
3496 assert(MI->getOperand(0).getImm() < 0 &&
3497 "Pre increment SEH opcode must have a negative offset");
3498 TS->emitARM64WinCFISaveFPLRX(-MI->getOperand(0).getImm());
3499 return;
3500
3501 case AArch64::SEH_SaveReg:
3502 TS->emitARM64WinCFISaveReg(MI->getOperand(0).getImm(),
3503 MI->getOperand(1).getImm());
3504 return;
3505
3506 case AArch64::SEH_SaveReg_X:
3507 assert(MI->getOperand(1).getImm() < 0 &&
3508 "Pre increment SEH opcode must have a negative offset");
3509 TS->emitARM64WinCFISaveRegX(MI->getOperand(0).getImm(),
3510 -MI->getOperand(1).getImm());
3511 return;
3512
3513 case AArch64::SEH_SaveRegP:
3514 if (MI->getOperand(1).getImm() == 30 && MI->getOperand(0).getImm() >= 19 &&
3515 MI->getOperand(0).getImm() <= 28) {
3516 assert((MI->getOperand(0).getImm() - 19) % 2 == 0 &&
3517 "Register paired with LR must be odd");
3518 TS->emitARM64WinCFISaveLRPair(MI->getOperand(0).getImm(),
3519 MI->getOperand(2).getImm());
3520 return;
3521 }
3522 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3523 "Non-consecutive registers not allowed for save_regp");
3524 TS->emitARM64WinCFISaveRegP(MI->getOperand(0).getImm(),
3525 MI->getOperand(2).getImm());
3526 return;
3527
3528 case AArch64::SEH_SaveRegP_X:
3529 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3530 "Non-consecutive registers not allowed for save_regp_x");
3531 assert(MI->getOperand(2).getImm() < 0 &&
3532 "Pre increment SEH opcode must have a negative offset");
3533 TS->emitARM64WinCFISaveRegPX(MI->getOperand(0).getImm(),
3534 -MI->getOperand(2).getImm());
3535 return;
3536
3537 case AArch64::SEH_SaveFReg:
3538 TS->emitARM64WinCFISaveFReg(MI->getOperand(0).getImm(),
3539 MI->getOperand(1).getImm());
3540 return;
3541
3542 case AArch64::SEH_SaveFReg_X:
3543 assert(MI->getOperand(1).getImm() < 0 &&
3544 "Pre increment SEH opcode must have a negative offset");
3545 TS->emitARM64WinCFISaveFRegX(MI->getOperand(0).getImm(),
3546 -MI->getOperand(1).getImm());
3547 return;
3548
3549 case AArch64::SEH_SaveFRegP:
3550 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3551 "Non-consecutive registers not allowed for save_regp");
3552 TS->emitARM64WinCFISaveFRegP(MI->getOperand(0).getImm(),
3553 MI->getOperand(2).getImm());
3554 return;
3555
3556 case AArch64::SEH_SaveFRegP_X:
3557 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3558 "Non-consecutive registers not allowed for save_regp_x");
3559 assert(MI->getOperand(2).getImm() < 0 &&
3560 "Pre increment SEH opcode must have a negative offset");
3561 TS->emitARM64WinCFISaveFRegPX(MI->getOperand(0).getImm(),
3562 -MI->getOperand(2).getImm());
3563 return;
3564
3565 case AArch64::SEH_SetFP:
3566 TS->emitARM64WinCFISetFP();
3567 return;
3568
3569 case AArch64::SEH_AddFP:
3570 TS->emitARM64WinCFIAddFP(MI->getOperand(0).getImm());
3571 return;
3572
3573 case AArch64::SEH_Nop:
3574 TS->emitARM64WinCFINop();
3575 return;
3576
3577 case AArch64::SEH_PrologEnd:
3578 TS->emitARM64WinCFIPrologEnd();
3579 return;
3580
3581 case AArch64::SEH_EpilogStart:
3582 TS->emitARM64WinCFIEpilogStart();
3583 return;
3584
3585 case AArch64::SEH_EpilogEnd:
3586 TS->emitARM64WinCFIEpilogEnd();
3587 return;
3588
3589 case AArch64::SEH_PACSignLR:
3590 TS->emitARM64WinCFIPACSignLR();
3591 return;
3592
3593 case AArch64::SEH_SaveAnyRegI:
3594 assert(MI->getOperand(1).getImm() <= 1008 &&
3595 "SaveAnyRegI SEH opcode offset must fit into 6 bits");
3596 TS->emitARM64WinCFISaveAnyRegI(MI->getOperand(0).getImm(),
3597 MI->getOperand(1).getImm());
3598 return;
3599
3600 case AArch64::SEH_SaveAnyRegIP:
3601 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3602 "Non-consecutive registers not allowed for save_any_reg");
3603 assert(MI->getOperand(2).getImm() <= 1008 &&
3604 "SaveAnyRegIP SEH opcode offset must fit into 6 bits");
3605 TS->emitARM64WinCFISaveAnyRegIP(MI->getOperand(0).getImm(),
3606 MI->getOperand(2).getImm());
3607 return;
3608
3609 case AArch64::SEH_SaveAnyRegQP:
3610 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3611 "Non-consecutive registers not allowed for save_any_reg");
3612 assert(MI->getOperand(2).getImm() >= 0 &&
3613 "SaveAnyRegQP SEH opcode offset must be non-negative");
3614 assert(MI->getOperand(2).getImm() <= 1008 &&
3615 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3616 TS->emitARM64WinCFISaveAnyRegQP(MI->getOperand(0).getImm(),
3617 MI->getOperand(2).getImm());
3618 return;
3619
3620 case AArch64::SEH_SaveAnyRegQPX:
3621 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3622 "Non-consecutive registers not allowed for save_any_reg");
3623 assert(MI->getOperand(2).getImm() < 0 &&
3624 "SaveAnyRegQPX SEH opcode offset must be negative");
3625 assert(MI->getOperand(2).getImm() >= -1008 &&
3626 "SaveAnyRegQPX SEH opcode offset must fit into 6 bits");
3627 TS->emitARM64WinCFISaveAnyRegQPX(MI->getOperand(0).getImm(),
3628 -MI->getOperand(2).getImm());
3629 return;
3630
3631 case AArch64::SEH_AllocZ:
3632 assert(MI->getOperand(0).getImm() >= 0 &&
3633 "AllocZ SEH opcode offset must be non-negative");
3634 assert(MI->getOperand(0).getImm() <= 255 &&
3635 "AllocZ SEH opcode offset must fit into 8 bits");
3636 TS->emitARM64WinCFIAllocZ(MI->getOperand(0).getImm());
3637 return;
3638
3639 case AArch64::SEH_SaveZReg:
3640 assert(MI->getOperand(1).getImm() >= 0 &&
3641 "SaveZReg SEH opcode offset must be non-negative");
3642 assert(MI->getOperand(1).getImm() <= 255 &&
3643 "SaveZReg SEH opcode offset must fit into 8 bits");
3644 TS->emitARM64WinCFISaveZReg(MI->getOperand(0).getImm(),
3645 MI->getOperand(1).getImm());
3646 return;
3647
3648 case AArch64::SEH_SavePReg:
3649 assert(MI->getOperand(1).getImm() >= 0 &&
3650 "SavePReg SEH opcode offset must be non-negative");
3651 assert(MI->getOperand(1).getImm() <= 255 &&
3652 "SavePReg SEH opcode offset must fit into 8 bits");
3653 TS->emitARM64WinCFISavePReg(MI->getOperand(0).getImm(),
3654 MI->getOperand(1).getImm());
3655 return;
3656
3657 case AArch64::BLR:
3658 case AArch64::BR: {
3659 recordIfImportCall(MI);
3660 MCInst TmpInst;
3661 MCInstLowering.Lower(MI, TmpInst);
3662 EmitToStreamer(*OutStreamer, TmpInst);
3663 return;
3664 }
3665 case AArch64::CBWPri:
3666 case AArch64::CBXPri:
3667 case AArch64::CBBAssertExt:
3668 case AArch64::CBHAssertExt:
3669 case AArch64::CBWPrr:
3670 case AArch64::CBXPrr:
3671 emitCBPseudoExpansion(MI);
3672 return;
3673 }
3674
3675 if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol()))
3676 return;
3677
3678 // Finally, do the automated lowerings for everything else.
3679 MCInst TmpInst;
3680 MCInstLowering.Lower(MI, TmpInst);
3681 EmitToStreamer(*OutStreamer, TmpInst);
3682}
3683
3684void AArch64AsmPrinter::recordIfImportCall(
3685 const llvm::MachineInstr *BranchInst) {
3686 if (!EnableImportCallOptimization)
3687 return;
3688
3689 auto [GV, OpFlags] = BranchInst->getMF()->tryGetCalledGlobal(BranchInst);
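// Only calls to dllimport-ed globals participate in import call optimization:
// pair the label emitted at the call site with the callee's symbol, grouped by
// the current code section, so the pairs can later be emitted as the module's
// import call metadata.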
3690 if (GV && GV->hasDLLImportStorageClass()) {
3691 auto *CallSiteSymbol = MMI->getContext().createNamedTempSymbol("impcall");
3692 OutStreamer->emitLabel(CallSiteSymbol);
3693
3694 auto *CalledSymbol = MCInstLowering.GetGlobalValueSymbol(GV, OpFlags);
3695 SectionToImportedFunctionCalls[OutStreamer->getCurrentSectionOnly()]
3696 .push_back({CallSiteSymbol, CalledSymbol});
3697 }
3698}
3699
3700void AArch64AsmPrinter::emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
3701 MCSymbol *LazyPointer) {
3702 // _ifunc:
3703 // adrp x16, lazy_pointer@GOTPAGE
3704 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3705 // ldr x16, [x16]
3706 // br x16
3707
3708 {
3709 MCInst Adrp;
3710 Adrp.setOpcode(AArch64::ADRP);
3711 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3712 MCOperand SymPage;
3713 MCInstLowering.lowerOperand(
3714 MachineOperand::CreateMCSymbol(LazyPointer,
3715 AArch64II::MO_GOT | AArch64II::MO_PAGE),
3716 SymPage);
3717 Adrp.addOperand(SymPage);
3718 EmitToStreamer(Adrp);
3719 }
3720
3721 {
3722 MCInst Ldr;
3723 Ldr.setOpcode(AArch64::LDRXui);
3724 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3725 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3726 MCOperand SymPageOff;
3727 MCInstLowering.lowerOperand(
3728 MachineOperand::CreateMCSymbol(LazyPointer, AArch64II::MO_GOT |
3729 AArch64II::MO_PAGEOFF),
3730 SymPageOff);
3731 Ldr.addOperand(SymPageOff);
3732 Ldr.addOperand(MCOperand::createImm(0));
3733 EmitToStreamer(Ldr);
3734 }
3735
3736 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
3737 .addReg(AArch64::X16)
3738 .addReg(AArch64::X16)
3739 .addImm(0));
3740
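// On arm64e the branch below uses the authenticated zero-discriminator form
// (braaz), presumably because the resolved pointer loaded from the lazy
// pointer slot is signed on that platform; a plain br is used otherwise.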
3741 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3742 : AArch64::BR)
3743 .addReg(AArch64::X16));
3744}
3745
3746void AArch64AsmPrinter::emitMachOIFuncStubHelperBody(Module &M,
3747 const GlobalIFunc &GI,
3748 MCSymbol *LazyPointer) {
3749 // These stub helpers are only ever called once, so here we're optimizing for
3750 // minimum size by using the pre-indexed store variants, which saves the few
3751 // extra instructions that would otherwise be needed to bump and restore sp.
3752
3753 // _ifunc.stub_helper:
3754 // stp fp, lr, [sp, #-16]!
3755 // mov fp, sp
3756 // stp x1, x0, [sp, #-16]!
3757 // stp x3, x2, [sp, #-16]!
3758 // stp x5, x4, [sp, #-16]!
3759 // stp x7, x6, [sp, #-16]!
3760 // stp d1, d0, [sp, #-16]!
3761 // stp d3, d2, [sp, #-16]!
3762 // stp d5, d4, [sp, #-16]!
3763 // stp d7, d6, [sp, #-16]!
3764 // bl _resolver
3765 // adrp x16, lazy_pointer@GOTPAGE
3766 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3767 // str x0, [x16]
3768 // mov x16, x0
3769 // ldp d7, d6, [sp], #16
3770 // ldp d5, d4, [sp], #16
3771 // ldp d3, d2, [sp], #16
3772 // ldp d1, d0, [sp], #16
3773 // ldp x7, x6, [sp], #16
3774 // ldp x5, x4, [sp], #16
3775 // ldp x3, x2, [sp], #16
3776 // ldp x1, x0, [sp], #16
3777 // ldp fp, lr, [sp], #16
3778 // br x16
3779
3780 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3781 .addReg(AArch64::SP)
3782 .addReg(AArch64::FP)
3783 .addReg(AArch64::LR)
3784 .addReg(AArch64::SP)
3785 .addImm(-2));
3786
3787 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3788 .addReg(AArch64::FP)
3789 .addReg(AArch64::SP)
3790 .addImm(0)
3791 .addImm(0));
3792
3793 for (int I = 0; I != 4; ++I)
3794 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3795 .addReg(AArch64::SP)
3796 .addReg(AArch64::X1 + 2 * I)
3797 .addReg(AArch64::X0 + 2 * I)
3798 .addReg(AArch64::SP)
3799 .addImm(-2));
3800
3801 for (int I = 0; I != 4; ++I)
3802 EmitToStreamer(MCInstBuilder(AArch64::STPDpre)
3803 .addReg(AArch64::SP)
3804 .addReg(AArch64::D1 + 2 * I)
3805 .addReg(AArch64::D0 + 2 * I)
3806 .addReg(AArch64::SP)
3807 .addImm(-2));
3808
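// The resolver is an ordinary function call and may clobber the argument
// registers, which is why x0-x7 and d0-d7 were spilled above and are reloaded
// below before branching to the resolved implementation.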
3809 EmitToStreamer(
3810 MCInstBuilder(AArch64::BL)
3811 .addOperand(MCOperand::createExpr(lowerConstant(GI.getResolver()))));
3812
3813 {
3814 MCInst Adrp;
3815 Adrp.setOpcode(AArch64::ADRP);
3816 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3817 MCOperand SymPage;
3818 MCInstLowering.lowerOperand(
3819 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3820 AArch64II::MO_GOT | AArch64II::MO_PAGE),
3821 SymPage);
3822 Adrp.addOperand(SymPage);
3823 EmitToStreamer(Adrp);
3824 }
3825
3826 {
3827 MCInst Ldr;
3828 Ldr.setOpcode(AArch64::LDRXui);
3829 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3830 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3831 MCOperand SymPageOff;
3832 MCInstLowering.lowerOperand(
3833 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3834 AArch64II::MO_GOT | AArch64II::MO_PAGEOFF),
3835 SymPageOff);
3836 Ldr.addOperand(SymPageOff);
3837 Ldr.addOperand(MCOperand::createImm(0));
3838 EmitToStreamer(Ldr);
3839 }
3840
3841 EmitToStreamer(MCInstBuilder(AArch64::STRXui)
3842 .addReg(AArch64::X0)
3843 .addReg(AArch64::X16)
3844 .addImm(0));
3845
3846 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3847 .addReg(AArch64::X16)
3848 .addReg(AArch64::X0)
3849 .addImm(0)
3850 .addImm(0));
3851
3852 for (int I = 3; I != -1; --I)
3853 EmitToStreamer(MCInstBuilder(AArch64::LDPDpost)
3854 .addReg(AArch64::SP)
3855 .addReg(AArch64::D1 + 2 * I)
3856 .addReg(AArch64::D0 + 2 * I)
3857 .addReg(AArch64::SP)
3858 .addImm(2));
3859
3860 for (int I = 3; I != -1; --I)
3861 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3862 .addReg(AArch64::SP)
3863 .addReg(AArch64::X1 + 2 * I)
3864 .addReg(AArch64::X0 + 2 * I)
3865 .addReg(AArch64::SP)
3866 .addImm(2));
3867
3868 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3869 .addReg(AArch64::SP)
3870 .addReg(AArch64::FP)
3871 .addReg(AArch64::LR)
3872 .addReg(AArch64::SP)
3873 .addImm(2));
3874
3875 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3876 : AArch64::BR)
3877 .addReg(AArch64::X16));
3878}
3879
3880const MCExpr *AArch64AsmPrinter::lowerConstant(const Constant *CV,
3881 const Constant *BaseCV,
3882 uint64_t Offset) {
3883 if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
3884 return MCSymbolRefExpr::create(MCInstLowering.GetGlobalValueSymbol(GV, 0),
3885 OutContext);
3886 }
3887
3888 return AsmPrinter::lowerConstant(CV, BaseCV, Offset);
3889}
3890
3891char AArch64AsmPrinter::ID = 0;
3892
3893INITIALIZE_PASS(AArch64AsmPrinter, "aarch64-asm-printer",
3894 "AArch64 Assembly Printer", false, false)
3895
3896// Force static initialization.
3897extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
3898LLVMInitializeAArch64AsmPrinter() {
3899 RegisterAsmPrinter<AArch64AsmPrinter> X(getTheAArch64leTarget());
3900 RegisterAsmPrinter<AArch64AsmPrinter> Y(getTheAArch64beTarget());
3901 RegisterAsmPrinter<AArch64AsmPrinter> Z(getTheARM64Target());
3902 RegisterAsmPrinter<AArch64AsmPrinter> W(getTheARM64_32Target());
3903 RegisterAsmPrinter<AArch64AsmPrinter> V(getTheAArch64_32Target());
3904 }