LLVM 22.0.0git
AArch64AsmPrinter.cpp
Go to the documentation of this file.
1//===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains a printer that converts from our internal representation
10// of machine-dependent LLVM code to the AArch64 assembly language.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AArch64.h"
15#include "AArch64MCInstLower.h"
17#include "AArch64RegisterInfo.h"
18#include "AArch64Subtarget.h"
27#include "llvm/ADT/DenseMap.h"
28#include "llvm/ADT/ScopeExit.h"
31#include "llvm/ADT/StringRef.h"
32#include "llvm/ADT/Twine.h"
46#include "llvm/IR/DataLayout.h"
48#include "llvm/IR/Mangler.h"
49#include "llvm/IR/Module.h"
50#include "llvm/MC/MCAsmInfo.h"
51#include "llvm/MC/MCContext.h"
52#include "llvm/MC/MCExpr.h"
53#include "llvm/MC/MCInst.h"
57#include "llvm/MC/MCStreamer.h"
58#include "llvm/MC/MCSymbol.h"
59#include "llvm/MC/MCValue.h"
69#include <cassert>
70#include <cstdint>
71#include <map>
72#include <memory>
73
74using namespace llvm;
75
78 "aarch64-ptrauth-auth-checks", cl::Hidden,
79 cl::values(clEnumValN(Unchecked, "none", "don't test for failure"),
80 clEnumValN(Poison, "poison", "poison on failure"),
81 clEnumValN(Trap, "trap", "trap on failure")),
82 cl::desc("Check pointer authentication auth/resign failures"),
84
85#define DEBUG_TYPE "asm-printer"
86
87namespace {
88
// Printer pass that lowers AArch64 MachineInstrs to MCInsts and emits the
// final assembly/object stream through the AsmPrinter machinery.
// NOTE(review): this copy of the file has lines dropped by extraction (the
// embedded original line numbers jump, e.g. 97 -> 99); truncated declarations
// below are flagged individually — verify against upstream llvm-project.
89class AArch64AsmPrinter : public AsmPrinter {
90 AArch64MCInstLower MCInstLowering;
91 FaultMaps FM;
92 const AArch64Subtarget *STI;
93 bool ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = false;
94#ifndef NDEBUG
95 unsigned InstsEmitted;
96#endif
97 bool EnableImportCallOptimization = false;
 // NOTE(review): the type of this member (original line 98) is missing from
 // this copy — confirm against upstream before relying on it.
99 SectionToImportedFunctionCalls;
100 unsigned PAuthIFuncNextUniqueID = 1;
101
102public:
103 static char ID;
104
105 AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
106 : AsmPrinter(TM, std::move(Streamer), ID),
107 MCInstLowering(OutContext, *this), FM(*this) {}
108
109 StringRef getPassName() const override { return "AArch64 Assembly Printer"; }
110
111 /// Wrapper for MCInstLowering.lowerOperand() for the
112 /// tblgen'erated pseudo lowering.
113 bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
114 return MCInstLowering.lowerOperand(MO, MCOp);
115 }
116
117 const MCExpr *lowerConstantPtrAuth(const ConstantPtrAuth &CPA) override;
118
119 const MCExpr *lowerBlockAddressConstant(const BlockAddress &BA) override;
120
121 void emitStartOfAsmFile(Module &M) override;
122 void emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
123 ArrayRef<unsigned> JumpTableIndices) override;
 // NOTE(review): a line of this return type (original line 125) is missing
 // from this copy.
124 std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
126 getCodeViewJumpTableInfo(int JTI, const MachineInstr *BranchInstr,
127 const MCSymbol *BranchLabel) const override;
128
129 void emitFunctionEntryLabel() override;
130
131 void emitXXStructor(const DataLayout &DL, const Constant *CV) override;
132
133 void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);
134
135 void LowerHardenedBRJumpTable(const MachineInstr &MI);
136
137 void LowerMOPS(MCStreamer &OutStreamer, const MachineInstr &MI);
138
139 void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
140 const MachineInstr &MI);
141 void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
142 const MachineInstr &MI);
143 void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
144 const MachineInstr &MI);
145 void LowerFAULTING_OP(const MachineInstr &MI);
146
147 void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
148 void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
149 void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);
150 void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, bool Typed);
151
 // Key is (pointer reg, short-granules?, access info, fixed-shadow?, fixed
 // shadow offset); see LowerHWASAN_CHECK_MEMACCESS for how entries are built.
152 typedef std::tuple<unsigned, bool, uint32_t, bool, uint64_t>
153 HwasanMemaccessTuple;
154 std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
155 void LowerKCFI_CHECK(const MachineInstr &MI);
156 void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
157 void emitHwasanMemaccessSymbols(Module &M);
158
159 void emitSled(const MachineInstr &MI, SledKind Kind);
160
161 // Emit the sequence for BRA/BLRA (authenticate + branch/call).
162 void emitPtrauthBranch(const MachineInstr *MI);
163
164 void emitPtrauthCheckAuthenticatedValue(Register TestedReg,
165 Register ScratchReg,
 // NOTE(review): parameters from original lines 166-167 are missing here.
168 const MCSymbol *OnFailure = nullptr);
169
170 // Check authenticated LR before tail calling.
171 void emitPtrauthTailCallHardening(const MachineInstr *TC);
172
173 // Emit the sequence for AUT or AUTPAC.
174 void emitPtrauthAuthResign(Register AUTVal, AArch64PACKey::ID AUTKey,
175 uint64_t AUTDisc,
176 const MachineOperand *AUTAddrDisc,
177 Register Scratch,
178 std::optional<AArch64PACKey::ID> PACKey,
179 uint64_t PACDisc, Register PACAddrDisc, Value *DS);
180
181 // Emit R_AARCH64_PATCHINST, the deactivation symbol relocation. Returns true
182 // if no instruction should be emitted because the deactivation symbol is
183 // defined in the current module so this function emitted a NOP instead.
184 bool emitDeactivationSymbolRelocation(Value *DS);
185
186 // Emit the sequence for PAC.
187 void emitPtrauthSign(const MachineInstr *MI);
188
189 // Emit the sequence to compute the discriminator.
190 //
191 // The returned register is either unmodified AddrDisc or ScratchReg.
192 //
193 // If the expanded pseudo is allowed to clobber AddrDisc register, setting
194 // MayUseAddrAsScratch may save one MOV instruction, provided the address
195 // is already in x16/x17 (i.e. return x16/x17 which is the *modified* AddrDisc
196 // register at the same time) or the OS doesn't make it safer to use x16/x17
197 // (see AArch64Subtarget::isX16X17Safer()):
198 //
199 // mov x17, x16
200 // movk x17, #1234, lsl #48
201 // ; x16 is not used anymore
202 //
203 // can be replaced by
204 //
205 // movk x16, #1234, lsl #48
206 Register emitPtrauthDiscriminator(uint16_t Disc, Register AddrDisc,
207 Register ScratchReg,
208 bool MayUseAddrAsScratch = false);
209
210 // Emit the sequence for LOADauthptrstatic
211 void LowerLOADauthptrstatic(const MachineInstr &MI);
212
213 // Emit the sequence for LOADgotPAC/MOVaddrPAC (either GOT adrp-ldr or
214 // adrp-add followed by PAC sign)
215 void LowerMOVaddrPAC(const MachineInstr &MI);
216
217 // Emit the sequence for LOADgotAUTH (load signed pointer from signed ELF GOT
218 // and authenticate it with, if FPAC bit is not set, check+trap sequence after
219 // authenticating)
220 void LowerLOADgotAUTH(const MachineInstr &MI);
221
222 const MCExpr *emitPAuthRelocationAsIRelative(
223 const MCExpr *Target, uint16_t Disc, AArch64PACKey::ID KeyID,
224 bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr);
225
226 /// tblgen'erated driver function for lowering simple MI->MC
227 /// pseudo instructions.
228 bool lowerPseudoInstExpansion(const MachineInstr *MI, MCInst &Inst);
229
230 // Emit Build Attributes
231 void emitAttributes(unsigned Flags, uint64_t PAuthABIPlatform,
232 uint64_t PAuthABIVersion, AArch64TargetStreamer *TS);
233
234 // Emit expansion of Compare-and-branch pseudo instructions
235 void emitCBPseudoExpansion(const MachineInstr *MI);
236
237 void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
 // Convenience overload targeting the printer's own output streamer.
238 void EmitToStreamer(const MCInst &Inst) {
239 EmitToStreamer(*OutStreamer, Inst);
240 }
241
242 void emitInstruction(const MachineInstr *MI) override;
243
244 void emitFunctionHeaderComment() override;
245
246 void getAnalysisUsage(AnalysisUsage &AU) const override {
 // NOTE(review): original line 247 (presumably the forwarding call to
 // AsmPrinter::getAnalysisUsage(AU)) is missing from this copy.
248 AU.setPreservesAll();
249 }
250
251 bool runOnMachineFunction(MachineFunction &MF) override {
252 if (auto *PSIW = getAnalysisIfAvailable<ProfileSummaryInfoWrapperPass>())
253 PSI = &PSIW->getPSI();
254 if (auto *SDPIW =
255 getAnalysisIfAvailable<StaticDataProfileInfoWrapperPass>())
256 SDPI = &SDPIW->getStaticDataProfileInfo();
257
258 AArch64FI = MF.getInfo<AArch64FunctionInfo>();
259 STI = &MF.getSubtarget<AArch64Subtarget>();
260
261 SetupMachineFunction(MF);
262
 // On COFF targets, open/close an explicit symbol definition record for
 // the function symbol.
263 if (STI->isTargetCOFF()) {
264 bool Local = MF.getFunction().hasLocalLinkage();
 // NOTE(review): original lines 265-266 and 268 (the computation of `Scl`
 // and the value assigned to `Type`) are missing from this copy.
267 int Type =
269
270 OutStreamer->beginCOFFSymbolDef(CurrentFnSym);
271 OutStreamer->emitCOFFSymbolStorageClass(Scl);
272 OutStreamer->emitCOFFSymbolType(Type);
273 OutStreamer->endCOFFSymbolDef();
274 }
275
276 // Emit the rest of the function body.
277 emitFunctionBody();
278
279 // Emit the XRay table for this function.
280 emitXRayTable();
281
282 // We didn't modify anything.
283 return false;
284 }
285
286 const MCExpr *lowerConstant(const Constant *CV,
287 const Constant *BaseCV = nullptr,
288 uint64_t Offset = 0) override;
289
290private:
291 void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
292 bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
293 bool printAsmRegInClass(const MachineOperand &MO,
294 const TargetRegisterClass *RC, unsigned AltName,
295 raw_ostream &O);
296
297 bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
298 const char *ExtraCode, raw_ostream &O) override;
299 bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
300 const char *ExtraCode, raw_ostream &O) override;
301
302 void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
303
304 void emitFunctionBodyEnd() override;
305 void emitGlobalAlias(const Module &M, const GlobalAlias &GA) override;
306
307 MCSymbol *GetCPISymbol(unsigned CPID) const override;
308 void emitEndOfAsmFile(Module &M) override;
309
310 AArch64FunctionInfo *AArch64FI = nullptr;
311
312 /// Emit the LOHs contained in AArch64FI.
313 void emitLOHs();
314
315 void emitMovXReg(Register Dest, Register Src);
316 void emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift);
317 void emitMOVK(Register Dest, uint64_t Imm, unsigned Shift);
318
319 /// Emit instruction to set float register to zero.
320 void emitFMov0(const MachineInstr &MI);
321 void emitFMov0AsFMov(const MachineInstr &MI, Register DestReg);
322
323 using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;
324
325 MInstToMCSymbol LOHInstToLabel;
326
327 bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const override {
328 return ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags;
329 }
330
331 const MCSubtargetInfo *getIFuncMCSubtargetInfo() const override {
332 assert(STI);
333 return STI;
334 }
335 void emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
336 MCSymbol *LazyPointer) override;
337 void emitMachOIFuncStubHelperBody(Module &M, const GlobalIFunc &GI,
338 MCSymbol *LazyPointer) override;
339
340 /// Checks if this instruction is part of a sequence that is eligible for
341 /// import call optimization and, if so, records it to be emitted in the
342 /// import call section.
343 void recordIfImportCall(const MachineInstr *BranchInst);
344};
345
346} // end anonymous namespace
347
// Module-level prologue: COFF feature symbols / import-call-optimization flag
// on COFF, then (ELF only) AArch64 build attributes and a .note.gnu.property
// section derived from the module flags.
348void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
349 const Triple &TT = TM.getTargetTriple();
350
351 if (TT.isOSBinFormatCOFF()) {
352 emitCOFFFeatureSymbol(M);
353 emitCOFFReplaceableFunctionData(M);
354
 // Remember the module-level opt-in; consumed when lowering calls.
355 if (M.getModuleFlag("import-call-optimization"))
356 EnableImportCallOptimization = true;
357 }
358
 // Everything below is ELF-specific.
359 if (!TT.isOSBinFormatELF())
360 return;
361
362 // For emitting build attributes and .note.gnu.property section
363 auto *TS =
364 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
365 // Assemble feature flags that may require creation of build attributes and a
366 // note section.
367 unsigned BAFlags = 0;
368 unsigned GNUFlags = 0;
369 if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
370 M.getModuleFlag("branch-target-enforcement"))) {
371 if (!BTE->isZero()) {
372 BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_BTI_Flag;
 // NOTE(review): original line 373 (presumably the matching GNUFlags
 // update for BTI) is missing from this copy.
374 }
375 }
376
377 if (const auto *GCS = mdconst::extract_or_null<ConstantInt>(
378 M.getModuleFlag("guarded-control-stack"))) {
379 if (!GCS->isZero()) {
380 BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_GCS_Flag;
 // NOTE(review): original line 381 (presumably the matching GNUFlags
 // update for GCS) is missing from this copy.
382 }
383 }
384
385 if (const auto *Sign = mdconst::extract_or_null<ConstantInt>(
386 M.getModuleFlag("sign-return-address"))) {
387 if (!Sign->isZero()) {
388 BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_PAC_Flag;
 // NOTE(review): original line 389 (presumably the matching GNUFlags
 // update for PAC) is missing from this copy.
390 }
391 }
392
 // -1 is the sentinel for "flag absent"; emitAttributes maps it to 0.
393 uint64_t PAuthABIPlatform = -1;
394 if (const auto *PAP = mdconst::extract_or_null<ConstantInt>(
395 M.getModuleFlag("aarch64-elf-pauthabi-platform"))) {
396 PAuthABIPlatform = PAP->getZExtValue();
397 }
398
399 uint64_t PAuthABIVersion = -1;
400 if (const auto *PAV = mdconst::extract_or_null<ConstantInt>(
401 M.getModuleFlag("aarch64-elf-pauthabi-version"))) {
402 PAuthABIVersion = PAV->getZExtValue();
403 }
404
405 // Emit AArch64 Build Attributes
406 emitAttributes(BAFlags, PAuthABIPlatform, PAuthABIVersion, TS);
407 // Emit a .note.gnu.property section with the flags.
408 TS->emitNoteSection(GNUFlags, PAuthABIPlatform, PAuthABIVersion);
409}
410
411void AArch64AsmPrinter::emitFunctionHeaderComment() {
412 const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
413 std::optional<std::string> OutlinerString = FI->getOutliningStyle();
414 if (OutlinerString != std::nullopt)
415 OutStreamer->getCommentOS() << ' ' << OutlinerString;
416}
417
418void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI)
419{
420 const Function &F = MF->getFunction();
421 if (F.hasFnAttribute("patchable-function-entry")) {
422 unsigned Num;
423 if (F.getFnAttribute("patchable-function-entry")
424 .getValueAsString()
425 .getAsInteger(10, Num))
426 return;
427 emitNops(Num);
428 return;
429 }
430
431 emitSled(MI, SledKind::FUNCTION_ENTER);
432}
433
434void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI) {
435 emitSled(MI, SledKind::FUNCTION_EXIT);
436}
437
438void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI) {
439 emitSled(MI, SledKind::TAIL_CALL);
440}
441
442void AArch64AsmPrinter::emitSled(const MachineInstr &MI, SledKind Kind) {
443 static const int8_t NoopsInSledCount = 7;
444 // We want to emit the following pattern:
445 //
446 // .Lxray_sled_N:
447 // ALIGN
448 // B #32
449 // ; 7 NOP instructions (28 bytes)
450 // .tmpN
451 //
452 // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
453 // over the full 32 bytes (8 instructions) with the following pattern:
454 //
455 // STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
456 // LDR W17, #12 ; W17 := function ID
457 // LDR X16,#12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
458 // BLR X16 ; call the tracing trampoline
459 // ;DATA: 32 bits of function ID
460 // ;DATA: lower 32 bits of the address of the trampoline
461 // ;DATA: higher 32 bits of the address of the trampoline
462 // LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
463 //
464 OutStreamer->emitCodeAlignment(Align(4), &getSubtargetInfo());
465 auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
466 OutStreamer->emitLabel(CurSled);
467 auto Target = OutContext.createTempSymbol();
468
469 // Emit "B #32" instruction, which jumps over the next 28 bytes.
470 // The operand has to be the number of 4-byte instructions to jump over,
471 // including the current instruction.
472 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));
473
474 for (int8_t I = 0; I < NoopsInSledCount; I++)
475 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
476
477 OutStreamer->emitLabel(Target);
478 recordSled(CurSled, MI, Kind, 2);
479}
480
// Emit the AArch64 ELF build-attributes subsections: the (required) PAuthABI
// subsection when a platform/version was supplied, and the (optional)
// feature-and-bits subsection for BTI/PAC/GCS.
481void AArch64AsmPrinter::emitAttributes(unsigned Flags,
482 uint64_t PAuthABIPlatform,
483 uint64_t PAuthABIVersion,
484 AArch64TargetStreamer *TS) {
485
 // uint64_t(-1) is the "module flag absent" sentinel; normalize it to 0.
486 PAuthABIPlatform = (uint64_t(-1) == PAuthABIPlatform) ? 0 : PAuthABIPlatform;
487 PAuthABIVersion = (uint64_t(-1) == PAuthABIVersion) ? 0 : PAuthABIVersion;
488
489 if (PAuthABIPlatform || PAuthABIVersion) {
 // NOTE(review): original lines 490-492 (the emitSubsection call and its
 // first arguments) are missing from this copy.
493 AArch64BuildAttributes::SubsectionOptional::REQUIRED,
494 AArch64BuildAttributes::SubsectionType::ULEB128);
 // NOTE(review): original lines 495-497 (the attribute-emission call
 // preceding the platform value) are missing from this copy.
498 PAuthABIPlatform, "");
 // NOTE(review): original lines 499-501 (the attribute-emission call for
 // PAuthABIVersion) are missing from this copy.
502 "");
503 }
504
 // NOTE(review): the initializer expressions on original lines 506, 508 and
 // 510 (presumably derived from bits of Flags) are missing from this copy.
505 unsigned BTIValue =
507 unsigned PACValue =
509 unsigned GCSValue =
511
512 if (BTIValue || PACValue || GCSValue) {
 // NOTE(review): original lines 513-515 (the emitSubsection call for the
 // feature-and-bits subsection) are missing from this copy.
516 AArch64BuildAttributes::SubsectionOptional::OPTIONAL,
517 AArch64BuildAttributes::SubsectionType::ULEB128);
 // NOTE(review): original lines 518-526 (the per-feature attribute
 // emissions) are missing from this copy.
527 }
528}
529
530// Emit the following code for Intrinsic::{xray_customevent,xray_typedevent}
531// (built-in functions __xray_customevent/__xray_typedevent).
532//
533// .Lxray_event_sled_N:
534// b 1f
535// save x0 and x1 (and also x2 for TYPED_EVENT_CALL)
536// set up x0 and x1 (and also x2 for TYPED_EVENT_CALL)
537// bl __xray_CustomEvent or __xray_TypedEvent
538// restore x0 and x1 (and also x2 for TYPED_EVENT_CALL)
539// 1:
540//
541// There are 6 instructions for EVENT_CALL and 9 for TYPED_EVENT_CALL.
542//
543// Then record a sled of kind CUSTOM_EVENT or TYPED_EVENT.
544// After patching, b .+N will become a nop.
545void AArch64AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
546 bool Typed) {
547 auto &O = *OutStreamer;
548 MCSymbol *CurSled = OutContext.createTempSymbol("xray_sled_", true);
549 O.emitLabel(CurSled);
550 bool MachO = TM.getTargetTriple().isOSBinFormatMachO();
551 auto *Sym = MCSymbolRefExpr::create(
552 OutContext.getOrCreateSymbol(
553 Twine(MachO ? "_" : "") +
554 (Typed ? "__xray_TypedEvent" : "__xray_CustomEvent")),
555 OutContext);
556 if (Typed) {
557 O.AddComment("Begin XRay typed event");
558 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(9));
559 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
560 .addReg(AArch64::SP)
561 .addReg(AArch64::X0)
562 .addReg(AArch64::X1)
563 .addReg(AArch64::SP)
564 .addImm(-4));
565 EmitToStreamer(O, MCInstBuilder(AArch64::STRXui)
566 .addReg(AArch64::X2)
567 .addReg(AArch64::SP)
568 .addImm(2));
569 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
570 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
571 emitMovXReg(AArch64::X2, MI.getOperand(2).getReg());
572 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
573 EmitToStreamer(O, MCInstBuilder(AArch64::LDRXui)
574 .addReg(AArch64::X2)
575 .addReg(AArch64::SP)
576 .addImm(2));
577 O.AddComment("End XRay typed event");
578 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
579 .addReg(AArch64::SP)
580 .addReg(AArch64::X0)
581 .addReg(AArch64::X1)
582 .addReg(AArch64::SP)
583 .addImm(4));
584
585 recordSled(CurSled, MI, SledKind::TYPED_EVENT, 2);
586 } else {
587 O.AddComment("Begin XRay custom event");
588 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(6));
589 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
590 .addReg(AArch64::SP)
591 .addReg(AArch64::X0)
592 .addReg(AArch64::X1)
593 .addReg(AArch64::SP)
594 .addImm(-2));
595 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
596 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
597 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
598 O.AddComment("End XRay custom event");
599 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
600 .addReg(AArch64::SP)
601 .addReg(AArch64::X0)
602 .addReg(AArch64::X1)
603 .addReg(AArch64::SP)
604 .addImm(2));
605
606 recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 2);
607 }
608}
609
610void AArch64AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
611 Register AddrReg = MI.getOperand(0).getReg();
612 assert(std::next(MI.getIterator())->isCall() &&
613 "KCFI_CHECK not followed by a call instruction");
614 assert(std::next(MI.getIterator())->getOperand(0).getReg() == AddrReg &&
615 "KCFI_CHECK call target doesn't match call operand");
616
617 // Default to using the intra-procedure-call temporary registers for
618 // comparing the hashes.
619 unsigned ScratchRegs[] = {AArch64::W16, AArch64::W17};
620 if (AddrReg == AArch64::XZR) {
621 // Checking XZR makes no sense. Instead of emitting a load, zero
622 // ScratchRegs[0] and use it for the ESR AddrIndex below.
623 AddrReg = getXRegFromWReg(ScratchRegs[0]);
624 emitMovXReg(AddrReg, AArch64::XZR);
625 } else {
626 // If one of the scratch registers is used for the call target (e.g.
627 // with AArch64::TCRETURNriBTI), we can clobber another caller-saved
628 // temporary register instead (in this case, AArch64::W9) as the check
629 // is immediately followed by the call instruction.
630 for (auto &Reg : ScratchRegs) {
631 if (Reg == getWRegFromXReg(AddrReg)) {
632 Reg = AArch64::W9;
633 break;
634 }
635 }
636 assert(ScratchRegs[0] != AddrReg && ScratchRegs[1] != AddrReg &&
637 "Invalid scratch registers for KCFI_CHECK");
638
639 // Adjust the offset for patchable-function-prefix. This assumes that
640 // patchable-function-prefix is the same for all functions.
641 int64_t PrefixNops = 0;
642 (void)MI.getMF()
643 ->getFunction()
644 .getFnAttribute("patchable-function-prefix")
645 .getValueAsString()
646 .getAsInteger(10, PrefixNops);
647
648 // Load the target function type hash.
649 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDURWi)
650 .addReg(ScratchRegs[0])
651 .addReg(AddrReg)
652 .addImm(-(PrefixNops * 4 + 4)));
653 }
654
655 // Load the expected type hash.
656 const int64_t Type = MI.getOperand(1).getImm();
657 emitMOVK(ScratchRegs[1], Type & 0xFFFF, 0);
658 emitMOVK(ScratchRegs[1], (Type >> 16) & 0xFFFF, 16);
659
660 // Compare the hashes and trap if there's a mismatch.
661 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSWrs)
662 .addReg(AArch64::WZR)
663 .addReg(ScratchRegs[0])
664 .addReg(ScratchRegs[1])
665 .addImm(0));
666
667 MCSymbol *Pass = OutContext.createTempSymbol();
668 EmitToStreamer(*OutStreamer,
669 MCInstBuilder(AArch64::Bcc)
670 .addImm(AArch64CC::EQ)
671 .addExpr(MCSymbolRefExpr::create(Pass, OutContext)));
672
673 // The base ESR is 0x8000 and the register information is encoded in bits
674 // 0-9 as follows:
675 // - 0-4: n, where the register Xn contains the target address
676 // - 5-9: m, where the register Wm contains the expected type hash
677 // Where n, m are in [0, 30].
678 unsigned TypeIndex = ScratchRegs[1] - AArch64::W0;
679 unsigned AddrIndex;
680 switch (AddrReg) {
681 default:
682 AddrIndex = AddrReg - AArch64::X0;
683 break;
684 case AArch64::FP:
685 AddrIndex = 29;
686 break;
687 case AArch64::LR:
688 AddrIndex = 30;
689 break;
690 }
691
692 assert(AddrIndex < 31 && TypeIndex < 31);
693
694 unsigned ESR = 0x8000 | ((TypeIndex & 31) << 5) | (AddrIndex & 31);
695 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BRK).addImm(ESR));
696 OutStreamer->emitLabel(Pass);
697}
698
699void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
700 Register Reg = MI.getOperand(0).getReg();
701
702 // The HWASan pass won't emit a CHECK_MEMACCESS intrinsic with a pointer
703 // statically known to be zero. However, conceivably, the HWASan pass may
704 // encounter a "cannot currently statically prove to be null" pointer (and is
705 // therefore unable to omit the intrinsic) that later optimization passes
706 // convert into a statically known-null pointer.
707 if (Reg == AArch64::XZR)
708 return;
709
710 bool IsShort =
711 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES) ||
712 (MI.getOpcode() ==
713 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
714 uint32_t AccessInfo = MI.getOperand(1).getImm();
715 bool IsFixedShadow =
716 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW) ||
717 (MI.getOpcode() ==
718 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
719 uint64_t FixedShadowOffset = IsFixedShadow ? MI.getOperand(2).getImm() : 0;
720
721 MCSymbol *&Sym = HwasanMemaccessSymbols[HwasanMemaccessTuple(
722 Reg, IsShort, AccessInfo, IsFixedShadow, FixedShadowOffset)];
723 if (!Sym) {
724 // FIXME: Make this work on non-ELF.
725 if (!TM.getTargetTriple().isOSBinFormatELF())
726 report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");
727
728 std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
729 utostr(AccessInfo);
730 if (IsFixedShadow)
731 SymName += "_fixed_" + utostr(FixedShadowOffset);
732 if (IsShort)
733 SymName += "_short_v2";
734 Sym = OutContext.getOrCreateSymbol(SymName);
735 }
736
737 EmitToStreamer(*OutStreamer,
738 MCInstBuilder(AArch64::BL)
739 .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
740}
741
// Emit the bodies of the outlined HWASan check routines that were requested
// via LowerHWASAN_CHECK_MEMACCESS. Each routine loads the shadow tag for the
// pointer in `Reg`, returns on a match, and otherwise falls through to a
// mismatch handler that tail-calls __hwasan_tag_mismatch(_v2).
742void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
743 if (HwasanMemaccessSymbols.empty())
744 return;
745
746 const Triple &TT = TM.getTargetTriple();
747 assert(TT.isOSBinFormatELF());
748 std::unique_ptr<MCSubtargetInfo> STI(
749 TM.getTarget().createMCSubtargetInfo(TT, "", ""));
750 assert(STI && "Unable to create subtarget info");
 // Temporarily repoint the member STI at a module-level subtarget; restored
 // to nullptr at the end of this function.
751 this->STI = static_cast<const AArch64Subtarget *>(&*STI);
752
753 MCSymbol *HwasanTagMismatchV1Sym =
754 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");
755 MCSymbol *HwasanTagMismatchV2Sym =
756 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");
757
758 const MCSymbolRefExpr *HwasanTagMismatchV1Ref =
759 MCSymbolRefExpr::create(HwasanTagMismatchV1Sym, OutContext);
760 const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
761 MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);
762
763 for (auto &P : HwasanMemaccessSymbols) {
764 unsigned Reg = std::get<0>(P.first);
765 bool IsShort = std::get<1>(P.first);
766 uint32_t AccessInfo = std::get<2>(P.first);
767 bool IsFixedShadow = std::get<3>(P.first);
768 uint64_t FixedShadowOffset = std::get<4>(P.first);
769 const MCSymbolRefExpr *HwasanTagMismatchRef =
770 IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
771 MCSymbol *Sym = P.second;
772
 // Decode the packed AccessInfo fields.
773 bool HasMatchAllTag =
774 (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
775 uint8_t MatchAllTag =
776 (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
777 unsigned Size =
778 1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
779 bool CompileKernel =
780 (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;
781
 // NOTE(review): original line 784 (remaining getELFSection arguments,
 // e.g. section flags / group) is missing from this copy.
782 OutStreamer->switchSection(OutContext.getELFSection(
783 ".text.hot", ELF::SHT_PROGBITS,
785 /*IsComdat=*/true));
786
 // Weak + hidden so duplicate routines across TUs fold at link time.
787 OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
788 OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
789 OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
790 OutStreamer->emitLabel(Sym);
791
 // X16 := shadow index derived from the tagged pointer.
792 EmitToStreamer(MCInstBuilder(AArch64::SBFMXri)
793 .addReg(AArch64::X16)
794 .addReg(Reg)
795 .addImm(4)
796 .addImm(55));
797
798 if (IsFixedShadow) {
799 // Aarch64 makes it difficult to embed large constants in the code.
800 // Fortuitously, kShadowBaseAlignment == 32, so we use the 32-bit
801 // left-shift option in the MOV instruction. Combined with the 16-bit
802 // immediate, this is enough to represent any offset up to 2**48.
803 emitMOVZ(AArch64::X17, FixedShadowOffset >> 32, 32);
804 EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
805 .addReg(AArch64::W16)
806 .addReg(AArch64::X17)
807 .addReg(AArch64::X16)
808 .addImm(0)
809 .addImm(0));
810 } else {
811 EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
812 .addReg(AArch64::W16)
813 .addReg(IsShort ? AArch64::X20 : AArch64::X9)
814 .addReg(AArch64::X16)
815 .addImm(0)
816 .addImm(0));
817 }
818
 // Compare the loaded shadow tag against the pointer tag.
 // NOTE(review): original line 823 (the final builder argument of this
 // SUBSXrs, presumably a shift immediate) is missing from this copy.
819 EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
820 .addReg(AArch64::XZR)
821 .addReg(AArch64::X16)
822 .addReg(Reg)
824 MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
 // NOTE(review): original line 827 (the .addExpr(MCSymbolRefExpr::create(
 // opening) is missing from this copy.
825 EmitToStreamer(MCInstBuilder(AArch64::Bcc)
826 .addImm(AArch64CC::NE)
828 HandleMismatchOrPartialSym, OutContext)));
829 MCSymbol *ReturnSym = OutContext.createTempSymbol();
830 OutStreamer->emitLabel(ReturnSym);
831 EmitToStreamer(MCInstBuilder(AArch64::RET).addReg(AArch64::LR));
832 OutStreamer->emitLabel(HandleMismatchOrPartialSym);
833
 // Optional "match-all" escape hatch: a pointer tag equal to MatchAllTag
 // always passes.
834 if (HasMatchAllTag) {
835 EmitToStreamer(MCInstBuilder(AArch64::UBFMXri)
836 .addReg(AArch64::X17)
837 .addReg(Reg)
838 .addImm(56)
839 .addImm(63));
840 EmitToStreamer(MCInstBuilder(AArch64::SUBSXri)
841 .addReg(AArch64::XZR)
842 .addReg(AArch64::X17)
843 .addImm(MatchAllTag)
844 .addImm(0));
845 EmitToStreamer(
846 MCInstBuilder(AArch64::Bcc)
847 .addImm(AArch64CC::EQ)
848 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
849 }
850
 // Short-granule handling: shadow values 1..15 encode a partially
 // accessible granule; re-check the access bounds and the stored tag.
851 if (IsShort) {
852 EmitToStreamer(MCInstBuilder(AArch64::SUBSWri)
853 .addReg(AArch64::WZR)
854 .addReg(AArch64::W16)
855 .addImm(15)
856 .addImm(0));
857 MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
858 EmitToStreamer(
859 MCInstBuilder(AArch64::Bcc)
860 .addImm(AArch64CC::HI)
861 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));
862
863 EmitToStreamer(MCInstBuilder(AArch64::ANDXri)
864 .addReg(AArch64::X17)
865 .addReg(Reg)
866 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
867 if (Size != 1)
868 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
869 .addReg(AArch64::X17)
870 .addReg(AArch64::X17)
871 .addImm(Size - 1)
872 .addImm(0));
873 EmitToStreamer(MCInstBuilder(AArch64::SUBSWrs)
874 .addReg(AArch64::WZR)
875 .addReg(AArch64::W16)
876 .addReg(AArch64::W17)
877 .addImm(0));
878 EmitToStreamer(
879 MCInstBuilder(AArch64::Bcc)
880 .addImm(AArch64CC::LS)
881 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));
882
 // Load the real tag stored in the granule's last byte and compare.
883 EmitToStreamer(MCInstBuilder(AArch64::ORRXri)
884 .addReg(AArch64::X16)
885 .addReg(Reg)
886 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
887 EmitToStreamer(MCInstBuilder(AArch64::LDRBBui)
888 .addReg(AArch64::W16)
889 .addReg(AArch64::X16)
890 .addImm(0));
 // NOTE(review): original line 896 (the final builder argument of this
 // SUBSXrs, presumably a shift immediate) is missing from this copy.
891 EmitToStreamer(
892 MCInstBuilder(AArch64::SUBSXrs)
893 .addReg(AArch64::XZR)
894 .addReg(AArch64::X16)
895 .addReg(Reg)
897 EmitToStreamer(
898 MCInstBuilder(AArch64::Bcc)
899 .addImm(AArch64CC::EQ)
900 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
901
902 OutStreamer->emitLabel(HandleMismatchSym);
903 }
904
 // Mismatch path: spill X0/X1 and a frame record, then hand off to the
 // runtime with the faulting address in X0 and the access info in X1.
905 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
906 .addReg(AArch64::SP)
907 .addReg(AArch64::X0)
908 .addReg(AArch64::X1)
909 .addReg(AArch64::SP)
910 .addImm(-32));
911 EmitToStreamer(MCInstBuilder(AArch64::STPXi)
912 .addReg(AArch64::FP)
913 .addReg(AArch64::LR)
914 .addReg(AArch64::SP)
915 .addImm(29));
916
917 if (Reg != AArch64::X0)
918 emitMovXReg(AArch64::X0, Reg);
919 emitMOVZ(AArch64::X1, AccessInfo & HWASanAccessInfo::RuntimeMask, 0);
920
921 if (CompileKernel) {
922 // The Linux kernel's dynamic loader doesn't support GOT relative
923 // relocations, but it doesn't support late binding either, so just call
924 // the function directly.
925 EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef));
926 } else {
927 // Intentionally load the GOT entry and branch to it, rather than possibly
928 // late binding the function, which may clobber the registers before we
929 // have a chance to save them.
 // NOTE(review): original lines 933 and 939 (the relocation specifier
 // argument of each MCSpecifierExpr::create call) are missing from this
 // copy.
930 EmitToStreamer(MCInstBuilder(AArch64::ADRP)
931 .addReg(AArch64::X16)
932 .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
934 OutContext)));
935 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
936 .addReg(AArch64::X16)
937 .addReg(AArch64::X16)
938 .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
940 OutContext)));
941 EmitToStreamer(MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
942 }
943 }
944 this->STI = nullptr;
945}
946
947static void emitAuthenticatedPointer(MCStreamer &OutStreamer,
948 MCSymbol *StubLabel,
949 const MCExpr *StubAuthPtrRef) {
950 // sym$auth_ptr$key$disc:
951 OutStreamer.emitLabel(StubLabel);
952 OutStreamer.emitValue(StubAuthPtrRef, /*size=*/8);
953}
954
955void AArch64AsmPrinter::emitEndOfAsmFile(Module &M) {
956 emitHwasanMemaccessSymbols(M);
957
958 const Triple &TT = TM.getTargetTriple();
959 if (TT.isOSBinFormatMachO()) {
960 // Output authenticated pointers as indirect symbols, if we have any.
961 MachineModuleInfoMachO &MMIMacho =
962 MMI->getObjFileInfo<MachineModuleInfoMachO>();
963
964 auto Stubs = MMIMacho.getAuthGVStubList();
965
966 if (!Stubs.empty()) {
967 // Switch to the "__auth_ptr" section.
968 OutStreamer->switchSection(
969 OutContext.getMachOSection("__DATA", "__auth_ptr", MachO::S_REGULAR,
971 emitAlignment(Align(8));
972
973 for (const auto &Stub : Stubs)
974 emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
975
976 OutStreamer->addBlankLine();
977 }
978
979 // Funny Darwin hack: This flag tells the linker that no global symbols
980 // contain code that falls through to other global symbols (e.g. the obvious
981 // implementation of multiple entry points). If this doesn't occur, the
982 // linker can safely perform dead code stripping. Since LLVM never
983 // generates code that does this, it is always safe to set.
984 OutStreamer->emitSubsectionsViaSymbols();
985 }
986
987 if (TT.isOSBinFormatELF()) {
988 // Output authenticated pointers as indirect symbols, if we have any.
989 MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
990
991 auto Stubs = MMIELF.getAuthGVStubList();
992
993 if (!Stubs.empty()) {
994 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
995 OutStreamer->switchSection(TLOF.getDataSection());
996 emitAlignment(Align(8));
997
998 for (const auto &Stub : Stubs)
999 emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
1000
1001 OutStreamer->addBlankLine();
1002 }
1003
1004 // With signed ELF GOT enabled, the linker looks at the symbol type to
1005 // choose between keys IA (for STT_FUNC) and DA (for other types). Symbols
1006 // for functions not defined in the module have STT_NOTYPE type by default.
1007 // This makes linker to emit signing schema with DA key (instead of IA) for
1008 // corresponding R_AARCH64_AUTH_GLOB_DAT dynamic reloc. To avoid that, force
1009 // all function symbols used in the module to have STT_FUNC type. See
1010 // https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#default-signing-schema
1011 const auto *PtrAuthELFGOTFlag = mdconst::extract_or_null<ConstantInt>(
1012 M.getModuleFlag("ptrauth-elf-got"));
1013 if (PtrAuthELFGOTFlag && PtrAuthELFGOTFlag->getZExtValue() == 1)
1014 for (const GlobalValue &GV : M.global_values())
1015 if (!GV.use_empty() && isa<Function>(GV) &&
1016 !GV.getName().starts_with("llvm."))
1017 OutStreamer->emitSymbolAttribute(getSymbol(&GV),
1019 }
1020
1021 // Emit stack and fault map information.
1023
1024 // If import call optimization is enabled, emit the appropriate section.
1025 // We do this whether or not we recorded any import calls.
1026 if (EnableImportCallOptimization && TT.isOSBinFormatCOFF()) {
1027 OutStreamer->switchSection(getObjFileLowering().getImportCallSection());
1028
1029 // Section always starts with some magic.
1030 constexpr char ImpCallMagic[12] = "Imp_Call_V1";
1031 OutStreamer->emitBytes(StringRef{ImpCallMagic, sizeof(ImpCallMagic)});
1032
1033 // Layout of this section is:
1034 // Per section that contains calls to imported functions:
1035 // uint32_t SectionSize: Size in bytes for information in this section.
1036 // uint32_t Section Number
1037 // Per call to imported function in section:
1038 // uint32_t Kind: the kind of imported function.
1039 // uint32_t BranchOffset: the offset of the branch instruction in its
1040 // parent section.
1041 // uint32_t TargetSymbolId: the symbol id of the called function.
1042 for (auto &[Section, CallsToImportedFuncs] :
1043 SectionToImportedFunctionCalls) {
1044 unsigned SectionSize =
1045 sizeof(uint32_t) * (2 + 3 * CallsToImportedFuncs.size());
1046 OutStreamer->emitInt32(SectionSize);
1047 OutStreamer->emitCOFFSecNumber(Section->getBeginSymbol());
1048 for (auto &[CallsiteSymbol, CalledSymbol] : CallsToImportedFuncs) {
1049 // Kind is always IMAGE_REL_ARM64_DYNAMIC_IMPORT_CALL (0x13).
1050 OutStreamer->emitInt32(0x13);
1051 OutStreamer->emitCOFFSecOffset(CallsiteSymbol);
1052 OutStreamer->emitCOFFSymbolIndex(CalledSymbol);
1053 }
1054 }
1055 }
1056}
1057
1058void AArch64AsmPrinter::emitLOHs() {
1060
1061 for (const auto &D : AArch64FI->getLOHContainer()) {
1062 for (const MachineInstr *MI : D.getArgs()) {
1063 MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
1064 assert(LabelIt != LOHInstToLabel.end() &&
1065 "Label hasn't been inserted for LOH related instruction");
1066 MCArgs.push_back(LabelIt->second);
1067 }
1068 OutStreamer->emitLOHDirective(D.getKind(), MCArgs);
1069 MCArgs.clear();
1070 }
1071}
1072
1073void AArch64AsmPrinter::emitFunctionBodyEnd() {
1074 if (!AArch64FI->getLOHRelated().empty())
1075 emitLOHs();
1076}
1077
1078/// GetCPISymbol - Return the symbol for the specified constant pool entry.
1079MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
1080 // Darwin uses a linker-private symbol name for constant-pools (to
1081 // avoid addends on the relocation?), ELF has no such concept and
1082 // uses a normal private symbol.
1083 if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
1084 return OutContext.getOrCreateSymbol(
1085 Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
1086 Twine(getFunctionNumber()) + "_" + Twine(CPID));
1087
1088 return AsmPrinter::GetCPISymbol(CPID);
1089}
1090
1091void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
1092 raw_ostream &O) {
1093 const MachineOperand &MO = MI->getOperand(OpNum);
1094 switch (MO.getType()) {
1095 default:
1096 llvm_unreachable("<unknown operand type>");
1098 Register Reg = MO.getReg();
1100 assert(!MO.getSubReg() && "Subregs should be eliminated!");
1102 break;
1103 }
1105 O << MO.getImm();
1106 break;
1107 }
1109 PrintSymbolOperand(MO, O);
1110 break;
1111 }
1113 MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
1114 Sym->print(O, MAI);
1115 break;
1116 }
1117 }
1118}
1119
1120bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
1121 raw_ostream &O) {
1122 Register Reg = MO.getReg();
1123 switch (Mode) {
1124 default:
1125 return true; // Unknown mode.
1126 case 'w':
1128 break;
1129 case 'x':
1131 break;
1132 case 't':
1134 break;
1135 }
1136
1138 return false;
1139}
1140
1141// Prints the register in MO using class RC using the offset in the
1142// new register class. This should not be used for cross class
1143// printing.
1144bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
1145 const TargetRegisterClass *RC,
1146 unsigned AltName, raw_ostream &O) {
1147 assert(MO.isReg() && "Should only get here with a register!");
1148 const TargetRegisterInfo *RI = STI->getRegisterInfo();
1149 Register Reg = MO.getReg();
1150 MCRegister RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
1151 if (!RI->regsOverlap(RegToPrint, Reg))
1152 return true;
1153 O << AArch64InstPrinter::getRegisterName(RegToPrint, AltName);
1154 return false;
1155}
1156
1157bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
1158 const char *ExtraCode, raw_ostream &O) {
1159 const MachineOperand &MO = MI->getOperand(OpNum);
1160
1161 // First try the generic code, which knows about modifiers like 'c' and 'n'.
1162 if (!AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O))
1163 return false;
1164
1165 // Does this asm operand have a single letter operand modifier?
1166 if (ExtraCode && ExtraCode[0]) {
1167 if (ExtraCode[1] != 0)
1168 return true; // Unknown modifier.
1169
1170 switch (ExtraCode[0]) {
1171 default:
1172 return true; // Unknown modifier.
1173 case 'w': // Print W register
1174 case 'x': // Print X register
1175 if (MO.isReg())
1176 return printAsmMRegister(MO, ExtraCode[0], O);
1177 if (MO.isImm() && MO.getImm() == 0) {
1178 unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
1180 return false;
1181 }
1182 printOperand(MI, OpNum, O);
1183 return false;
1184 case 'b': // Print B register.
1185 case 'h': // Print H register.
1186 case 's': // Print S register.
1187 case 'd': // Print D register.
1188 case 'q': // Print Q register.
1189 case 'z': // Print Z register.
1190 if (MO.isReg()) {
1191 const TargetRegisterClass *RC;
1192 switch (ExtraCode[0]) {
1193 case 'b':
1194 RC = &AArch64::FPR8RegClass;
1195 break;
1196 case 'h':
1197 RC = &AArch64::FPR16RegClass;
1198 break;
1199 case 's':
1200 RC = &AArch64::FPR32RegClass;
1201 break;
1202 case 'd':
1203 RC = &AArch64::FPR64RegClass;
1204 break;
1205 case 'q':
1206 RC = &AArch64::FPR128RegClass;
1207 break;
1208 case 'z':
1209 RC = &AArch64::ZPRRegClass;
1210 break;
1211 default:
1212 return true;
1213 }
1214 return printAsmRegInClass(MO, RC, AArch64::NoRegAltName, O);
1215 }
1216 printOperand(MI, OpNum, O);
1217 return false;
1218 }
1219 }
1220
1221 // According to ARM, we should emit x and v registers unless we have a
1222 // modifier.
1223 if (MO.isReg()) {
1224 Register Reg = MO.getReg();
1225
1226 // If this is a w or x register, print an x register.
1227 if (AArch64::GPR32allRegClass.contains(Reg) ||
1228 AArch64::GPR64allRegClass.contains(Reg))
1229 return printAsmMRegister(MO, 'x', O);
1230
1231 // If this is an x register tuple, print an x register.
1232 if (AArch64::GPR64x8ClassRegClass.contains(Reg))
1233 return printAsmMRegister(MO, 't', O);
1234
1235 unsigned AltName = AArch64::NoRegAltName;
1236 const TargetRegisterClass *RegClass;
1237 if (AArch64::ZPRRegClass.contains(Reg)) {
1238 RegClass = &AArch64::ZPRRegClass;
1239 } else if (AArch64::PPRRegClass.contains(Reg)) {
1240 RegClass = &AArch64::PPRRegClass;
1241 } else if (AArch64::PNRRegClass.contains(Reg)) {
1242 RegClass = &AArch64::PNRRegClass;
1243 } else {
1244 RegClass = &AArch64::FPR128RegClass;
1245 AltName = AArch64::vreg;
1246 }
1247
1248 // If this is a b, h, s, d, or q register, print it as a v register.
1249 return printAsmRegInClass(MO, RegClass, AltName, O);
1250 }
1251
1252 printOperand(MI, OpNum, O);
1253 return false;
1254}
1255
1256bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
1257 unsigned OpNum,
1258 const char *ExtraCode,
1259 raw_ostream &O) {
1260 if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
1261 return true; // Unknown modifier.
1262
1263 const MachineOperand &MO = MI->getOperand(OpNum);
1264 assert(MO.isReg() && "unexpected inline asm memory operand");
1265 O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
1266 return false;
1267}
1268
1269void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
1270 raw_ostream &OS) {
1271 unsigned NOps = MI->getNumOperands();
1272 assert(NOps == 4);
1273 OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
1274 // cast away const; DIetc do not take const operands for some reason.
1275 OS << MI->getDebugVariable()->getName();
1276 OS << " <- ";
1277 // Frame address. Currently handles register +- offset only.
1278 assert(MI->isIndirectDebugValue());
1279 OS << '[';
1280 for (unsigned I = 0, E = llvm::size(MI->debug_operands()); I < E; ++I) {
1281 if (I != 0)
1282 OS << ", ";
1283 printOperand(MI, I, OS);
1284 }
1285 OS << ']';
1286 OS << "+";
1287 printOperand(MI, NOps - 2, OS);
1288}
1289
1290void AArch64AsmPrinter::emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
1291 ArrayRef<unsigned> JumpTableIndices) {
1292 // Fast return if there is nothing to emit to avoid creating empty sections.
1293 if (JumpTableIndices.empty())
1294 return;
1295 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
1296 const auto &F = MF->getFunction();
1298
1299 MCSection *ReadOnlySec = nullptr;
1300 if (TM.Options.EnableStaticDataPartitioning) {
1301 ReadOnlySec =
1302 TLOF.getSectionForJumpTable(F, TM, &JT[JumpTableIndices.front()]);
1303 } else {
1304 ReadOnlySec = TLOF.getSectionForJumpTable(F, TM);
1305 }
1306 OutStreamer->switchSection(ReadOnlySec);
1307
1308 auto AFI = MF->getInfo<AArch64FunctionInfo>();
1309 for (unsigned JTI : JumpTableIndices) {
1310 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
1311
1312 // If this jump table was deleted, ignore it.
1313 if (JTBBs.empty()) continue;
1314
1315 unsigned Size = AFI->getJumpTableEntrySize(JTI);
1316 emitAlignment(Align(Size));
1317 OutStreamer->emitLabel(GetJTISymbol(JTI));
1318
1319 const MCSymbol *BaseSym = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1320 const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);
1321
1322 for (auto *JTBB : JTBBs) {
1323 const MCExpr *Value =
1324 MCSymbolRefExpr::create(JTBB->getSymbol(), OutContext);
1325
1326 // Each entry is:
1327 // .byte/.hword (LBB - Lbase)>>2
1328 // or plain:
1329 // .word LBB - Lbase
1330 Value = MCBinaryExpr::createSub(Value, Base, OutContext);
1331 if (Size != 4)
1333 Value, MCConstantExpr::create(2, OutContext), OutContext);
1334
1335 OutStreamer->emitValue(Value, Size);
1336 }
1337 }
1338}
1339
1340std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
1342AArch64AsmPrinter::getCodeViewJumpTableInfo(int JTI,
1343 const MachineInstr *BranchInstr,
1344 const MCSymbol *BranchLabel) const {
1345 const auto AFI = MF->getInfo<AArch64FunctionInfo>();
1346 const auto Base = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1348 switch (AFI->getJumpTableEntrySize(JTI)) {
1349 case 1:
1350 EntrySize = codeview::JumpTableEntrySize::UInt8ShiftLeft;
1351 break;
1352 case 2:
1353 EntrySize = codeview::JumpTableEntrySize::UInt16ShiftLeft;
1354 break;
1355 case 4:
1356 EntrySize = codeview::JumpTableEntrySize::Int32;
1357 break;
1358 default:
1359 llvm_unreachable("Unexpected jump table entry size");
1360 }
1361 return std::make_tuple(Base, 0, BranchLabel, EntrySize);
1362}
1363
1364void AArch64AsmPrinter::emitFunctionEntryLabel() {
1365 const Triple &TT = TM.getTargetTriple();
1366 if (TT.isOSBinFormatELF() &&
1367 (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
1368 MF->getFunction().getCallingConv() ==
1369 CallingConv::AArch64_SVE_VectorCall ||
1370 MF->getInfo<AArch64FunctionInfo>()->isSVECC())) {
1371 auto *TS =
1372 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
1373 TS->emitDirectiveVariantPCS(CurrentFnSym);
1374 }
1375
1377
1378 if (TT.isWindowsArm64EC() && !MF->getFunction().hasLocalLinkage()) {
1379 // For ARM64EC targets, a function definition's name is mangled differently
1380 // from the normal symbol, emit required aliases here.
1381 auto emitFunctionAlias = [&](MCSymbol *Src, MCSymbol *Dst) {
1382 OutStreamer->emitSymbolAttribute(Src, MCSA_WeakAntiDep);
1383 OutStreamer->emitAssignment(
1384 Src, MCSymbolRefExpr::create(Dst, MMI->getContext()));
1385 };
1386
1387 auto getSymbolFromMetadata = [&](StringRef Name) {
1388 MCSymbol *Sym = nullptr;
1389 if (MDNode *Node = MF->getFunction().getMetadata(Name)) {
1390 StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
1391 Sym = MMI->getContext().getOrCreateSymbol(NameStr);
1392 }
1393 return Sym;
1394 };
1395
1396 SmallVector<MDNode *> UnmangledNames;
1397 MF->getFunction().getMetadata("arm64ec_unmangled_name", UnmangledNames);
1398 for (MDNode *Node : UnmangledNames) {
1399 StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
1400 MCSymbol *UnmangledSym = MMI->getContext().getOrCreateSymbol(NameStr);
1401 if (std::optional<std::string> MangledName =
1402 getArm64ECMangledFunctionName(UnmangledSym->getName())) {
1403 MCSymbol *ECMangledSym =
1404 MMI->getContext().getOrCreateSymbol(*MangledName);
1405 emitFunctionAlias(UnmangledSym, ECMangledSym);
1406 }
1407 }
1408 if (MCSymbol *ECMangledSym =
1409 getSymbolFromMetadata("arm64ec_ecmangled_name"))
1410 emitFunctionAlias(ECMangledSym, CurrentFnSym);
1411 }
1412}
1413
1414void AArch64AsmPrinter::emitXXStructor(const DataLayout &DL,
1415 const Constant *CV) {
1416 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(CV))
1417 if (CPA->hasAddressDiscriminator() &&
1418 !CPA->hasSpecialAddressDiscriminator(
1421 "unexpected address discrimination value for ctors/dtors entry, only "
1422 "'ptr inttoptr (i64 1 to ptr)' is allowed");
1423 // If we have signed pointers in xxstructors list, they'll be lowered to @AUTH
1424 // MCExpr's via AArch64AsmPrinter::lowerConstantPtrAuth. It does not look at
1425 // actual address discrimination value and only checks
1426 // hasAddressDiscriminator(), so it's OK to leave special address
1427 // discrimination value here.
1429}
1430
1431void AArch64AsmPrinter::emitGlobalAlias(const Module &M,
1432 const GlobalAlias &GA) {
1433 if (auto F = dyn_cast_or_null<Function>(GA.getAliasee())) {
1434 // Global aliases must point to a definition, but unmangled patchable
1435 // symbols are special and need to point to an undefined symbol with "EXP+"
1436 // prefix. Such undefined symbol is resolved by the linker by creating
1437 // x86 thunk that jumps back to the actual EC target.
1438 if (MDNode *Node = F->getMetadata("arm64ec_exp_name")) {
1439 StringRef ExpStr = cast<MDString>(Node->getOperand(0))->getString();
1440 MCSymbol *ExpSym = MMI->getContext().getOrCreateSymbol(ExpStr);
1441 MCSymbol *Sym = MMI->getContext().getOrCreateSymbol(GA.getName());
1442
1443 OutStreamer->beginCOFFSymbolDef(ExpSym);
1444 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1445 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1447 OutStreamer->endCOFFSymbolDef();
1448
1449 OutStreamer->beginCOFFSymbolDef(Sym);
1450 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1451 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1453 OutStreamer->endCOFFSymbolDef();
1454 OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
1455 OutStreamer->emitAssignment(
1456 Sym, MCSymbolRefExpr::create(ExpSym, MMI->getContext()));
1457 return;
1458 }
1459 }
1461}
1462
1463/// Small jump tables contain an unsigned byte or half, representing the offset
1464/// from the lowest-addressed possible destination to the desired basic
1465/// block. Since all instructions are 4-byte aligned, this is further compressed
1466/// by counting in instructions rather than bytes (i.e. divided by 4). So, to
1467/// materialize the correct destination we need:
1468///
1469/// adr xDest, .LBB0_0
1470/// ldrb wScratch, [xTable, xEntry] (with "lsl #1" for ldrh).
1471/// add xDest, xDest, xScratch (with "lsl #2" for smaller entries)
1472void AArch64AsmPrinter::LowerJumpTableDest(llvm::MCStreamer &OutStreamer,
1473 const llvm::MachineInstr &MI) {
1474 Register DestReg = MI.getOperand(0).getReg();
1475 Register ScratchReg = MI.getOperand(1).getReg();
1476 Register ScratchRegW =
1477 STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
1478 Register TableReg = MI.getOperand(2).getReg();
1479 Register EntryReg = MI.getOperand(3).getReg();
1480 int JTIdx = MI.getOperand(4).getIndex();
1481 int Size = AArch64FI->getJumpTableEntrySize(JTIdx);
1482
1483 // This has to be first because the compression pass based its reachability
1484 // calculations on the start of the JumpTableDest instruction.
1485 auto Label =
1486 MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);
1487
1488 // If we don't already have a symbol to use as the base, use the ADR
1489 // instruction itself.
1490 if (!Label) {
1491 Label = MF->getContext().createTempSymbol();
1492 AArch64FI->setJumpTableEntryInfo(JTIdx, Size, Label);
1493 OutStreamer.emitLabel(Label);
1494 }
1495
1496 auto LabelExpr = MCSymbolRefExpr::create(Label, MF->getContext());
1497 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
1498 .addReg(DestReg)
1499 .addExpr(LabelExpr));
1500
1501 // Load the number of instruction-steps to offset from the label.
1502 unsigned LdrOpcode;
1503 switch (Size) {
1504 case 1: LdrOpcode = AArch64::LDRBBroX; break;
1505 case 2: LdrOpcode = AArch64::LDRHHroX; break;
1506 case 4: LdrOpcode = AArch64::LDRSWroX; break;
1507 default:
1508 llvm_unreachable("Unknown jump table size");
1509 }
1510
1511 EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
1512 .addReg(Size == 4 ? ScratchReg : ScratchRegW)
1513 .addReg(TableReg)
1514 .addReg(EntryReg)
1515 .addImm(0)
1516 .addImm(Size == 1 ? 0 : 1));
1517
1518 // Add to the already materialized base label address, multiplying by 4 if
1519 // compressed.
1520 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1521 .addReg(DestReg)
1522 .addReg(DestReg)
1523 .addReg(ScratchReg)
1524 .addImm(Size == 4 ? 0 : 2));
1525}
1526
/// Lower a hardened jump-table dispatch pseudo. The index arrives in x16 and
/// x17 is used as scratch; the emitted sequence bounds-checks the index before
/// the indirect branch so a corrupted index cannot redirect control flow
/// outside the table. NOTE(review): the exact instruction order below is the
/// security contract — do not reorder when editing.
void AArch64AsmPrinter::LowerHardenedBRJumpTable(const MachineInstr &MI) {
  const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
  assert(MJTI && "Can't lower jump-table dispatch without JTI");

  const std::vector<MachineJumpTableEntry> &JTs = MJTI->getJumpTables();
  assert(!JTs.empty() && "Invalid JT index for jump-table dispatch");

  // Emit:
  //     mov x17, #<size of table>     ; depending on table size, with MOVKs
  //     cmp x16, x17                  ; or #imm if table size fits in 12-bit
  //     csel x16, x16, xzr, ls        ; check for index overflow
  //
  //     adrp x17, Ltable@PAGE         ; materialize table address
  //     add x17, Ltable@PAGEOFF
  //     ldrsw x16, [x17, x16, lsl #2] ; load table entry
  //
  //   Lanchor:
  //     adr x17, Lanchor              ; compute target address
  //     add x16, x17, x16
  //     br x16                        ; branch to target

  MachineOperand JTOp = MI.getOperand(0);

  unsigned JTI = JTOp.getIndex();
  assert(!AArch64FI->getJumpTableEntryPCRelSymbol(JTI) &&
         "unsupported compressed jump table");

  const uint64_t NumTableEntries = JTs[JTI].MBBs.size();

  // cmp only supports a 12-bit immediate. If we need more, materialize the
  // immediate, using x17 as a scratch register.
  uint64_t MaxTableEntry = NumTableEntries - 1;
  if (isUInt<12>(MaxTableEntry)) {
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXri)
                                     .addReg(AArch64::XZR)
                                     .addReg(AArch64::X16)
                                     .addImm(MaxTableEntry)
                                     .addImm(0));
  } else {
    emitMOVZ(AArch64::X17, static_cast<uint16_t>(MaxTableEntry), 0);
    // It's sad that we have to manually materialize instructions, but we can't
    // trivially reuse the main pseudo expansion logic.
    // A MOVK sequence is easy enough to generate and handles the general case.
    for (int Offset = 16; Offset < 64; Offset += 16) {
      if ((MaxTableEntry >> Offset) == 0)
        break;
      emitMOVK(AArch64::X17, static_cast<uint16_t>(MaxTableEntry >> Offset),
               Offset);
    }
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXrs)
                                     .addReg(AArch64::XZR)
                                     .addReg(AArch64::X16)
                                     .addReg(AArch64::X17)
                                     .addImm(0));
  }

  // This picks entry #0 on failure.
  // We might want to trap instead.
  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::CSELXr)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::XZR)
                                   .addImm(AArch64CC::LS));

  // Prepare the @PAGE/@PAGEOFF low/high operands.
  MachineOperand JTMOHi(JTOp), JTMOLo(JTOp);
  MCOperand JTMCHi, JTMCLo;

  JTMOHi.setTargetFlags(AArch64II::MO_PAGE);
  JTMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);

  MCInstLowering.lowerOperand(JTMOHi, JTMCHi);
  MCInstLowering.lowerOperand(JTMOLo, JTMCLo);

  EmitToStreamer(
      *OutStreamer,
      MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(JTMCHi));

  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXri)
                                   .addReg(AArch64::X17)
                                   .addReg(AArch64::X17)
                                   .addOperand(JTMCLo)
                                   .addImm(0));

  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRSWroX)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::X17)
                                   .addReg(AArch64::X16)
                                   .addImm(0)
                                   .addImm(1));

  MCSymbol *AdrLabel = MF->getContext().createTempSymbol();
  const auto *AdrLabelE = MCSymbolRefExpr::create(AdrLabel, MF->getContext());
  AArch64FI->setJumpTableEntryInfo(JTI, 4, AdrLabel);

  OutStreamer->emitLabel(AdrLabel);
  EmitToStreamer(
      *OutStreamer,
      MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addExpr(AdrLabelE));

  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXrs)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::X17)
                                   .addReg(AArch64::X16)
                                   .addImm(0));

  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
}
1635
1636void AArch64AsmPrinter::LowerMOPS(llvm::MCStreamer &OutStreamer,
1637 const llvm::MachineInstr &MI) {
1638 unsigned Opcode = MI.getOpcode();
1639 assert(STI->hasMOPS());
1640 assert(STI->hasMTE() || Opcode != AArch64::MOPSMemorySetTaggingPseudo);
1641
1642 const auto Ops = [Opcode]() -> std::array<unsigned, 3> {
1643 if (Opcode == AArch64::MOPSMemoryCopyPseudo)
1644 return {AArch64::CPYFP, AArch64::CPYFM, AArch64::CPYFE};
1645 if (Opcode == AArch64::MOPSMemoryMovePseudo)
1646 return {AArch64::CPYP, AArch64::CPYM, AArch64::CPYE};
1647 if (Opcode == AArch64::MOPSMemorySetPseudo)
1648 return {AArch64::SETP, AArch64::SETM, AArch64::SETE};
1649 if (Opcode == AArch64::MOPSMemorySetTaggingPseudo)
1650 return {AArch64::SETGP, AArch64::SETGM, AArch64::MOPSSETGE};
1651 llvm_unreachable("Unhandled memory operation pseudo");
1652 }();
1653 const bool IsSet = Opcode == AArch64::MOPSMemorySetPseudo ||
1654 Opcode == AArch64::MOPSMemorySetTaggingPseudo;
1655
1656 for (auto Op : Ops) {
1657 int i = 0;
1658 auto MCIB = MCInstBuilder(Op);
1659 // Destination registers
1660 MCIB.addReg(MI.getOperand(i++).getReg());
1661 MCIB.addReg(MI.getOperand(i++).getReg());
1662 if (!IsSet)
1663 MCIB.addReg(MI.getOperand(i++).getReg());
1664 // Input registers
1665 MCIB.addReg(MI.getOperand(i++).getReg());
1666 MCIB.addReg(MI.getOperand(i++).getReg());
1667 MCIB.addReg(MI.getOperand(i++).getReg());
1668
1669 EmitToStreamer(OutStreamer, MCIB);
1670 }
1671}
1672
1673void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
1674 const MachineInstr &MI) {
1675 unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();
1676
1677 auto &Ctx = OutStreamer.getContext();
1678 MCSymbol *MILabel = Ctx.createTempSymbol();
1679 OutStreamer.emitLabel(MILabel);
1680
1681 SM.recordStackMap(*MILabel, MI);
1682 assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1683
1684 // Scan ahead to trim the shadow.
1685 const MachineBasicBlock &MBB = *MI.getParent();
1687 ++MII;
1688 while (NumNOPBytes > 0) {
1689 if (MII == MBB.end() || MII->isCall() ||
1690 MII->getOpcode() == AArch64::DBG_VALUE ||
1691 MII->getOpcode() == TargetOpcode::PATCHPOINT ||
1692 MII->getOpcode() == TargetOpcode::STACKMAP)
1693 break;
1694 ++MII;
1695 NumNOPBytes -= 4;
1696 }
1697
1698 // Emit nops.
1699 for (unsigned i = 0; i < NumNOPBytes; i += 4)
1700 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1701}
1702
1703// Lower a patchpoint of the form:
1704// [<def>], <id>, <numBytes>, <target>, <numArgs>
1705void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1706 const MachineInstr &MI) {
1707 auto &Ctx = OutStreamer.getContext();
1708 MCSymbol *MILabel = Ctx.createTempSymbol();
1709 OutStreamer.emitLabel(MILabel);
1710 SM.recordPatchPoint(*MILabel, MI);
1711
1712 PatchPointOpers Opers(&MI);
1713
1714 int64_t CallTarget = Opers.getCallTarget().getImm();
1715 unsigned EncodedBytes = 0;
1716 if (CallTarget) {
1717 assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
1718 "High 16 bits of call target should be zero.");
1719 Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
1720 EncodedBytes = 16;
1721 // Materialize the jump address:
1722 emitMOVZ(ScratchReg, (CallTarget >> 32) & 0xFFFF, 32);
1723 emitMOVK(ScratchReg, (CallTarget >> 16) & 0xFFFF, 16);
1724 emitMOVK(ScratchReg, CallTarget & 0xFFFF, 0);
1725 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
1726 }
1727 // Emit padding.
1728 unsigned NumBytes = Opers.getNumPatchBytes();
1729 assert(NumBytes >= EncodedBytes &&
1730 "Patchpoint can't request size less than the length of a call.");
1731 assert((NumBytes - EncodedBytes) % 4 == 0 &&
1732 "Invalid number of NOP bytes requested!");
1733 for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
1734 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1735}
1736
1737void AArch64AsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1738 const MachineInstr &MI) {
1739 StatepointOpers SOpers(&MI);
1740 if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
1741 assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1742 for (unsigned i = 0; i < PatchBytes; i += 4)
1743 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1744 } else {
1745 // Lower call target and choose correct opcode
1746 const MachineOperand &CallTarget = SOpers.getCallTarget();
1747 MCOperand CallTargetMCOp;
1748 unsigned CallOpcode;
1749 switch (CallTarget.getType()) {
1752 MCInstLowering.lowerOperand(CallTarget, CallTargetMCOp);
1753 CallOpcode = AArch64::BL;
1754 break;
1756 CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
1757 CallOpcode = AArch64::BL;
1758 break;
1760 CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
1761 CallOpcode = AArch64::BLR;
1762 break;
1763 default:
1764 llvm_unreachable("Unsupported operand type in statepoint call target");
1765 break;
1766 }
1767
1768 EmitToStreamer(OutStreamer,
1769 MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
1770 }
1771
1772 auto &Ctx = OutStreamer.getContext();
1773 MCSymbol *MILabel = Ctx.createTempSymbol();
1774 OutStreamer.emitLabel(MILabel);
1775 SM.recordStatepoint(*MILabel, MI);
1776}
1777
1778void AArch64AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI) {
1779 // FAULTING_LOAD_OP <def>, <faltinf type>, <MBB handler>,
1780 // <opcode>, <operands>
1781
1782 Register DefRegister = FaultingMI.getOperand(0).getReg();
1784 static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
1785 MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
1786 unsigned Opcode = FaultingMI.getOperand(3).getImm();
1787 unsigned OperandsBeginIdx = 4;
1788
1789 auto &Ctx = OutStreamer->getContext();
1790 MCSymbol *FaultingLabel = Ctx.createTempSymbol();
1791 OutStreamer->emitLabel(FaultingLabel);
1792
1793 assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
1794 FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);
1795
1796 MCInst MI;
1797 MI.setOpcode(Opcode);
1798
1799 if (DefRegister != (Register)0)
1800 MI.addOperand(MCOperand::createReg(DefRegister));
1801
1802 for (const MachineOperand &MO :
1803 llvm::drop_begin(FaultingMI.operands(), OperandsBeginIdx)) {
1804 MCOperand Dest;
1805 lowerOperand(MO, Dest);
1806 MI.addOperand(Dest);
1807 }
1808
1809 OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
1810 EmitToStreamer(MI);
1811}
1812
1813void AArch64AsmPrinter::emitMovXReg(Register Dest, Register Src) {
1814 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ORRXrs)
1815 .addReg(Dest)
1816 .addReg(AArch64::XZR)
1817 .addReg(Src)
1818 .addImm(0));
1819}
1820
1821void AArch64AsmPrinter::emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift) {
1822 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1823 EmitToStreamer(*OutStreamer,
1824 MCInstBuilder(Is64Bit ? AArch64::MOVZXi : AArch64::MOVZWi)
1825 .addReg(Dest)
1826 .addImm(Imm)
1827 .addImm(Shift));
1828}
1829
1830void AArch64AsmPrinter::emitMOVK(Register Dest, uint64_t Imm, unsigned Shift) {
1831 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1832 EmitToStreamer(*OutStreamer,
1833 MCInstBuilder(Is64Bit ? AArch64::MOVKXi : AArch64::MOVKWi)
1834 .addReg(Dest)
1835 .addReg(Dest)
1836 .addImm(Imm)
1837 .addImm(Shift));
1838}
1839
// Expand an FMOV*0 pseudo: zero the FP/SIMD destination register.
// If zero-cycle FP zeroing is usable (workaround disabled, NEON available),
// widen the destination to the supported zeroing width (D or Q) and emit a
// vector MOVI of zero; otherwise fall back to an FMOV from WZR/XZR
// (emitFMov0AsFMov).
1840 void AArch64AsmPrinter::emitFMov0(const MachineInstr &MI) {
1841 Register DestReg = MI.getOperand(0).getReg();
1842 if (!STI->hasZeroCycleZeroingFPWorkaround() && STI->isNeonAvailable()) {
1843 if (STI->hasZeroCycleZeroingFPR64()) {
1844 // Convert H/S register to corresponding D register
1845 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
1846 if (AArch64::FPR16RegClass.contains(DestReg))
1847 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
1848 &AArch64::FPR64RegClass);
1849 else if (AArch64::FPR32RegClass.contains(DestReg))
1850 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
1851 &AArch64::FPR64RegClass);
1852 else
1853 assert(AArch64::FPR64RegClass.contains(DestReg));
1854
1855 MCInst MOVI;
1856 MOVI.setOpcode(AArch64::MOVID);
1857 MOVI.addOperand(MCOperand::createReg(DestReg));
// NOTE(review): line 1858 is not visible in this excerpt -- presumably the
// zero-immediate operand (MCOperand::createImm(0)); confirm against the
// full source.
1859 EmitToStreamer(*OutStreamer, MOVI);
1860 } else if (STI->hasZeroCycleZeroingFPR128()) {
1861 // Convert H/S/D register to corresponding Q register
1862 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
1863 if (AArch64::FPR16RegClass.contains(DestReg)) {
1864 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
1865 &AArch64::FPR128RegClass);
1866 } else if (AArch64::FPR32RegClass.contains(DestReg)) {
1867 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
1868 &AArch64::FPR128RegClass);
1869 } else {
1870 assert(AArch64::FPR64RegClass.contains(DestReg));
1871 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::dsub,
1872 &AArch64::FPR128RegClass);
1873 }
1874
1875 MCInst MOVI;
1876 MOVI.setOpcode(AArch64::MOVIv2d_ns);
1877 MOVI.addOperand(MCOperand::createReg(DestReg));
// NOTE(review): line 1878 is not visible in this excerpt -- presumably the
// zero-immediate operand; confirm against the full source.
1879 EmitToStreamer(*OutStreamer, MOVI);
1880 } else {
// No zero-cycle vector zeroing form is available: use the FMOV fallback.
1881 emitFMov0AsFMov(MI, DestReg);
1882 }
1883 } else {
// NEON unavailable or workaround enabled: use the FMOV fallback.
1884 emitFMov0AsFMov(MI, DestReg);
1885 }
1886 }
1887
1888void AArch64AsmPrinter::emitFMov0AsFMov(const MachineInstr &MI,
1889 Register DestReg) {
1890 MCInst FMov;
1891 switch (MI.getOpcode()) {
1892 default:
1893 llvm_unreachable("Unexpected opcode");
1894 case AArch64::FMOVH0:
1895 FMov.setOpcode(STI->hasFullFP16() ? AArch64::FMOVWHr : AArch64::FMOVWSr);
1896 if (!STI->hasFullFP16())
1897 DestReg = (AArch64::S0 + (DestReg - AArch64::H0));
1898 FMov.addOperand(MCOperand::createReg(DestReg));
1899 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1900 break;
1901 case AArch64::FMOVS0:
1902 FMov.setOpcode(AArch64::FMOVWSr);
1903 FMov.addOperand(MCOperand::createReg(DestReg));
1904 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1905 break;
1906 case AArch64::FMOVD0:
1907 FMov.setOpcode(AArch64::FMOVXDr);
1908 FMov.addOperand(MCOperand::createReg(DestReg));
1909 FMov.addOperand(MCOperand::createReg(AArch64::XZR));
1910 break;
1911 }
1912 EmitToStreamer(*OutStreamer, FMov);
1913}
1914
1915Register AArch64AsmPrinter::emitPtrauthDiscriminator(uint16_t Disc,
1916 Register AddrDisc,
1917 Register ScratchReg,
1918 bool MayUseAddrAsScratch) {
1919 assert(ScratchReg == AArch64::X16 || ScratchReg == AArch64::X17 ||
1920 !STI->isX16X17Safer());
1921 // So far we've used NoRegister in pseudos. Now we need real encodings.
1922 if (AddrDisc == AArch64::NoRegister)
1923 AddrDisc = AArch64::XZR;
1924
1925 // If there is no constant discriminator, there's no blend involved:
1926 // just use the address discriminator register as-is (XZR or not).
1927 if (!Disc)
1928 return AddrDisc;
1929
1930 // If there's only a constant discriminator, MOV it into the scratch register.
1931 if (AddrDisc == AArch64::XZR) {
1932 emitMOVZ(ScratchReg, Disc, 0);
1933 return ScratchReg;
1934 }
1935
1936 // If there are both, emit a blend into the scratch register.
1937
1938 // Check if we can save one MOV instruction.
1939 assert(MayUseAddrAsScratch || ScratchReg != AddrDisc);
1940 bool AddrDiscIsSafe = AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17 ||
1941 !STI->isX16X17Safer();
1942 if (MayUseAddrAsScratch && AddrDiscIsSafe)
1943 ScratchReg = AddrDisc;
1944 else
1945 emitMovXReg(ScratchReg, AddrDisc);
1946
1947 emitMOVK(ScratchReg, Disc, 48);
1948 return ScratchReg;
1949}
1950
1951 /// Emit a code sequence to check an authenticated pointer value.
1952 ///
1953 /// This function emits a sequence of instructions that checks if TestedReg was
1954 /// authenticated successfully. On success, execution continues at the next
1955 /// instruction after the sequence.
1956 ///
1957 /// The action performed on failure depends on the OnFailure argument:
1958 /// * if OnFailure is not nullptr, control is transferred to that label after
1959 /// clearing the PAC field
1960 /// * otherwise, BRK instruction is emitted to generate an error
1961 void AArch64AsmPrinter::emitPtrauthCheckAuthenticatedValue(
1962 Register TestedReg, Register ScratchReg, AArch64PACKey::ID Key,
1963 AArch64PAuth::AuthCheckMethod Method, const MCSymbol *OnFailure) {
1964 // Insert a sequence to check if authentication of TestedReg succeeded,
1965 // such as:
1966 //
1967 // - checked and clearing:
1968 // ; x16 is TestedReg, x17 is ScratchReg
1969 // mov x17, x16
1970 // xpaci x17
1971 // cmp x16, x17
1972 // b.eq Lsuccess
1973 // mov x16, x17
1974 // b Lend
1975 // Lsuccess:
1976 // ; skipped if authentication failed
1977 // Lend:
1978 // ...
1979 //
1980 // - checked and trapping:
1981 // mov x17, x16
1982 // xpaci x17
1983 // cmp x16, x17
1984 // b.eq Lsuccess
1985 // brk #<0xc470 + aut key>
1986 // Lsuccess:
1987 // ...
1988 //
1989 // See the documentation on AuthCheckMethod enumeration constants for
1990 // the specific code sequences that can be used to perform the check.
1992
// No check requested: emit nothing.
1993 if (Method == AuthCheckMethod::None)
1994 return;
// DummyLoad: a load through the authenticated pointer, which faults if
// authentication left the pointer invalid; there is no failure label.
1995 if (Method == AuthCheckMethod::DummyLoad) {
1996 EmitToStreamer(MCInstBuilder(AArch64::LDRWui)
1997 .addReg(getWRegFromXReg(ScratchReg))
1998 .addReg(TestedReg)
1999 .addImm(0));
2000 assert(!OnFailure && "DummyLoad always traps on error");
2001 return;
2002 }
2003
// Label reached only when the check succeeds.
2004 MCSymbol *SuccessSym = createTempSymbol("auth_success_");
2005 if (Method == AuthCheckMethod::XPAC || Method == AuthCheckMethod::XPACHint) {
2006 // mov Xscratch, Xtested
2007 emitMovXReg(ScratchReg, TestedReg);
2008
2009 if (Method == AuthCheckMethod::XPAC) {
2010 // xpac(i|d) Xscratch
2011 unsigned XPACOpc = getXPACOpcodeForKey(Key);
2012 EmitToStreamer(
2013 MCInstBuilder(XPACOpc).addReg(ScratchReg).addReg(ScratchReg));
2014 } else {
2015 // xpaclri
2016
2017 // Note that this method applies XPAC to TestedReg instead of ScratchReg.
2018 assert(TestedReg == AArch64::LR &&
2019 "XPACHint mode is only compatible with checking the LR register");
// NOTE(review): line 2020 (the head of a second assert, whose message
// follows below) is not visible in this excerpt -- confirm against the
// full source.
2021 "XPACHint mode is only compatible with I-keys");
2022 EmitToStreamer(MCInstBuilder(AArch64::XPACLRI));
2023 }
2024
2025 // cmp Xtested, Xscratch
2026 EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
2027 .addReg(AArch64::XZR)
2028 .addReg(TestedReg)
2029 .addReg(ScratchReg)
2030 .addImm(0));
2031
2032 // b.eq Lsuccess
2033 EmitToStreamer(
2034 MCInstBuilder(AArch64::Bcc)
2035 .addImm(AArch64CC::EQ)
2036 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
2037 } else if (Method == AuthCheckMethod::HighBitsNoTBI) {
2038 // eor Xscratch, Xtested, Xtested, lsl #1
2039 EmitToStreamer(MCInstBuilder(AArch64::EORXrs)
2040 .addReg(ScratchReg)
2041 .addReg(TestedReg)
2042 .addReg(TestedReg)
2043 .addImm(1));
2044 // tbz Xscratch, #62, Lsuccess
2045 EmitToStreamer(
2046 MCInstBuilder(AArch64::TBZX)
2047 .addReg(ScratchReg)
2048 .addImm(62)
2049 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
2050 } else {
2051 llvm_unreachable("Unsupported check method");
2052 }
2053
2054 if (!OnFailure) {
2055 // Trapping sequences do a 'brk'.
2056 // brk #<0xc470 + aut key>
2057 EmitToStreamer(MCInstBuilder(AArch64::BRK).addImm(0xc470 | Key));
2058 } else {
2059 // Non-trapping checked sequences return the stripped result in TestedReg,
2060 // skipping over success-only code (such as re-signing the pointer) by
2061 // jumping to OnFailure label.
2062 // Note that this can introduce an authentication oracle (such as based on
2063 // the high bits of the re-signed value).
2064
2065 // FIXME: The XPAC method can be optimized by applying XPAC to TestedReg
2066 // instead of ScratchReg, thus eliminating one `mov` instruction.
2067 // Both XPAC and XPACHint can be further optimized by not using a
2068 // conditional branch jumping over an unconditional one.
2069
2070 switch (Method) {
2071 case AuthCheckMethod::XPACHint:
2072 // LR is already XPAC-ed at this point.
2073 break;
2074 case AuthCheckMethod::XPAC:
2075 // mov Xtested, Xscratch
2076 emitMovXReg(TestedReg, ScratchReg);
2077 break;
2078 default:
2079 // If Xtested was not XPAC-ed so far, emit XPAC here.
2080 // xpac(i|d) Xtested
2081 unsigned XPACOpc = getXPACOpcodeForKey(Key);
2082 EmitToStreamer(
2083 MCInstBuilder(XPACOpc).addReg(TestedReg).addReg(TestedReg));
2084 }
2085
2086 // b Lend
2087 const auto *OnFailureExpr = MCSymbolRefExpr::create(OnFailure, OutContext);
2088 EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(OnFailureExpr));
2089 }
2090
2091 // If the auth check succeeds, we can continue.
2092 // Lsuccess:
2093 OutStreamer->emitLabel(SuccessSym);
2094 }
2095
2096 // With Pointer Authentication, it may be needed to explicitly check the
2097 // authenticated value in LR before performing a tail call.
2098 // Otherwise, the callee may re-sign the invalid return address,
2099 // introducing a signing oracle.
2100 void AArch64AsmPrinter::emitPtrauthTailCallHardening(const MachineInstr *TC) {
// Only relevant when this function signs its return address.
2101 if (!AArch64FI->shouldSignReturnAddress(*MF))
2102 return;
2103
2104 auto LRCheckMethod = STI->getAuthenticatedLRCheckMethod(*MF);
2105 if (LRCheckMethod == AArch64PAuth::AuthCheckMethod::None)
2106 return;
2107
// Pick whichever of x16/x17 the tail call does not read as scratch.
2108 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
2109 Register ScratchReg =
2110 TC->readsRegister(AArch64::X16, TRI) ? AArch64::X17 : AArch64::X16;
2111 assert(!TC->readsRegister(ScratchReg, TRI) &&
2112 "Neither x16 nor x17 is available as a scratch register");
// NOTE(review): lines 2113-2114, which define `Key` (presumably from the
// function's return-address signing key), are not visible in this excerpt
// -- confirm against the full source.
2115 emitPtrauthCheckAuthenticatedValue(AArch64::LR, ScratchReg, Key,
2116 LRCheckMethod);
2117 }
2118
2119bool AArch64AsmPrinter::emitDeactivationSymbolRelocation(Value *DS) {
2120 if (!DS)
2121 return false;
2122
2123 if (isa<GlobalAlias>(DS)) {
2124 // Just emit the nop directly.
2125 EmitToStreamer(MCInstBuilder(AArch64::HINT).addImm(0));
2126 return true;
2127 }
2128 MCSymbol *Dot = OutContext.createTempSymbol();
2129 OutStreamer->emitLabel(Dot);
2130 const MCExpr *DeactDotExpr = MCSymbolRefExpr::create(Dot, OutContext);
2131
2132 const MCExpr *DSExpr = MCSymbolRefExpr::create(
2133 OutContext.getOrCreateSymbol(DS->getName()), OutContext);
2134 OutStreamer->emitRelocDirective(*DeactDotExpr, "R_AARCH64_PATCHINST", DSExpr,
2135 SMLoc());
2136 return false;
2137}
2138
// Expand an AUT or AUTPAC pseudo: authenticate AUTVal with AUTKey/AUTDisc,
// optionally check the result, and (for AUTPAC) re-sign it with
// PACKey/PACDisc. Scratch may be clobbered; DS is an optional deactivation
// symbol for the AUT instruction.
2139 void AArch64AsmPrinter::emitPtrauthAuthResign(
2140 Register AUTVal, AArch64PACKey::ID AUTKey, uint64_t AUTDisc,
2141 const MachineOperand *AUTAddrDisc, Register Scratch,
2142 std::optional<AArch64PACKey::ID> PACKey, uint64_t PACDisc,
2143 Register PACAddrDisc, Value *DS) {
2144 const bool IsAUTPAC = PACKey.has_value();
2145
2146 // We expand AUT/AUTPAC into a sequence of the form
2147 //
2148 // ; authenticate x16
2149 // ; check pointer in x16
2150 // Lsuccess:
2151 // ; sign x16 (if AUTPAC)
2152 // Lend: ; if not trapping on failure
2153 //
2154 // with the checking sequence chosen depending on whether/how we should check
2155 // the pointer and whether we should trap on failure.
2156
2157 // By default, auth/resign sequences check for auth failures.
2158 bool ShouldCheck = true;
2159 // In the checked sequence, we only trap if explicitly requested.
2160 bool ShouldTrap = MF->getFunction().hasFnAttribute("ptrauth-auth-traps");
2161
2162 // On an FPAC CPU, you get traps whether you want them or not: there's
2163 // no point in emitting checks or traps.
2164 if (STI->hasFPAC())
2165 ShouldCheck = ShouldTrap = false;
2166
2167 // However, command-line flags can override this, for experimentation.
// NOTE(review): the case labels of this switch (the PtrauthCheckMode
// enumerators, e.g. Default/Unchecked/Poison/Trap) are on lines not
// visible in this excerpt -- confirm against the full source.
2168 switch (PtrauthAuthChecks) {
2170 break;
2172 ShouldCheck = ShouldTrap = false;
2173 break;
2175 ShouldCheck = true;
2176 ShouldTrap = false;
2177 break;
2179 ShouldCheck = ShouldTrap = true;
2180 break;
2181 }
2182
2183 // Compute aut discriminator
2184 assert(isUInt<16>(AUTDisc));
2185 Register AUTDiscReg = emitPtrauthDiscriminator(
2186 AUTDisc, AUTAddrDisc->getReg(), Scratch, AUTAddrDisc->isKill());
2187 bool AUTZero = AUTDiscReg == AArch64::XZR;
2188 unsigned AUTOpc = getAUTOpcodeForKey(AUTKey, AUTZero);
2189
// A deactivation symbol may replace the AUT with a NOP entirely.
2190 if (!emitDeactivationSymbolRelocation(DS)) {
2191 // autiza x16 ; if AUTZero
2192 // autia x16, x17 ; if !AUTZero
2193 MCInst AUTInst;
2194 AUTInst.setOpcode(AUTOpc);
2195 AUTInst.addOperand(MCOperand::createReg(AUTVal));
2196 AUTInst.addOperand(MCOperand::createReg(AUTVal));
2197 if (!AUTZero)
2198 AUTInst.addOperand(MCOperand::createReg(AUTDiscReg));
2199 EmitToStreamer(*OutStreamer, AUTInst);
2200 }
2201
2202 // Unchecked or checked-but-non-trapping AUT is just an "AUT": we're done.
2203 if (!IsAUTPAC && (!ShouldCheck || !ShouldTrap))
2204 return;
2205
2206 MCSymbol *EndSym = nullptr;
2207
2208 if (ShouldCheck) {
// Non-trapping AUTPAC jumps past the re-sign on failure.
2209 if (IsAUTPAC && !ShouldTrap)
2210 EndSym = createTempSymbol("resign_end_");
2211
2212 emitPtrauthCheckAuthenticatedValue(
2213 AUTVal, Scratch, AUTKey, AArch64PAuth::AuthCheckMethod::XPAC, EndSym);
2214 }
2215
2216 // We already emitted unchecked and checked-but-non-trapping AUTs.
2217 // That left us with trapping AUTs, and AUTPACs.
2218 // Trapping AUTs don't need PAC: we're done.
2219 if (!IsAUTPAC)
2220 return;
2221
2222 // Compute pac discriminator
2223 assert(isUInt<16>(PACDisc));
2224 Register PACDiscReg =
2225 emitPtrauthDiscriminator(PACDisc, PACAddrDisc, Scratch);
2226 bool PACZero = PACDiscReg == AArch64::XZR;
2227 unsigned PACOpc = getPACOpcodeForKey(*PACKey, PACZero);
2228
2229 // pacizb x16 ; if PACZero
2230 // pacib x16, x17 ; if !PACZero
2231 MCInst PACInst;
2232 PACInst.setOpcode(PACOpc);
2233 PACInst.addOperand(MCOperand::createReg(AUTVal));
2234 PACInst.addOperand(MCOperand::createReg(AUTVal));
2235 if (!PACZero)
2236 PACInst.addOperand(MCOperand::createReg(PACDiscReg));
2237 EmitToStreamer(*OutStreamer, PACInst);
2238
2239 // Lend:
2240 if (EndSym)
2241 OutStreamer->emitLabel(EndSym);
2242 }
2243
2244void AArch64AsmPrinter::emitPtrauthSign(const MachineInstr *MI) {
2245 Register Val = MI->getOperand(1).getReg();
2246 auto Key = (AArch64PACKey::ID)MI->getOperand(2).getImm();
2247 uint64_t Disc = MI->getOperand(3).getImm();
2248 Register AddrDisc = MI->getOperand(4).getReg();
2249 bool AddrDiscKilled = MI->getOperand(4).isKill();
2250
2251 // As long as at least one of Val and AddrDisc is in GPR64noip, a scratch
2252 // register is available.
2253 Register ScratchReg = Val == AArch64::X16 ? AArch64::X17 : AArch64::X16;
2254 assert(ScratchReg != AddrDisc &&
2255 "Neither X16 nor X17 is available as a scratch register");
2256
2257 // Compute pac discriminator
2258 assert(isUInt<16>(Disc));
2259 Register DiscReg = emitPtrauthDiscriminator(
2260 Disc, AddrDisc, ScratchReg, /*MayUseAddrAsScratch=*/AddrDiscKilled);
2261 bool IsZeroDisc = DiscReg == AArch64::XZR;
2262 unsigned Opc = getPACOpcodeForKey(Key, IsZeroDisc);
2263
2264 if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol()))
2265 return;
2266
2267 // paciza x16 ; if IsZeroDisc
2268 // pacia x16, x17 ; if !IsZeroDisc
2269 MCInst PACInst;
2270 PACInst.setOpcode(Opc);
2271 PACInst.addOperand(MCOperand::createReg(Val));
2272 PACInst.addOperand(MCOperand::createReg(Val));
2273 if (!IsZeroDisc)
2274 PACInst.addOperand(MCOperand::createReg(DiscReg));
2275 EmitToStreamer(*OutStreamer, PACInst);
2276}
2277
// Expand a BLRA (authenticated call) or BRA (authenticated branch) pseudo
// into the corresponding BLRA*/BRA* instruction, materializing the blended
// discriminator first.
2278 void AArch64AsmPrinter::emitPtrauthBranch(const MachineInstr *MI) {
2279 bool IsCall = MI->getOpcode() == AArch64::BLRA;
2280 unsigned BrTarget = MI->getOperand(0).getReg();
2281
2282 auto Key = (AArch64PACKey::ID)MI->getOperand(1).getImm();
// NOTE(review): line 2283 (the head of an assert whose message follows
// below, presumably checking Key is an I-key) is not visible in this
// excerpt -- confirm against the full source.
2284 "Invalid auth call key");
2285
2286 uint64_t Disc = MI->getOperand(2).getImm();
2287 assert(isUInt<16>(Disc));
2288
2289 unsigned AddrDisc = MI->getOperand(3).getReg();
2290
2291 // Make sure AddrDisc is solely used to compute the discriminator.
2292 // While hardly meaningful, it is still possible to describe an authentication
2293 // of a pointer against its own value (instead of storage address) with
2294 // intrinsics, so use report_fatal_error instead of assert.
2295 if (BrTarget == AddrDisc)
2296 report_fatal_error("Branch target is signed with its own value");
2297
2298 // If we are printing BLRA pseudo, try to save one MOV by making use of the
2299 // fact that x16 and x17 are described as clobbered by the MI instruction and
2300 // AddrDisc is not used as any other input.
2301 //
2302 // Back in the day, emitPtrauthDiscriminator was restricted to only returning
2303 // either x16 or x17, meaning the returned register is always among the
2304 // implicit-def'ed registers of BLRA pseudo. Now this property can be violated
2305 // if isX16X17Safer predicate is false, thus manually check if AddrDisc is
2306 // among x16 and x17 to prevent clobbering unexpected registers.
2307 //
2308 // Unlike BLRA, BRA pseudo is used to perform computed goto, and thus not
2309 // declared as clobbering x16/x17.
2310 //
2311 // FIXME: Make use of `killed` flags and register masks instead.
2312 bool AddrDiscIsImplicitDef =
2313 IsCall && (AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17);
2314 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17,
2315 AddrDiscIsImplicitDef);
2316 bool IsZeroDisc = DiscReg == AArch64::XZR;
2317
// Select the concrete opcode: call vs branch, IA vs IB key, and the
// zero-discriminator (Z) form when no discriminator register is needed.
2318 unsigned Opc;
2319 if (IsCall) {
2320 if (Key == AArch64PACKey::IA)
2321 Opc = IsZeroDisc ? AArch64::BLRAAZ : AArch64::BLRAA;
2322 else
2323 Opc = IsZeroDisc ? AArch64::BLRABZ : AArch64::BLRAB;
2324 } else {
2325 if (Key == AArch64PACKey::IA)
2326 Opc = IsZeroDisc ? AArch64::BRAAZ : AArch64::BRAA;
2327 else
2328 Opc = IsZeroDisc ? AArch64::BRABZ : AArch64::BRAB;
2329 }
2330
2331 MCInst BRInst;
2332 BRInst.setOpcode(Opc);
2333 BRInst.addOperand(MCOperand::createReg(BrTarget));
2334 if (!IsZeroDisc)
2335 BRInst.addOperand(MCOperand::createReg(DiscReg));
2336 EmitToStreamer(*OutStreamer, BRInst);
2337 }
2338
// Materialize the address described by Expr into Reg: for DSO-local values
// an ADRP+ADD pair, otherwise an ADRP+LDR through the GOT followed by an
// ADD for any constant addend.
2339 static void emitAddress(MCStreamer &Streamer, MCRegister Reg,
2340 const MCExpr *Expr, bool DSOLocal,
2341 const MCSubtargetInfo &STI) {
2342 MCValue Val;
2343 if (!Expr->evaluateAsRelocatable(Val, nullptr))
2344 report_fatal_error("emitAddress could not evaluate");
2345 if (DSOLocal) {
2346 Streamer.emitInstruction(
2347 MCInstBuilder(AArch64::ADRP)
2348 .addReg(Reg)
// NOTE(review): line 2349 (the .addExpr(...) operand carrying the
// page-relative specifier) is not visible in this excerpt -- confirm
// against the full source; the same applies at the other elided
// .addExpr lines below (2356, 2366, 2373).
2350 Streamer.getContext())),
2351 STI);
2352 Streamer.emitInstruction(
2353 MCInstBuilder(AArch64::ADDXri)
2354 .addReg(Reg)
2355 .addReg(Reg)
2357 Streamer.getContext()))
2358 .addImm(0),
2359 STI);
2360 } else {
2361 auto *SymRef =
2362 MCSymbolRefExpr::create(Val.getAddSym(), Streamer.getContext());
2363 Streamer.emitInstruction(
2364 MCInstBuilder(AArch64::ADRP)
2365 .addReg(Reg)
2367 Streamer.getContext())),
2368 STI);
2369 Streamer.emitInstruction(
2370 MCInstBuilder(AArch64::LDRXui)
2371 .addReg(Reg)
2372 .addReg(Reg)
2374 Streamer.getContext())),
2375 STI);
// Fold any constant addend in after the GOT load.
2376 if (Val.getConstant())
2377 Streamer.emitInstruction(MCInstBuilder(AArch64::ADDXri)
2378 .addReg(Reg)
2379 .addReg(Reg)
2380 .addImm(Val.getConstant())
2381 .addImm(0),
2382 STI);
2383 }
2384 }
2385
2387 const MCExpr *Target,
2388 const MCExpr *DSExpr) {
// NOTE(review): the first line of this function's signature (presumably
// `static bool targetSupportsPAuthRelocation(const Triple &TT,`) is not
// visible in this excerpt -- confirm against the full source. The body
// decides whether the target can represent this signed constant with a
// PAuth relocation.
2389 // No released version of glibc supports PAuth relocations.
2390 if (TT.isOSGlibc())
2391 return false;
2392
2393 // We emit PAuth constants as IRELATIVE relocations in cases where the
2394 // constant cannot be represented as a PAuth relocation:
2395 // 1) There is a deactivation symbol.
2396 // 2) The signed value is not a symbol.
2397 return !DSExpr && !isa<MCConstantExpr>(Target);
2398 }
2399
// NOTE(review): this function's signature line (presumably
// `static bool targetSupportsIRelativeRelocation(const Triple &TT) {`) is
// not visible in this excerpt -- confirm against the full source. The body
// reports whether the target can use IFUNC/IRELATIVE relocations.
2401 // IFUNCs are ELF-only.
2402 if (!TT.isOSBinFormatELF())
2403 return false;
2404
2405 // musl doesn't support IFUNCs.
2406 if (TT.isMusl())
2407 return false;
2408
2409 return true;
2410 }
2411
2412 // Emit an ifunc resolver that returns a signed pointer to the specified target,
2413 // and return a FUNCINIT reference to the resolver. In the linked binary, this
2414 // function becomes the target of an IRELATIVE relocation. This resolver is used
2415 // to relocate signed pointers in global variable initializers in special cases
2416 // where the standard R_AARCH64_AUTH_ABS64 relocation would not work.
2417 //
2418 // Example (signed null pointer, not address discriminated):
2419 //
2420 // .8byte .Lpauth_ifunc0
2421 // .pushsection .text.startup,"ax",@progbits
2422 // .Lpauth_ifunc0:
2423 // mov x0, #0
2424 // mov x1, #12345
2425 // b __emupac_pacda
2426 //
2427 // Example (signed null pointer, address discriminated):
2428 //
2429 // .Ltmp:
2430 // .8byte .Lpauth_ifunc0
2431 // .pushsection .text.startup,"ax",@progbits
2432 // .Lpauth_ifunc0:
2433 // mov x0, #0
2434 // adrp x1, .Ltmp
2435 // add x1, x1, :lo12:.Ltmp
2436 // b __emupac_pacda
2437 // .popsection
2438 //
2439 // Example (signed pointer to symbol, not address discriminated):
2440 //
2441 // .Ltmp:
2442 // .8byte .Lpauth_ifunc0
2443 // .pushsection .text.startup,"ax",@progbits
2444 // .Lpauth_ifunc0:
2445 // adrp x0, symbol
2446 // add x0, x0, :lo12:symbol
2447 // mov x1, #12345
2448 // b __emupac_pacda
2449 // .popsection
2450 //
2451 // Example (signed null pointer, not address discriminated, with deactivation
2452 // symbol ds):
2453 //
2454 // .8byte .Lpauth_ifunc0
2455 // .pushsection .text.startup,"ax",@progbits
2456 // .Lpauth_ifunc0:
2457 // mov x0, #0
2458 // mov x1, #12345
2459 // .reloc ., R_AARCH64_PATCHINST, ds
2460 // b __emupac_pacda
2461 // ret
2462 // .popsection
2463 const MCExpr *AArch64AsmPrinter::emitPAuthRelocationAsIRelative(
2464 const MCExpr *Target, uint16_t Disc, AArch64PACKey::ID KeyID,
2465 bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr) {
2466 const Triple &TT = TM.getTargetTriple();
2467
2468 // We only emit an IRELATIVE relocation if the target supports IRELATIVE and
2469 // does not support the kind of PAuth relocation that we are trying to emit.
2470 if (targetSupportsPAuthRelocation(TT, Target, DSExpr) ||
// NOTE(review): line 2471 (the second half of this condition, presumably
// `!targetSupportsIRelativeRelocation(TT))`) is not visible in this
// excerpt -- confirm against the full source.
2472 return nullptr;
2473
2474 // For now, only the DA key is supported.
2475 if (KeyID != AArch64PACKey::DA)
2476 return nullptr;
2477
// Resolver code is emitted outside any function, so build a standalone
// subtarget info for instruction emission.
2478 std::unique_ptr<MCSubtargetInfo> STI(
2479 TM.getTarget().createMCSubtargetInfo(TT, "", ""));
2480 assert(STI && "Unable to create subtarget info");
2481 this->STI = static_cast<const AArch64Subtarget *>(&*STI);
2482
// Remember where the signed pointer itself lives: the address-diversity
// case folds this location into the discriminator.
2483 MCSymbol *Place = OutStreamer->getContext().createTempSymbol();
2484 OutStreamer->emitLabel(Place);
2485 OutStreamer->pushSection();
2486
2487 OutStreamer->switchSection(OutStreamer->getContext().getELFSection(
// NOTE(review): line 2488 (the section name and flags arguments of
// getELFSection) is not visible in this excerpt -- confirm against the
// full source.
2489 0, "", true, PAuthIFuncNextUniqueID++, nullptr));
2490
2491 MCSymbol *IRelativeSym =
2492 OutStreamer->getContext().createLinkerPrivateSymbol("pauth_ifunc");
2493 OutStreamer->emitLabel(IRelativeSym);
// x0 = the raw pointer value to be signed.
2494 if (isa<MCConstantExpr>(Target)) {
2495 OutStreamer->emitInstruction(MCInstBuilder(AArch64::MOVZXi)
2496 .addReg(AArch64::X0)
2497 .addExpr(Target)
2498 .addImm(0),
2499 *STI);
2500 } else {
2501 emitAddress(*OutStreamer, AArch64::X0, Target, IsDSOLocal, *STI);
2502 }
// x1 = the discriminator: storage address plus constant when
// address-diversified, otherwise just the constant.
2503 if (HasAddressDiversity) {
2504 auto *PlacePlusDisc = MCBinaryExpr::createAdd(
2505 MCSymbolRefExpr::create(Place, OutStreamer->getContext()),
2506 MCConstantExpr::create(static_cast<int16_t>(Disc),
2507 OutStreamer->getContext()),
2508 OutStreamer->getContext());
2509 emitAddress(*OutStreamer, AArch64::X1, PlacePlusDisc, /*IsDSOLocal=*/true,
2510 *STI);
2511 } else {
2512 emitMOVZ(AArch64::X1, Disc, 0);
2513 }
2514
// Attach the deactivation-symbol relocation to the branch that follows.
2515 if (DSExpr) {
2516 MCSymbol *PrePACInst = OutStreamer->getContext().createTempSymbol();
2517 OutStreamer->emitLabel(PrePACInst);
2518
2519 auto *PrePACInstExpr =
2520 MCSymbolRefExpr::create(PrePACInst, OutStreamer->getContext());
2521 OutStreamer->emitRelocDirective(*PrePACInstExpr, "R_AARCH64_PATCHINST",
2522 DSExpr, SMLoc());
2523 }
2524
2525 // We don't know the subtarget because this is being emitted for a global
2526 // initializer. Because the performance of IFUNC resolvers is unimportant, we
2527 // always call the EmuPAC runtime, which will end up using the PAC instruction
2528 // if the target supports PAC.
2529 MCSymbol *EmuPAC =
2530 OutStreamer->getContext().getOrCreateSymbol("__emupac_pacda");
2531 const MCSymbolRefExpr *EmuPACRef =
2532 MCSymbolRefExpr::create(EmuPAC, OutStreamer->getContext());
2533 OutStreamer->emitInstruction(MCInstBuilder(AArch64::B).addExpr(EmuPACRef),
2534 *STI);
2535
2536 // We need a RET despite the above tail call because the deactivation symbol
2537 // may replace the tail call with a NOP.
2538 if (DSExpr)
2539 OutStreamer->emitInstruction(
2540 MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI);
2541 OutStreamer->popSection();
2542
2543 return MCSymbolRefExpr::create(IRelativeSym, AArch64::S_FUNCINIT,
2544 OutStreamer->getContext());
2545 }
2546
// Lower a ptrauth constant (a signed pointer in a global initializer) to an
// MCExpr: either an IRELATIVE-resolver reference (special cases) or an
// @AUTH expression that the ELF/MachO writers turn into a PAuth relocation.
2547 const MCExpr *
2548 AArch64AsmPrinter::lowerConstantPtrAuth(const ConstantPtrAuth &CPA) {
2549 MCContext &Ctx = OutContext;
2550
2551 // Figure out the base symbol and the addend, if any.
2552 APInt Offset(64, 0);
2553 const Value *BaseGV = CPA.getPointer()->stripAndAccumulateConstantOffsets(
2554 getDataLayout(), Offset, /*AllowNonInbounds=*/true);
2555
2556 auto *BaseGVB = dyn_cast<GlobalValue>(BaseGV);
2557
2558 const MCExpr *Sym;
2559 if (BaseGVB) {
2560 // If there is an addend, turn that into the appropriate MCExpr.
2561 Sym = MCSymbolRefExpr::create(getSymbol(BaseGVB), Ctx);
2562 if (Offset.sgt(0))
// NOTE(review): line 2563 (presumably `Sym = MCBinaryExpr::createAdd(`)
// is not visible in this excerpt -- confirm against the full source; the
// same applies at line 2566 (presumably the matching createSub).
2564 Sym, MCConstantExpr::create(Offset.getSExtValue(), Ctx), Ctx);
2565 else if (Offset.slt(0))
2567 Sym, MCConstantExpr::create((-Offset).getSExtValue(), Ctx), Ctx);
2568 } else {
// Not a symbol at all: the signed value is a plain constant.
2569 Sym = MCConstantExpr::create(Offset.getSExtValue(), Ctx);
2570 }
2571
2572 const MCExpr *DSExpr = nullptr;
2573 if (auto *DS = dyn_cast<GlobalValue>(CPA.getDeactivationSymbol())) {
// An alias deactivation symbol means the signing is disabled: emit the
// unsigned value.
2574 if (isa<GlobalAlias>(DS))
2575 return Sym;
2576 DSExpr = MCSymbolRefExpr::create(getSymbol(DS), Ctx);
2577 }
2578
2579 uint64_t KeyID = CPA.getKey()->getZExtValue();
2580 // We later rely on valid KeyID value in AArch64PACKeyIDToString call from
2581 // AArch64AuthMCExpr::printImpl, so fail fast.
2582 if (KeyID > AArch64PACKey::LAST) {
2583 CPA.getContext().emitError("AArch64 PAC Key ID '" + Twine(KeyID) +
2584 "' out of range [0, " +
2585 Twine((unsigned)AArch64PACKey::LAST) + "]");
2586 KeyID = 0;
2587 }
2588
2589 uint64_t Disc = CPA.getDiscriminator()->getZExtValue();
2590 if (!isUInt<16>(Disc)) {
2591 CPA.getContext().emitError("AArch64 PAC Discriminator '" + Twine(Disc) +
2592 "' out of range [0, 0xFFFF]");
2593 Disc = 0;
2594 }
2595
2596 // Check if we need to represent this with an IRELATIVE and emit it if so.
2597 if (auto *IFuncSym = emitPAuthRelocationAsIRelative(
2598 Sym, Disc, AArch64PACKey::ID(KeyID), CPA.hasAddressDiscriminator(),
2599 BaseGVB && BaseGVB->isDSOLocal(), DSExpr))
2600 return IFuncSym;
2601
2602 if (DSExpr)
2603 report_fatal_error("deactivation symbols unsupported in constant "
2604 "expressions on this target");
2605
2606 // Finally build the complete @AUTH expr.
2607 return AArch64AuthMCExpr::create(Sym, Disc, AArch64PACKey::ID(KeyID),
2608 CPA.hasAddressDiscriminator(), Ctx);
2609 }
2610
// Expand a LOADauthptrstatic pseudo: load a pre-signed pointer from its
// $auth_ptr$ stub slot via ADRP+LDR.
2611 void AArch64AsmPrinter::LowerLOADauthptrstatic(const MachineInstr &MI) {
2612 unsigned DstReg = MI.getOperand(0).getReg();
2613 const MachineOperand &GAOp = MI.getOperand(1);
2614 const uint64_t KeyC = MI.getOperand(2).getImm();
2615 assert(KeyC <= AArch64PACKey::LAST &&
2616 "key is out of range [0, AArch64PACKey::LAST]");
2617 const auto Key = (AArch64PACKey::ID)KeyC;
2618 const uint64_t Disc = MI.getOperand(3).getImm();
2619 assert(isUInt<16>(Disc) &&
2620 "constant discriminator is out of range [0, 0xffff]");
2621
2622 // Emit instruction sequence like the following:
2623 // ADRP x16, symbol$auth_ptr$key$disc
2624 // LDR x16, [x16, :lo12:symbol$auth_ptr$key$disc]
2625 //
2626 // Where the $auth_ptr$ symbol is the stub slot containing the signed pointer
2627 // to symbol.
2628 MCSymbol *AuthPtrStubSym;
2629 if (TM.getTargetTriple().isOSBinFormatELF()) {
2630 const auto &TLOF =
2631 static_cast<const AArch64_ELFTargetObjectFile &>(getObjFileLowering());
2632
2633 assert(GAOp.getOffset() == 0 &&
2634 "non-zero offset for $auth_ptr$ stub slots is not supported");
2635 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2636 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2637 } else {
2638 assert(TM.getTargetTriple().isOSBinFormatMachO() &&
2639 "LOADauthptrstatic is implemented only for MachO/ELF");
2640
2641 const auto &TLOF = static_cast<const AArch64_MachoTargetObjectFile &>(
2642 getObjFileLowering());
2643
2644 assert(GAOp.getOffset() == 0 &&
2645 "non-zero offset for $auth_ptr$ stub slots is not supported");
2646 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2647 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2648 }
2649
// Lower page/pageoff references to the stub slot.
2650 MachineOperand StubMOHi =
// NOTE(review): line 2651 (the initializer of StubMOHi, presumably
// MachineOperand::CreateMCSymbol(AuthPtrStubSym, AArch64II::MO_PAGE);)
// is not visible in this excerpt -- confirm against the full source.
2652 MachineOperand StubMOLo = MachineOperand::CreateMCSymbol(
2653 AuthPtrStubSym, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2654 MCOperand StubMCHi, StubMCLo;
2655
2656 MCInstLowering.lowerOperand(StubMOHi, StubMCHi);
2657 MCInstLowering.lowerOperand(StubMOLo, StubMCLo);
2658
2659 EmitToStreamer(
2660 *OutStreamer,
2661 MCInstBuilder(AArch64::ADRP).addReg(DstReg).addOperand(StubMCHi));
2662
2663 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRXui)
2664 .addReg(DstReg)
2665 .addReg(DstReg)
2666 .addOperand(StubMCLo));
2667 }
2668
2669void AArch64AsmPrinter::LowerMOVaddrPAC(const MachineInstr &MI) {
2670 const bool IsGOTLoad = MI.getOpcode() == AArch64::LOADgotPAC;
2671 const bool IsELFSignedGOT = MI.getParent()
2672 ->getParent()
2673 ->getInfo<AArch64FunctionInfo>()
2674 ->hasELFSignedGOT();
2675 MachineOperand GAOp = MI.getOperand(0);
2676 const uint64_t KeyC = MI.getOperand(1).getImm();
2677 assert(KeyC <= AArch64PACKey::LAST &&
2678 "key is out of range [0, AArch64PACKey::LAST]");
2679 const auto Key = (AArch64PACKey::ID)KeyC;
2680 const unsigned AddrDisc = MI.getOperand(2).getReg();
2681 const uint64_t Disc = MI.getOperand(3).getImm();
2682 assert(isUInt<16>(Disc) &&
2683 "constant discriminator is out of range [0, 0xffff]");
2684
2685 const int64_t Offset = GAOp.getOffset();
2686 GAOp.setOffset(0);
2687
2688 // Emit:
2689 // target materialization:
2690 // - via GOT:
2691 // - unsigned GOT:
2692 // adrp x16, :got:target
2693 // ldr x16, [x16, :got_lo12:target]
2694 // add offset to x16 if offset != 0
2695 // - ELF signed GOT:
2696 // adrp x17, :got:target
2697 // add x17, x17, :got_auth_lo12:target
2698 // ldr x16, [x17]
2699 // aut{i|d}a x16, x17
2700 // check+trap sequence (if no FPAC)
2701 // add offset to x16 if offset != 0
2702 //
2703 // - direct:
2704 // adrp x16, target
2705 // add x16, x16, :lo12:target
2706 // add offset to x16 if offset != 0
2707 //
2708 // add offset to x16:
2709 // - abs(offset) fits 24 bits:
2710 // add/sub x16, x16, #<offset>[, #lsl 12] (up to 2 instructions)
2711 // - abs(offset) does not fit 24 bits:
2712 // - offset < 0:
2713 // movn+movk sequence filling x17 register with the offset (up to 4
2714 // instructions)
2715 // add x16, x16, x17
2716 // - offset > 0:
2717 // movz+movk sequence filling x17 register with the offset (up to 4
2718 // instructions)
2719 // add x16, x16, x17
2720 //
2721 // signing:
2722 // - 0 discriminator:
2723 // paciza x16
2724 // - Non-0 discriminator, no address discriminator:
2725 // mov x17, #Disc
2726 // pacia x16, x17
2727 // - address discriminator (with potentially folded immediate discriminator):
2728 // pacia x16, xAddrDisc
2729
2730 MachineOperand GAMOHi(GAOp), GAMOLo(GAOp);
2731 MCOperand GAMCHi, GAMCLo;
2732
2733 GAMOHi.setTargetFlags(AArch64II::MO_PAGE);
2734 GAMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2735 if (IsGOTLoad) {
2736 GAMOHi.addTargetFlag(AArch64II::MO_GOT);
2737 GAMOLo.addTargetFlag(AArch64II::MO_GOT);
2738 }
2739
2740 MCInstLowering.lowerOperand(GAMOHi, GAMCHi);
2741 MCInstLowering.lowerOperand(GAMOLo, GAMCLo);
2742
2743 EmitToStreamer(
2744 MCInstBuilder(AArch64::ADRP)
2745 .addReg(IsGOTLoad && IsELFSignedGOT ? AArch64::X17 : AArch64::X16)
2746 .addOperand(GAMCHi));
2747
2748 if (IsGOTLoad) {
2749 if (IsELFSignedGOT) {
2750 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2751 .addReg(AArch64::X17)
2752 .addReg(AArch64::X17)
2753 .addOperand(GAMCLo)
2754 .addImm(0));
2755
2756 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2757 .addReg(AArch64::X16)
2758 .addReg(AArch64::X17)
2759 .addImm(0));
2760
2761 assert(GAOp.isGlobal());
2762 assert(GAOp.getGlobal()->getValueType() != nullptr);
2763 unsigned AuthOpcode = GAOp.getGlobal()->getValueType()->isFunctionTy()
2764 ? AArch64::AUTIA
2765 : AArch64::AUTDA;
2766
2767 EmitToStreamer(MCInstBuilder(AuthOpcode)
2768 .addReg(AArch64::X16)
2769 .addReg(AArch64::X16)
2770 .addReg(AArch64::X17));
2771
2772 if (!STI->hasFPAC()) {
2773 auto AuthKey = (AuthOpcode == AArch64::AUTIA ? AArch64PACKey::IA
2775
2776 emitPtrauthCheckAuthenticatedValue(AArch64::X16, AArch64::X17, AuthKey,
2777 AArch64PAuth::AuthCheckMethod::XPAC);
2778 }
2779 } else {
2780 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2781 .addReg(AArch64::X16)
2782 .addReg(AArch64::X16)
2783 .addOperand(GAMCLo));
2784 }
2785 } else {
2786 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2787 .addReg(AArch64::X16)
2788 .addReg(AArch64::X16)
2789 .addOperand(GAMCLo)
2790 .addImm(0));
2791 }
2792
2793 if (Offset != 0) {
2794 const uint64_t AbsOffset = (Offset > 0 ? Offset : -((uint64_t)Offset));
2795 const bool IsNeg = Offset < 0;
2796 if (isUInt<24>(AbsOffset)) {
2797 for (int BitPos = 0; BitPos != 24 && (AbsOffset >> BitPos);
2798 BitPos += 12) {
2799 EmitToStreamer(
2800 MCInstBuilder(IsNeg ? AArch64::SUBXri : AArch64::ADDXri)
2801 .addReg(AArch64::X16)
2802 .addReg(AArch64::X16)
2803 .addImm((AbsOffset >> BitPos) & 0xfff)
2804 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, BitPos)));
2805 }
2806 } else {
2807 const uint64_t UOffset = Offset;
2808 EmitToStreamer(MCInstBuilder(IsNeg ? AArch64::MOVNXi : AArch64::MOVZXi)
2809 .addReg(AArch64::X17)
2810 .addImm((IsNeg ? ~UOffset : UOffset) & 0xffff)
2811 .addImm(/*shift=*/0));
2812 auto NeedMovk = [IsNeg, UOffset](int BitPos) -> bool {
2813 assert(BitPos == 16 || BitPos == 32 || BitPos == 48);
2814 uint64_t Shifted = UOffset >> BitPos;
2815 if (!IsNeg)
2816 return Shifted != 0;
2817 for (int I = 0; I != 64 - BitPos; I += 16)
2818 if (((Shifted >> I) & 0xffff) != 0xffff)
2819 return true;
2820 return false;
2821 };
2822 for (int BitPos = 16; BitPos != 64 && NeedMovk(BitPos); BitPos += 16)
2823 emitMOVK(AArch64::X17, (UOffset >> BitPos) & 0xffff, BitPos);
2824
2825 EmitToStreamer(MCInstBuilder(AArch64::ADDXrs)
2826 .addReg(AArch64::X16)
2827 .addReg(AArch64::X16)
2828 .addReg(AArch64::X17)
2829 .addImm(/*shift=*/0));
2830 }
2831 }
2832
2833 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17);
2834
2835 auto MIB = MCInstBuilder(getPACOpcodeForKey(Key, DiscReg == AArch64::XZR))
2836 .addReg(AArch64::X16)
2837 .addReg(AArch64::X16);
2838 if (DiscReg != AArch64::XZR)
2839 MIB.addReg(DiscReg);
2840 EmitToStreamer(MIB);
2841}
2842
// Lower a LOADgotAUTH pseudo: load a signed pointer from the GOT and
// authenticate it, using x17 (which holds the address of the GOT slot) as
// the address discriminator. The key is chosen from the global's type:
// IA for function pointers, DA for data pointers.
void AArch64AsmPrinter::LowerLOADgotAUTH(const MachineInstr &MI) {
  Register DstReg = MI.getOperand(0).getReg();
  // Without FPAC, authenticate into scratch x16 first so the result can be
  // explicitly checked before it is moved into the destination register.
  Register AuthResultReg = STI->hasFPAC() ? DstReg : AArch64::X16;
  const MachineOperand &GAMO = MI.getOperand(1);
  assert(GAMO.getOffset() == 0);

  if (MI.getMF()->getTarget().getCodeModel() == CodeModel::Tiny) {
    // Tiny code model: a single ADR reaches the GOT slot directly.
    MCOperand GAMC;
    MCInstLowering.lowerOperand(GAMO, GAMC);
    EmitToStreamer(
        MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addOperand(GAMC));
    EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                       .addReg(AuthResultReg)
                       .addReg(AArch64::X17)
                       .addImm(0));
  } else {
    // Otherwise materialize the exact slot address in x17 via ADRP+ADD
    // (x17 is needed later as the authentication discriminator), then load
    // the signed pointer from it.
    MachineOperand GAHiOp(GAMO);
    MachineOperand GALoOp(GAMO);
    GAHiOp.addTargetFlag(AArch64II::MO_PAGE);
    GALoOp.addTargetFlag(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);

    MCOperand GAMCHi, GAMCLo;
    MCInstLowering.lowerOperand(GAHiOp, GAMCHi);
    MCInstLowering.lowerOperand(GALoOp, GAMCLo);

    EmitToStreamer(
        MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(GAMCHi));

    EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                       .addReg(AArch64::X17)
                       .addReg(AArch64::X17)
                       .addOperand(GAMCLo)
                       .addImm(0));

    EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                       .addReg(AuthResultReg)
                       .addReg(AArch64::X17)
                       .addImm(0));
  }

  assert(GAMO.isGlobal());
  // Extern-weak globals may resolve to a zero GOT entry; branch around the
  // authentication when the loaded value is zero.
  MCSymbol *UndefWeakSym;
  if (GAMO.getGlobal()->hasExternalWeakLinkage()) {
    UndefWeakSym = createTempSymbol("undef_weak");
    EmitToStreamer(
        MCInstBuilder(AArch64::CBZX)
            .addReg(AuthResultReg)
            .addExpr(MCSymbolRefExpr::create(UndefWeakSym, OutContext)));
  }

  assert(GAMO.getGlobal()->getValueType() != nullptr);
  // IA key for function pointers, DA key for data pointers.
  unsigned AuthOpcode = GAMO.getGlobal()->getValueType()->isFunctionTy()
                            ? AArch64::AUTIA
                            : AArch64::AUTDA;
  EmitToStreamer(MCInstBuilder(AuthOpcode)
                     .addReg(AuthResultReg)
                     .addReg(AuthResultReg)
                     .addReg(AArch64::X17));

  if (GAMO.getGlobal()->hasExternalWeakLinkage())
    OutStreamer->emitLabel(UndefWeakSym);

  if (!STI->hasFPAC()) {
    // Without FPAC, AUT does not fault on failure; explicitly check the
    // authenticated value, then move it into the real destination.
    auto AuthKey =
        (AuthOpcode == AArch64::AUTIA ? AArch64PACKey::IA : AArch64PACKey::DA);

    emitPtrauthCheckAuthenticatedValue(AuthResultReg, AArch64::X17, AuthKey,
                                       AArch64PAuth::AuthCheckMethod::XPAC);

    emitMovXReg(DstReg, AuthResultReg);
  }
}
2915
2916const MCExpr *
2917AArch64AsmPrinter::lowerBlockAddressConstant(const BlockAddress &BA) {
2918 const MCExpr *BAE = AsmPrinter::lowerBlockAddressConstant(BA);
2919 const Function &Fn = *BA.getFunction();
2920
2921 if (std::optional<uint16_t> BADisc =
2923 return AArch64AuthMCExpr::create(BAE, *BADisc, AArch64PACKey::IA,
2924 /*HasAddressDiversity=*/false, OutContext);
2925
2926 return BAE;
2927}
2928
2929void AArch64AsmPrinter::emitCBPseudoExpansion(const MachineInstr *MI) {
2930 bool IsImm = false;
2931 unsigned Width = 0;
2932
2933 switch (MI->getOpcode()) {
2934 default:
2935 llvm_unreachable("This is not a CB pseudo instruction");
2936 case AArch64::CBBAssertExt:
2937 IsImm = false;
2938 Width = 8;
2939 break;
2940 case AArch64::CBHAssertExt:
2941 IsImm = false;
2942 Width = 16;
2943 break;
2944 case AArch64::CBWPrr:
2945 Width = 32;
2946 break;
2947 case AArch64::CBXPrr:
2948 Width = 64;
2949 break;
2950 case AArch64::CBWPri:
2951 IsImm = true;
2952 Width = 32;
2953 break;
2954 case AArch64::CBXPri:
2955 IsImm = true;
2956 Width = 64;
2957 break;
2958 }
2959
2961 static_cast<AArch64CC::CondCode>(MI->getOperand(0).getImm());
2962 bool NeedsRegSwap = false;
2963 bool NeedsImmDec = false;
2964 bool NeedsImmInc = false;
2965
2966#define GET_CB_OPC(IsImm, Width, ImmCond, RegCond) \
2967 (IsImm \
2968 ? (Width == 32 ? AArch64::CB##ImmCond##Wri : AArch64::CB##ImmCond##Xri) \
2969 : (Width == 8 \
2970 ? AArch64::CBB##RegCond##Wrr \
2971 : (Width == 16 ? AArch64::CBH##RegCond##Wrr \
2972 : (Width == 32 ? AArch64::CB##RegCond##Wrr \
2973 : AArch64::CB##RegCond##Xrr))))
2974 unsigned MCOpC;
2975
2976 // Decide if we need to either swap register operands or increment/decrement
2977 // immediate operands
2978 switch (CC) {
2979 default:
2980 llvm_unreachable("Invalid CB condition code");
2981 case AArch64CC::EQ:
2982 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ EQ, /* Reg-Reg */ EQ);
2983 break;
2984 case AArch64CC::NE:
2985 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ NE, /* Reg-Reg */ NE);
2986 break;
2987 case AArch64CC::HS:
2988 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ HI, /* Reg-Reg */ HS);
2989 NeedsImmDec = IsImm;
2990 break;
2991 case AArch64CC::LO:
2992 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LO, /* Reg-Reg */ HI);
2993 NeedsRegSwap = !IsImm;
2994 break;
2995 case AArch64CC::HI:
2996 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ HI, /* Reg-Reg */ HI);
2997 break;
2998 case AArch64CC::LS:
2999 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LO, /* Reg-Reg */ HS);
3000 NeedsRegSwap = !IsImm;
3001 NeedsImmInc = IsImm;
3002 break;
3003 case AArch64CC::GE:
3004 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ GT, /* Reg-Reg */ GE);
3005 NeedsImmDec = IsImm;
3006 break;
3007 case AArch64CC::LT:
3008 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LT, /* Reg-Reg */ GT);
3009 NeedsRegSwap = !IsImm;
3010 break;
3011 case AArch64CC::GT:
3012 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ GT, /* Reg-Reg */ GT);
3013 break;
3014 case AArch64CC::LE:
3015 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LT, /* Reg-Reg */ GE);
3016 NeedsRegSwap = !IsImm;
3017 NeedsImmInc = IsImm;
3018 break;
3019 }
3020#undef GET_CB_OPC
3021
3022 MCInst Inst;
3023 Inst.setOpcode(MCOpC);
3024
3025 MCOperand Lhs, Rhs, Trgt;
3026 lowerOperand(MI->getOperand(1), Lhs);
3027 lowerOperand(MI->getOperand(2), Rhs);
3028 lowerOperand(MI->getOperand(3), Trgt);
3029
3030 // Now swap, increment or decrement
3031 if (NeedsRegSwap) {
3032 assert(Lhs.isReg() && "Expected register operand for CB");
3033 assert(Rhs.isReg() && "Expected register operand for CB");
3034 Inst.addOperand(Rhs);
3035 Inst.addOperand(Lhs);
3036 } else if (NeedsImmDec) {
3037 Rhs.setImm(Rhs.getImm() - 1);
3038 Inst.addOperand(Lhs);
3039 Inst.addOperand(Rhs);
3040 } else if (NeedsImmInc) {
3041 Rhs.setImm(Rhs.getImm() + 1);
3042 Inst.addOperand(Lhs);
3043 Inst.addOperand(Rhs);
3044 } else {
3045 Inst.addOperand(Lhs);
3046 Inst.addOperand(Rhs);
3047 }
3048
3049 assert((!IsImm || (Rhs.getImm() >= 0 && Rhs.getImm() < 64)) &&
3050 "CB immediate operand out-of-bounds");
3051
3052 Inst.addOperand(Trgt);
3053 EmitToStreamer(*OutStreamer, Inst);
3054}
3055
3056// Simple pseudo-instructions have their lowering (with expansion to real
3057// instructions) auto-generated.
3058#include "AArch64GenMCPseudoLowering.inc"
3059
// Forward one MCInst to the streamer with the current subtarget; in debug
// builds also count it so emitInstruction() can assert that an expansion
// never exceeds the byte size claimed for its pseudo.
void AArch64AsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
  S.emitInstruction(Inst, *STI);
#ifndef NDEBUG
  ++InstsEmitted;
#endif
}
3066
3067void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
3068 AArch64_MC::verifyInstructionPredicates(MI->getOpcode(), STI->getFeatureBits());
3069
3070#ifndef NDEBUG
3071 InstsEmitted = 0;
3072 auto CheckMISize = make_scope_exit([&]() {
3073 assert(STI->getInstrInfo()->getInstSizeInBytes(*MI) >= InstsEmitted * 4);
3074 });
3075#endif
3076
3077 // Do any auto-generated pseudo lowerings.
3078 if (MCInst OutInst; lowerPseudoInstExpansion(MI, OutInst)) {
3079 EmitToStreamer(*OutStreamer, OutInst);
3080 return;
3081 }
3082
3083 if (MI->getOpcode() == AArch64::ADRP) {
3084 for (auto &Opd : MI->operands()) {
3085 if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
3086 "swift_async_extendedFramePointerFlags") {
3087 ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
3088 }
3089 }
3090 }
3091
3092 if (AArch64FI->getLOHRelated().count(MI)) {
3093 // Generate a label for LOH related instruction
3094 MCSymbol *LOHLabel = createTempSymbol("loh");
3095 // Associate the instruction with the label
3096 LOHInstToLabel[MI] = LOHLabel;
3097 OutStreamer->emitLabel(LOHLabel);
3098 }
3099
3100 AArch64TargetStreamer *TS =
3101 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
3102 // Do any manual lowerings.
3103 switch (MI->getOpcode()) {
3104 default:
3106 "Unhandled tail call instruction");
3107 break;
3108 case AArch64::HINT: {
3109 // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
3110 // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
3111 // non-empty. If MI is the initial BTI, place the
3112 // __patchable_function_entries label after BTI.
3113 if (CurrentPatchableFunctionEntrySym &&
3114 CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
3115 MI == &MF->front().front()) {
3116 int64_t Imm = MI->getOperand(0).getImm();
3117 if ((Imm & 32) && (Imm & 6)) {
3118 MCInst Inst;
3119 MCInstLowering.Lower(MI, Inst);
3120 EmitToStreamer(*OutStreamer, Inst);
3121 CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
3122 OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
3123 return;
3124 }
3125 }
3126 break;
3127 }
3128 case AArch64::MOVMCSym: {
3129 Register DestReg = MI->getOperand(0).getReg();
3130 const MachineOperand &MO_Sym = MI->getOperand(1);
3131 MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
3132 MCOperand Hi_MCSym, Lo_MCSym;
3133
3134 Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
3135 Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);
3136
3137 MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
3138 MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);
3139
3140 MCInst MovZ;
3141 MovZ.setOpcode(AArch64::MOVZXi);
3142 MovZ.addOperand(MCOperand::createReg(DestReg));
3143 MovZ.addOperand(Hi_MCSym);
3145 EmitToStreamer(*OutStreamer, MovZ);
3146
3147 MCInst MovK;
3148 MovK.setOpcode(AArch64::MOVKXi);
3149 MovK.addOperand(MCOperand::createReg(DestReg));
3150 MovK.addOperand(MCOperand::createReg(DestReg));
3151 MovK.addOperand(Lo_MCSym);
3153 EmitToStreamer(*OutStreamer, MovK);
3154 return;
3155 }
3156 case AArch64::MOVIv2d_ns:
3157 // It is generally beneficial to rewrite "fmov s0, wzr" to "movi d0, #0".
3158 // as movi is more efficient across all cores. Newer cores can eliminate
3159 // fmovs early and there is no difference with movi, but this not true for
3160 // all implementations.
3161 //
3162 // The floating-point version doesn't quite work in rare cases on older
3163 // CPUs, so on those targets we lower this instruction to movi.16b instead.
3164 if (STI->hasZeroCycleZeroingFPWorkaround() &&
3165 MI->getOperand(1).getImm() == 0) {
3166 MCInst TmpInst;
3167 TmpInst.setOpcode(AArch64::MOVIv16b_ns);
3168 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3169 TmpInst.addOperand(MCOperand::createImm(0));
3170 EmitToStreamer(*OutStreamer, TmpInst);
3171 return;
3172 }
3173 break;
3174
3175 case AArch64::DBG_VALUE:
3176 case AArch64::DBG_VALUE_LIST:
3177 if (isVerbose() && OutStreamer->hasRawTextSupport()) {
3178 SmallString<128> TmpStr;
3179 raw_svector_ostream OS(TmpStr);
3180 PrintDebugValueComment(MI, OS);
3181 OutStreamer->emitRawText(StringRef(OS.str()));
3182 }
3183 return;
3184
3185 case AArch64::EMITBKEY: {
3186 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
3187 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
3188 ExceptionHandlingType != ExceptionHandling::ARM)
3189 return;
3190
3191 if (getFunctionCFISectionType(*MF) == CFISection::None)
3192 return;
3193
3194 OutStreamer->emitCFIBKeyFrame();
3195 return;
3196 }
3197
3198 case AArch64::EMITMTETAGGED: {
3199 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
3200 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
3201 ExceptionHandlingType != ExceptionHandling::ARM)
3202 return;
3203
3204 if (getFunctionCFISectionType(*MF) != CFISection::None)
3205 OutStreamer->emitCFIMTETaggedFrame();
3206 return;
3207 }
3208
3209 case AArch64::AUTx16x17:
3210 emitPtrauthAuthResign(
3211 AArch64::X16, (AArch64PACKey::ID)MI->getOperand(0).getImm(),
3212 MI->getOperand(1).getImm(), &MI->getOperand(2), AArch64::X17,
3213 std::nullopt, 0, 0, MI->getDeactivationSymbol());
3214 return;
3215
3216 case AArch64::AUTxMxN:
3217 emitPtrauthAuthResign(MI->getOperand(0).getReg(),
3218 (AArch64PACKey::ID)MI->getOperand(3).getImm(),
3219 MI->getOperand(4).getImm(), &MI->getOperand(5),
3220 MI->getOperand(1).getReg(), std::nullopt, 0, 0,
3221 MI->getDeactivationSymbol());
3222 return;
3223
3224 case AArch64::AUTPAC:
3225 emitPtrauthAuthResign(
3226 AArch64::X16, (AArch64PACKey::ID)MI->getOperand(0).getImm(),
3227 MI->getOperand(1).getImm(), &MI->getOperand(2), AArch64::X17,
3228 (AArch64PACKey::ID)MI->getOperand(3).getImm(),
3229 MI->getOperand(4).getImm(), MI->getOperand(5).getReg(),
3230 MI->getDeactivationSymbol());
3231 return;
3232
3233 case AArch64::PAC:
3234 emitPtrauthSign(MI);
3235 return;
3236
3237 case AArch64::LOADauthptrstatic:
3238 LowerLOADauthptrstatic(*MI);
3239 return;
3240
3241 case AArch64::LOADgotPAC:
3242 case AArch64::MOVaddrPAC:
3243 LowerMOVaddrPAC(*MI);
3244 return;
3245
3246 case AArch64::LOADgotAUTH:
3247 LowerLOADgotAUTH(*MI);
3248 return;
3249
3250 case AArch64::BRA:
3251 case AArch64::BLRA:
3252 emitPtrauthBranch(MI);
3253 return;
3254
3255 // Tail calls use pseudo instructions so they have the proper code-gen
3256 // attributes (isCall, isReturn, etc.). We lower them to the real
3257 // instruction here.
3258 case AArch64::AUTH_TCRETURN:
3259 case AArch64::AUTH_TCRETURN_BTI: {
3260 Register Callee = MI->getOperand(0).getReg();
3261 const uint64_t Key = MI->getOperand(2).getImm();
3263 "Invalid auth key for tail-call return");
3264
3265 const uint64_t Disc = MI->getOperand(3).getImm();
3266 assert(isUInt<16>(Disc) && "Integer discriminator is too wide");
3267
3268 Register AddrDisc = MI->getOperand(4).getReg();
3269
3270 Register ScratchReg = Callee == AArch64::X16 ? AArch64::X17 : AArch64::X16;
3271
3272 emitPtrauthTailCallHardening(MI);
3273
3274 // See the comments in emitPtrauthBranch.
3275 if (Callee == AddrDisc)
3276 report_fatal_error("Call target is signed with its own value");
3277
3278 // After isX16X17Safer predicate was introduced, emitPtrauthDiscriminator is
3279 // no longer restricted to only reusing AddrDisc when it is X16 or X17
3280 // (which are implicit-def'ed by AUTH_TCRETURN pseudos), thus impose this
3281 // restriction manually not to clobber an unexpected register.
3282 bool AddrDiscIsImplicitDef =
3283 AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17;
3284 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, ScratchReg,
3285 AddrDiscIsImplicitDef);
3286
3287 const bool IsZero = DiscReg == AArch64::XZR;
3288 const unsigned Opcodes[2][2] = {{AArch64::BRAA, AArch64::BRAAZ},
3289 {AArch64::BRAB, AArch64::BRABZ}};
3290
3291 MCInst TmpInst;
3292 TmpInst.setOpcode(Opcodes[Key][IsZero]);
3293 TmpInst.addOperand(MCOperand::createReg(Callee));
3294 if (!IsZero)
3295 TmpInst.addOperand(MCOperand::createReg(DiscReg));
3296 EmitToStreamer(*OutStreamer, TmpInst);
3297 return;
3298 }
3299
3300 case AArch64::TCRETURNri:
3301 case AArch64::TCRETURNrix16x17:
3302 case AArch64::TCRETURNrix17:
3303 case AArch64::TCRETURNrinotx16:
3304 case AArch64::TCRETURNriALL: {
3305 emitPtrauthTailCallHardening(MI);
3306
3307 recordIfImportCall(MI);
3308 MCInst TmpInst;
3309 TmpInst.setOpcode(AArch64::BR);
3310 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3311 EmitToStreamer(*OutStreamer, TmpInst);
3312 return;
3313 }
3314 case AArch64::TCRETURNdi: {
3315 emitPtrauthTailCallHardening(MI);
3316
3317 MCOperand Dest;
3318 MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
3319 recordIfImportCall(MI);
3320 MCInst TmpInst;
3321 TmpInst.setOpcode(AArch64::B);
3322 TmpInst.addOperand(Dest);
3323 EmitToStreamer(*OutStreamer, TmpInst);
3324 return;
3325 }
3326 case AArch64::SpeculationBarrierISBDSBEndBB: {
3327 // Print DSB SYS + ISB
3328 MCInst TmpInstDSB;
3329 TmpInstDSB.setOpcode(AArch64::DSB);
3330 TmpInstDSB.addOperand(MCOperand::createImm(0xf));
3331 EmitToStreamer(*OutStreamer, TmpInstDSB);
3332 MCInst TmpInstISB;
3333 TmpInstISB.setOpcode(AArch64::ISB);
3334 TmpInstISB.addOperand(MCOperand::createImm(0xf));
3335 EmitToStreamer(*OutStreamer, TmpInstISB);
3336 return;
3337 }
3338 case AArch64::SpeculationBarrierSBEndBB: {
3339 // Print SB
3340 MCInst TmpInstSB;
3341 TmpInstSB.setOpcode(AArch64::SB);
3342 EmitToStreamer(*OutStreamer, TmpInstSB);
3343 return;
3344 }
3345 case AArch64::TLSDESC_AUTH_CALLSEQ: {
3346 /// lower this to:
3347 /// adrp x0, :tlsdesc_auth:var
3348 /// ldr x16, [x0, #:tlsdesc_auth_lo12:var]
3349 /// add x0, x0, #:tlsdesc_auth_lo12:var
3350 /// blraa x16, x0
3351 /// (TPIDR_EL0 offset now in x0)
3352 const MachineOperand &MO_Sym = MI->getOperand(0);
3353 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3354 MCOperand SymTLSDescLo12, SymTLSDesc;
3355 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3356 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3357 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3358 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3359
3360 MCInst Adrp;
3361 Adrp.setOpcode(AArch64::ADRP);
3362 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3363 Adrp.addOperand(SymTLSDesc);
3364 EmitToStreamer(*OutStreamer, Adrp);
3365
3366 MCInst Ldr;
3367 Ldr.setOpcode(AArch64::LDRXui);
3368 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3369 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3370 Ldr.addOperand(SymTLSDescLo12);
3372 EmitToStreamer(*OutStreamer, Ldr);
3373
3374 MCInst Add;
3375 Add.setOpcode(AArch64::ADDXri);
3376 Add.addOperand(MCOperand::createReg(AArch64::X0));
3377 Add.addOperand(MCOperand::createReg(AArch64::X0));
3378 Add.addOperand(SymTLSDescLo12);
3380 EmitToStreamer(*OutStreamer, Add);
3381
3382 // Authenticated TLSDESC accesses are not relaxed.
3383 // Thus, do not emit .tlsdesccall for AUTH TLSDESC.
3384
3385 MCInst Blraa;
3386 Blraa.setOpcode(AArch64::BLRAA);
3387 Blraa.addOperand(MCOperand::createReg(AArch64::X16));
3388 Blraa.addOperand(MCOperand::createReg(AArch64::X0));
3389 EmitToStreamer(*OutStreamer, Blraa);
3390
3391 return;
3392 }
3393 case AArch64::TLSDESC_CALLSEQ: {
3394 /// lower this to:
3395 /// adrp x0, :tlsdesc:var
3396 /// ldr x1, [x0, #:tlsdesc_lo12:var]
3397 /// add x0, x0, #:tlsdesc_lo12:var
3398 /// .tlsdesccall var
3399 /// blr x1
3400 /// (TPIDR_EL0 offset now in x0)
3401 const MachineOperand &MO_Sym = MI->getOperand(0);
3402 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3403 MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
3404 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3405 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3406 MCInstLowering.lowerOperand(MO_Sym, Sym);
3407 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3408 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3409
3410 MCInst Adrp;
3411 Adrp.setOpcode(AArch64::ADRP);
3412 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3413 Adrp.addOperand(SymTLSDesc);
3414 EmitToStreamer(*OutStreamer, Adrp);
3415
3416 MCInst Ldr;
3417 if (STI->isTargetILP32()) {
3418 Ldr.setOpcode(AArch64::LDRWui);
3419 Ldr.addOperand(MCOperand::createReg(AArch64::W1));
3420 } else {
3421 Ldr.setOpcode(AArch64::LDRXui);
3422 Ldr.addOperand(MCOperand::createReg(AArch64::X1));
3423 }
3424 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3425 Ldr.addOperand(SymTLSDescLo12);
3427 EmitToStreamer(*OutStreamer, Ldr);
3428
3429 MCInst Add;
3430 if (STI->isTargetILP32()) {
3431 Add.setOpcode(AArch64::ADDWri);
3432 Add.addOperand(MCOperand::createReg(AArch64::W0));
3433 Add.addOperand(MCOperand::createReg(AArch64::W0));
3434 } else {
3435 Add.setOpcode(AArch64::ADDXri);
3436 Add.addOperand(MCOperand::createReg(AArch64::X0));
3437 Add.addOperand(MCOperand::createReg(AArch64::X0));
3438 }
3439 Add.addOperand(SymTLSDescLo12);
3441 EmitToStreamer(*OutStreamer, Add);
3442
3443 // Emit a relocation-annotation. This expands to no code, but requests
3444 // the following instruction gets an R_AARCH64_TLSDESC_CALL.
3445 MCInst TLSDescCall;
3446 TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
3447 TLSDescCall.addOperand(Sym);
3448 EmitToStreamer(*OutStreamer, TLSDescCall);
3449#ifndef NDEBUG
3450 --InstsEmitted; // no code emitted
3451#endif
3452
3453 MCInst Blr;
3454 Blr.setOpcode(AArch64::BLR);
3455 Blr.addOperand(MCOperand::createReg(AArch64::X1));
3456 EmitToStreamer(*OutStreamer, Blr);
3457
3458 return;
3459 }
3460
3461 case AArch64::JumpTableDest32:
3462 case AArch64::JumpTableDest16:
3463 case AArch64::JumpTableDest8:
3464 LowerJumpTableDest(*OutStreamer, *MI);
3465 return;
3466
3467 case AArch64::BR_JumpTable:
3468 LowerHardenedBRJumpTable(*MI);
3469 return;
3470
3471 case AArch64::FMOVH0:
3472 case AArch64::FMOVS0:
3473 case AArch64::FMOVD0:
3474 emitFMov0(*MI);
3475 return;
3476
3477 case AArch64::MOPSMemoryCopyPseudo:
3478 case AArch64::MOPSMemoryMovePseudo:
3479 case AArch64::MOPSMemorySetPseudo:
3480 case AArch64::MOPSMemorySetTaggingPseudo:
3481 LowerMOPS(*OutStreamer, *MI);
3482 return;
3483
3484 case TargetOpcode::STACKMAP:
3485 return LowerSTACKMAP(*OutStreamer, SM, *MI);
3486
3487 case TargetOpcode::PATCHPOINT:
3488 return LowerPATCHPOINT(*OutStreamer, SM, *MI);
3489
3490 case TargetOpcode::STATEPOINT:
3491 return LowerSTATEPOINT(*OutStreamer, SM, *MI);
3492
3493 case TargetOpcode::FAULTING_OP:
3494 return LowerFAULTING_OP(*MI);
3495
3496 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
3497 LowerPATCHABLE_FUNCTION_ENTER(*MI);
3498 return;
3499
3500 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
3501 LowerPATCHABLE_FUNCTION_EXIT(*MI);
3502 return;
3503
3504 case TargetOpcode::PATCHABLE_TAIL_CALL:
3505 LowerPATCHABLE_TAIL_CALL(*MI);
3506 return;
3507 case TargetOpcode::PATCHABLE_EVENT_CALL:
3508 return LowerPATCHABLE_EVENT_CALL(*MI, false);
3509 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
3510 return LowerPATCHABLE_EVENT_CALL(*MI, true);
3511
3512 case AArch64::KCFI_CHECK:
3513 LowerKCFI_CHECK(*MI);
3514 return;
3515
3516 case AArch64::HWASAN_CHECK_MEMACCESS:
3517 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
3518 case AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW:
3519 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW:
3520 LowerHWASAN_CHECK_MEMACCESS(*MI);
3521 return;
3522
3523 case AArch64::SEH_StackAlloc:
3524 TS->emitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
3525 return;
3526
3527 case AArch64::SEH_SaveFPLR:
3528 TS->emitARM64WinCFISaveFPLR(MI->getOperand(0).getImm());
3529 return;
3530
3531 case AArch64::SEH_SaveFPLR_X:
3532 assert(MI->getOperand(0).getImm() < 0 &&
3533 "Pre increment SEH opcode must have a negative offset");
3534 TS->emitARM64WinCFISaveFPLRX(-MI->getOperand(0).getImm());
3535 return;
3536
3537 case AArch64::SEH_SaveReg:
3538 TS->emitARM64WinCFISaveReg(MI->getOperand(0).getImm(),
3539 MI->getOperand(1).getImm());
3540 return;
3541
3542 case AArch64::SEH_SaveReg_X:
3543 assert(MI->getOperand(1).getImm() < 0 &&
3544 "Pre increment SEH opcode must have a negative offset");
3545 TS->emitARM64WinCFISaveRegX(MI->getOperand(0).getImm(),
3546 -MI->getOperand(1).getImm());
3547 return;
3548
3549 case AArch64::SEH_SaveRegP:
3550 if (MI->getOperand(1).getImm() == 30 && MI->getOperand(0).getImm() >= 19 &&
3551 MI->getOperand(0).getImm() <= 28) {
3552 assert((MI->getOperand(0).getImm() - 19) % 2 == 0 &&
3553 "Register paired with LR must be odd");
3554 TS->emitARM64WinCFISaveLRPair(MI->getOperand(0).getImm(),
3555 MI->getOperand(2).getImm());
3556 return;
3557 }
3558 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3559 "Non-consecutive registers not allowed for save_regp");
3560 TS->emitARM64WinCFISaveRegP(MI->getOperand(0).getImm(),
3561 MI->getOperand(2).getImm());
3562 return;
3563
3564 case AArch64::SEH_SaveRegP_X:
3565 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3566 "Non-consecutive registers not allowed for save_regp_x");
3567 assert(MI->getOperand(2).getImm() < 0 &&
3568 "Pre increment SEH opcode must have a negative offset");
3569 TS->emitARM64WinCFISaveRegPX(MI->getOperand(0).getImm(),
3570 -MI->getOperand(2).getImm());
3571 return;
3572
3573 case AArch64::SEH_SaveFReg:
3574 TS->emitARM64WinCFISaveFReg(MI->getOperand(0).getImm(),
3575 MI->getOperand(1).getImm());
3576 return;
3577
3578 case AArch64::SEH_SaveFReg_X:
3579 assert(MI->getOperand(1).getImm() < 0 &&
3580 "Pre increment SEH opcode must have a negative offset");
3581 TS->emitARM64WinCFISaveFRegX(MI->getOperand(0).getImm(),
3582 -MI->getOperand(1).getImm());
3583 return;
3584
3585 case AArch64::SEH_SaveFRegP:
3586 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3587 "Non-consecutive registers not allowed for save_regp");
3588 TS->emitARM64WinCFISaveFRegP(MI->getOperand(0).getImm(),
3589 MI->getOperand(2).getImm());
3590 return;
3591
3592 case AArch64::SEH_SaveFRegP_X:
3593 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3594 "Non-consecutive registers not allowed for save_regp_x");
3595 assert(MI->getOperand(2).getImm() < 0 &&
3596 "Pre increment SEH opcode must have a negative offset");
3597 TS->emitARM64WinCFISaveFRegPX(MI->getOperand(0).getImm(),
3598 -MI->getOperand(2).getImm());
3599 return;
3600
3601 case AArch64::SEH_SetFP:
3603 return;
3604
3605 case AArch64::SEH_AddFP:
3606 TS->emitARM64WinCFIAddFP(MI->getOperand(0).getImm());
3607 return;
3608
3609 case AArch64::SEH_Nop:
3610 TS->emitARM64WinCFINop();
3611 return;
3612
3613 case AArch64::SEH_PrologEnd:
3615 return;
3616
3617 case AArch64::SEH_EpilogStart:
3619 return;
3620
3621 case AArch64::SEH_EpilogEnd:
3623 return;
3624
3625 case AArch64::SEH_PACSignLR:
3627 return;
3628
3629 case AArch64::SEH_SaveAnyRegI:
3630 assert(MI->getOperand(1).getImm() <= 1008 &&
3631 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3632 TS->emitARM64WinCFISaveAnyRegI(MI->getOperand(0).getImm(),
3633 MI->getOperand(1).getImm());
3634 return;
3635
3636 case AArch64::SEH_SaveAnyRegIP:
3637 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3638 "Non-consecutive registers not allowed for save_any_reg");
3639 assert(MI->getOperand(2).getImm() <= 1008 &&
3640 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3641 TS->emitARM64WinCFISaveAnyRegIP(MI->getOperand(0).getImm(),
3642 MI->getOperand(2).getImm());
3643 return;
3644
3645 case AArch64::SEH_SaveAnyRegQP:
3646 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3647 "Non-consecutive registers not allowed for save_any_reg");
3648 assert(MI->getOperand(2).getImm() >= 0 &&
3649 "SaveAnyRegQP SEH opcode offset must be non-negative");
3650 assert(MI->getOperand(2).getImm() <= 1008 &&
3651 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3652 TS->emitARM64WinCFISaveAnyRegQP(MI->getOperand(0).getImm(),
3653 MI->getOperand(2).getImm());
3654 return;
3655
3656 case AArch64::SEH_SaveAnyRegQPX:
3657 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3658 "Non-consecutive registers not allowed for save_any_reg");
3659 assert(MI->getOperand(2).getImm() < 0 &&
3660 "SaveAnyRegQPX SEH opcode offset must be negative");
3661 assert(MI->getOperand(2).getImm() >= -1008 &&
3662 "SaveAnyRegQPX SEH opcode offset must fit into 6 bits");
3663 TS->emitARM64WinCFISaveAnyRegQPX(MI->getOperand(0).getImm(),
3664 -MI->getOperand(2).getImm());
3665 return;
3666
3667 case AArch64::SEH_AllocZ:
3668 assert(MI->getOperand(0).getImm() >= 0 &&
3669 "AllocZ SEH opcode offset must be non-negative");
3670 assert(MI->getOperand(0).getImm() <= 255 &&
3671 "AllocZ SEH opcode offset must fit into 8 bits");
3672 TS->emitARM64WinCFIAllocZ(MI->getOperand(0).getImm());
3673 return;
3674
3675 case AArch64::SEH_SaveZReg:
3676 assert(MI->getOperand(1).getImm() >= 0 &&
3677 "SaveZReg SEH opcode offset must be non-negative");
3678 assert(MI->getOperand(1).getImm() <= 255 &&
3679 "SaveZReg SEH opcode offset must fit into 8 bits");
3680 TS->emitARM64WinCFISaveZReg(MI->getOperand(0).getImm(),
3681 MI->getOperand(1).getImm());
3682 return;
3683
3684 case AArch64::SEH_SavePReg:
3685 assert(MI->getOperand(1).getImm() >= 0 &&
3686 "SavePReg SEH opcode offset must be non-negative");
3687 assert(MI->getOperand(1).getImm() <= 255 &&
3688 "SavePReg SEH opcode offset must fit into 8 bits");
3689 TS->emitARM64WinCFISavePReg(MI->getOperand(0).getImm(),
3690 MI->getOperand(1).getImm());
3691 return;
3692
3693 case AArch64::BLR:
3694 case AArch64::BR: {
3695 recordIfImportCall(MI);
3696 MCInst TmpInst;
3697 MCInstLowering.Lower(MI, TmpInst);
3698 EmitToStreamer(*OutStreamer, TmpInst);
3699 return;
3700 }
3701 case AArch64::CBWPri:
3702 case AArch64::CBXPri:
3703 case AArch64::CBBAssertExt:
3704 case AArch64::CBHAssertExt:
3705 case AArch64::CBWPrr:
3706 case AArch64::CBXPrr:
3707 emitCBPseudoExpansion(MI);
3708 return;
3709 }
3710
3711 if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol()))
3712 return;
3713
3714 // Finally, do the automated lowerings for everything else.
3715 MCInst TmpInst;
3716 MCInstLowering.Lower(MI, TmpInst);
3717 EmitToStreamer(*OutStreamer, TmpInst);
3718}
3719
3720void AArch64AsmPrinter::recordIfImportCall(
3721 const llvm::MachineInstr *BranchInst) {
3722 if (!EnableImportCallOptimization)
3723 return;
3724
3725 auto [GV, OpFlags] = BranchInst->getMF()->tryGetCalledGlobal(BranchInst);
3726 if (GV && GV->hasDLLImportStorageClass()) {
3727 auto *CallSiteSymbol = MMI->getContext().createNamedTempSymbol("impcall");
3728 OutStreamer->emitLabel(CallSiteSymbol);
3729
3730 auto *CalledSymbol = MCInstLowering.GetGlobalValueSymbol(GV, OpFlags);
3731 SectionToImportedFunctionCalls[OutStreamer->getCurrentSectionOnly()]
3732 .push_back({CallSiteSymbol, CalledSymbol});
3733 }
3734}
3735
3736void AArch64AsmPrinter::emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
3737 MCSymbol *LazyPointer) {
3738 // _ifunc:
3739 // adrp x16, lazy_pointer@GOTPAGE
3740 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3741 // ldr x16, [x16]
3742 // br x16
3743
3744 {
3745 MCInst Adrp;
3746 Adrp.setOpcode(AArch64::ADRP);
3747 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3748 MCOperand SymPage;
3749 MCInstLowering.lowerOperand(
3752 SymPage);
3753 Adrp.addOperand(SymPage);
3754 EmitToStreamer(Adrp);
3755 }
3756
3757 {
3758 MCInst Ldr;
3759 Ldr.setOpcode(AArch64::LDRXui);
3760 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3761 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3762 MCOperand SymPageOff;
3763 MCInstLowering.lowerOperand(
3766 SymPageOff);
3767 Ldr.addOperand(SymPageOff);
3769 EmitToStreamer(Ldr);
3770 }
3771
3772 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
3773 .addReg(AArch64::X16)
3774 .addReg(AArch64::X16)
3775 .addImm(0));
3776
3777 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3778 : AArch64::BR)
3779 .addReg(AArch64::X16));
3780}
3781
3782void AArch64AsmPrinter::emitMachOIFuncStubHelperBody(Module &M,
3783 const GlobalIFunc &GI,
3784 MCSymbol *LazyPointer) {
3785 // These stub helpers are only ever called once, so here we're optimizing for
3786 // minimum size by using the pre-indexed store variants, which saves a few
3787 // bytes of instructions to bump & restore sp.
3788
3789 // _ifunc.stub_helper:
3790 // stp fp, lr, [sp, #-16]!
3791 // mov fp, sp
3792 // stp x1, x0, [sp, #-16]!
3793 // stp x3, x2, [sp, #-16]!
3794 // stp x5, x4, [sp, #-16]!
3795 // stp x7, x6, [sp, #-16]!
3796 // stp d1, d0, [sp, #-16]!
3797 // stp d3, d2, [sp, #-16]!
3798 // stp d5, d4, [sp, #-16]!
3799 // stp d7, d6, [sp, #-16]!
3800 // bl _resolver
3801 // adrp x16, lazy_pointer@GOTPAGE
3802 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3803 // str x0, [x16]
3804 // mov x16, x0
3805 // ldp d7, d6, [sp], #16
3806 // ldp d5, d4, [sp], #16
3807 // ldp d3, d2, [sp], #16
3808 // ldp d1, d0, [sp], #16
3809 // ldp x7, x6, [sp], #16
3810 // ldp x5, x4, [sp], #16
3811 // ldp x3, x2, [sp], #16
3812 // ldp x1, x0, [sp], #16
3813 // ldp fp, lr, [sp], #16
3814 // br x16
3815
3816 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3817 .addReg(AArch64::SP)
3818 .addReg(AArch64::FP)
3819 .addReg(AArch64::LR)
3820 .addReg(AArch64::SP)
3821 .addImm(-2));
3822
3823 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3824 .addReg(AArch64::FP)
3825 .addReg(AArch64::SP)
3826 .addImm(0)
3827 .addImm(0));
3828
3829 for (int I = 0; I != 4; ++I)
3830 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3831 .addReg(AArch64::SP)
3832 .addReg(AArch64::X1 + 2 * I)
3833 .addReg(AArch64::X0 + 2 * I)
3834 .addReg(AArch64::SP)
3835 .addImm(-2));
3836
3837 for (int I = 0; I != 4; ++I)
3838 EmitToStreamer(MCInstBuilder(AArch64::STPDpre)
3839 .addReg(AArch64::SP)
3840 .addReg(AArch64::D1 + 2 * I)
3841 .addReg(AArch64::D0 + 2 * I)
3842 .addReg(AArch64::SP)
3843 .addImm(-2));
3844
3845 EmitToStreamer(
3846 MCInstBuilder(AArch64::BL)
3848
3849 {
3850 MCInst Adrp;
3851 Adrp.setOpcode(AArch64::ADRP);
3852 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3853 MCOperand SymPage;
3854 MCInstLowering.lowerOperand(
3855 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3857 SymPage);
3858 Adrp.addOperand(SymPage);
3859 EmitToStreamer(Adrp);
3860 }
3861
3862 {
3863 MCInst Ldr;
3864 Ldr.setOpcode(AArch64::LDRXui);
3865 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3866 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3867 MCOperand SymPageOff;
3868 MCInstLowering.lowerOperand(
3869 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3871 SymPageOff);
3872 Ldr.addOperand(SymPageOff);
3874 EmitToStreamer(Ldr);
3875 }
3876
3877 EmitToStreamer(MCInstBuilder(AArch64::STRXui)
3878 .addReg(AArch64::X0)
3879 .addReg(AArch64::X16)
3880 .addImm(0));
3881
3882 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3883 .addReg(AArch64::X16)
3884 .addReg(AArch64::X0)
3885 .addImm(0)
3886 .addImm(0));
3887
3888 for (int I = 3; I != -1; --I)
3889 EmitToStreamer(MCInstBuilder(AArch64::LDPDpost)
3890 .addReg(AArch64::SP)
3891 .addReg(AArch64::D1 + 2 * I)
3892 .addReg(AArch64::D0 + 2 * I)
3893 .addReg(AArch64::SP)
3894 .addImm(2));
3895
3896 for (int I = 3; I != -1; --I)
3897 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3898 .addReg(AArch64::SP)
3899 .addReg(AArch64::X1 + 2 * I)
3900 .addReg(AArch64::X0 + 2 * I)
3901 .addReg(AArch64::SP)
3902 .addImm(2));
3903
3904 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3905 .addReg(AArch64::SP)
3906 .addReg(AArch64::FP)
3907 .addReg(AArch64::LR)
3908 .addReg(AArch64::SP)
3909 .addImm(2));
3910
3911 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3912 : AArch64::BR)
3913 .addReg(AArch64::X16));
3914}
3915
3916const MCExpr *AArch64AsmPrinter::lowerConstant(const Constant *CV,
3917 const Constant *BaseCV,
3918 uint64_t Offset) {
3919 if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
3920 return MCSymbolRefExpr::create(MCInstLowering.GetGlobalValueSymbol(GV, 0),
3921 OutContext);
3922 }
3923
3924 return AsmPrinter::lowerConstant(CV, BaseCV, Offset);
3925}
3926
3927char AArch64AsmPrinter::ID = 0;
3928
3929INITIALIZE_PASS(AArch64AsmPrinter, "aarch64-asm-printer",
3930 "AArch64 Assembly Printer", false, false)
3931
3932// Force static initialization.
3933extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
3934LLVMInitializeAArch64AsmPrinter() {
3940}
static cl::opt< PtrauthCheckMode > PtrauthAuthChecks("aarch64-ptrauth-auth-checks", cl::Hidden, cl::values(clEnumValN(Unchecked, "none", "don't test for failure"), clEnumValN(Poison, "poison", "poison on failure"), clEnumValN(Trap, "trap", "trap on failure")), cl::desc("Check pointer authentication auth/resign failures"), cl::init(Default))
PtrauthCheckMode
@ Unchecked
#define GET_CB_OPC(IsImm, Width, ImmCond, RegCond)
static void emitAuthenticatedPointer(MCStreamer &OutStreamer, MCSymbol *StubLabel, const MCExpr *StubAuthPtrRef)
static void emitAddress(MCStreamer &Streamer, MCRegister Reg, const MCExpr *Expr, bool DSOLocal, const MCSubtargetInfo &STI)
static bool targetSupportsPAuthRelocation(const Triple &TT, const MCExpr *Target, const MCExpr *DSExpr)
static bool targetSupportsIRelativeRelocation(const Triple &TT)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_ABI
Definition Compiler.h:213
#define LLVM_EXTERNAL_VISIBILITY
Definition Compiler.h:132
This file defines the DenseMap class.
@ Default
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
print mir2vec MIR2Vec Vocabulary Printer Pass
Definition MIR2Vec.cpp:593
Machine Check Debug Module
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
#define P(N)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:480
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool printOperand(raw_ostream &OS, const SelectionDAG *G, const SDValue Value)
This file defines the SmallString class.
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static bool printAsmMRegister(const X86AsmPrinter &P, const MachineOperand &MO, char Mode, raw_ostream &O)
static const AArch64AuthMCExpr * create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key, bool HasAddressDiversity, MCContext &Ctx, SMLoc Loc=SMLoc())
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
const SetOfInstructions & getLOHRelated() const
unsigned getJumpTableEntrySize(int Idx) const
MCSymbol * getJumpTableEntryPCRelSymbol(int Idx) const
static bool shouldSignReturnAddress(SignReturnAddress Condition, bool IsLRSpilled)
std::optional< std::string > getOutliningStyle() const
const MILOHContainer & getLOHContainer() const
void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym)
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
static bool isTailCallReturnInst(const MachineInstr &MI)
Returns true if MI is one of the TCRETURN* instructions.
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Return the number of bytes of code the specified instruction may be.
AArch64MCInstLower - This class is used to lower an MachineInstr into an MCInst.
MCSymbol * GetGlobalValueSymbol(const GlobalValue *GV, unsigned TargetFlags) const
void Lower(const MachineInstr *MI, MCInst &OutMI) const
bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const
const AArch64RegisterInfo * getRegisterInfo() const override
bool isNeonAvailable() const
Returns true if the target has NEON and the function at runtime is known to have NEON enabled (e....
const AArch64InstrInfo * getInstrInfo() const override
std::optional< uint16_t > getPtrAuthBlockAddressDiscriminatorIfEnabled(const Function &ParentFn) const
Compute the integer discriminator for a given BlockAddress constant, if blockaddress signing is enabl...
bool isX16X17Safer() const
Returns whether the operating system makes it safer to store sensitive values in x16 and x17 as oppos...
AArch64PAuth::AuthCheckMethod getAuthenticatedLRCheckMethod(const MachineFunction &MF) const
Choose a method of checking LR before performing a tail call.
virtual void emitARM64WinCFISaveRegP(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveRegPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegQP(unsigned Reg, int Offset)
virtual void emitAttributesSubsection(StringRef VendorName, AArch64BuildAttributes::SubsectionOptional IsOptional, AArch64BuildAttributes::SubsectionType ParameterType)
Build attributes implementation.
virtual void emitARM64WinCFISavePReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegI(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFRegPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveRegX(unsigned Reg, int Offset)
virtual void emitARM64WinCFIAllocStack(unsigned Size)
virtual void emitARM64WinCFISaveFPLRX(int Offset)
virtual void emitARM64WinCFIAllocZ(int Offset)
virtual void emitDirectiveVariantPCS(MCSymbol *Symbol)
Callback used to implement the .variant_pcs directive.
virtual void emitARM64WinCFIAddFP(unsigned Size)
virtual void emitARM64WinCFISaveFPLR(int Offset)
virtual void emitARM64WinCFISaveFRegP(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegQPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFRegX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveZReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveLRPair(unsigned Reg, int Offset)
virtual void emitAttribute(StringRef VendorName, unsigned Tag, unsigned Value, std::string String)
virtual void emitARM64WinCFISaveAnyRegIP(unsigned Reg, int Offset)
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
const T & front() const
front - Get the first element.
Definition ArrayRef.h:145
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
This class is intended to be used as a driving class for all asm writers.
Definition AsmPrinter.h:96
virtual void emitGlobalAlias(const Module &M, const GlobalAlias &GA)
virtual MCSymbol * GetCPISymbol(unsigned CPID) const
Return the symbol for the specified constant pool entry.
virtual const MCExpr * lowerConstant(const Constant *CV, const Constant *BaseCV=nullptr, uint64_t Offset=0)
Lower the specified LLVM Constant to an MCExpr.
void getAnalysisUsage(AnalysisUsage &AU) const override
Record analysis usage.
virtual void emitXXStructor(const DataLayout &DL, const Constant *CV)
Targets can override this to change how global constants that are part of a C++ static/global constru...
Definition AsmPrinter.h:644
virtual void emitFunctionEntryLabel()
EmitFunctionEntryLabel - Emit the label that is the entrypoint for the function.
virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, const char *ExtraCode, raw_ostream &OS)
Print the specified operand of MI, an INLINEASM instruction, using the specified assembler variant.
virtual const MCExpr * lowerBlockAddressConstant(const BlockAddress &BA)
Lower the specified BlockAddress to an MCExpr.
The address of a basic block.
Definition Constants.h:899
Function * getFunction() const
Definition Constants.h:935
Conditional or Unconditional Branch instruction.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
A signed pointer, in the ptrauth sense.
Definition Constants.h:1032
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1060
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1063
Constant * getDeactivationSymbol() const
Definition Constants.h:1082
bool hasAddressDiscriminator() const
Whether there is any non-null address discriminator.
Definition Constants.h:1078
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1066
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
void recordFaultingOp(FaultKind FaultTy, const MCSymbol *FaultingLabel, const MCSymbol *HandlerLabel)
Definition FaultMaps.cpp:28
void serializeToFaultMapSection()
Definition FaultMaps.cpp:45
const Constant * getAliasee() const
Definition GlobalAlias.h:87
const Constant * getResolver() const
Definition GlobalIFunc.h:73
bool hasLocalLinkage() const
bool hasExternalWeakLinkage() const
Type * getValueType() const
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
static const MCBinaryExpr * createLShr(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:423
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:343
static const MCBinaryExpr * createSub(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:428
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition MCExpr.cpp:212
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
MCSectionELF * getELFSection(const Twine &Section, unsigned Type, unsigned Flags)
Definition MCContext.h:553
LLVM_ABI MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
LLVM_ABI MCSymbol * createLinkerPrivateSymbol(const Twine &Name)
Base class for the full range of assembler expressions which are needed for parsing.
Definition MCExpr.h:34
LLVM_ABI bool evaluateAsRelocatable(MCValue &Res, const MCAssembler *Asm) const
Try to evaluate the expression to a relocatable value, i.e.
Definition MCExpr.cpp:450
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Instances of this class represent a single low-level machine instruction.
Definition MCInst.h:188
void addOperand(const MCOperand Op)
Definition MCInst.h:215
void setOpcode(unsigned Op)
Definition MCInst.h:201
MCSection * getDataSection() const
Instances of this class represent operands of the MCInst class.
Definition MCInst.h:40
void setImm(int64_t Val)
Definition MCInst.h:89
static MCOperand createExpr(const MCExpr *Val)
Definition MCInst.h:166
int64_t getImm() const
Definition MCInst.h:84
static MCOperand createReg(MCRegister Reg)
Definition MCInst.h:138
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
bool isReg() const
Definition MCInst.h:65
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
static const MCSpecifierExpr * create(const MCExpr *Expr, Spec S, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.cpp:743
Streaming machine code generation interface.
Definition MCStreamer.h:220
virtual void emitCFIBKeyFrame()
virtual void beginCOFFSymbolDef(const MCSymbol *Symbol)
Start emitting COFF symbol definition.
virtual bool popSection()
Restore the current and previous section from the section stack.
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
virtual void emitCOFFSymbolType(int Type)
Emit the type of the symbol.
virtual void emitRelocDirective(const MCExpr &Offset, StringRef Name, const MCExpr *Expr, SMLoc Loc={})
Record a relocation described by the .reloc directive.
virtual bool hasRawTextSupport() const
Return true if this asm streamer supports emitting unformatted text to the .s file with EmitRawText.
Definition MCStreamer.h:368
virtual void endCOFFSymbolDef()
Marks the end of the symbol definition.
MCContext & getContext() const
Definition MCStreamer.h:314
virtual void AddComment(const Twine &T, bool EOL=true)
Add a textual comment.
Definition MCStreamer.h:387
virtual void emitCFIMTETaggedFrame()
void emitValue(const MCExpr *Value, unsigned Size, SMLoc Loc=SMLoc())
virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc=SMLoc())
Emit a label for Symbol into the current section.
MCTargetStreamer * getTargetStreamer()
Definition MCStreamer.h:324
void pushSection()
Save the current and previous section on the section stack.
Definition MCStreamer.h:443
virtual void switchSection(MCSection *Section, uint32_t Subsec=0)
Set the current section where code is being emitted to Section.
MCSection * getCurrentSectionOnly() const
Definition MCStreamer.h:421
void emitRawText(const Twine &String)
If this file is backed by a assembly streamer, this dumps the specified string in the output ....
virtual void emitCOFFSymbolStorageClass(int StorageClass)
Emit the storage class of the symbol.
Generic base class for all target subtargets.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:214
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
LLVM_ABI void print(raw_ostream &OS, const MCAsmInfo *MAI) const
print - Print the value to the stream OS.
Definition MCSymbol.cpp:59
StringRef getName() const
getName - Get the symbol name.
Definition MCSymbol.h:188
const MCSymbol * getAddSym() const
Definition MCValue.h:49
int64_t getConstant() const
Definition MCValue.h:44
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
CalledGlobalInfo tryGetCalledGlobal(const MachineInstr *MI) const
Tries to get the global and target flags for a call site, if the instruction is a call to a global.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Representation of each machine instruction.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
mop_range operands()
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const MachineOperand & getOperand(unsigned i) const
const std::vector< MachineJumpTableEntry > & getJumpTables() const
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
static MachineOperand CreateMCSymbol(MCSymbol *Sym, unsigned TargetFlags=0)
const GlobalValue * getGlobal() const
static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
const BlockAddress * getBlockAddress() const
void setOffset(int64_t Offset)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
@ MO_Immediate
Immediate operand.
@ MO_GlobalAddress
Address of a global value.
@ MO_BlockAddress
Address of a basic block.
@ MO_Register
Register operand.
@ MO_ExternalSymbol
Name of external global symbol.
int64_t getOffset() const
Return the offset from the symbol in this operand.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
static SectionKind getMetadata()
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void push_back(const T &Elt)
LLVM_ABI void recordStatepoint(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a statepoint instruction.
LLVM_ABI void recordPatchPoint(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a patchpoint instruction.
LLVM_ABI void recordStackMap(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a stackmap instruction.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:140
virtual MCSection * getSectionForJumpTable(const Function &F, const TargetMachine &TM) const
Primary interface to the complete machine description for the target machine.
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
bool regsOverlap(Register RegA, Register RegB) const
Returns true if the two registers are equal or alias each other.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition Type.h:258
LLVM Value Representation.
Definition Value.h:75
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
StringRef getVendorName(unsigned const Vendor)
@ MO_NC
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow.
@ MO_G1
MO_G1 - A symbol operand with this flag (granule 1) represents the bits 16-31 of a 64-bit address,...
@ MO_S
MO_S - Indicates that the bits of the symbol operand represented by MO_G0 etc are signed.
@ MO_PAGEOFF
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page.
@ MO_GOT
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
@ MO_G0
MO_G0 - A symbol operand with this flag (granule 0) represents the bits 0-15 of a 64-bit address,...
@ MO_PAGE
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
@ MO_TLS
MO_TLS - Indicates that the operand being accessed is some kind of thread-local symbol.
AuthCheckMethod
Variants of check performed on an authenticated pointer.
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ SectionSize
Definition COFF.h:61
SymbolStorageClass
Storage class tells where and what the symbol represents.
Definition COFF.h:218
@ IMAGE_SYM_CLASS_EXTERNAL
External symbol.
Definition COFF.h:224
@ IMAGE_SYM_CLASS_STATIC
Static.
Definition COFF.h:225
@ IMAGE_SYM_DTYPE_FUNCTION
A function that returns a base type.
Definition COFF.h:276
@ SCT_COMPLEX_TYPE_SHIFT
Type is formed as (base + (derived << SCT_COMPLEX_TYPE_SHIFT))
Definition COFF.h:280
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ SHF_ALLOC
Definition ELF.h:1248
@ SHF_GROUP
Definition ELF.h:1270
@ SHF_EXECINSTR
Definition ELF.h:1251
@ GNU_PROPERTY_AARCH64_FEATURE_1_BTI
Definition ELF.h:1858
@ GNU_PROPERTY_AARCH64_FEATURE_1_PAC
Definition ELF.h:1859
@ GNU_PROPERTY_AARCH64_FEATURE_1_GCS
Definition ELF.h:1860
@ SHT_PROGBITS
Definition ELF.h:1147
@ S_REGULAR
S_REGULAR - Regular section.
Definition MachO.h:127
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:682
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
bool empty() const
Definition BasicBlock.h:101
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI std::optional< std::string > getArm64ECMangledFunctionName(StringRef Name)
Returns the ARM64EC mangled function name unless the input is already mangled.
Definition Mangler.cpp:294
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1655
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
Definition ScopeExit.h:59
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
static unsigned getXPACOpcodeForKey(AArch64PACKey::ID K)
Return XPAC opcode to be used for a ptrauth strip using the given key.
ExceptionHandling
Definition CodeGen.h:53
Target & getTheAArch64beTarget()
std::string utostr(uint64_t X, bool isNeg=false)
Target & getTheAArch64leTarget()
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
Target & getTheAArch64_32Target()
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
Target & getTheARM64_32Target()
static MCRegister getXRegFromWReg(MCRegister Reg)
@ Add
Sum of integers.
Target & getTheARM64Target()
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static MCRegister getXRegFromXRegTuple(MCRegister RegTuple)
static unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return PAC opcode to be used for a ptrauth sign using the given key, or its PAC*Z variant that doesn'...
static MCRegister getWRegFromXReg(MCRegister Reg)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
static unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return AUT opcode to be used for a ptrauth auth using the given key, or its AUT*Z variant that doesn'...
@ MCSA_Weak
.weak
@ MCSA_WeakAntiDep
.weak_anti_dep (COFF)
@ MCSA_ELF_TypeFunction
.type _foo, STT_FUNC # aka @function
@ MCSA_Hidden
.hidden (ELF)
#define EQ(a, b)
Definition regexec.c:65
RegisterAsmPrinter - Helper template for registering a target specific assembly printer,...