1//===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains a printer that converts from our internal representation
10// of machine-dependent LLVM code to the AArch64 assembly language.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AArch64.h"
15#include "AArch64MCInstLower.h"
17#include "AArch64RegisterInfo.h"
18#include "AArch64Subtarget.h"
27#include "llvm/ADT/DenseMap.h"
28#include "llvm/ADT/ScopeExit.h"
31#include "llvm/ADT/StringRef.h"
32#include "llvm/ADT/Twine.h"
46#include "llvm/IR/DataLayout.h"
48#include "llvm/IR/Module.h"
49#include "llvm/MC/MCAsmInfo.h"
50#include "llvm/MC/MCContext.h"
51#include "llvm/MC/MCInst.h"
55#include "llvm/MC/MCStreamer.h"
56#include "llvm/MC/MCSymbol.h"
65#include <cassert>
66#include <cstdint>
67#include <map>
68#include <memory>
69
70using namespace llvm;
71
74 "aarch64-ptrauth-auth-checks", cl::Hidden,
75 cl::values(clEnumValN(Unchecked, "none", "don't test for failure"),
76 clEnumValN(Poison, "poison", "poison on failure"),
77 clEnumValN(Trap, "trap", "trap on failure")),
78 cl::desc("Check pointer authentication auth/resign failures"),
79 cl::init(Default));
80
82 "aarch64-win-import-call-optimization", cl::Hidden,
83 cl::desc("Enable import call optimization for AArch64 Windows"),
84 cl::init(false));
85
86#define DEBUG_TYPE "asm-printer"
87
88namespace {
89
90class AArch64AsmPrinter : public AsmPrinter {
91 AArch64MCInstLower MCInstLowering;
92 FaultMaps FM;
93 const AArch64Subtarget *STI;
94 bool ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = false;
95#ifndef NDEBUG
96 unsigned InstsEmitted;
97#endif
98 DenseMap<MCSection *, std::vector<std::pair<MCSymbol *, MCSymbol *>>>
99 SectionToImportedFunctionCalls;
100
101public:
102 AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
103 : AsmPrinter(TM, std::move(Streamer)), MCInstLowering(OutContext, *this),
104 FM(*this) {}
105
106 StringRef getPassName() const override { return "AArch64 Assembly Printer"; }
107
108 /// Wrapper for MCInstLowering.lowerOperand() for the
109 /// tblgen'erated pseudo lowering.
110 bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
111 return MCInstLowering.lowerOperand(MO, MCOp);
112 }
113
114 const MCExpr *lowerConstantPtrAuth(const ConstantPtrAuth &CPA) override;
115
116 const MCExpr *lowerBlockAddressConstant(const BlockAddress &BA) override;
117
118 void emitStartOfAsmFile(Module &M) override;
119 void emitJumpTableInfo() override;
120 std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
121 codeview::JumpTableEntrySize>
122 getCodeViewJumpTableInfo(int JTI, const MachineInstr *BranchInstr,
123 const MCSymbol *BranchLabel) const override;
124
125 void emitFunctionEntryLabel() override;
126
127 void emitXXStructor(const DataLayout &DL, const Constant *CV) override;
128
129 void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);
130
131 void LowerHardenedBRJumpTable(const MachineInstr &MI);
132
133 void LowerMOPS(MCStreamer &OutStreamer, const MachineInstr &MI);
134
135 void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
136 const MachineInstr &MI);
137 void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
138 const MachineInstr &MI);
139 void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
140 const MachineInstr &MI);
141 void LowerFAULTING_OP(const MachineInstr &MI);
142
143 void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
144 void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
145 void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);
146 void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, bool Typed);
147
148 typedef std::tuple<unsigned, bool, uint32_t, bool, uint64_t>
149 HwasanMemaccessTuple;
150 std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
151 void LowerKCFI_CHECK(const MachineInstr &MI);
152 void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
153 void emitHwasanMemaccessSymbols(Module &M);
154
155 void emitSled(const MachineInstr &MI, SledKind Kind);
156
157 // Emit the sequence for BRA/BLRA (authenticate + branch/call).
158 void emitPtrauthBranch(const MachineInstr *MI);
159
160 void emitPtrauthCheckAuthenticatedValue(Register TestedReg,
161 Register ScratchReg,
162 AArch64PACKey::ID Key,
163 AArch64PAuth::AuthCheckMethod Method,
164 bool ShouldTrap,
165 const MCSymbol *OnFailure);
166
167 // Check authenticated LR before tail calling.
168 void emitPtrauthTailCallHardening(const MachineInstr *TC);
169
170 // Emit the sequence for AUT or AUTPAC.
171 void emitPtrauthAuthResign(const MachineInstr *MI);
172
173 // Emit the sequence to compute the discriminator.
174 //
175 // ScratchReg should be x16/x17.
176 //
177 // The returned register is either unmodified AddrDisc or x16/x17.
178 //
179 // If the expanded pseudo is allowed to clobber AddrDisc register, setting
180 // MayUseAddrAsScratch may save one MOV instruction, provided the address
181 // is already in x16/x17 (i.e. return x16/x17 which is the *modified* AddrDisc
182 // register at the same time):
183 //
184 // mov x17, x16
185 // movk x17, #1234, lsl #48
186 // ; x16 is not used anymore
187 //
188 // can be replaced by
189 //
190 // movk x16, #1234, lsl #48
191 Register emitPtrauthDiscriminator(uint16_t Disc, Register AddrDisc,
192 Register ScratchReg,
193 bool MayUseAddrAsScratch = false);
194
195 // Emit the sequence for LOADauthptrstatic
196 void LowerLOADauthptrstatic(const MachineInstr &MI);
197
198 // Emit the sequence for LOADgotPAC/MOVaddrPAC (either GOT adrp-ldr or
199 // adrp-add followed by PAC sign)
200 void LowerMOVaddrPAC(const MachineInstr &MI);
201
202 // Emit the sequence for LOADgotAUTH (load signed pointer from signed ELF GOT
203 // and authenticate it with, if FPAC bit is not set, check+trap sequence after
204 // authenticating)
205 void LowerLOADgotAUTH(const MachineInstr &MI);
206
207 /// tblgen'erated driver function for lowering simple MI->MC
208 /// pseudo instructions.
209 bool lowerPseudoInstExpansion(const MachineInstr *MI, MCInst &Inst);
210
211 // Emit Build Attributes
212 void emitAttributes(unsigned Flags, uint64_t PAuthABIPlatform,
213 uint64_t PAuthABIVersion, AArch64TargetStreamer *TS);
214
215 void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
216 void EmitToStreamer(const MCInst &Inst) {
217 EmitToStreamer(*OutStreamer, Inst);
218 }
219
220 void emitInstruction(const MachineInstr *MI) override;
221
222 void emitFunctionHeaderComment() override;
223
224 void getAnalysisUsage(AnalysisUsage &AU) const override {
225 AsmPrinter::getAnalysisUsage(AU);
226 AU.setPreservesAll();
227 }
228
229 bool runOnMachineFunction(MachineFunction &MF) override {
230 AArch64FI = MF.getInfo<AArch64FunctionInfo>();
231 STI = &MF.getSubtarget<AArch64Subtarget>();
232
233 SetupMachineFunction(MF);
234
235 if (STI->isTargetCOFF()) {
236 bool Local = MF.getFunction().hasLocalLinkage();
237 COFF::SymbolStorageClass Scl =
238 Local ? COFF::IMAGE_SYM_CLASS_STATIC : COFF::IMAGE_SYM_CLASS_EXTERNAL;
239 int Type =
240 COFF::IMAGE_SYM_DTYPE_FUNCTION << COFF::SCT_COMPLEX_TYPE_SHIFT;
241
242 OutStreamer->beginCOFFSymbolDef(CurrentFnSym);
243 OutStreamer->emitCOFFSymbolStorageClass(Scl);
244 OutStreamer->emitCOFFSymbolType(Type);
245 OutStreamer->endCOFFSymbolDef();
246 }
247
248 // Emit the rest of the function body.
249 emitFunctionBody();
250
251 // Emit the XRay table for this function.
252 emitXRayTable();
253
254 // We didn't modify anything.
255 return false;
256 }
257
258 const MCExpr *lowerConstant(const Constant *CV) override;
259
260private:
261 void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
262 bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
263 bool printAsmRegInClass(const MachineOperand &MO,
264 const TargetRegisterClass *RC, unsigned AltName,
265 raw_ostream &O);
266
267 bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
268 const char *ExtraCode, raw_ostream &O) override;
269 bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
270 const char *ExtraCode, raw_ostream &O) override;
271
272 void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
273
274 void emitFunctionBodyEnd() override;
275 void emitGlobalAlias(const Module &M, const GlobalAlias &GA) override;
276
277 MCSymbol *GetCPISymbol(unsigned CPID) const override;
278 void emitEndOfAsmFile(Module &M) override;
279
280 AArch64FunctionInfo *AArch64FI = nullptr;
281
282 /// Emit the LOHs contained in AArch64FI.
283 void emitLOHs();
284
285 void emitMovXReg(Register Dest, Register Src);
286 void emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift);
287 void emitMOVK(Register Dest, uint64_t Imm, unsigned Shift);
288
289 /// Emit instruction to set float register to zero.
290 void emitFMov0(const MachineInstr &MI);
291
292 using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;
293
294 MInstToMCSymbol LOHInstToLabel;
295
296 bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const override {
297 return ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags;
298 }
299
300 const MCSubtargetInfo *getIFuncMCSubtargetInfo() const override {
301 assert(STI);
302 return STI;
303 }
304 void emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
305 MCSymbol *LazyPointer) override;
306 void emitMachOIFuncStubHelperBody(Module &M, const GlobalIFunc &GI,
307 MCSymbol *LazyPointer) override;
308
309 /// Checks if this instruction is part of a sequence that is eligible for import
310 /// call optimization and, if so, records it to be emitted in the import call
311 /// section.
312 void recordIfImportCall(const MachineInstr *BranchInst);
313};
314
315} // end anonymous namespace
316
317void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
318 const Triple &TT = TM.getTargetTriple();
319
320 if (TT.isOSBinFormatCOFF()) {
321 // Emit an absolute @feat.00 symbol
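 // @feat.00 is an absolute symbol whose value is a bit mask of safety
 // features (CFG, EHCont guard, /kernel) that the MSVC-compatible linker
 // inspects; the individual bits are OR'ed in below.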
322 MCSymbol *S = MMI->getContext().getOrCreateSymbol(StringRef("@feat.00"));
323 OutStreamer->beginCOFFSymbolDef(S);
324 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_STATIC);
325 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_NULL);
326 OutStreamer->endCOFFSymbolDef();
327 int64_t Feat00Value = 0;
328
329 if (M.getModuleFlag("cfguard")) {
330 // Object is CFG-aware.
331 Feat00Value |= COFF::Feat00Flags::GuardCF;
332 }
333
334 if (M.getModuleFlag("ehcontguard")) {
335 // Object also has EHCont.
336 Feat00Value |= COFF::Feat00Flags::GuardEHCont;
337 }
338
339 if (M.getModuleFlag("ms-kernel")) {
340 // Object is compiled with /kernel.
341 Feat00Value |= COFF::Feat00Flags::Kernel;
342 }
343
344 OutStreamer->emitSymbolAttribute(S, MCSA_Global);
345 OutStreamer->emitAssignment(
346 S, MCConstantExpr::create(Feat00Value, MMI->getContext()));
347 }
348
349 if (!TT.isOSBinFormatELF())
350 return;
351
352 // For emitting build attributes and .note.gnu.property section
353 auto *TS =
354 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
355 // Assemble feature flags that may require creation of build attributes and a
356 // note section.
357 unsigned BAFlags = 0;
358 unsigned GNUFlags = 0;
359 if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
360 M.getModuleFlag("branch-target-enforcement"))) {
361 if (!BTE->isZero()) {
362 BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_BTI_Flag;
363 GNUFlags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
364 }
365 }
366
367 if (const auto *GCS = mdconst::extract_or_null<ConstantInt>(
368 M.getModuleFlag("guarded-control-stack"))) {
369 if (!GCS->isZero()) {
370 BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_GCS_Flag;
371 GNUFlags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_GCS;
372 }
373 }
374
375 if (const auto *Sign = mdconst::extract_or_null<ConstantInt>(
376 M.getModuleFlag("sign-return-address"))) {
377 if (!Sign->isZero()) {
378 BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_PAC_Flag;
379 GNUFlags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_PAC;
380 }
381 }
382
383 uint64_t PAuthABIPlatform = -1;
384 if (const auto *PAP = mdconst::extract_or_null<ConstantInt>(
385 M.getModuleFlag("aarch64-elf-pauthabi-platform"))) {
386 PAuthABIPlatform = PAP->getZExtValue();
387 }
388
389 uint64_t PAuthABIVersion = -1;
390 if (const auto *PAV = mdconst::extract_or_null<ConstantInt>(
391 M.getModuleFlag("aarch64-elf-pauthabi-version"))) {
392 PAuthABIVersion = PAV->getZExtValue();
393 }
394
395 // Emit AArch64 Build Attributes
396 emitAttributes(BAFlags, PAuthABIPlatform, PAuthABIVersion, TS);
397 // Emit a .note.gnu.property section with the flags.
398 TS->emitNoteSection(GNUFlags, PAuthABIPlatform, PAuthABIVersion);
399}
400
401void AArch64AsmPrinter::emitFunctionHeaderComment() {
402 const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
403 std::optional<std::string> OutlinerString = FI->getOutliningStyle();
404 if (OutlinerString != std::nullopt)
405 OutStreamer->getCommentOS() << ' ' << OutlinerString;
406}
407
408void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI)
409{
410 const Function &F = MF->getFunction();
411 if (F.hasFnAttribute("patchable-function-entry")) {
412 unsigned Num;
413 if (F.getFnAttribute("patchable-function-entry")
414 .getValueAsString()
415 .getAsInteger(10, Num))
416 return;
417 emitNops(Num);
418 return;
419 }
420
421 emitSled(MI, SledKind::FUNCTION_ENTER);
422}
423
424void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI) {
425 emitSled(MI, SledKind::FUNCTION_EXIT);
426}
427
428void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI) {
429 emitSled(MI, SledKind::TAIL_CALL);
430}
431
432void AArch64AsmPrinter::emitSled(const MachineInstr &MI, SledKind Kind) {
433 static const int8_t NoopsInSledCount = 7;
434 // We want to emit the following pattern:
435 //
436 // .Lxray_sled_N:
437 // ALIGN
438 // B #32
439 // ; 7 NOP instructions (28 bytes)
440 // .tmpN
441 //
442 // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
443 // over the full 32 bytes (8 instructions) with the following pattern:
444 //
445 // STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
446 // LDR W17, #12 ; W17 := function ID
447 // LDR X16,#12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
448 // BLR X16 ; call the tracing trampoline
449 // ;DATA: 32 bits of function ID
450 // ;DATA: lower 32 bits of the address of the trampoline
451 // ;DATA: higher 32 bits of the address of the trampoline
452 // LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
453 //
454 OutStreamer->emitCodeAlignment(Align(4), &getSubtargetInfo());
455 auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
456 OutStreamer->emitLabel(CurSled);
457 auto Target = OutContext.createTempSymbol();
458
459 // Emit "B #32" instruction, which jumps over the next 28 bytes.
460 // The operand has to be the number of 4-byte instructions to jump over,
461 // including the current instruction.
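 // Here 8 instructions * 4 bytes = 32 bytes: the branch itself plus the
 // seven NOPs emitted below.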
462 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));
463
464 for (int8_t I = 0; I < NoopsInSledCount; I++)
465 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
466
467 OutStreamer->emitLabel(Target);
468 recordSled(CurSled, MI, Kind, 2);
469}
470
471void AArch64AsmPrinter::emitAttributes(unsigned Flags,
472 uint64_t PAuthABIPlatform,
473 uint64_t PAuthABIVersion,
474 AArch64TargetStreamer *TS) {
475
476 PAuthABIPlatform = (uint64_t(-1) == PAuthABIPlatform) ? 0 : PAuthABIPlatform;
477 PAuthABIVersion = (uint64_t(-1) == PAuthABIVersion) ? 0 : PAuthABIVersion;
478
479 if (PAuthABIPlatform || PAuthABIVersion) {
483 AArch64BuildAttributes::SubsectionOptional::REQUIRED,
484 AArch64BuildAttributes::SubsectionType::ULEB128);
488 PAuthABIPlatform, "", false);
492 "", false);
493 }
494
495 unsigned BTIValue =
497 unsigned PACValue =
499 unsigned GCSValue =
501
502 if (BTIValue || PACValue || GCSValue) {
506 AArch64BuildAttributes::SubsectionOptional::OPTIONAL,
507 AArch64BuildAttributes::SubsectionType::ULEB128);
511 false);
515 false);
519 false);
520 }
521}
522
523// Emit the following code for Intrinsic::{xray_customevent,xray_typedevent}
524// (built-in functions __xray_customevent/__xray_typedevent).
525//
526// .Lxray_event_sled_N:
527// b 1f
528// save x0 and x1 (and also x2 for TYPED_EVENT_CALL)
529// set up x0 and x1 (and also x2 for TYPED_EVENT_CALL)
530// bl __xray_CustomEvent or __xray_TypedEvent
531// restore x0 and x1 (and also x2 for TYPED_EVENT_CALL)
532// 1:
533//
534// There are 6 instructions for EVENT_CALL and 9 for TYPED_EVENT_CALL.
535//
536// Then record a sled of kind CUSTOM_EVENT or TYPED_EVENT.
537// After patching, b .+N will become a nop.
538void AArch64AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
539 bool Typed) {
540 auto &O = *OutStreamer;
541 MCSymbol *CurSled = OutContext.createTempSymbol("xray_sled_", true);
542 O.emitLabel(CurSled);
543 bool MachO = TM.getTargetTriple().isOSBinFormatMachO();
544 const MCExpr *Sym = MCSymbolRefExpr::create(
545 OutContext.getOrCreateSymbol(
546 Twine(MachO ? "_" : "") +
547 (Typed ? "__xray_TypedEvent" : "__xray_CustomEvent")),
548 OutContext);
549 if (Typed) {
550 O.AddComment("Begin XRay typed event");
551 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(9));
552 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
553 .addReg(AArch64::SP)
554 .addReg(AArch64::X0)
555 .addReg(AArch64::X1)
556 .addReg(AArch64::SP)
557 .addImm(-4));
558 EmitToStreamer(O, MCInstBuilder(AArch64::STRXui)
559 .addReg(AArch64::X2)
560 .addReg(AArch64::SP)
561 .addImm(2));
562 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
563 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
564 emitMovXReg(AArch64::X2, MI.getOperand(2).getReg());
565 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
566 EmitToStreamer(O, MCInstBuilder(AArch64::LDRXui)
567 .addReg(AArch64::X2)
568 .addReg(AArch64::SP)
569 .addImm(2));
570 O.AddComment("End XRay typed event");
571 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
572 .addReg(AArch64::SP)
573 .addReg(AArch64::X0)
574 .addReg(AArch64::X1)
575 .addReg(AArch64::SP)
576 .addImm(4));
577
578 recordSled(CurSled, MI, SledKind::TYPED_EVENT, 2);
579 } else {
580 O.AddComment("Begin XRay custom event");
581 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(6));
582 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
583 .addReg(AArch64::SP)
584 .addReg(AArch64::X0)
585 .addReg(AArch64::X1)
586 .addReg(AArch64::SP)
587 .addImm(-2));
588 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
589 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
590 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
591 O.AddComment("End XRay custom event");
592 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
593 .addReg(AArch64::SP)
594 .addReg(AArch64::X0)
595 .addReg(AArch64::X1)
596 .addReg(AArch64::SP)
597 .addImm(2));
598
599 recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 2);
600 }
601}
602
603void AArch64AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
604 Register AddrReg = MI.getOperand(0).getReg();
605 assert(std::next(MI.getIterator())->isCall() &&
606 "KCFI_CHECK not followed by a call instruction");
607 assert(std::next(MI.getIterator())->getOperand(0).getReg() == AddrReg &&
608 "KCFI_CHECK call target doesn't match call operand");
609
610 // Default to using the intra-procedure-call temporary registers for
611 // comparing the hashes.
612 unsigned ScratchRegs[] = {AArch64::W16, AArch64::W17};
613 if (AddrReg == AArch64::XZR) {
614 // Checking XZR makes no sense. Instead of emitting a load, zero
615 // ScratchRegs[0] and use it for the ESR AddrIndex below.
616 AddrReg = getXRegFromWReg(ScratchRegs[0]);
617 emitMovXReg(AddrReg, AArch64::XZR);
618 } else {
619 // If one of the scratch registers is used for the call target (e.g.
620 // with AArch64::TCRETURNriBTI), we can clobber another caller-saved
621 // temporary register instead (in this case, AArch64::W9) as the check
622 // is immediately followed by the call instruction.
623 for (auto &Reg : ScratchRegs) {
624 if (Reg == getWRegFromXReg(AddrReg)) {
625 Reg = AArch64::W9;
626 break;
627 }
628 }
629 assert(ScratchRegs[0] != AddrReg && ScratchRegs[1] != AddrReg &&
630 "Invalid scratch registers for KCFI_CHECK");
631
632 // Adjust the offset for patchable-function-prefix. This assumes that
633 // patchable-function-prefix is the same for all functions.
634 int64_t PrefixNops = 0;
635 (void)MI.getMF()
636 ->getFunction()
637 .getFnAttribute("patchable-function-prefix")
638 .getValueAsString()
639 .getAsInteger(10, PrefixNops);
640
641 // Load the target function type hash.
642 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDURWi)
643 .addReg(ScratchRegs[0])
644 .addReg(AddrReg)
645 .addImm(-(PrefixNops * 4 + 4)));
646 }
647
648 // Load the expected type hash.
649 const int64_t Type = MI.getOperand(1).getImm();
650 emitMOVK(ScratchRegs[1], Type & 0xFFFF, 0);
651 emitMOVK(ScratchRegs[1], (Type >> 16) & 0xFFFF, 16);
652
653 // Compare the hashes and trap if there's a mismatch.
654 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSWrs)
655 .addReg(AArch64::WZR)
656 .addReg(ScratchRegs[0])
657 .addReg(ScratchRegs[1])
658 .addImm(0));
659
660 MCSymbol *Pass = OutContext.createTempSymbol();
661 EmitToStreamer(*OutStreamer,
662 MCInstBuilder(AArch64::Bcc)
663 .addImm(AArch64CC::EQ)
664 .addExpr(MCSymbolRefExpr::create(Pass, OutContext)));
665
666 // The base ESR is 0x8000 and the register information is encoded in bits
667 // 0-9 as follows:
668 // - 0-4: n, where the register Xn contains the target address
669 // - 5-9: m, where the register Wm contains the expected type hash
670 // Where n, m are in [0, 30].
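 // For example, with the target address in X3 and the type hash in W17:
 // ESR = 0x8000 | (17 << 5) | 3 = 0x8223.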
671 unsigned TypeIndex = ScratchRegs[1] - AArch64::W0;
672 unsigned AddrIndex;
673 switch (AddrReg) {
674 default:
675 AddrIndex = AddrReg - AArch64::X0;
676 break;
677 case AArch64::FP:
678 AddrIndex = 29;
679 break;
680 case AArch64::LR:
681 AddrIndex = 30;
682 break;
683 }
684
685 assert(AddrIndex < 31 && TypeIndex < 31);
686
687 unsigned ESR = 0x8000 | ((TypeIndex & 31) << 5) | (AddrIndex & 31);
688 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BRK).addImm(ESR));
689 OutStreamer->emitLabel(Pass);
690}
691
692void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
693 Register Reg = MI.getOperand(0).getReg();
694
695 // The HWASan pass won't emit a CHECK_MEMACCESS intrinsic with a pointer
696 // statically known to be zero. However, conceivably, the HWASan pass may
697 // encounter a "cannot currently statically prove to be null" pointer (and is
698 // therefore unable to omit the intrinsic) that later optimization passes
699 // convert into a statically known-null pointer.
700 if (Reg == AArch64::XZR)
701 return;
702
703 bool IsShort =
704 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES) ||
705 (MI.getOpcode() ==
706 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
707 uint32_t AccessInfo = MI.getOperand(1).getImm();
708 bool IsFixedShadow =
709 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW) ||
710 (MI.getOpcode() ==
711 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
712 uint64_t FixedShadowOffset = IsFixedShadow ? MI.getOperand(2).getImm() : 0;
713
714 MCSymbol *&Sym = HwasanMemaccessSymbols[HwasanMemaccessTuple(
715 Reg, IsShort, AccessInfo, IsFixedShadow, FixedShadowOffset)];
716 if (!Sym) {
717 // FIXME: Make this work on non-ELF.
718 if (!TM.getTargetTriple().isOSBinFormatELF())
719 report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");
720
721 std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
722 utostr(AccessInfo);
723 if (IsFixedShadow)
724 SymName += "_fixed_" + utostr(FixedShadowOffset);
725 if (IsShort)
726 SymName += "_short_v2";
727 Sym = OutContext.getOrCreateSymbol(SymName);
728 }
729
730 EmitToStreamer(*OutStreamer,
731 MCInstBuilder(AArch64::BL)
732 .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
733}
734
735void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
736 if (HwasanMemaccessSymbols.empty())
737 return;
738
739 const Triple &TT = TM.getTargetTriple();
740 assert(TT.isOSBinFormatELF());
741 std::unique_ptr<MCSubtargetInfo> STI(
742 TM.getTarget().createMCSubtargetInfo(TT.str(), "", ""));
743 assert(STI && "Unable to create subtarget info");
744 this->STI = static_cast<const AArch64Subtarget *>(&*STI);
745
746 MCSymbol *HwasanTagMismatchV1Sym =
747 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");
748 MCSymbol *HwasanTagMismatchV2Sym =
749 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");
750
751 const MCSymbolRefExpr *HwasanTagMismatchV1Ref =
752 MCSymbolRefExpr::create(HwasanTagMismatchV1Sym, OutContext);
753 const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
754 MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);
755
756 for (auto &P : HwasanMemaccessSymbols) {
757 unsigned Reg = std::get<0>(P.first);
758 bool IsShort = std::get<1>(P.first);
759 uint32_t AccessInfo = std::get<2>(P.first);
760 bool IsFixedShadow = std::get<3>(P.first);
761 uint64_t FixedShadowOffset = std::get<4>(P.first);
762 const MCSymbolRefExpr *HwasanTagMismatchRef =
763 IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
764 MCSymbol *Sym = P.second;
765
766 bool HasMatchAllTag =
767 (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
768 uint8_t MatchAllTag =
769 (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
770 unsigned Size =
771 1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
772 bool CompileKernel =
773 (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;
774
775 OutStreamer->switchSection(OutContext.getELFSection(
776 ".text.hot", ELF::SHT_PROGBITS,
777 ELF::SHF_EXECINSTR | ELF::SHF_ALLOC | ELF::SHF_GROUP, 0, Sym->getName(),
778 /*IsComdat=*/true));
779
780 OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
781 OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
782 OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
783 OutStreamer->emitLabel(Sym);
784
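 // X16 = bits [55:4] of the pointer, i.e. the 16-byte granule index with
 // the tag byte stripped; it is used below to address the shadow byte.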
785 EmitToStreamer(MCInstBuilder(AArch64::SBFMXri)
786 .addReg(AArch64::X16)
787 .addReg(Reg)
788 .addImm(4)
789 .addImm(55));
790
791 if (IsFixedShadow) {
792 // AArch64 makes it difficult to embed large constants in the code.
793 // Fortuitously, kShadowBaseAlignment == 32, so we use the 32-bit
794 // left-shift option in the MOV instruction. Combined with the 16-bit
795 // immediate, this is enough to represent any offset up to 2**48.
796 emitMOVZ(AArch64::X17, FixedShadowOffset >> 32, 32);
797 EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
798 .addReg(AArch64::W16)
799 .addReg(AArch64::X17)
800 .addReg(AArch64::X16)
801 .addImm(0)
802 .addImm(0));
803 } else {
804 EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
805 .addReg(AArch64::W16)
806 .addReg(IsShort ? AArch64::X20 : AArch64::X9)
807 .addReg(AArch64::X16)
808 .addImm(0)
809 .addImm(0));
810 }
811
812 EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
813 .addReg(AArch64::XZR)
814 .addReg(AArch64::X16)
815 .addReg(Reg)
816 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)));
817 MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
818 EmitToStreamer(MCInstBuilder(AArch64::Bcc)
819 .addImm(AArch64CC::NE)
820 .addExpr(MCSymbolRefExpr::create(
821 HandleMismatchOrPartialSym, OutContext)));
822 MCSymbol *ReturnSym = OutContext.createTempSymbol();
823 OutStreamer->emitLabel(ReturnSym);
824 EmitToStreamer(MCInstBuilder(AArch64::RET).addReg(AArch64::LR));
825 OutStreamer->emitLabel(HandleMismatchOrPartialSym);
826
827 if (HasMatchAllTag) {
828 EmitToStreamer(MCInstBuilder(AArch64::UBFMXri)
829 .addReg(AArch64::X17)
830 .addReg(Reg)
831 .addImm(56)
832 .addImm(63));
833 EmitToStreamer(MCInstBuilder(AArch64::SUBSXri)
834 .addReg(AArch64::XZR)
835 .addReg(AArch64::X17)
836 .addImm(MatchAllTag)
837 .addImm(0));
838 EmitToStreamer(
839 MCInstBuilder(AArch64::Bcc)
840 .addImm(AArch64CC::EQ)
841 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
842 }
843
844 if (IsShort) {
845 EmitToStreamer(MCInstBuilder(AArch64::SUBSWri)
846 .addReg(AArch64::WZR)
847 .addReg(AArch64::W16)
848 .addImm(15)
849 .addImm(0));
850 MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
851 EmitToStreamer(
852 MCInstBuilder(AArch64::Bcc)
853 .addImm(AArch64CC::HI)
854 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));
855
856 EmitToStreamer(MCInstBuilder(AArch64::ANDXri)
857 .addReg(AArch64::X17)
858 .addReg(Reg)
859 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
860 if (Size != 1)
861 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
862 .addReg(AArch64::X17)
863 .addReg(AArch64::X17)
864 .addImm(Size - 1)
865 .addImm(0));
866 EmitToStreamer(MCInstBuilder(AArch64::SUBSWrs)
867 .addReg(AArch64::WZR)
868 .addReg(AArch64::W16)
869 .addReg(AArch64::W17)
870 .addImm(0));
871 EmitToStreamer(
872 MCInstBuilder(AArch64::Bcc)
873 .addImm(AArch64CC::LS)
874 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));
875
876 EmitToStreamer(MCInstBuilder(AArch64::ORRXri)
877 .addReg(AArch64::X16)
878 .addReg(Reg)
879 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
880 EmitToStreamer(MCInstBuilder(AArch64::LDRBBui)
881 .addReg(AArch64::W16)
882 .addReg(AArch64::X16)
883 .addImm(0));
884 EmitToStreamer(
885 MCInstBuilder(AArch64::SUBSXrs)
886 .addReg(AArch64::XZR)
887 .addReg(AArch64::X16)
888 .addReg(Reg)
889 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)));
890 EmitToStreamer(
891 MCInstBuilder(AArch64::Bcc)
892 .addImm(AArch64CC::EQ)
893 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
894
895 OutStreamer->emitLabel(HandleMismatchSym);
896 }
897
898 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
899 .addReg(AArch64::SP)
900 .addReg(AArch64::X0)
901 .addReg(AArch64::X1)
902 .addReg(AArch64::SP)
903 .addImm(-32));
904 EmitToStreamer(MCInstBuilder(AArch64::STPXi)
905 .addReg(AArch64::FP)
906 .addReg(AArch64::LR)
907 .addReg(AArch64::SP)
908 .addImm(29));
909
910 if (Reg != AArch64::X0)
911 emitMovXReg(AArch64::X0, Reg);
912 emitMOVZ(AArch64::X1, AccessInfo & HWASanAccessInfo::RuntimeMask, 0);
913
914 if (CompileKernel) {
915 // The Linux kernel's dynamic loader doesn't support GOT relative
916 // relocations, but it doesn't support late binding either, so just call
917 // the function directly.
918 EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef));
919 } else {
920 // Intentionally load the GOT entry and branch to it, rather than possibly
921 // late binding the function, which may clobber the registers before we
922 // have a chance to save them.
923 EmitToStreamer(
924 MCInstBuilder(AArch64::ADRP)
925 .addReg(AArch64::X16)
926 .addExpr(AArch64MCExpr::create(
927 HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_PAGE,
928 OutContext)));
929 EmitToStreamer(
930 MCInstBuilder(AArch64::LDRXui)
931 .addReg(AArch64::X16)
932 .addReg(AArch64::X16)
933 .addExpr(AArch64MCExpr::create(
934 HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_LO12,
935 OutContext)));
936 EmitToStreamer(MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
937 }
938 }
939 this->STI = nullptr;
940}
941
942static void emitAuthenticatedPointer(MCStreamer &OutStreamer,
943 MCSymbol *StubLabel,
944 const MCExpr *StubAuthPtrRef) {
945 // sym$auth_ptr$key$disc:
946 OutStreamer.emitLabel(StubLabel);
947 OutStreamer.emitValue(StubAuthPtrRef, /*size=*/8);
948}
949
950void AArch64AsmPrinter::emitEndOfAsmFile(Module &M) {
951 emitHwasanMemaccessSymbols(M);
952
953 const Triple &TT = TM.getTargetTriple();
954 if (TT.isOSBinFormatMachO()) {
955 // Output authenticated pointers as indirect symbols, if we have any.
956 MachineModuleInfoMachO &MMIMacho =
957 MMI->getObjFileInfo<MachineModuleInfoMachO>();
958
959 auto Stubs = MMIMacho.getAuthGVStubList();
960
961 if (!Stubs.empty()) {
962 // Switch to the "__auth_ptr" section.
963 OutStreamer->switchSection(
964 OutContext.getMachOSection("__DATA", "__auth_ptr", MachO::S_REGULAR,
965 SectionKind::getMetadata()));
966 emitAlignment(Align(8));
967
968 for (const auto &Stub : Stubs)
969 emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
970
971 OutStreamer->addBlankLine();
972 }
973
974 // Funny Darwin hack: This flag tells the linker that no global symbols
975 // contain code that falls through to other global symbols (e.g. the obvious
976 // implementation of multiple entry points). If this doesn't occur, the
977 // linker can safely perform dead code stripping. Since LLVM never
978 // generates code that does this, it is always safe to set.
979 OutStreamer->emitAssemblerFlag(MCAF_SubsectionsViaSymbols);
980 }
981
982 if (TT.isOSBinFormatELF()) {
983 // Output authenticated pointers as indirect symbols, if we have any.
984 MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
985
986 auto Stubs = MMIELF.getAuthGVStubList();
987
988 if (!Stubs.empty()) {
989 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
990 OutStreamer->switchSection(TLOF.getDataSection());
991 emitAlignment(Align(8));
992
993 for (const auto &Stub : Stubs)
994 emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
995
996 OutStreamer->addBlankLine();
997 }
998
999 // With signed ELF GOT enabled, the linker looks at the symbol type to
1000 // choose between keys IA (for STT_FUNC) and DA (for other types). Symbols
1001 // for functions not defined in the module have STT_NOTYPE type by default.
1002 // This makes the linker emit the signing schema with the DA key (instead of IA) for the
1003 // corresponding R_AARCH64_AUTH_GLOB_DAT dynamic reloc. To avoid that, force
1004 // all function symbols used in the module to have STT_FUNC type. See
1005 // https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#default-signing-schema
1006 const auto *PtrAuthELFGOTFlag = mdconst::extract_or_null<ConstantInt>(
1007 M.getModuleFlag("ptrauth-elf-got"));
1008 if (PtrAuthELFGOTFlag && PtrAuthELFGOTFlag->getZExtValue() == 1)
1009 for (const GlobalValue &GV : M.global_values())
1010 if (!GV.use_empty() && isa<Function>(GV) &&
1011 !GV.getName().starts_with("llvm."))
1012 OutStreamer->emitSymbolAttribute(getSymbol(&GV),
1013 MCSA_ELF_TypeFunction);
1014 }
1015
1016 // Emit stack and fault map information.
1017 FM.serializeToFaultMapSection();
1018
1019 // If import call optimization is enabled, emit the appropriate section.
1020 // We do this whether or not we recorded any import calls.
1021 if (EnableImportCallOptimization && TT.isOSBinFormatCOFF()) {
1022 OutStreamer->switchSection(getObjFileLowering().getImportCallSection());
1023
1024 // Section always starts with some magic.
1025 constexpr char ImpCallMagic[12] = "Imp_Call_V1";
1026 OutStreamer->emitBytes(StringRef{ImpCallMagic, sizeof(ImpCallMagic)});
1027
1028 // Layout of this section is:
1029 // Per section that contains calls to imported functions:
1030 // uint32_t SectionSize: Size in bytes for information in this section.
1031 // uint32_t Section Number
1032 // Per call to imported function in section:
1033 // uint32_t Kind: the kind of imported function.
1034 // uint32_t BranchOffset: the offset of the branch instruction in its
1035 // parent section.
1036 // uint32_t TargetSymbolId: the symbol id of the called function.
1037 for (auto &[Section, CallsToImportedFuncs] :
1038 SectionToImportedFunctionCalls) {
1039 unsigned SectionSize =
1040 sizeof(uint32_t) * (2 + 3 * CallsToImportedFuncs.size());
1041 OutStreamer->emitInt32(SectionSize);
1042 OutStreamer->emitCOFFSecNumber(Section->getBeginSymbol());
1043 for (auto &[CallsiteSymbol, CalledSymbol] : CallsToImportedFuncs) {
1044 // Kind is always IMAGE_REL_ARM64_DYNAMIC_IMPORT_CALL (0x13).
1045 OutStreamer->emitInt32(0x13);
1046 OutStreamer->emitCOFFSecOffset(CallsiteSymbol);
1047 OutStreamer->emitCOFFSymbolIndex(CalledSymbol);
1048 }
1049 }
1050 }
1051}
1052
1053void AArch64AsmPrinter::emitLOHs() {
1054 SmallVector<MCSymbol *, 3> MCArgs;
1055
1056 for (const auto &D : AArch64FI->getLOHContainer()) {
1057 for (const MachineInstr *MI : D.getArgs()) {
1058 MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
1059 assert(LabelIt != LOHInstToLabel.end() &&
1060 "Label hasn't been inserted for LOH related instruction");
1061 MCArgs.push_back(LabelIt->second);
1062 }
1063 OutStreamer->emitLOHDirective(D.getKind(), MCArgs);
1064 MCArgs.clear();
1065 }
1066}
1067
1068void AArch64AsmPrinter::emitFunctionBodyEnd() {
1069 if (!AArch64FI->getLOHRelated().empty())
1070 emitLOHs();
1071}
1072
1073/// GetCPISymbol - Return the symbol for the specified constant pool entry.
1074MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
1075 // Darwin uses a linker-private symbol name for constant-pools (to
1076 // avoid addends on the relocation?), ELF has no such concept and
1077 // uses a normal private symbol.
1078 if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
1079 return OutContext.getOrCreateSymbol(
1080 Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
1081 Twine(getFunctionNumber()) + "_" + Twine(CPID));
1082
1083 return AsmPrinter::GetCPISymbol(CPID);
1084}
1085
1086void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
1087 raw_ostream &O) {
1088 const MachineOperand &MO = MI->getOperand(OpNum);
1089 switch (MO.getType()) {
1090 default:
1091 llvm_unreachable("<unknown operand type>");
1092 case MachineOperand::MO_Register: {
1093 Register Reg = MO.getReg();
1094 assert(Reg.isPhysical());
1095 assert(!MO.getSubReg() && "Subregs should be eliminated!");
1096 O << AArch64InstPrinter::getRegisterName(Reg);
1097 break;
1098 }
1099 case MachineOperand::MO_Immediate: {
1100 O << MO.getImm();
1101 break;
1102 }
1103 case MachineOperand::MO_GlobalAddress: {
1104 PrintSymbolOperand(MO, O);
1105 break;
1106 }
1107 case MachineOperand::MO_BlockAddress: {
1108 MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
1109 Sym->print(O, MAI);
1110 break;
1111 }
1112 }
1113}
1114
1115bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
1116 raw_ostream &O) {
1117 Register Reg = MO.getReg();
1118 switch (Mode) {
1119 default:
1120 return true; // Unknown mode.
1121 case 'w':
1122 Reg = getWRegFromXReg(Reg);
1123 break;
1124 case 'x':
1125 Reg = getXRegFromWReg(Reg);
1126 break;
1127 case 't':
1128 Reg = getXRegFromXRegTuple(Reg);
1129 break;
1130 }
1131
1132 O << AArch64InstPrinter::getRegisterName(Reg);
1133 return false;
1134}
1135
1136// Prints the register in MO using class RC using the offset in the
1137// new register class. This should not be used for cross class
1138// printing.
1139bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
1140 const TargetRegisterClass *RC,
1141 unsigned AltName, raw_ostream &O) {
1142 assert(MO.isReg() && "Should only get here with a register!");
1143 const TargetRegisterInfo *RI = STI->getRegisterInfo();
1144 Register Reg = MO.getReg();
1145 unsigned RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
1146 if (!RI->regsOverlap(RegToPrint, Reg))
1147 return true;
1148 O << AArch64InstPrinter::getRegisterName(RegToPrint, AltName);
1149 return false;
1150}
1151
1152bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
1153 const char *ExtraCode, raw_ostream &O) {
1154 const MachineOperand &MO = MI->getOperand(OpNum);
1155
1156 // First try the generic code, which knows about modifiers like 'c' and 'n'.
1157 if (!AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O))
1158 return false;
1159
1160 // Does this asm operand have a single letter operand modifier?
1161 if (ExtraCode && ExtraCode[0]) {
1162 if (ExtraCode[1] != 0)
1163 return true; // Unknown modifier.
1164
1165 switch (ExtraCode[0]) {
1166 default:
1167 return true; // Unknown modifier.
1168 case 'w': // Print W register
1169 case 'x': // Print X register
1170 if (MO.isReg())
1171 return printAsmMRegister(MO, ExtraCode[0], O);
1172 if (MO.isImm() && MO.getImm() == 0) {
1173 unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
1174 O << AArch64InstPrinter::getRegisterName(Reg);
1175 return false;
1176 }
1177 printOperand(MI, OpNum, O);
1178 return false;
1179 case 'b': // Print B register.
1180 case 'h': // Print H register.
1181 case 's': // Print S register.
1182 case 'd': // Print D register.
1183 case 'q': // Print Q register.
1184 case 'z': // Print Z register.
1185 if (MO.isReg()) {
1186 const TargetRegisterClass *RC;
1187 switch (ExtraCode[0]) {
1188 case 'b':
1189 RC = &AArch64::FPR8RegClass;
1190 break;
1191 case 'h':
1192 RC = &AArch64::FPR16RegClass;
1193 break;
1194 case 's':
1195 RC = &AArch64::FPR32RegClass;
1196 break;
1197 case 'd':
1198 RC = &AArch64::FPR64RegClass;
1199 break;
1200 case 'q':
1201 RC = &AArch64::FPR128RegClass;
1202 break;
1203 case 'z':
1204 RC = &AArch64::ZPRRegClass;
1205 break;
1206 default:
1207 return true;
1208 }
1209 return printAsmRegInClass(MO, RC, AArch64::NoRegAltName, O);
1210 }
1211 printOperand(MI, OpNum, O);
1212 return false;
1213 }
1214 }
1215
1216 // According to ARM, we should emit x and v registers unless we have a
1217 // modifier.
1218 if (MO.isReg()) {
1219 Register Reg = MO.getReg();
1220
1221 // If this is a w or x register, print an x register.
1222 if (AArch64::GPR32allRegClass.contains(Reg) ||
1223 AArch64::GPR64allRegClass.contains(Reg))
1224 return printAsmMRegister(MO, 'x', O);
1225
1226 // If this is an x register tuple, print an x register.
1227 if (AArch64::GPR64x8ClassRegClass.contains(Reg))
1228 return printAsmMRegister(MO, 't', O);
1229
1230 unsigned AltName = AArch64::NoRegAltName;
1231 const TargetRegisterClass *RegClass;
1232 if (AArch64::ZPRRegClass.contains(Reg)) {
1233 RegClass = &AArch64::ZPRRegClass;
1234 } else if (AArch64::PPRRegClass.contains(Reg)) {
1235 RegClass = &AArch64::PPRRegClass;
1236 } else if (AArch64::PNRRegClass.contains(Reg)) {
1237 RegClass = &AArch64::PNRRegClass;
1238 } else {
1239 RegClass = &AArch64::FPR128RegClass;
1240 AltName = AArch64::vreg;
1241 }
1242
1243 // If this is a b, h, s, d, or q register, print it as a v register.
1244 return printAsmRegInClass(MO, RegClass, AltName, O);
1245 }
1246
1247 printOperand(MI, OpNum, O);
1248 return false;
1249}
1250
1251bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
1252 unsigned OpNum,
1253 const char *ExtraCode,
1254 raw_ostream &O) {
1255 if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
1256 return true; // Unknown modifier.
1257
1258 const MachineOperand &MO = MI->getOperand(OpNum);
1259 assert(MO.isReg() && "unexpected inline asm memory operand");
1260 O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
1261 return false;
1262}
1263
1264void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
1265 raw_ostream &OS) {
1266 unsigned NOps = MI->getNumOperands();
1267 assert(NOps == 4);
1268 OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
1269 // cast away const; DIetc do not take const operands for some reason.
1270 OS << MI->getDebugVariable()->getName();
1271 OS << " <- ";
1272 // Frame address. Currently handles register +- offset only.
1273 assert(MI->isIndirectDebugValue());
1274 OS << '[';
1275 for (unsigned I = 0, E = std::distance(MI->debug_operands().begin(),
1276 MI->debug_operands().end());
1277 I < E; ++I) {
1278 if (I != 0)
1279 OS << ", ";
1280 printOperand(MI, I, OS);
1281 }
1282 OS << ']';
1283 OS << "+";
1284 printOperand(MI, NOps - 2, OS);
1285}
1286
1287void AArch64AsmPrinter::emitJumpTableInfo() {
1288 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
1289 if (!MJTI) return;
1290
1291 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
1292 if (JT.empty()) return;
1293
1294 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
1295 MCSection *ReadOnlySec = TLOF.getSectionForJumpTable(MF->getFunction(), TM);
1296 OutStreamer->switchSection(ReadOnlySec);
1297
1298 auto AFI = MF->getInfo<AArch64FunctionInfo>();
1299 for (unsigned JTI = 0, e = JT.size(); JTI != e; ++JTI) {
1300 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
1301
1302 // If this jump table was deleted, ignore it.
1303 if (JTBBs.empty()) continue;
1304
1305 unsigned Size = AFI->getJumpTableEntrySize(JTI);
1306 emitAlignment(Align(Size));
1307 OutStreamer->emitLabel(GetJTISymbol(JTI));
1308
1309 const MCSymbol *BaseSym = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1310 const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);
1311
1312 for (auto *JTBB : JTBBs) {
1313 const MCExpr *Value =
1314 MCSymbolRefExpr::create(JTBB->getSymbol(), OutContext);
1315
1316 // Each entry is:
1317 // .byte/.hword (LBB - Lbase)>>2
1318 // or plain:
1319 // .word LBB - Lbase
1320 Value = MCBinaryExpr::createSub(Value, Base, OutContext);
1321 if (Size != 4)
1322 Value = MCBinaryExpr::createLShr(
1323 Value, MCConstantExpr::create(2, OutContext), OutContext);
1324
1325 OutStreamer->emitValue(Value, Size);
1326 }
1327 }
1328}
1329
1330std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
1331 codeview::JumpTableEntrySize>
1332AArch64AsmPrinter::getCodeViewJumpTableInfo(int JTI,
1333 const MachineInstr *BranchInstr,
1334 const MCSymbol *BranchLabel) const {
1335 const auto AFI = MF->getInfo<AArch64FunctionInfo>();
1336 const auto Base = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1337 codeview::JumpTableEntrySize EntrySize;
1338 switch (AFI->getJumpTableEntrySize(JTI)) {
1339 case 1:
1340 EntrySize = codeview::JumpTableEntrySize::UInt8ShiftLeft;
1341 break;
1342 case 2:
1343 EntrySize = codeview::JumpTableEntrySize::UInt16ShiftLeft;
1344 break;
1345 case 4:
1346 EntrySize = codeview::JumpTableEntrySize::Int32;
1347 break;
1348 default:
1349 llvm_unreachable("Unexpected jump table entry size");
1350 }
1351 return std::make_tuple(Base, 0, BranchLabel, EntrySize);
1352}
1353
1354void AArch64AsmPrinter::emitFunctionEntryLabel() {
1355 if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
1356 MF->getFunction().getCallingConv() ==
1357 CallingConv::AArch64_SVE_VectorCall ||
1358 MF->getInfo<AArch64FunctionInfo>()->isSVECC()) {
1359 auto *TS =
1360 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
1361 TS->emitDirectiveVariantPCS(CurrentFnSym);
1362 }
1363
1364 AsmPrinter::emitFunctionEntryLabel();
1365
1366 if (TM.getTargetTriple().isWindowsArm64EC() &&
1367 !MF->getFunction().hasLocalLinkage()) {
1368 // For ARM64EC targets, a function definition's name is mangled differently
1369 // from the normal symbol, emit required aliases here.
1370 auto emitFunctionAlias = [&](MCSymbol *Src, MCSymbol *Dst) {
1371 OutStreamer->emitSymbolAttribute(Src, MCSA_WeakAntiDep);
1372 OutStreamer->emitAssignment(
1373 Src, MCSymbolRefExpr::create(Dst, MCSymbolRefExpr::VK_WEAKREF,
1374 MMI->getContext()));
1375 };
1376
1377 auto getSymbolFromMetadata = [&](StringRef Name) {
1378 MCSymbol *Sym = nullptr;
1379 if (MDNode *Node = MF->getFunction().getMetadata(Name)) {
1380 StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
1381 Sym = MMI->getContext().getOrCreateSymbol(NameStr);
1382 }
1383 return Sym;
1384 };
1385
1386 if (MCSymbol *UnmangledSym =
1387 getSymbolFromMetadata("arm64ec_unmangled_name")) {
1388 MCSymbol *ECMangledSym = getSymbolFromMetadata("arm64ec_ecmangled_name");
1389
1390 if (ECMangledSym) {
1391 // An external function, emit the alias from the unmangled symbol to
1392 // mangled symbol name and the alias from the mangled symbol to guest
1393 // exit thunk.
1394 emitFunctionAlias(UnmangledSym, ECMangledSym);
1395 emitFunctionAlias(ECMangledSym, CurrentFnSym);
1396 } else {
1397 // A function implementation, emit the alias from the unmangled symbol
1398 // to mangled symbol name.
1399 emitFunctionAlias(UnmangledSym, CurrentFnSym);
1400 }
1401 }
1402 }
1403}
1404
1405void AArch64AsmPrinter::emitXXStructor(const DataLayout &DL,
1406 const Constant *CV) {
1407 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(CV))
1408 if (CPA->hasAddressDiscriminator() &&
1409 !CPA->hasSpecialAddressDiscriminator(
1412 "unexpected address discrimination value for ctors/dtors entry, only "
1413 "'ptr inttoptr (i64 1 to ptr)' is allowed");
1414 // If we have signed pointers in xxstructors list, they'll be lowered to @AUTH
1415 // MCExpr's via AArch64AsmPrinter::lowerConstantPtrAuth. It does not look at
1416 // actual address discrimination value and only checks
1417 // hasAddressDiscriminator(), so it's OK to leave special address
1418 // discrimination value here.
1419 AsmPrinter::emitXXStructor(DL, CV);
1420}
1421
1422void AArch64AsmPrinter::emitGlobalAlias(const Module &M,
1423 const GlobalAlias &GA) {
1424 if (auto F = dyn_cast_or_null<Function>(GA.getAliasee())) {
1425 // Global aliases must point to a definition, but unmangled patchable
1426 // symbols are special and need to point to an undefined symbol with "EXP+"
1427 // prefix. Such undefined symbol is resolved by the linker by creating
1428 // x86 thunk that jumps back to the actual EC target.
1429 if (MDNode *Node = F->getMetadata("arm64ec_exp_name")) {
1430 StringRef ExpStr = cast<MDString>(Node->getOperand(0))->getString();
1431 MCSymbol *ExpSym = MMI->getContext().getOrCreateSymbol(ExpStr);
1432 MCSymbol *Sym = MMI->getContext().getOrCreateSymbol(GA.getName());
1433
1434 OutStreamer->beginCOFFSymbolDef(ExpSym);
1435 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1436 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1437 << COFF::SCT_COMPLEX_TYPE_SHIFT);
1438 OutStreamer->endCOFFSymbolDef();
1439
1440 OutStreamer->beginCOFFSymbolDef(Sym);
1441 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1442 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1443 << COFF::SCT_COMPLEX_TYPE_SHIFT);
1444 OutStreamer->endCOFFSymbolDef();
1445 OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
1446 OutStreamer->emitAssignment(
1447 Sym, MCSymbolRefExpr::create(ExpSym, MCSymbolRefExpr::VK_WEAKREF,
1448 MMI->getContext()));
1449 return;
1450 }
1451 }
1452 AsmPrinter::emitGlobalAlias(M, GA);
1453}
1454
1455/// Small jump tables contain an unsigned byte or half, representing the offset
1456/// from the lowest-addressed possible destination to the desired basic
1457/// block. Since all instructions are 4-byte aligned, this is further compressed
1458/// by counting in instructions rather than bytes (i.e. divided by 4). So, to
1459/// materialize the correct destination we need:
1460///
1461/// adr xDest, .LBB0_0
1462/// ldrb wScratch, [xTable, xEntry] (with "lsl #1" for ldrh).
1463/// add xDest, xDest, xScratch (with "lsl #2" for smaller entries)
1464void AArch64AsmPrinter::LowerJumpTableDest(llvm::MCStreamer &OutStreamer,
1465 const llvm::MachineInstr &MI) {
1466 Register DestReg = MI.getOperand(0).getReg();
1467 Register ScratchReg = MI.getOperand(1).getReg();
1468 Register ScratchRegW =
1469 STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
1470 Register TableReg = MI.getOperand(2).getReg();
1471 Register EntryReg = MI.getOperand(3).getReg();
1472 int JTIdx = MI.getOperand(4).getIndex();
1473 int Size = AArch64FI->getJumpTableEntrySize(JTIdx);
1474
1475 // This has to be first because the compression pass based its reachability
1476 // calculations on the start of the JumpTableDest instruction.
1477 auto Label =
1478 MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);
1479
1480 // If we don't already have a symbol to use as the base, use the ADR
1481 // instruction itself.
1482 if (!Label) {
1483 Label = MF->getContext().createTempSymbol();
1484 AArch64FI->setJumpTableEntryInfo(JTIdx, Size, Label);
1485 OutStreamer.emitLabel(Label);
1486 }
1487
1488 auto LabelExpr = MCSymbolRefExpr::create(Label, MF->getContext());
1489 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
1490 .addReg(DestReg)
1491 .addExpr(LabelExpr));
1492
1493 // Load the number of instruction-steps to offset from the label.
1494 unsigned LdrOpcode;
1495 switch (Size) {
1496 case 1: LdrOpcode = AArch64::LDRBBroX; break;
1497 case 2: LdrOpcode = AArch64::LDRHHroX; break;
1498 case 4: LdrOpcode = AArch64::LDRSWroX; break;
1499 default:
1500 llvm_unreachable("Unknown jump table size");
1501 }
1502
1503 EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
1504 .addReg(Size == 4 ? ScratchReg : ScratchRegW)
1505 .addReg(TableReg)
1506 .addReg(EntryReg)
1507 .addImm(0)
1508 .addImm(Size == 1 ? 0 : 1));
1509
1510 // Add to the already materialized base label address, multiplying by 4 if
1511 // compressed.
1512 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1513 .addReg(DestReg)
1514 .addReg(DestReg)
1515 .addReg(ScratchReg)
1516 .addImm(Size == 4 ? 0 : 2));
1517}
1518
1519void AArch64AsmPrinter::LowerHardenedBRJumpTable(const MachineInstr &MI) {
1520 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
1521 assert(MJTI && "Can't lower jump-table dispatch without JTI");
1522
1523 const std::vector<MachineJumpTableEntry> &JTs = MJTI->getJumpTables();
1524 assert(!JTs.empty() && "Invalid JT index for jump-table dispatch");
1525
1526 // Emit:
1527 // mov x17, #<size of table> ; depending on table size, with MOVKs
1528 // cmp x16, x17 ; or #imm if table size fits in 12-bit
1529 // csel x16, x16, xzr, ls ; check for index overflow
1530 //
1531 // adrp x17, Ltable@PAGE ; materialize table address
1532 // add x17, Ltable@PAGEOFF
1533 // ldrsw x16, [x17, x16, lsl #2] ; load table entry
1534 //
1535 // Lanchor:
1536 // adr x17, Lanchor ; compute target address
1537 // add x16, x17, x16
1538 // br x16 ; branch to target
1539
1540 MachineOperand JTOp = MI.getOperand(0);
1541
1542 unsigned JTI = JTOp.getIndex();
1543 assert(!AArch64FI->getJumpTableEntryPCRelSymbol(JTI) &&
1544 "unsupported compressed jump table");
1545
1546 const uint64_t NumTableEntries = JTs[JTI].MBBs.size();
1547
1548 // cmp only supports a 12-bit immediate. If we need more, materialize the
1549 // immediate, using x17 as a scratch register.
1550 uint64_t MaxTableEntry = NumTableEntries - 1;
1551 if (isUInt<12>(MaxTableEntry)) {
1552 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXri)
1553 .addReg(AArch64::XZR)
1554 .addReg(AArch64::X16)
1555 .addImm(MaxTableEntry)
1556 .addImm(0));
1557 } else {
1558 emitMOVZ(AArch64::X17, static_cast<uint16_t>(MaxTableEntry), 0);
1559 // It's sad that we have to manually materialize instructions, but we can't
1560 // trivially reuse the main pseudo expansion logic.
1561 // A MOVK sequence is easy enough to generate and handles the general case.
1562 for (int Offset = 16; Offset < 64; Offset += 16) {
1563 if ((MaxTableEntry >> Offset) == 0)
1564 break;
1565 emitMOVK(AArch64::X17, static_cast<uint16_t>(MaxTableEntry >> Offset),
1566 Offset);
1567 }
1568 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXrs)
1569 .addReg(AArch64::XZR)
1570 .addReg(AArch64::X16)
1571 .addReg(AArch64::X17)
1572 .addImm(0));
1573 }
1574
1575 // This picks entry #0 on failure.
1576 // We might want to trap instead.
1577 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::CSELXr)
1578 .addReg(AArch64::X16)
1579 .addReg(AArch64::X16)
1580 .addReg(AArch64::XZR)
1581 .addImm(AArch64CC::LS));
1582
1583 // Prepare the @PAGE/@PAGEOFF low/high operands.
1584 MachineOperand JTMOHi(JTOp), JTMOLo(JTOp);
1585 MCOperand JTMCHi, JTMCLo;
1586
1587 JTMOHi.setTargetFlags(AArch64II::MO_PAGE);
1588 JTMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
1589
1590 MCInstLowering.lowerOperand(JTMOHi, JTMCHi);
1591 MCInstLowering.lowerOperand(JTMOLo, JTMCLo);
1592
1593 EmitToStreamer(
1594 *OutStreamer,
1595 MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(JTMCHi));
1596
1597 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXri)
1598 .addReg(AArch64::X17)
1599 .addReg(AArch64::X17)
1600 .addOperand(JTMCLo)
1601 .addImm(0));
1602
1603 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRSWroX)
1604 .addReg(AArch64::X16)
1605 .addReg(AArch64::X17)
1606 .addReg(AArch64::X16)
1607 .addImm(0)
1608 .addImm(1));
1609
1610 MCSymbol *AdrLabel = MF->getContext().createTempSymbol();
1611 const auto *AdrLabelE = MCSymbolRefExpr::create(AdrLabel, MF->getContext());
1612 AArch64FI->setJumpTableEntryInfo(JTI, 4, AdrLabel);
1613
1614 OutStreamer->emitLabel(AdrLabel);
1615 EmitToStreamer(
1616 *OutStreamer,
1617 MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addExpr(AdrLabelE));
1618
1619 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1620 .addReg(AArch64::X16)
1621 .addReg(AArch64::X17)
1622 .addReg(AArch64::X16)
1623 .addImm(0));
1624
1625 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
1626}
1627
1628void AArch64AsmPrinter::LowerMOPS(llvm::MCStreamer &OutStreamer,
1629 const llvm::MachineInstr &MI) {
1630 unsigned Opcode = MI.getOpcode();
1631 assert(STI->hasMOPS());
1632 assert(STI->hasMTE() || Opcode != AArch64::MOPSMemorySetTaggingPseudo);
1633
1634 const auto Ops = [Opcode]() -> std::array<unsigned, 3> {
1635 if (Opcode == AArch64::MOPSMemoryCopyPseudo)
1636 return {AArch64::CPYFP, AArch64::CPYFM, AArch64::CPYFE};
1637 if (Opcode == AArch64::MOPSMemoryMovePseudo)
1638 return {AArch64::CPYP, AArch64::CPYM, AArch64::CPYE};
1639 if (Opcode == AArch64::MOPSMemorySetPseudo)
1640 return {AArch64::SETP, AArch64::SETM, AArch64::SETE};
1641 if (Opcode == AArch64::MOPSMemorySetTaggingPseudo)
1642 return {AArch64::SETGP, AArch64::SETGM, AArch64::MOPSSETGE};
1643 llvm_unreachable("Unhandled memory operation pseudo");
1644 }();
1645 const bool IsSet = Opcode == AArch64::MOPSMemorySetPseudo ||
1646 Opcode == AArch64::MOPSMemorySetTaggingPseudo;
1647
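 // Each MOPS pseudo expands to its architectural prologue/main/epilogue
 // triple (e.g. CPYFP/CPYFM/CPYFE), emitted back-to-back with the same
 // operand list.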
1648 for (auto Op : Ops) {
1649 int i = 0;
1650 auto MCIB = MCInstBuilder(Op);
1651 // Destination registers
1652 MCIB.addReg(MI.getOperand(i++).getReg());
1653 MCIB.addReg(MI.getOperand(i++).getReg());
1654 if (!IsSet)
1655 MCIB.addReg(MI.getOperand(i++).getReg());
1656 // Input registers
1657 MCIB.addReg(MI.getOperand(i++).getReg());
1658 MCIB.addReg(MI.getOperand(i++).getReg());
1659 MCIB.addReg(MI.getOperand(i++).getReg());
1660
1661 EmitToStreamer(OutStreamer, MCIB);
1662 }
1663}
1664
1665void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
1666 const MachineInstr &MI) {
1667 unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();
1668
1669 auto &Ctx = OutStreamer.getContext();
1670 MCSymbol *MILabel = Ctx.createTempSymbol();
1671 OutStreamer.emitLabel(MILabel);
1672
1673 SM.recordStackMap(*MILabel, MI);
1674 assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1675
1676 // Scan ahead to trim the shadow.
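// Every following instruction (up to the next call, stackmap or patchpoint)
// accounts for 4 bytes of the requested shadow, so only the remainder needs
// NOP padding. For example, a 16-byte shadow followed by two ordinary
// instructions requires just two NOPs.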
1677 const MachineBasicBlock &MBB = *MI.getParent();
1678 MachineBasicBlock::const_iterator MII(MI);
1679 ++MII;
1680 while (NumNOPBytes > 0) {
1681 if (MII == MBB.end() || MII->isCall() ||
1682 MII->getOpcode() == AArch64::DBG_VALUE ||
1683 MII->getOpcode() == TargetOpcode::PATCHPOINT ||
1684 MII->getOpcode() == TargetOpcode::STACKMAP)
1685 break;
1686 ++MII;
1687 NumNOPBytes -= 4;
1688 }
1689
1690 // Emit nops.
1691 for (unsigned i = 0; i < NumNOPBytes; i += 4)
1692 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1693}
1694
1695// Lower a patchpoint of the form:
1696// [<def>], <id>, <numBytes>, <target>, <numArgs>
1697void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1698 const MachineInstr &MI) {
1699 auto &Ctx = OutStreamer.getContext();
1700 MCSymbol *MILabel = Ctx.createTempSymbol();
1701 OutStreamer.emitLabel(MILabel);
1702 SM.recordPatchPoint(*MILabel, MI);
1703
1704 PatchPointOpers Opers(&MI);
1705
1706 int64_t CallTarget = Opers.getCallTarget().getImm();
1707 unsigned EncodedBytes = 0;
1708 if (CallTarget) {
1709 assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
1710 "High 16 bits of call target should be zero.");
1711 Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
1712 EncodedBytes = 16;
1713 // Materialize the jump address:
1714 emitMOVZ(ScratchReg, (CallTarget >> 32) & 0xFFFF, 32);
1715 emitMOVK(ScratchReg, (CallTarget >> 16) & 0xFFFF, 16);
1716 emitMOVK(ScratchReg, CallTarget & 0xFFFF, 0);
1717 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
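// For example, CallTarget 0x0000123456789abc expands to (illustrative):
//   movz xN, #0x1234, lsl #32
//   movk xN, #0x5678, lsl #16
//   movk xN, #0x9abc
//   blr  xN
// which is exactly the 16 encoded bytes accounted for above.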
1718 }
1719 // Emit padding.
1720 unsigned NumBytes = Opers.getNumPatchBytes();
1721 assert(NumBytes >= EncodedBytes &&
1722 "Patchpoint can't request size less than the length of a call.");
1723 assert((NumBytes - EncodedBytes) % 4 == 0 &&
1724 "Invalid number of NOP bytes requested!");
1725 for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
1726 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1727}
1728
1729void AArch64AsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1730 const MachineInstr &MI) {
1731 StatepointOpers SOpers(&MI);
1732 if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
1733 assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1734 for (unsigned i = 0; i < PatchBytes; i += 4)
1735 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1736 } else {
1737 // Lower call target and choose correct opcode
1738 const MachineOperand &CallTarget = SOpers.getCallTarget();
1739 MCOperand CallTargetMCOp;
1740 unsigned CallOpcode;
1741 switch (CallTarget.getType()) {
1742 case MachineOperand::MO_GlobalAddress:
1743 case MachineOperand::MO_ExternalSymbol:
1744 MCInstLowering.lowerOperand(CallTarget, CallTargetMCOp);
1745 CallOpcode = AArch64::BL;
1746 break;
1747 case MachineOperand::MO_Immediate:
1748 CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
1749 CallOpcode = AArch64::BL;
1750 break;
1751 case MachineOperand::MO_Register:
1752 CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
1753 CallOpcode = AArch64::BLR;
1754 break;
1755 default:
1756 llvm_unreachable("Unsupported operand type in statepoint call target");
1757 break;
1758 }
1759
1760 EmitToStreamer(OutStreamer,
1761 MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
1762 }
1763
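// The label is emitted after the call so that the address recorded for the
// statepoint is the call's return address.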
1764 auto &Ctx = OutStreamer.getContext();
1765 MCSymbol *MILabel = Ctx.createTempSymbol();
1766 OutStreamer.emitLabel(MILabel);
1767 SM.recordStatepoint(*MILabel, MI);
1768}
1769
1770void AArch64AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI) {
1771 // FAULTING_LOAD_OP <def>, <faulting type>, <MBB handler>,
1772 // <opcode>, <operands>
1773
1774 Register DefRegister = FaultingMI.getOperand(0).getReg();
1775 FaultMaps::FaultKind FK =
1776 static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
1777 MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
1778 unsigned Opcode = FaultingMI.getOperand(3).getImm();
1779 unsigned OperandsBeginIdx = 4;
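// Operands 0-3 are metadata consumed here; everything from OperandsBeginIdx
// onwards is forwarded verbatim to the real (possibly faulting) instruction.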
1780
1781 auto &Ctx = OutStreamer->getContext();
1782 MCSymbol *FaultingLabel = Ctx.createTempSymbol();
1783 OutStreamer->emitLabel(FaultingLabel);
1784
1785 assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
1786 FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);
1787
1788 MCInst MI;
1789 MI.setOpcode(Opcode);
1790
1791 if (DefRegister != (Register)0)
1792 MI.addOperand(MCOperand::createReg(DefRegister));
1793
1794 for (const MachineOperand &MO :
1795 llvm::drop_begin(FaultingMI.operands(), OperandsBeginIdx)) {
1796 MCOperand Dest;
1797 lowerOperand(MO, Dest);
1798 MI.addOperand(Dest);
1799 }
1800
1801 OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
1802 EmitToStreamer(MI);
1803}
1804
1805void AArch64AsmPrinter::emitMovXReg(Register Dest, Register Src) {
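// Emits "mov Xdest, Xsrc", encoded as ORR Xdest, XZR, Xsrc.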
1806 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ORRXrs)
1807 .addReg(Dest)
1808 .addReg(AArch64::XZR)
1809 .addReg(Src)
1810 .addImm(0));
1811}
1812
1813void AArch64AsmPrinter::emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift) {
1814 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1815 EmitToStreamer(*OutStreamer,
1816 MCInstBuilder(Is64Bit ? AArch64::MOVZXi : AArch64::MOVZWi)
1817 .addReg(Dest)
1818 .addImm(Imm)
1819 .addImm(Shift));
1820}
1821
1822void AArch64AsmPrinter::emitMOVK(Register Dest, uint64_t Imm, unsigned Shift) {
1823 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1824 EmitToStreamer(*OutStreamer,
1825 MCInstBuilder(Is64Bit ? AArch64::MOVKXi : AArch64::MOVKWi)
1826 .addReg(Dest)
1827 .addReg(Dest)
1828 .addImm(Imm)
1829 .addImm(Shift));
1830}
1831
1832void AArch64AsmPrinter::emitFMov0(const MachineInstr &MI) {
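// Expands FMOVH0/FMOVS0/FMOVD0: either "movi dN, #0" when zero-cycle FP
// zeroing is usable, or an integer-to-FP fmov from wzr/xzr otherwise.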
1833 Register DestReg = MI.getOperand(0).getReg();
1834 if (STI->hasZeroCycleZeroingFP() && !STI->hasZeroCycleZeroingFPWorkaround() &&
1835 STI->isNeonAvailable()) {
1836 // Convert H/S register to corresponding D register
1837 if (AArch64::H0 <= DestReg && DestReg <= AArch64::H31)
1838 DestReg = AArch64::D0 + (DestReg - AArch64::H0);
1839 else if (AArch64::S0 <= DestReg && DestReg <= AArch64::S31)
1840 DestReg = AArch64::D0 + (DestReg - AArch64::S0);
1841 else
1842 assert(AArch64::D0 <= DestReg && DestReg <= AArch64::D31);
1843
1844 MCInst MOVI;
1845 MOVI.setOpcode(AArch64::MOVID);
1846 MOVI.addOperand(MCOperand::createReg(DestReg));
1847 MOVI.addOperand(MCOperand::createImm(0));
1848 EmitToStreamer(*OutStreamer, MOVI);
1849 } else {
1850 MCInst FMov;
1851 switch (MI.getOpcode()) {
1852 default: llvm_unreachable("Unexpected opcode");
1853 case AArch64::FMOVH0:
1854 FMov.setOpcode(STI->hasFullFP16() ? AArch64::FMOVWHr : AArch64::FMOVWSr);
1855 if (!STI->hasFullFP16())
1856 DestReg = (AArch64::S0 + (DestReg - AArch64::H0));
1857 FMov.addOperand(MCOperand::createReg(DestReg));
1858 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1859 break;
1860 case AArch64::FMOVS0:
1861 FMov.setOpcode(AArch64::FMOVWSr);
1862 FMov.addOperand(MCOperand::createReg(DestReg));
1863 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1864 break;
1865 case AArch64::FMOVD0:
1866 FMov.setOpcode(AArch64::FMOVXDr);
1867 FMov.addOperand(MCOperand::createReg(DestReg));
1868 FMov.addOperand(MCOperand::createReg(AArch64::XZR));
1869 break;
1870 }
1871 EmitToStreamer(*OutStreamer, FMov);
1872 }
1873}
1874
1875Register AArch64AsmPrinter::emitPtrauthDiscriminator(uint16_t Disc,
1876 Register AddrDisc,
1877 Register ScratchReg,
1878 bool MayUseAddrAsScratch) {
1879 assert(ScratchReg == AArch64::X16 || ScratchReg == AArch64::X17);
1880 // So far we've used NoRegister in pseudos. Now we need real encodings.
1881 if (AddrDisc == AArch64::NoRegister)
1882 AddrDisc = AArch64::XZR;
1883
1884 // If there is no constant discriminator, there's no blend involved:
1885 // just use the address discriminator register as-is (XZR or not).
1886 if (!Disc)
1887 return AddrDisc;
1888
1889 // If there's only a constant discriminator, MOV it into the scratch register.
1890 if (AddrDisc == AArch64::XZR) {
1891 emitMOVZ(ScratchReg, Disc, 0);
1892 return ScratchReg;
1893 }
1894
1895 // If there are both, emit a blend into the scratch register.
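// For example, with the constant discriminator 0x1234 and the address
// discriminator in x8, the blend below is (illustrative):
//   mov  x17, x8
//   movk x17, #0x1234, lsl #48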
1896
1897 // Check if we can save one MOV instruction.
1898 assert(MayUseAddrAsScratch || ScratchReg != AddrDisc);
1899 bool AddrDiscIsSafe = AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17;
1900 if (MayUseAddrAsScratch && AddrDiscIsSafe)
1901 ScratchReg = AddrDisc;
1902 else
1903 emitMovXReg(ScratchReg, AddrDisc);
1904
1905 emitMOVK(ScratchReg, Disc, 48);
1906 return ScratchReg;
1907}
1908
1909/// Emits a code sequence to check an authenticated pointer value.
1910///
1911 /// If the OnFailure argument is passed, jump there on check failure instead
1912 /// of proceeding to the next instruction (only when ShouldTrap is false).
1913void AArch64AsmPrinter::emitPtrauthCheckAuthenticatedValue(
1914 Register TestedReg, Register ScratchReg, AArch64PACKey::ID Key,
1915 AArch64PAuth::AuthCheckMethod Method, bool ShouldTrap,
1916 const MCSymbol *OnFailure) {
1917 // Insert a sequence to check if authentication of TestedReg succeeded,
1918 // such as:
1919 //
1920 // - checked and clearing:
1921 // ; x16 is TestedReg, x17 is ScratchReg
1922 // mov x17, x16
1923 // xpaci x17
1924 // cmp x16, x17
1925 // b.eq Lsuccess
1926 // mov x16, x17
1927 // b Lend
1928 // Lsuccess:
1929 // ; skipped if authentication failed
1930 // Lend:
1931 // ...
1932 //
1933 // - checked and trapping:
1934 // mov x17, x16
1935 // xpaci x17
1936 // cmp x16, x17
1937 // b.eq Lsuccess
1938 // brk #<0xc470 + aut key>
1939 // Lsuccess:
1940 // ...
1941 //
1942 // See the documentation on AuthCheckMethod enumeration constants for
1943 // the specific code sequences that can be used to perform the check.
1944 using AArch64PAuth::AuthCheckMethod;
1945
1946 if (Method == AuthCheckMethod::None)
1947 return;
1948 if (Method == AuthCheckMethod::DummyLoad) {
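// A load through the just-authenticated pointer faults if authentication
// failed, since the leftover PAC bits make the address invalid, so no
// explicit compare-and-branch is needed.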
1949 EmitToStreamer(MCInstBuilder(AArch64::LDRWui)
1950 .addReg(getWRegFromXReg(ScratchReg))
1951 .addReg(TestedReg)
1952 .addImm(0));
1953 assert(ShouldTrap && !OnFailure && "DummyLoad always traps on error");
1954 return;
1955 }
1956
1957 MCSymbol *SuccessSym = createTempSymbol("auth_success_");
1958 if (Method == AuthCheckMethod::XPAC || Method == AuthCheckMethod::XPACHint) {
1959 // mov Xscratch, Xtested
1960 emitMovXReg(ScratchReg, TestedReg);
1961
1962 if (Method == AuthCheckMethod::XPAC) {
1963 // xpac(i|d) Xscratch
1964 unsigned XPACOpc = getXPACOpcodeForKey(Key);
1965 EmitToStreamer(
1966 MCInstBuilder(XPACOpc).addReg(ScratchReg).addReg(ScratchReg));
1967 } else {
1968 // xpaclri
1969
1970 // Note that this method applies XPAC to TestedReg instead of ScratchReg.
1971 assert(TestedReg == AArch64::LR &&
1972 "XPACHint mode is only compatible with checking the LR register");
1973 assert((Key == AArch64PACKey::IA || Key == AArch64PACKey::IB) &&
1974 "XPACHint mode is only compatible with I-keys");
1975 EmitToStreamer(MCInstBuilder(AArch64::XPACLRI));
1976 }
1977
1978 // cmp Xtested, Xscratch
1979 EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
1980 .addReg(AArch64::XZR)
1981 .addReg(TestedReg)
1982 .addReg(ScratchReg)
1983 .addImm(0));
1984
1985 // b.eq Lsuccess
1986 EmitToStreamer(
1987 MCInstBuilder(AArch64::Bcc)
1988 .addImm(AArch64CC::EQ)
1989 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
1990 } else if (Method == AuthCheckMethod::HighBitsNoTBI) {
1991 // eor Xscratch, Xtested, Xtested, lsl #1
1992 EmitToStreamer(MCInstBuilder(AArch64::EORXrs)
1993 .addReg(ScratchReg)
1994 .addReg(TestedReg)
1995 .addReg(TestedReg)
1996 .addImm(1));
1997 // tbz Xscratch, #62, Lsuccess
1998 EmitToStreamer(
1999 MCInstBuilder(AArch64::TBZX)
2000 .addReg(ScratchReg)
2001 .addImm(62)
2002 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
2003 } else {
2004 llvm_unreachable("Unsupported check method");
2005 }
2006
2007 if (ShouldTrap) {
2008 assert(!OnFailure && "Cannot specify OnFailure with ShouldTrap");
2009 // Trapping sequences do a 'brk'.
2010 // brk #<0xc470 + aut key>
2011 EmitToStreamer(MCInstBuilder(AArch64::BRK).addImm(0xc470 | Key));
2012 } else {
2013 // Non-trapping checked sequences return the stripped result in TestedReg,
2014 // skipping over success-only code (such as re-signing the pointer) if
2015 // there is one.
2016 // Note that this can introduce an authentication oracle (such as based on
2017 // the high bits of the re-signed value).
2018
2019 // FIXME: The XPAC method can be optimized by applying XPAC to TestedReg
2020 // instead of ScratchReg, thus eliminating one `mov` instruction.
2021 // Both XPAC and XPACHint can be further optimized by not using a
2022 // conditional branch jumping over an unconditional one.
2023
2024 switch (Method) {
2025 case AuthCheckMethod::XPACHint:
2026 // LR is already XPAC-ed at this point.
2027 break;
2028 case AuthCheckMethod::XPAC:
2029 // mov Xtested, Xscratch
2030 emitMovXReg(TestedReg, ScratchReg);
2031 break;
2032 default:
2033 // If Xtested was not XPAC-ed so far, emit XPAC here.
2034 // xpac(i|d) Xtested
2035 unsigned XPACOpc = getXPACOpcodeForKey(Key);
2036 EmitToStreamer(
2037 MCInstBuilder(XPACOpc).addReg(TestedReg).addReg(TestedReg));
2038 }
2039
2040 if (OnFailure) {
2041 // b Lend
2042 EmitToStreamer(
2043 MCInstBuilder(AArch64::B)
2044 .addExpr(MCSymbolRefExpr::create(OnFailure, OutContext)));
2045 }
2046 }
2047
2048 // If the auth check succeeds, we can continue.
2049 // Lsuccess:
2050 OutStreamer->emitLabel(SuccessSym);
2051}
2052
2053// With Pointer Authentication, it may be needed to explicitly check the
2054// authenticated value in LR before performing a tail call.
2055// Otherwise, the callee may re-sign the invalid return address,
2056// introducing a signing oracle.
2057void AArch64AsmPrinter::emitPtrauthTailCallHardening(const MachineInstr *TC) {
2058 if (!AArch64FI->shouldSignReturnAddress(*MF))
2059 return;
2060
2061 auto LRCheckMethod = STI->getAuthenticatedLRCheckMethod(*MF);
2062 if (LRCheckMethod == AArch64PAuth::AuthCheckMethod::None)
2063 return;
2064
2065 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
2066 Register ScratchReg =
2067 TC->readsRegister(AArch64::X16, TRI) ? AArch64::X17 : AArch64::X16;
2068 assert(!TC->readsRegister(ScratchReg, TRI) &&
2069 "Neither x16 nor x17 is available as a scratch register");
2070 AArch64PACKey::ID Key =
2071 AArch64FI->shouldSignWithBKey() ? AArch64PACKey::IB : AArch64PACKey::IA;
2072 emitPtrauthCheckAuthenticatedValue(
2073 AArch64::LR, ScratchReg, Key, LRCheckMethod,
2074 /*ShouldTrap=*/true, /*OnFailure=*/nullptr);
2075}
2076
2077void AArch64AsmPrinter::emitPtrauthAuthResign(const MachineInstr *MI) {
2078 const bool IsAUTPAC = MI->getOpcode() == AArch64::AUTPAC;
2079
2080 // We expand AUT/AUTPAC into a sequence of the form
2081 //
2082 // ; authenticate x16
2083 // ; check pointer in x16
2084 // Lsuccess:
2085 // ; sign x16 (if AUTPAC)
2086 // Lend: ; if not trapping on failure
2087 //
2088 // with the checking sequence chosen depending on whether/how we should check
2089 // the pointer and whether we should trap on failure.
2090
2091 // By default, auth/resign sequences check for auth failures.
2092 bool ShouldCheck = true;
2093 // In the checked sequence, we only trap if explicitly requested.
2094 bool ShouldTrap = MF->getFunction().hasFnAttribute("ptrauth-auth-traps");
2095
2096 // On an FPAC CPU, you get traps whether you want them or not: there's
2097 // no point in emitting checks or traps.
2098 if (STI->hasFPAC())
2099 ShouldCheck = ShouldTrap = false;
2100
2101 // However, command-line flags can override this, for experimentation.
2102 switch (PtrauthAuthChecks) {
2103 case PtrauthCheckMode::Default:
2104 break;
2105 case PtrauthCheckMode::Unchecked:
2106 ShouldCheck = ShouldTrap = false;
2107 break;
2108 case PtrauthCheckMode::Poison:
2109 ShouldCheck = true;
2110 ShouldTrap = false;
2111 break;
2112 case PtrauthCheckMode::Trap:
2113 ShouldCheck = ShouldTrap = true;
2114 break;
2115 }
2116
2117 auto AUTKey = (AArch64PACKey::ID)MI->getOperand(0).getImm();
2118 uint64_t AUTDisc = MI->getOperand(1).getImm();
2119 unsigned AUTAddrDisc = MI->getOperand(2).getReg();
2120
2121 // Compute aut discriminator into x17
2122 assert(isUInt<16>(AUTDisc));
2123 Register AUTDiscReg =
2124 emitPtrauthDiscriminator(AUTDisc, AUTAddrDisc, AArch64::X17);
2125 bool AUTZero = AUTDiscReg == AArch64::XZR;
2126 unsigned AUTOpc = getAUTOpcodeForKey(AUTKey, AUTZero);
2127
2128 // autiza x16 ; if AUTZero
2129 // autia x16, x17 ; if !AUTZero
2130 MCInst AUTInst;
2131 AUTInst.setOpcode(AUTOpc);
2132 AUTInst.addOperand(MCOperand::createReg(AArch64::X16));
2133 AUTInst.addOperand(MCOperand::createReg(AArch64::X16));
2134 if (!AUTZero)
2135 AUTInst.addOperand(MCOperand::createReg(AUTDiscReg));
2136 EmitToStreamer(*OutStreamer, AUTInst);
2137
2138 // Unchecked or checked-but-non-trapping AUT is just an "AUT": we're done.
2139 if (!IsAUTPAC && (!ShouldCheck || !ShouldTrap))
2140 return;
2141
2142 MCSymbol *EndSym = nullptr;
2143
2144 if (ShouldCheck) {
2145 if (IsAUTPAC && !ShouldTrap)
2146 EndSym = createTempSymbol("resign_end_");
2147
2148 emitPtrauthCheckAuthenticatedValue(AArch64::X16, AArch64::X17, AUTKey,
2149 AArch64PAuth::AuthCheckMethod::XPAC,
2150 ShouldTrap, EndSym);
2151 }
2152
2153 // We already emitted unchecked and checked-but-non-trapping AUTs.
2154 // That left us with trapping AUTs, and AUTPACs.
2155 // Trapping AUTs don't need PAC: we're done.
2156 if (!IsAUTPAC)
2157 return;
2158
2159 auto PACKey = (AArch64PACKey::ID)MI->getOperand(3).getImm();
2160 uint64_t PACDisc = MI->getOperand(4).getImm();
2161 unsigned PACAddrDisc = MI->getOperand(5).getReg();
2162
2163 // Compute pac discriminator into x17
2164 assert(isUInt<16>(PACDisc));
2165 Register PACDiscReg =
2166 emitPtrauthDiscriminator(PACDisc, PACAddrDisc, AArch64::X17);
2167 bool PACZero = PACDiscReg == AArch64::XZR;
2168 unsigned PACOpc = getPACOpcodeForKey(PACKey, PACZero);
2169
2170 // pacizb x16 ; if PACZero
2171 // pacib x16, x17 ; if !PACZero
2172 MCInst PACInst;
2173 PACInst.setOpcode(PACOpc);
2174 PACInst.addOperand(MCOperand::createReg(AArch64::X16));
2175 PACInst.addOperand(MCOperand::createReg(AArch64::X16));
2176 if (!PACZero)
2177 PACInst.addOperand(MCOperand::createReg(PACDiscReg));
2178 EmitToStreamer(*OutStreamer, PACInst);
2179
2180 // Lend:
2181 if (EndSym)
2182 OutStreamer->emitLabel(EndSym);
2183}
2184
2185void AArch64AsmPrinter::emitPtrauthBranch(const MachineInstr *MI) {
2186 bool IsCall = MI->getOpcode() == AArch64::BLRA;
2187 unsigned BrTarget = MI->getOperand(0).getReg();
2188
2189 auto Key = (AArch64PACKey::ID)MI->getOperand(1).getImm();
2190 assert((Key == AArch64PACKey::IA || Key == AArch64PACKey::IB) &&
2191 "Invalid auth call key");
2192
2193 uint64_t Disc = MI->getOperand(2).getImm();
2194 assert(isUInt<16>(Disc));
2195
2196 unsigned AddrDisc = MI->getOperand(3).getReg();
2197
2198 // Make sure AddrDisc is solely used to compute the discriminator.
2199 // While hardly meaningful, it is still possible to describe an authentication
2200 // of a pointer against its own value (instead of storage address) with
2201 // intrinsics, so use report_fatal_error instead of assert.
2202 if (BrTarget == AddrDisc)
2203 report_fatal_error("Branch target is signed with its own value");
2204
2205 // If we are printing BLRA pseudo instruction, then x16 and x17 are
2206 // implicit-def'ed by the MI and AddrDisc is not used as any other input, so
2207 // try to save one MOV by setting MayUseAddrAsScratch.
2208 // Unlike BLRA, BRA pseudo is used to perform computed goto, and thus not
2209 // declared as clobbering x16/x17.
2210 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17,
2211 /*MayUseAddrAsScratch=*/IsCall);
2212 bool IsZeroDisc = DiscReg == AArch64::XZR;
2213
2214 unsigned Opc;
2215 if (IsCall) {
2216 if (Key == AArch64PACKey::IA)
2217 Opc = IsZeroDisc ? AArch64::BLRAAZ : AArch64::BLRAA;
2218 else
2219 Opc = IsZeroDisc ? AArch64::BLRABZ : AArch64::BLRAB;
2220 } else {
2221 if (Key == AArch64PACKey::IA)
2222 Opc = IsZeroDisc ? AArch64::BRAAZ : AArch64::BRAA;
2223 else
2224 Opc = IsZeroDisc ? AArch64::BRABZ : AArch64::BRAB;
2225 }
2226
2227 MCInst BRInst;
2228 BRInst.setOpcode(Opc);
2229 BRInst.addOperand(MCOperand::createReg(BrTarget));
2230 if (!IsZeroDisc)
2231 BRInst.addOperand(MCOperand::createReg(DiscReg));
2232 EmitToStreamer(*OutStreamer, BRInst);
2233}
2234
2235const MCExpr *
2236AArch64AsmPrinter::lowerConstantPtrAuth(const ConstantPtrAuth &CPA) {
2237 MCContext &Ctx = OutContext;
2238
2239 // Figure out the base symbol and the addend, if any.
2240 APInt Offset(64, 0);
2241 const Value *BaseGV = CPA.getPointer()->stripAndAccumulateConstantOffsets(
2242 getDataLayout(), Offset, /*AllowNonInbounds=*/true);
2243
2244 auto *BaseGVB = dyn_cast<GlobalValue>(BaseGV);
2245
2246 // If we can't understand the referenced ConstantExpr, there's nothing
2247 // else we can do: emit an error.
2248 if (!BaseGVB) {
2249 BaseGV->getContext().emitError(
2250 "cannot resolve target base/addend of ptrauth constant");
2251 return nullptr;
2252 }
2253
2254 // If there is an addend, turn that into the appropriate MCExpr.
2255 const MCExpr *Sym = MCSymbolRefExpr::create(getSymbol(BaseGVB), Ctx);
2256 if (Offset.sgt(0))
2257 Sym = MCBinaryExpr::createAdd(
2258 Sym, MCConstantExpr::create(Offset.getSExtValue(), Ctx), Ctx);
2259 else if (Offset.slt(0))
2260 Sym = MCBinaryExpr::createSub(
2261 Sym, MCConstantExpr::create((-Offset).getSExtValue(), Ctx), Ctx);
2262
2263 uint64_t KeyID = CPA.getKey()->getZExtValue();
2264 // We later rely on valid KeyID value in AArch64PACKeyIDToString call from
2265 // AArch64AuthMCExpr::printImpl, so fail fast.
2266 if (KeyID > AArch64PACKey::LAST)
2267 report_fatal_error("AArch64 PAC Key ID '" + Twine(KeyID) +
2268 "' out of range [0, " +
2269 Twine((unsigned)AArch64PACKey::LAST) + "]");
2270
2271 uint64_t Disc = CPA.getDiscriminator()->getZExtValue();
2272 if (!isUInt<16>(Disc))
2273 report_fatal_error("AArch64 PAC Discriminator '" + Twine(Disc) +
2274 "' out of range [0, 0xFFFF]");
2275
2276 // Finally build the complete @AUTH expr.
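// The result is printed as an "@AUTH" operand, e.g. something like
// "sym@AUTH(da,42,addr)" for key DA, discriminator 42, with address
// diversity.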
2277 return AArch64AuthMCExpr::create(Sym, Disc, AArch64PACKey::ID(KeyID),
2278 CPA.hasAddressDiscriminator(), Ctx);
2279}
2280
2281void AArch64AsmPrinter::LowerLOADauthptrstatic(const MachineInstr &MI) {
2282 unsigned DstReg = MI.getOperand(0).getReg();
2283 const MachineOperand &GAOp = MI.getOperand(1);
2284 const uint64_t KeyC = MI.getOperand(2).getImm();
2285 assert(KeyC <= AArch64PACKey::LAST &&
2286 "key is out of range [0, AArch64PACKey::LAST]");
2287 const auto Key = (AArch64PACKey::ID)KeyC;
2288 const uint64_t Disc = MI.getOperand(3).getImm();
2289 assert(isUInt<16>(Disc) &&
2290 "constant discriminator is out of range [0, 0xffff]");
2291
2292 // Emit instruction sequence like the following:
2293 // ADRP x16, symbol$auth_ptr$key$disc
2294 // LDR x16, [x16, :lo12:symbol$auth_ptr$key$disc]
2295 //
2296 // Where the $auth_ptr$ symbol is the stub slot containing the signed pointer
2297 // to symbol.
2298 MCSymbol *AuthPtrStubSym;
2299 if (TM.getTargetTriple().isOSBinFormatELF()) {
2300 const auto &TLOF =
2301 static_cast<const AArch64_ELFTargetObjectFile &>(getObjFileLowering());
2302
2303 assert(GAOp.getOffset() == 0 &&
2304 "non-zero offset for $auth_ptr$ stub slots is not supported");
2305 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2306 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2307 } else {
2308 assert(TM.getTargetTriple().isOSBinFormatMachO() &&
2309 "LOADauthptrstatic is implemented only for MachO/ELF");
2310
2311 const auto &TLOF = static_cast<const AArch64_MachoTargetObjectFile &>(
2312 getObjFileLowering());
2313
2314 assert(GAOp.getOffset() == 0 &&
2315 "non-zero offset for $auth_ptr$ stub slots is not supported");
2316 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2317 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2318 }
2319
2320 MachineOperand StubMOHi =
2321 MachineOperand::CreateMCSymbol(AuthPtrStubSym, AArch64II::MO_PAGE);
2322 MachineOperand StubMOLo = MachineOperand::CreateMCSymbol(
2323 AuthPtrStubSym, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2324 MCOperand StubMCHi, StubMCLo;
2325
2326 MCInstLowering.lowerOperand(StubMOHi, StubMCHi);
2327 MCInstLowering.lowerOperand(StubMOLo, StubMCLo);
2328
2329 EmitToStreamer(
2330 *OutStreamer,
2331 MCInstBuilder(AArch64::ADRP).addReg(DstReg).addOperand(StubMCHi));
2332
2333 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRXui)
2334 .addReg(DstReg)
2335 .addReg(DstReg)
2336 .addOperand(StubMCLo));
2337}
2338
2339void AArch64AsmPrinter::LowerMOVaddrPAC(const MachineInstr &MI) {
2340 const bool IsGOTLoad = MI.getOpcode() == AArch64::LOADgotPAC;
2341 const bool IsELFSignedGOT = MI.getParent()
2342 ->getParent()
2343 ->getInfo<AArch64FunctionInfo>()
2344 ->hasELFSignedGOT();
2345 MachineOperand GAOp = MI.getOperand(0);
2346 const uint64_t KeyC = MI.getOperand(1).getImm();
2347 assert(KeyC <= AArch64PACKey::LAST &&
2348 "key is out of range [0, AArch64PACKey::LAST]");
2349 const auto Key = (AArch64PACKey::ID)KeyC;
2350 const unsigned AddrDisc = MI.getOperand(2).getReg();
2351 const uint64_t Disc = MI.getOperand(3).getImm();
2352 assert(isUInt<16>(Disc) &&
2353 "constant discriminator is out of range [0, 0xffff]");
2354
2355 const int64_t Offset = GAOp.getOffset();
2356 GAOp.setOffset(0);
2357
2358 // Emit:
2359 // target materialization:
2360 // - via GOT:
2361 // - unsigned GOT:
2362 // adrp x16, :got:target
2363 // ldr x16, [x16, :got_lo12:target]
2364 // add offset to x16 if offset != 0
2365 // - ELF signed GOT:
2366 // adrp x17, :got:target
2367 // add x17, x17, :got_auth_lo12:target
2368 // ldr x16, [x17]
2369 // aut{i|d}a x16, x17
2370 // check+trap sequence (if no FPAC)
2371 // add offset to x16 if offset != 0
2372 //
2373 // - direct:
2374 // adrp x16, target
2375 // add x16, x16, :lo12:target
2376 // add offset to x16 if offset != 0
2377 //
2378 // add offset to x16:
2379 // - abs(offset) fits 24 bits:
2380 // add/sub x16, x16, #<offset>[, #lsl 12] (up to 2 instructions)
2381 // - abs(offset) does not fit 24 bits:
2382 // - offset < 0:
2383 // movn+movk sequence filling x17 register with the offset (up to 4
2384 // instructions)
2385 // add x16, x16, x17
2386 // - offset > 0:
2387 // movz+movk sequence filling x17 register with the offset (up to 4
2388 // instructions)
2389 // add x16, x16, x17
2390 //
2391 // signing:
2392 // - 0 discriminator:
2393 // paciza x16
2394 // - Non-0 discriminator, no address discriminator:
2395 // mov x17, #Disc
2396 // pacia x16, x17
2397 // - address discriminator (with potentially folded immediate discriminator):
2398 // pacia x16, xAddrDisc
2399
2400 MachineOperand GAMOHi(GAOp), GAMOLo(GAOp);
2401 MCOperand GAMCHi, GAMCLo;
2402
2403 GAMOHi.setTargetFlags(AArch64II::MO_PAGE);
2404 GAMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2405 if (IsGOTLoad) {
2406 GAMOHi.addTargetFlag(AArch64II::MO_GOT);
2407 GAMOLo.addTargetFlag(AArch64II::MO_GOT);
2408 }
2409
2410 MCInstLowering.lowerOperand(GAMOHi, GAMCHi);
2411 MCInstLowering.lowerOperand(GAMOLo, GAMCLo);
2412
2413 EmitToStreamer(
2414 MCInstBuilder(AArch64::ADRP)
2415 .addReg(IsGOTLoad && IsELFSignedGOT ? AArch64::X17 : AArch64::X16)
2416 .addOperand(GAMCHi));
2417
2418 if (IsGOTLoad) {
2419 if (IsELFSignedGOT) {
2420 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2421 .addReg(AArch64::X17)
2422 .addReg(AArch64::X17)
2423 .addOperand(GAMCLo)
2424 .addImm(0));
2425
2426 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2427 .addReg(AArch64::X16)
2428 .addReg(AArch64::X17)
2429 .addImm(0));
2430
2431 assert(GAOp.isGlobal());
2432 assert(GAOp.getGlobal()->getValueType() != nullptr);
2433 unsigned AuthOpcode = GAOp.getGlobal()->getValueType()->isFunctionTy()
2434 ? AArch64::AUTIA
2435 : AArch64::AUTDA;
2436
2437 EmitToStreamer(MCInstBuilder(AuthOpcode)
2438 .addReg(AArch64::X16)
2439 .addReg(AArch64::X16)
2440 .addReg(AArch64::X17));
2441
2442 if (!STI->hasFPAC()) {
2443 auto AuthKey = (AuthOpcode == AArch64::AUTIA ? AArch64PACKey::IA
2444 : AArch64PACKey::DA);
2445
2446 emitPtrauthCheckAuthenticatedValue(AArch64::X16, AArch64::X17, AuthKey,
2447 AArch64PAuth::AuthCheckMethod::XPAC,
2448 /*ShouldTrap=*/true,
2449 /*OnFailure=*/nullptr);
2450 }
2451 } else {
2452 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2453 .addReg(AArch64::X16)
2454 .addReg(AArch64::X16)
2455 .addOperand(GAMCLo));
2456 }
2457 } else {
2458 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2459 .addReg(AArch64::X16)
2460 .addReg(AArch64::X16)
2461 .addOperand(GAMCLo)
2462 .addImm(0));
2463 }
2464
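// The offset addition below has two forms. For example, Offset 0x12345 fits
// the 24-bit add/sub-immediate path and becomes:
//   add x16, x16, #0x345
//   add x16, x16, #0x12, lsl #12
// while Offset 0x12345678 takes the movz/movk path:
//   movz x17, #0x5678
//   movk x17, #0x1234, lsl #16
//   add  x16, x16, x17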
2465 if (Offset != 0) {
2466 const uint64_t AbsOffset = (Offset > 0 ? Offset : -((uint64_t)Offset));
2467 const bool IsNeg = Offset < 0;
2468 if (isUInt<24>(AbsOffset)) {
2469 for (int BitPos = 0; BitPos != 24 && (AbsOffset >> BitPos);
2470 BitPos += 12) {
2471 EmitToStreamer(
2472 MCInstBuilder(IsNeg ? AArch64::SUBXri : AArch64::ADDXri)
2473 .addReg(AArch64::X16)
2474 .addReg(AArch64::X16)
2475 .addImm((AbsOffset >> BitPos) & 0xfff)
2476 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, BitPos)));
2477 }
2478 } else {
2479 const uint64_t UOffset = Offset;
2480 EmitToStreamer(MCInstBuilder(IsNeg ? AArch64::MOVNXi : AArch64::MOVZXi)
2481 .addReg(AArch64::X17)
2482 .addImm((IsNeg ? ~UOffset : UOffset) & 0xffff)
2483 .addImm(/*shift=*/0));
2484 auto NeedMovk = [IsNeg, UOffset](int BitPos) -> bool {
2485 assert(BitPos == 16 || BitPos == 32 || BitPos == 48);
2486 uint64_t Shifted = UOffset >> BitPos;
2487 if (!IsNeg)
2488 return Shifted != 0;
2489 for (int I = 0; I != 64 - BitPos; I += 16)
2490 if (((Shifted >> I) & 0xffff) != 0xffff)
2491 return true;
2492 return false;
2493 };
2494 for (int BitPos = 16; BitPos != 64 && NeedMovk(BitPos); BitPos += 16)
2495 emitMOVK(AArch64::X17, (UOffset >> BitPos) & 0xffff, BitPos);
2496
2497 EmitToStreamer(MCInstBuilder(AArch64::ADDXrs)
2498 .addReg(AArch64::X16)
2499 .addReg(AArch64::X16)
2500 .addReg(AArch64::X17)
2501 .addImm(/*shift=*/0));
2502 }
2503 }
2504
2505 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17);
2506
2507 auto MIB = MCInstBuilder(getPACOpcodeForKey(Key, DiscReg == AArch64::XZR))
2508 .addReg(AArch64::X16)
2509 .addReg(AArch64::X16);
2510 if (DiscReg != AArch64::XZR)
2511 MIB.addReg(DiscReg);
2512 EmitToStreamer(MIB);
2513}
2514
2515void AArch64AsmPrinter::LowerLOADgotAUTH(const MachineInstr &MI) {
2516 Register DstReg = MI.getOperand(0).getReg();
2517 Register AuthResultReg = STI->hasFPAC() ? DstReg : AArch64::X16;
2518 const MachineOperand &GAMO = MI.getOperand(1);
2519 assert(GAMO.getOffset() == 0);
2520
2521 if (MI.getMF()->getTarget().getCodeModel() == CodeModel::Tiny) {
2522 MCOperand GAMC;
2523 MCInstLowering.lowerOperand(GAMO, GAMC);
2524 EmitToStreamer(
2525 MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addOperand(GAMC));
2526 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2527 .addReg(AuthResultReg)
2528 .addReg(AArch64::X17)
2529 .addImm(0));
2530 } else {
2531 MachineOperand GAHiOp(GAMO);
2532 MachineOperand GALoOp(GAMO);
2533 GAHiOp.addTargetFlag(AArch64II::MO_PAGE);
2534 GALoOp.addTargetFlag(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2535
2536 MCOperand GAMCHi, GAMCLo;
2537 MCInstLowering.lowerOperand(GAHiOp, GAMCHi);
2538 MCInstLowering.lowerOperand(GALoOp, GAMCLo);
2539
2540 EmitToStreamer(
2541 MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(GAMCHi));
2542
2543 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2544 .addReg(AArch64::X17)
2545 .addReg(AArch64::X17)
2546 .addOperand(GAMCLo)
2547 .addImm(0));
2548
2549 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2550 .addReg(AuthResultReg)
2551 .addReg(AArch64::X17)
2552 .addImm(0));
2553 }
2554
2555 assert(GAMO.isGlobal());
2556 MCSymbol *UndefWeakSym;
2557 if (GAMO.getGlobal()->hasExternalWeakLinkage()) {
2558 UndefWeakSym = createTempSymbol("undef_weak");
2559 EmitToStreamer(
2560 MCInstBuilder(AArch64::CBZX)
2561 .addReg(AuthResultReg)
2562 .addExpr(MCSymbolRefExpr::create(UndefWeakSym, OutContext)));
2563 }
2564
2565 assert(GAMO.getGlobal()->getValueType() != nullptr);
2566 unsigned AuthOpcode = GAMO.getGlobal()->getValueType()->isFunctionTy()
2567 ? AArch64::AUTIA
2568 : AArch64::AUTDA;
2569 EmitToStreamer(MCInstBuilder(AuthOpcode)
2570 .addReg(AuthResultReg)
2571 .addReg(AuthResultReg)
2572 .addReg(AArch64::X17));
2573
2574 if (GAMO.getGlobal()->hasExternalWeakLinkage())
2575 OutStreamer->emitLabel(UndefWeakSym);
2576
2577 if (!STI->hasFPAC()) {
2578 auto AuthKey =
2579 (AuthOpcode == AArch64::AUTIA ? AArch64PACKey::IA : AArch64PACKey::DA);
2580
2581 emitPtrauthCheckAuthenticatedValue(AuthResultReg, AArch64::X17, AuthKey,
2582 AArch64PAuth::AuthCheckMethod::XPAC,
2583 /*ShouldTrap=*/true,
2584 /*OnFailure=*/nullptr);
2585
2586 emitMovXReg(DstReg, AuthResultReg);
2587 }
2588}
2589
2590const MCExpr *
2591AArch64AsmPrinter::lowerBlockAddressConstant(const BlockAddress &BA) {
2592 const MCExpr *BAE = AsmPrinter::lowerBlockAddressConstant(BA);
2593 const Function &Fn = *BA.getFunction();
2594
2595 if (std::optional<uint16_t> BADisc =
2596 STI->getPtrAuthBlockAddressDiscriminatorIfEnabled(Fn))
2597 return AArch64AuthMCExpr::create(BAE, *BADisc, AArch64PACKey::IA,
2598 /*HasAddressDiversity=*/false, OutContext);
2599
2600 return BAE;
2601}
2602
2603// Simple pseudo-instructions have their lowering (with expansion to real
2604// instructions) auto-generated.
2605#include "AArch64GenMCPseudoLowering.inc"
2606
2607void AArch64AsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
2608 S.emitInstruction(Inst, *STI);
2609#ifndef NDEBUG
2610 ++InstsEmitted;
2611#endif
2612}
2613
2614void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
2615 AArch64_MC::verifyInstructionPredicates(MI->getOpcode(), STI->getFeatureBits());
2616
2617#ifndef NDEBUG
2618 InstsEmitted = 0;
2619 auto CheckMISize = make_scope_exit([&]() {
2620 assert(STI->getInstrInfo()->getInstSizeInBytes(*MI) >= InstsEmitted * 4);
2621 });
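// Each expanded pseudo must emit no more bytes than its declared instruction
// size, since size-based reasoning (e.g. branch relaxation) relies on that
// bound.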
2622#endif
2623
2624 // Do any auto-generated pseudo lowerings.
2625 if (MCInst OutInst; lowerPseudoInstExpansion(MI, OutInst)) {
2626 EmitToStreamer(*OutStreamer, OutInst);
2627 return;
2628 }
2629
2630 if (MI->getOpcode() == AArch64::ADRP) {
2631 for (auto &Opd : MI->operands()) {
2632 if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
2633 "swift_async_extendedFramePointerFlags") {
2634 ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
2635 }
2636 }
2637 }
2638
2639 if (AArch64FI->getLOHRelated().count(MI)) {
2640 // Generate a label for LOH related instruction
2641 MCSymbol *LOHLabel = createTempSymbol("loh");
2642 // Associate the instruction with the label
2643 LOHInstToLabel[MI] = LOHLabel;
2644 OutStreamer->emitLabel(LOHLabel);
2645 }
2646
2647 AArch64TargetStreamer *TS =
2648 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
2649 // Do any manual lowerings.
2650 switch (MI->getOpcode()) {
2651 default:
2653 "Unhandled tail call instruction");
2654 break;
2655 case AArch64::HINT: {
2656 // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
2657 // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
2658 // non-empty. If MI is the initial BTI, place the
2659 // __patchable_function_entries label after BTI.
2660 if (CurrentPatchableFunctionEntrySym &&
2661 CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
2662 MI == &MF->front().front()) {
2663 int64_t Imm = MI->getOperand(0).getImm();
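// HINT immediates with bit 5 set and a nonzero target field (bits 1-2) are
// the BTI variants (bti c / bti j / bti jc), which is what the check below
// matches.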
2664 if ((Imm & 32) && (Imm & 6)) {
2665 MCInst Inst;
2666 MCInstLowering.Lower(MI, Inst);
2667 EmitToStreamer(*OutStreamer, Inst);
2668 CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
2669 OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
2670 return;
2671 }
2672 }
2673 break;
2674 }
2675 case AArch64::MOVMCSym: {
2676 Register DestReg = MI->getOperand(0).getReg();
2677 const MachineOperand &MO_Sym = MI->getOperand(1);
2678 MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
2679 MCOperand Hi_MCSym, Lo_MCSym;
2680
2681 Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
2682 Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);
2683
2684 MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
2685 MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);
2686
2687 MCInst MovZ;
2688 MovZ.setOpcode(AArch64::MOVZXi);
2689 MovZ.addOperand(MCOperand::createReg(DestReg));
2690 MovZ.addOperand(Hi_MCSym);
2691 MovZ.addOperand(MCOperand::createImm(16));
2692 EmitToStreamer(*OutStreamer, MovZ);
2693
2694 MCInst MovK;
2695 MovK.setOpcode(AArch64::MOVKXi);
2696 MovK.addOperand(MCOperand::createReg(DestReg));
2697 MovK.addOperand(MCOperand::createReg(DestReg));
2698 MovK.addOperand(Lo_MCSym);
2699 MovK.addOperand(MCOperand::createImm(0));
2700 EmitToStreamer(*OutStreamer, MovK);
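// Net effect: "movz xD, #:abs_g1_s:sym, lsl #16" followed by
// "movk xD, #:abs_g0_nc:sym", materializing the symbol's absolute address
// halfword by halfword.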
2701 return;
2702 }
2703 case AArch64::MOVIv2d_ns:
2704 // It is generally beneficial to rewrite "fmov s0, wzr" to "movi d0, #0",
2705 // as movi is more efficient across all cores. Newer cores can eliminate
2706 // fmovs early and there is no difference with movi, but this is not true
2707 // for all implementations.
2708 //
2709 // The floating-point version doesn't quite work in rare cases on older
2710 // CPUs, so on those targets we lower this instruction to movi.16b instead.
2711 if (STI->hasZeroCycleZeroingFPWorkaround() &&
2712 MI->getOperand(1).getImm() == 0) {
2713 MCInst TmpInst;
2714 TmpInst.setOpcode(AArch64::MOVIv16b_ns);
2715 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
2716 TmpInst.addOperand(MCOperand::createImm(MI->getOperand(1).getImm()));
2717 EmitToStreamer(*OutStreamer, TmpInst);
2718 return;
2719 }
2720 break;
2721
2722 case AArch64::DBG_VALUE:
2723 case AArch64::DBG_VALUE_LIST:
2724 if (isVerbose() && OutStreamer->hasRawTextSupport()) {
2725 SmallString<128> TmpStr;
2726 raw_svector_ostream OS(TmpStr);
2727 PrintDebugValueComment(MI, OS);
2728 OutStreamer->emitRawText(StringRef(OS.str()));
2729 }
2730 return;
2731
2732 case AArch64::EMITBKEY: {
2733 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
2734 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
2735 ExceptionHandlingType != ExceptionHandling::ARM)
2736 return;
2737
2738 if (getFunctionCFISectionType(*MF) == CFISection::None)
2739 return;
2740
2741 OutStreamer->emitCFIBKeyFrame();
2742 return;
2743 }
2744
2745 case AArch64::EMITMTETAGGED: {
2746 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
2747 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
2748 ExceptionHandlingType != ExceptionHandling::ARM)
2749 return;
2750
2751 if (getFunctionCFISectionType(*MF) != CFISection::None)
2752 OutStreamer->emitCFIMTETaggedFrame();
2753 return;
2754 }
2755
2756 case AArch64::AUT:
2757 case AArch64::AUTPAC:
2758 emitPtrauthAuthResign(MI);
2759 return;
2760
2761 case AArch64::LOADauthptrstatic:
2762 LowerLOADauthptrstatic(*MI);
2763 return;
2764
2765 case AArch64::LOADgotPAC:
2766 case AArch64::MOVaddrPAC:
2767 LowerMOVaddrPAC(*MI);
2768 return;
2769
2770 case AArch64::LOADgotAUTH:
2771 LowerLOADgotAUTH(*MI);
2772 return;
2773
2774 case AArch64::BRA:
2775 case AArch64::BLRA:
2776 emitPtrauthBranch(MI);
2777 return;
2778
2779 // Tail calls use pseudo instructions so they have the proper code-gen
2780 // attributes (isCall, isReturn, etc.). We lower them to the real
2781 // instruction here.
2782 case AArch64::AUTH_TCRETURN:
2783 case AArch64::AUTH_TCRETURN_BTI: {
2784 Register Callee = MI->getOperand(0).getReg();
2785 const uint64_t Key = MI->getOperand(2).getImm();
2786 assert((Key == AArch64PACKey::IA || Key == AArch64PACKey::IB) &&
2787 "Invalid auth key for tail-call return");
2788
2789 const uint64_t Disc = MI->getOperand(3).getImm();
2790 assert(isUInt<16>(Disc) && "Integer discriminator is too wide");
2791
2792 Register AddrDisc = MI->getOperand(4).getReg();
2793
2794 Register ScratchReg = Callee == AArch64::X16 ? AArch64::X17 : AArch64::X16;
2795
2796 emitPtrauthTailCallHardening(MI);
2797
2798 // See the comments in emitPtrauthBranch.
2799 if (Callee == AddrDisc)
2800 report_fatal_error("Call target is signed with its own value");
2801 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, ScratchReg,
2802 /*MayUseAddrAsScratch=*/true);
2803
2804 const bool IsZero = DiscReg == AArch64::XZR;
2805 const unsigned Opcodes[2][2] = {{AArch64::BRAA, AArch64::BRAAZ},
2806 {AArch64::BRAB, AArch64::BRABZ}};
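// Key is IA (0) or IB (1), so it selects the row; IsZero selects the
// zero-discriminator ("Z") column.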
2807
2808 MCInst TmpInst;
2809 TmpInst.setOpcode(Opcodes[Key][IsZero]);
2810 TmpInst.addOperand(MCOperand::createReg(Callee));
2811 if (!IsZero)
2812 TmpInst.addOperand(MCOperand::createReg(DiscReg));
2813 EmitToStreamer(*OutStreamer, TmpInst);
2814 return;
2815 }
2816
2817 case AArch64::TCRETURNri:
2818 case AArch64::TCRETURNrix16x17:
2819 case AArch64::TCRETURNrix17:
2820 case AArch64::TCRETURNrinotx16:
2821 case AArch64::TCRETURNriALL: {
2822 emitPtrauthTailCallHardening(MI);
2823
2824 recordIfImportCall(MI);
2825 MCInst TmpInst;
2826 TmpInst.setOpcode(AArch64::BR);
2827 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
2828 EmitToStreamer(*OutStreamer, TmpInst);
2829 return;
2830 }
2831 case AArch64::TCRETURNdi: {
2832 emitPtrauthTailCallHardening(MI);
2833
2834 MCOperand Dest;
2835 MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
2836 recordIfImportCall(MI);
2837 MCInst TmpInst;
2838 TmpInst.setOpcode(AArch64::B);
2839 TmpInst.addOperand(Dest);
2840 EmitToStreamer(*OutStreamer, TmpInst);
2841 return;
2842 }
2843 case AArch64::SpeculationBarrierISBDSBEndBB: {
2844 // Print DSB SYS + ISB
2845 MCInst TmpInstDSB;
2846 TmpInstDSB.setOpcode(AArch64::DSB);
2847 TmpInstDSB.addOperand(MCOperand::createImm(0xf));
2848 EmitToStreamer(*OutStreamer, TmpInstDSB);
2849 MCInst TmpInstISB;
2850 TmpInstISB.setOpcode(AArch64::ISB);
2851 TmpInstISB.addOperand(MCOperand::createImm(0xf));
2852 EmitToStreamer(*OutStreamer, TmpInstISB);
2853 return;
2854 }
2855 case AArch64::SpeculationBarrierSBEndBB: {
2856 // Print SB
2857 MCInst TmpInstSB;
2858 TmpInstSB.setOpcode(AArch64::SB);
2859 EmitToStreamer(*OutStreamer, TmpInstSB);
2860 return;
2861 }
2862 case AArch64::TLSDESC_AUTH_CALLSEQ: {
2863 /// lower this to:
2864 /// adrp x0, :tlsdesc_auth:var
2865 /// ldr x16, [x0, #:tlsdesc_auth_lo12:var]
2866 /// add x0, x0, #:tlsdesc_auth_lo12:var
2867 /// blraa x16, x0
2868 /// (TPIDR_EL0 offset now in x0)
2869 const MachineOperand &MO_Sym = MI->getOperand(0);
2870 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
2871 MCOperand SymTLSDescLo12, SymTLSDesc;
2872 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
2873 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
2874 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
2875 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
2876
2877 MCInst Adrp;
2878 Adrp.setOpcode(AArch64::ADRP);
2879 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
2880 Adrp.addOperand(SymTLSDesc);
2881 EmitToStreamer(*OutStreamer, Adrp);
2882
2883 MCInst Ldr;
2884 Ldr.setOpcode(AArch64::LDRXui);
2885 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
2886 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
2887 Ldr.addOperand(SymTLSDescLo12);
2889 EmitToStreamer(*OutStreamer, Ldr);
2890
2891 MCInst Add;
2892 Add.setOpcode(AArch64::ADDXri);
2893 Add.addOperand(MCOperand::createReg(AArch64::X0));
2894 Add.addOperand(MCOperand::createReg(AArch64::X0));
2895 Add.addOperand(SymTLSDescLo12);
2897 EmitToStreamer(*OutStreamer, Add);
2898
2899 // Authenticated TLSDESC accesses are not relaxed.
2900 // Thus, do not emit .tlsdesccall for AUTH TLSDESC.
2901
2902 MCInst Blraa;
2903 Blraa.setOpcode(AArch64::BLRAA);
2904 Blraa.addOperand(MCOperand::createReg(AArch64::X16));
2905 Blraa.addOperand(MCOperand::createReg(AArch64::X0));
2906 EmitToStreamer(*OutStreamer, Blraa);
2907
2908 return;
2909 }
2910 case AArch64::TLSDESC_CALLSEQ: {
2911 /// lower this to:
2912 /// adrp x0, :tlsdesc:var
2913 /// ldr x1, [x0, #:tlsdesc_lo12:var]
2914 /// add x0, x0, #:tlsdesc_lo12:var
2915 /// .tlsdesccall var
2916 /// blr x1
2917 /// (TPIDR_EL0 offset now in x0)
2918 const MachineOperand &MO_Sym = MI->getOperand(0);
2919 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
2920 MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
2921 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
2922 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
2923 MCInstLowering.lowerOperand(MO_Sym, Sym);
2924 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
2925 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
2926
2927 MCInst Adrp;
2928 Adrp.setOpcode(AArch64::ADRP);
2929 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
2930 Adrp.addOperand(SymTLSDesc);
2931 EmitToStreamer(*OutStreamer, Adrp);
2932
2933 MCInst Ldr;
2934 if (STI->isTargetILP32()) {
2935 Ldr.setOpcode(AArch64::LDRWui);
2936 Ldr.addOperand(MCOperand::createReg(AArch64::W1));
2937 } else {
2938 Ldr.setOpcode(AArch64::LDRXui);
2939 Ldr.addOperand(MCOperand::createReg(AArch64::X1));
2940 }
2941 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
2942 Ldr.addOperand(SymTLSDescLo12);
2944 EmitToStreamer(*OutStreamer, Ldr);
2945
2946 MCInst Add;
2947 if (STI->isTargetILP32()) {
2948 Add.setOpcode(AArch64::ADDWri);
2949 Add.addOperand(MCOperand::createReg(AArch64::W0));
2950 Add.addOperand(MCOperand::createReg(AArch64::W0));
2951 } else {
2952 Add.setOpcode(AArch64::ADDXri);
2953 Add.addOperand(MCOperand::createReg(AArch64::X0));
2954 Add.addOperand(MCOperand::createReg(AArch64::X0));
2955 }
2956 Add.addOperand(SymTLSDescLo12);
2958 EmitToStreamer(*OutStreamer, Add);
2959
2960 // Emit a relocation annotation. This expands to no code, but requests
2961 // that the following instruction get an R_AARCH64_TLSDESC_CALL relocation.
2962 MCInst TLSDescCall;
2963 TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
2964 TLSDescCall.addOperand(Sym);
2965 EmitToStreamer(*OutStreamer, TLSDescCall);
2966#ifndef NDEBUG
2967 --InstsEmitted; // no code emitted
2968#endif
2969
2970 MCInst Blr;
2971 Blr.setOpcode(AArch64::BLR);
2972 Blr.addOperand(MCOperand::createReg(AArch64::X1));
2973 EmitToStreamer(*OutStreamer, Blr);
2974
2975 return;
2976 }
2977
2978 case AArch64::JumpTableDest32:
2979 case AArch64::JumpTableDest16:
2980 case AArch64::JumpTableDest8:
2981 LowerJumpTableDest(*OutStreamer, *MI);
2982 return;
2983
2984 case AArch64::BR_JumpTable:
2985 LowerHardenedBRJumpTable(*MI);
2986 return;
2987
2988 case AArch64::FMOVH0:
2989 case AArch64::FMOVS0:
2990 case AArch64::FMOVD0:
2991 emitFMov0(*MI);
2992 return;
2993
2994 case AArch64::MOPSMemoryCopyPseudo:
2995 case AArch64::MOPSMemoryMovePseudo:
2996 case AArch64::MOPSMemorySetPseudo:
2997 case AArch64::MOPSMemorySetTaggingPseudo:
2998 LowerMOPS(*OutStreamer, *MI);
2999 return;
3000
3001 case TargetOpcode::STACKMAP:
3002 return LowerSTACKMAP(*OutStreamer, SM, *MI);
3003
3004 case TargetOpcode::PATCHPOINT:
3005 return LowerPATCHPOINT(*OutStreamer, SM, *MI);
3006
3007 case TargetOpcode::STATEPOINT:
3008 return LowerSTATEPOINT(*OutStreamer, SM, *MI);
3009
3010 case TargetOpcode::FAULTING_OP:
3011 return LowerFAULTING_OP(*MI);
3012
3013 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
3014 LowerPATCHABLE_FUNCTION_ENTER(*MI);
3015 return;
3016
3017 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
3018 LowerPATCHABLE_FUNCTION_EXIT(*MI);
3019 return;
3020
3021 case TargetOpcode::PATCHABLE_TAIL_CALL:
3022 LowerPATCHABLE_TAIL_CALL(*MI);
3023 return;
3024 case TargetOpcode::PATCHABLE_EVENT_CALL:
3025 return LowerPATCHABLE_EVENT_CALL(*MI, false);
3026 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
3027 return LowerPATCHABLE_EVENT_CALL(*MI, true);
3028
3029 case AArch64::KCFI_CHECK:
3030 LowerKCFI_CHECK(*MI);
3031 return;
3032
3033 case AArch64::HWASAN_CHECK_MEMACCESS:
3034 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
3035 case AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW:
3036 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW:
3037 LowerHWASAN_CHECK_MEMACCESS(*MI);
3038 return;
3039
3040 case AArch64::SEH_StackAlloc:
3041 TS->emitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
3042 return;
3043
3044 case AArch64::SEH_SaveFPLR:
3045 TS->emitARM64WinCFISaveFPLR(MI->getOperand(0).getImm());
3046 return;
3047
3048 case AArch64::SEH_SaveFPLR_X:
3049 assert(MI->getOperand(0).getImm() < 0 &&
3050 "Pre increment SEH opcode must have a negative offset");
3051 TS->emitARM64WinCFISaveFPLRX(-MI->getOperand(0).getImm());
3052 return;
3053
3054 case AArch64::SEH_SaveReg:
3055 TS->emitARM64WinCFISaveReg(MI->getOperand(0).getImm(),
3056 MI->getOperand(1).getImm());
3057 return;
3058
3059 case AArch64::SEH_SaveReg_X:
3060 assert(MI->getOperand(1).getImm() < 0 &&
3061 "Pre increment SEH opcode must have a negative offset");
3062 TS->emitARM64WinCFISaveRegX(MI->getOperand(0).getImm(),
3063 -MI->getOperand(1).getImm());
3064 return;
3065
3066 case AArch64::SEH_SaveRegP:
3067 if (MI->getOperand(1).getImm() == 30 && MI->getOperand(0).getImm() >= 19 &&
3068 MI->getOperand(0).getImm() <= 28) {
3069 assert((MI->getOperand(0).getImm() - 19) % 2 == 0 &&
3070 "Register paired with LR must be odd");
3071 TS->emitARM64WinCFISaveLRPair(MI->getOperand(0).getImm(),
3072 MI->getOperand(2).getImm());
3073 return;
3074 }
3075 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3076 "Non-consecutive registers not allowed for save_regp");
3077 TS->emitARM64WinCFISaveRegP(MI->getOperand(0).getImm(),
3078 MI->getOperand(2).getImm());
3079 return;
3080
3081 case AArch64::SEH_SaveRegP_X:
3082 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3083 "Non-consecutive registers not allowed for save_regp_x");
3084 assert(MI->getOperand(2).getImm() < 0 &&
3085 "Pre increment SEH opcode must have a negative offset");
3086 TS->emitARM64WinCFISaveRegPX(MI->getOperand(0).getImm(),
3087 -MI->getOperand(2).getImm());
3088 return;
3089
3090 case AArch64::SEH_SaveFReg:
3091 TS->emitARM64WinCFISaveFReg(MI->getOperand(0).getImm(),
3092 MI->getOperand(1).getImm());
3093 return;
3094
3095 case AArch64::SEH_SaveFReg_X:
3096 assert(MI->getOperand(1).getImm() < 0 &&
3097 "Pre increment SEH opcode must have a negative offset");
3098 TS->emitARM64WinCFISaveFRegX(MI->getOperand(0).getImm(),
3099 -MI->getOperand(1).getImm());
3100 return;
3101
3102 case AArch64::SEH_SaveFRegP:
3103 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3104 "Non-consecutive registers not allowed for save_regp");
3105 TS->emitARM64WinCFISaveFRegP(MI->getOperand(0).getImm(),
3106 MI->getOperand(2).getImm());
3107 return;
3108
3109 case AArch64::SEH_SaveFRegP_X:
3110 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3111 "Non-consecutive registers not allowed for save_regp_x");
3112 assert(MI->getOperand(2).getImm() < 0 &&
3113 "Pre increment SEH opcode must have a negative offset");
3114 TS->emitARM64WinCFISaveFRegPX(MI->getOperand(0).getImm(),
3115 -MI->getOperand(2).getImm());
3116 return;
3117
3118 case AArch64::SEH_SetFP:
3119 TS->emitARM64WinCFISetFP();
3120 return;
3121
3122 case AArch64::SEH_AddFP:
3123 TS->emitARM64WinCFIAddFP(MI->getOperand(0).getImm());
3124 return;
3125
3126 case AArch64::SEH_Nop:
3127 TS->emitARM64WinCFINop();
3128 return;
3129
3130 case AArch64::SEH_PrologEnd:
3131 TS->emitARM64WinCFIPrologEnd();
3132 return;
3133
3134 case AArch64::SEH_EpilogStart:
3135 TS->emitARM64WinCFIEpilogStart();
3136 return;
3137
3138 case AArch64::SEH_EpilogEnd:
3139 TS->emitARM64WinCFIEpilogEnd();
3140 return;
3141
3142 case AArch64::SEH_PACSignLR:
3143 TS->emitARM64WinCFIPACSignLR();
3144 return;
3145
3146 case AArch64::SEH_SaveAnyRegQP:
3147 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3148 "Non-consecutive registers not allowed for save_any_reg");
3149 assert(MI->getOperand(2).getImm() >= 0 &&
3150 "SaveAnyRegQP SEH opcode offset must be non-negative");
3151 assert(MI->getOperand(2).getImm() <= 1008 &&
3152 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3153 TS->emitARM64WinCFISaveAnyRegQP(MI->getOperand(0).getImm(),
3154 MI->getOperand(2).getImm());
3155 return;
3156
3157 case AArch64::SEH_SaveAnyRegQPX:
3158 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3159 "Non-consecutive registers not allowed for save_any_reg");
3160 assert(MI->getOperand(2).getImm() < 0 &&
3161 "SaveAnyRegQPX SEH opcode offset must be negative");
3162 assert(MI->getOperand(2).getImm() >= -1008 &&
3163 "SaveAnyRegQPX SEH opcode offset must fit into 6 bits");
3164 TS->emitARM64WinCFISaveAnyRegQPX(MI->getOperand(0).getImm(),
3165 -MI->getOperand(2).getImm());
3166 return;
3167
3168 case AArch64::BLR:
3169 case AArch64::BR:
3170 recordIfImportCall(MI);
3171 MCInst TmpInst;
3172 MCInstLowering.Lower(MI, TmpInst);
3173 EmitToStreamer(*OutStreamer, TmpInst);
3174 return;
3175 }
3176
3177 // Finally, do the automated lowerings for everything else.
3178 MCInst TmpInst;
3179 MCInstLowering.Lower(MI, TmpInst);
3180 EmitToStreamer(*OutStreamer, TmpInst);
3181}
3182
3183 void AArch64AsmPrinter::recordIfImportCall(
3184 const llvm::MachineInstr *BranchInst) {
3185 if (!EnableImportCallOptimization ||
3186 !TM.getTargetTriple().isOSBinFormatCOFF())
3187 return;
3188
3189 auto [GV, OpFlags] = BranchInst->getMF()->tryGetCalledGlobal(BranchInst);
3190 if (GV && GV->hasDLLImportStorageClass()) {
3191 auto *CallSiteSymbol = MMI->getContext().createNamedTempSymbol("impcall");
3192 OutStreamer->emitLabel(CallSiteSymbol);
3193
3194 auto *CalledSymbol = MCInstLowering.GetGlobalValueSymbol(GV, OpFlags);
3195 SectionToImportedFunctionCalls[OutStreamer->getCurrentSectionOnly()]
3196 .push_back({CallSiteSymbol, CalledSymbol});
3197 }
3198}
3199
3200void AArch64AsmPrinter::emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
3201 MCSymbol *LazyPointer) {
3202 // _ifunc:
3203 // adrp x16, lazy_pointer@GOTPAGE
3204 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3205 // ldr x16, [x16]
3206 // br x16
3207
3208 {
3209 MCInst Adrp;
3210 Adrp.setOpcode(AArch64::ADRP);
3211 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3212 MCOperand SymPage;
3213 MCInstLowering.lowerOperand(
3214 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3215 AArch64II::MO_GOT | AArch64II::MO_PAGE),
3216 SymPage);
3217 Adrp.addOperand(SymPage);
3218 EmitToStreamer(Adrp);
3219 }
3220
3221 {
3222 MCInst Ldr;
3223 Ldr.setOpcode(AArch64::LDRXui);
3224 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3225 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3226 MCOperand SymPageOff;
3227 MCInstLowering.lowerOperand(
3228 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3229 AArch64II::MO_GOT | AArch64II::MO_PAGEOFF),
3230 SymPageOff);
3231 Ldr.addOperand(SymPageOff);
3233 EmitToStreamer(Ldr);
3234 }
3235
3236 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
3237 .addReg(AArch64::X16)
3238 .addReg(AArch64::X16)
3239 .addImm(0));
3240
3241 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3242 : AArch64::BR)
3243 .addReg(AArch64::X16));
3244}
3245
3246void AArch64AsmPrinter::emitMachOIFuncStubHelperBody(Module &M,
3247 const GlobalIFunc &GI,
3248 MCSymbol *LazyPointer) {
3249 // These stub helpers are only ever called once, so here we're optimizing for
3250 // minimum size by using the pre-indexed store variants, which saves a few
3251 // bytes of instructions to bump & restore sp.
3252
3253 // _ifunc.stub_helper:
3254 // stp fp, lr, [sp, #-16]!
3255 // mov fp, sp
3256 // stp x1, x0, [sp, #-16]!
3257 // stp x3, x2, [sp, #-16]!
3258 // stp x5, x4, [sp, #-16]!
3259 // stp x7, x6, [sp, #-16]!
3260 // stp d1, d0, [sp, #-16]!
3261 // stp d3, d2, [sp, #-16]!
3262 // stp d5, d4, [sp, #-16]!
3263 // stp d7, d6, [sp, #-16]!
3264 // bl _resolver
3265 // adrp x16, lazy_pointer@GOTPAGE
3266 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3267 // str x0, [x16]
3268 // mov x16, x0
3269 // ldp d7, d6, [sp], #16
3270 // ldp d5, d4, [sp], #16
3271 // ldp d3, d2, [sp], #16
3272 // ldp d1, d0, [sp], #16
3273 // ldp x7, x6, [sp], #16
3274 // ldp x5, x4, [sp], #16
3275 // ldp x3, x2, [sp], #16
3276 // ldp x1, x0, [sp], #16
3277 // ldp fp, lr, [sp], #16
3278 // br x16
3279
3280 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3281 .addReg(AArch64::SP)
3282 .addReg(AArch64::FP)
3283 .addReg(AArch64::LR)
3284 .addReg(AArch64::SP)
3285 .addImm(-2));
3286
3287 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3288 .addReg(AArch64::FP)
3289 .addReg(AArch64::SP)
3290 .addImm(0)
3291 .addImm(0));
3292
3293 for (int I = 0; I != 4; ++I)
3294 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3295 .addReg(AArch64::SP)
3296 .addReg(AArch64::X1 + 2 * I)
3297 .addReg(AArch64::X0 + 2 * I)
3298 .addReg(AArch64::SP)
3299 .addImm(-2));
3300
3301 for (int I = 0; I != 4; ++I)
3302 EmitToStreamer(MCInstBuilder(AArch64::STPDpre)
3303 .addReg(AArch64::SP)
3304 .addReg(AArch64::D1 + 2 * I)
3305 .addReg(AArch64::D0 + 2 * I)
3306 .addReg(AArch64::SP)
3307 .addImm(-2));
3308
3309 EmitToStreamer(
3310 MCInstBuilder(AArch64::BL)
3312
3313 {
3314 MCInst Adrp;
3315 Adrp.setOpcode(AArch64::ADRP);
3316 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3317 MCOperand SymPage;
3318 MCInstLowering.lowerOperand(
3319 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3320 AArch64II::MO_GOT | AArch64II::MO_PAGE),
3321 SymPage);
3322 Adrp.addOperand(SymPage);
3323 EmitToStreamer(Adrp);
3324 }
3325
3326 {
3327 MCInst Ldr;
3328 Ldr.setOpcode(AArch64::LDRXui);
3329 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3330 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3331 MCOperand SymPageOff;
3332 MCInstLowering.lowerOperand(
3333 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3334 AArch64II::MO_GOT | AArch64II::MO_PAGEOFF),
3335 SymPageOff);
3336 Ldr.addOperand(SymPageOff);
3338 EmitToStreamer(Ldr);
3339 }
3340
3341 EmitToStreamer(MCInstBuilder(AArch64::STRXui)
3342 .addReg(AArch64::X0)
3343 .addReg(AArch64::X16)
3344 .addImm(0));
3345
3346 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3347 .addReg(AArch64::X16)
3348 .addReg(AArch64::X0)
3349 .addImm(0)
3350 .addImm(0));
3351
3352 for (int I = 3; I != -1; --I)
3353 EmitToStreamer(MCInstBuilder(AArch64::LDPDpost)
3354 .addReg(AArch64::SP)
3355 .addReg(AArch64::D1 + 2 * I)
3356 .addReg(AArch64::D0 + 2 * I)
3357 .addReg(AArch64::SP)
3358 .addImm(2));
3359
3360 for (int I = 3; I != -1; --I)
3361 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3362 .addReg(AArch64::SP)
3363 .addReg(AArch64::X1 + 2 * I)
3364 .addReg(AArch64::X0 + 2 * I)
3365 .addReg(AArch64::SP)
3366 .addImm(2));
3367
3368 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3369 .addReg(AArch64::SP)
3370 .addReg(AArch64::FP)
3371 .addReg(AArch64::LR)
3372 .addReg(AArch64::SP)
3373 .addImm(2));
3374
3375 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3376 : AArch64::BR)
3377 .addReg(AArch64::X16));
3378}
3379
3380const MCExpr *AArch64AsmPrinter::lowerConstant(const Constant *CV) {
3381 if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
3382 return MCSymbolRefExpr::create(MCInstLowering.GetGlobalValueSymbol(GV, 0),
3383 OutContext);
3384 }
3385
3386 return AsmPrinter::lowerConstant(CV);
3387}
3388
3389 // Force static initialization.
3390 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmPrinter() {
3391   RegisterAsmPrinter<AArch64AsmPrinter> X(getTheAArch64leTarget());
3392   RegisterAsmPrinter<AArch64AsmPrinter> Y(getTheAArch64beTarget());
3393   RegisterAsmPrinter<AArch64AsmPrinter> Z(getTheARM64Target());
3394   RegisterAsmPrinter<AArch64AsmPrinter> W(getTheARM64_32Target());
3395   RegisterAsmPrinter<AArch64AsmPrinter> V(getTheAArch64_32Target());
3396 }