LLVM 23.0.0git
AArch64Arm64ECCallLowering.cpp
Go to the documentation of this file.
//===-- AArch64Arm64ECCallLowering.cpp - Lower Arm64EC calls ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains the IR transform to lower external or indirect calls for
/// the ARM64EC calling convention. Such calls must go through the runtime, so
/// we can translate the calling convention for calls into the emulator.
///
/// This subsumes Control Flow Guard handling.
///
//===----------------------------------------------------------------------===//
17
#include "AArch64.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/Object/COFF.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/TargetParser/Triple.h"
33
34using namespace llvm;
35using namespace llvm::COFF;
36
38
39#define DEBUG_TYPE "arm64eccalllowering"
40
41STATISTIC(Arm64ECCallsLowered, "Number of Arm64EC calls lowered");
42
43static cl::opt<bool> LowerDirectToIndirect("arm64ec-lower-direct-to-indirect",
44 cl::Hidden, cl::init(true));
45static cl::opt<bool> GenerateThunks("arm64ec-generate-thunks", cl::Hidden,
46 cl::init(true));
47
48namespace {
49
50enum ThunkArgTranslation : uint8_t {
51 Direct,
52 Bitcast,
53 PointerIndirection,
54};
55
56struct ThunkArgInfo {
57 Type *Arm64Ty;
58 Type *X64Ty;
59 ThunkArgTranslation Translation;
60};
61
62class AArch64Arm64ECCallLowering : public ModulePass {
63public:
64 static char ID;
65 AArch64Arm64ECCallLowering() : ModulePass(ID) {}
66
67 Function *buildExitThunk(FunctionType *FnTy, AttributeList Attrs);
68 Function *buildEntryThunk(Function *F);
69 void lowerCall(CallBase *CB);
70 Function *buildGuestExitThunk(Function *F);
71 Function *buildPatchableThunk(GlobalAlias *UnmangledAlias,
72 GlobalAlias *MangledAlias);
73 bool processFunction(Function &F, SetVector<GlobalValue *> &DirectCalledFns,
74 DenseMap<GlobalAlias *, GlobalAlias *> &FnsMap);
75 bool runOnModule(Module &M) override;
76
77private:
78 ControlFlowGuardMode CFGuardModuleFlag = ControlFlowGuardMode::Disabled;
79 FunctionType *GuardFnType = nullptr;
80 FunctionType *DispatchFnType = nullptr;
81 Constant *GuardFnCFGlobal = nullptr;
82 Constant *GuardFnGlobal = nullptr;
83 Constant *DispatchFnGlobal = nullptr;
84 Module *M = nullptr;
85
86 Type *PtrTy;
87 Type *I64Ty;
88 Type *VoidTy;
89
90 void getThunkType(FunctionType *FT, AttributeList AttrList,
91 Arm64ECThunkType TT, raw_ostream &Out,
92 FunctionType *&Arm64Ty, FunctionType *&X64Ty,
93 SmallVector<ThunkArgTranslation> &ArgTranslations);
94 void getThunkRetType(FunctionType *FT, AttributeList AttrList,
95 raw_ostream &Out, Type *&Arm64RetTy, Type *&X64RetTy,
96 SmallVectorImpl<Type *> &Arm64ArgTypes,
97 SmallVectorImpl<Type *> &X64ArgTypes,
98 SmallVector<ThunkArgTranslation> &ArgTranslations,
99 bool &HasSretPtr);
100 void getThunkArgTypes(FunctionType *FT, AttributeList AttrList,
101 Arm64ECThunkType TT, raw_ostream &Out,
102 SmallVectorImpl<Type *> &Arm64ArgTypes,
103 SmallVectorImpl<Type *> &X64ArgTypes,
104 SmallVectorImpl<ThunkArgTranslation> &ArgTranslations,
105 bool HasSretPtr);
106 ThunkArgInfo canonicalizeThunkType(Type *T, Align Alignment, bool Ret,
107 uint64_t ArgSizeBytes, raw_ostream &Out);
108};
109
110} // end anonymous namespace
111
112void AArch64Arm64ECCallLowering::getThunkType(
113 FunctionType *FT, AttributeList AttrList, Arm64ECThunkType TT,
114 raw_ostream &Out, FunctionType *&Arm64Ty, FunctionType *&X64Ty,
115 SmallVector<ThunkArgTranslation> &ArgTranslations) {
116 Out << (TT == Arm64ECThunkType::Entry ? "$ientry_thunk$cdecl$"
117 : "$iexit_thunk$cdecl$");
118
119 Type *Arm64RetTy;
120 Type *X64RetTy;
121
122 SmallVector<Type *> Arm64ArgTypes;
123 SmallVector<Type *> X64ArgTypes;
124
125 // The first argument to a thunk is the called function, stored in x9.
126 // For exit thunks, we pass the called function down to the emulator;
127 // for entry/guest exit thunks, we just call the Arm64 function directly.
128 if (TT == Arm64ECThunkType::Exit)
129 Arm64ArgTypes.push_back(PtrTy);
130 X64ArgTypes.push_back(PtrTy);
131
132 bool HasSretPtr = false;
133 getThunkRetType(FT, AttrList, Out, Arm64RetTy, X64RetTy, Arm64ArgTypes,
134 X64ArgTypes, ArgTranslations, HasSretPtr);
135
136 getThunkArgTypes(FT, AttrList, TT, Out, Arm64ArgTypes, X64ArgTypes,
137 ArgTranslations, HasSretPtr);
138
139 Arm64Ty = FunctionType::get(Arm64RetTy, Arm64ArgTypes, false);
140
141 X64Ty = FunctionType::get(X64RetTy, X64ArgTypes, false);
142}
143
144void AArch64Arm64ECCallLowering::getThunkArgTypes(
145 FunctionType *FT, AttributeList AttrList, Arm64ECThunkType TT,
146 raw_ostream &Out, SmallVectorImpl<Type *> &Arm64ArgTypes,
147 SmallVectorImpl<Type *> &X64ArgTypes,
148 SmallVectorImpl<ThunkArgTranslation> &ArgTranslations, bool HasSretPtr) {
149
150 Out << "$";
151 if (FT->isVarArg()) {
152 // We treat the variadic function's thunk as a normal function
153 // with the following type on the ARM side:
154 // rettype exitthunk(
155 // ptr x9, ptr x0, i64 x1, i64 x2, i64 x3, ptr x4, i64 x5)
156 //
157 // that can coverage all types of variadic function.
158 // x9 is similar to normal exit thunk, store the called function.
159 // x0-x3 is the arguments be stored in registers.
160 // x4 is the address of the arguments on the stack.
161 // x5 is the size of the arguments on the stack.
162 //
163 // On the x64 side, it's the same except that x5 isn't set.
164 //
165 // If both the ARM and X64 sides are sret, there are only three
166 // arguments in registers.
167 //
168 // If the X64 side is sret, but the ARM side isn't, we pass an extra value
169 // to/from the X64 side, and let SelectionDAG transform it into a memory
170 // location.
171 Out << "varargs";
172
173 // x0-x3
174 for (int i = HasSretPtr ? 1 : 0; i < 4; i++) {
175 Arm64ArgTypes.push_back(I64Ty);
176 X64ArgTypes.push_back(I64Ty);
177 ArgTranslations.push_back(ThunkArgTranslation::Direct);
178 }
179
180 // x4
181 Arm64ArgTypes.push_back(PtrTy);
182 X64ArgTypes.push_back(PtrTy);
183 ArgTranslations.push_back(ThunkArgTranslation::Direct);
184 // x5
185 Arm64ArgTypes.push_back(I64Ty);
186 if (TT != Arm64ECThunkType::Entry) {
187 // FIXME: x5 isn't actually used by the x64 side; revisit once we
188 // have proper isel for varargs
189 X64ArgTypes.push_back(I64Ty);
190 ArgTranslations.push_back(ThunkArgTranslation::Direct);
191 }
192 return;
193 }
194
195 unsigned I = 0;
196 if (HasSretPtr)
197 I++;
198
199 if (I == FT->getNumParams()) {
200 Out << "v";
201 return;
202 }
203
204 for (unsigned E = FT->getNumParams(); I != E; ++I) {
205#if 0
206 // FIXME: Need more information about argument size; see
207 // https://reviews.llvm.org/D132926
208 uint64_t ArgSizeBytes = AttrList.getParamArm64ECArgSizeBytes(I);
209 Align ParamAlign = AttrList.getParamAlignment(I).valueOrOne();
210#else
211 uint64_t ArgSizeBytes = 0;
212 Align ParamAlign = Align();
213#endif
214 auto [Arm64Ty, X64Ty, ArgTranslation] =
215 canonicalizeThunkType(FT->getParamType(I), ParamAlign,
216 /*Ret*/ false, ArgSizeBytes, Out);
217 Arm64ArgTypes.push_back(Arm64Ty);
218 X64ArgTypes.push_back(X64Ty);
219 ArgTranslations.push_back(ArgTranslation);
220 }
221}
222
223void AArch64Arm64ECCallLowering::getThunkRetType(
224 FunctionType *FT, AttributeList AttrList, raw_ostream &Out,
225 Type *&Arm64RetTy, Type *&X64RetTy, SmallVectorImpl<Type *> &Arm64ArgTypes,
226 SmallVectorImpl<Type *> &X64ArgTypes,
227 SmallVector<ThunkArgTranslation> &ArgTranslations, bool &HasSretPtr) {
228 Type *T = FT->getReturnType();
229#if 0
230 // FIXME: Need more information about argument size; see
231 // https://reviews.llvm.org/D132926
232 uint64_t ArgSizeBytes = AttrList.getRetArm64ECArgSizeBytes();
233#else
234 int64_t ArgSizeBytes = 0;
235#endif
236 if (T->isVoidTy()) {
237 if (FT->getNumParams()) {
238 Attribute SRetAttr0 = AttrList.getParamAttr(0, Attribute::StructRet);
239 Attribute InRegAttr0 = AttrList.getParamAttr(0, Attribute::InReg);
240 Attribute SRetAttr1, InRegAttr1;
241 if (FT->getNumParams() > 1) {
242 // Also check the second parameter (for class methods, the first
243 // parameter is "this", and the second parameter is the sret pointer.)
244 // It doesn't matter which one is sret.
245 SRetAttr1 = AttrList.getParamAttr(1, Attribute::StructRet);
246 InRegAttr1 = AttrList.getParamAttr(1, Attribute::InReg);
247 }
248 if ((SRetAttr0.isValid() && InRegAttr0.isValid()) ||
249 (SRetAttr1.isValid() && InRegAttr1.isValid())) {
250 // sret+inreg indicates a call that returns a C++ class value. This is
251 // actually equivalent to just passing and returning a void* pointer
252 // as the first or second argument. Translate it that way, instead of
253 // trying to model "inreg" in the thunk's calling convention; this
254 // simplfies the rest of the code, and matches MSVC mangling.
255 Out << "i8";
256 Arm64RetTy = I64Ty;
257 X64RetTy = I64Ty;
258 return;
259 }
260 if (SRetAttr0.isValid()) {
261 // FIXME: Sanity-check the sret type; if it's an integer or pointer,
262 // we'll get screwy mangling/codegen.
263 // FIXME: For large struct types, mangle as an integer argument and
264 // integer return, so we can reuse more thunks, instead of "m" syntax.
265 // (MSVC mangles this case as an integer return with no argument, but
266 // that's a miscompile.)
267 Type *SRetType = SRetAttr0.getValueAsType();
268 Align SRetAlign = AttrList.getParamAlignment(0).valueOrOne();
269 canonicalizeThunkType(SRetType, SRetAlign, /*Ret*/ true, ArgSizeBytes,
270 Out);
271 Arm64RetTy = VoidTy;
272 X64RetTy = VoidTy;
273 Arm64ArgTypes.push_back(FT->getParamType(0));
274 X64ArgTypes.push_back(FT->getParamType(0));
275 ArgTranslations.push_back(ThunkArgTranslation::Direct);
276 HasSretPtr = true;
277 return;
278 }
279 }
280
281 Out << "v";
282 Arm64RetTy = VoidTy;
283 X64RetTy = VoidTy;
284 return;
285 }
286
287 auto info =
288 canonicalizeThunkType(T, Align(), /*Ret*/ true, ArgSizeBytes, Out);
289 Arm64RetTy = info.Arm64Ty;
290 X64RetTy = info.X64Ty;
291 if (X64RetTy->isPointerTy()) {
292 // If the X64 type is canonicalized to a pointer, that means it's
293 // passed/returned indirectly. For a return value, that means it's an
294 // sret pointer.
295 X64ArgTypes.push_back(X64RetTy);
296 X64RetTy = VoidTy;
297 }
298}
299
300ThunkArgInfo AArch64Arm64ECCallLowering::canonicalizeThunkType(
301 Type *T, Align Alignment, bool Ret, uint64_t ArgSizeBytes,
302 raw_ostream &Out) {
303
304 auto direct = [](Type *T) {
305 return ThunkArgInfo{T, T, ThunkArgTranslation::Direct};
306 };
307
308 auto bitcast = [this](Type *Arm64Ty, uint64_t SizeInBytes) {
309 return ThunkArgInfo{Arm64Ty,
310 llvm::Type::getIntNTy(M->getContext(), SizeInBytes * 8),
311 ThunkArgTranslation::Bitcast};
312 };
313
314 auto pointerIndirection = [this](Type *Arm64Ty) {
315 return ThunkArgInfo{Arm64Ty, PtrTy,
316 ThunkArgTranslation::PointerIndirection};
317 };
318
319 if (T->isHalfTy()) {
320 // Prefix with `llvm` since MSVC doesn't specify `_Float16`
321 Out << "__llvm_h__";
322 return direct(T);
323 }
324
325 if (T->isFloatTy()) {
326 Out << "f";
327 return direct(T);
328 }
329
330 if (T->isDoubleTy()) {
331 Out << "d";
332 return direct(T);
333 }
334
335 if (T->isFloatingPointTy()) {
336 report_fatal_error("Only 16, 32, and 64 bit floating points are supported "
337 "for ARM64EC thunks");
338 }
339
340 auto &DL = M->getDataLayout();
341
342 if (auto *StructTy = dyn_cast<StructType>(T))
343 if (StructTy->getNumElements() == 1)
344 T = StructTy->getElementType(0);
345
346 if (T->isArrayTy()) {
347 Type *ElementTy = T->getArrayElementType();
348 uint64_t ElementCnt = T->getArrayNumElements();
349 uint64_t ElementSizePerBytes = DL.getTypeSizeInBits(ElementTy) / 8;
350 uint64_t TotalSizeBytes = ElementCnt * ElementSizePerBytes;
351 if (ElementTy->isHalfTy() || ElementTy->isFloatTy() ||
352 ElementTy->isDoubleTy()) {
353 if (ElementTy->isHalfTy())
354 // Prefix with `llvm` since MSVC doesn't specify `_Float16`
355 Out << "__llvm_H__";
356 else if (ElementTy->isFloatTy())
357 Out << "F";
358 else if (ElementTy->isDoubleTy())
359 Out << "D";
360 Out << TotalSizeBytes;
361 if (Alignment.value() >= 16 && !Ret)
362 Out << "a" << Alignment.value();
363 if (TotalSizeBytes <= 8) {
364 // Arm64 returns small structs of float/double in float registers;
365 // X64 uses RAX.
366 return bitcast(T, TotalSizeBytes);
367 } else {
368 // Struct is passed directly on Arm64, but indirectly on X64.
369 return pointerIndirection(T);
370 }
371 } else if (T->isFloatingPointTy()) {
373 "Only 16, 32, and 64 bit floating points are supported "
374 "for ARM64EC thunks");
375 }
376 }
377
378 if ((T->isIntegerTy() || T->isPointerTy()) && DL.getTypeSizeInBits(T) <= 64) {
379 Out << "i8";
380 return direct(I64Ty);
381 }
382
383 unsigned TypeSize = ArgSizeBytes;
384 if (TypeSize == 0)
385 TypeSize = DL.getTypeSizeInBits(T) / 8;
386 Out << "m";
387 if (TypeSize != 4)
388 Out << TypeSize;
389 if (Alignment.value() >= 16 && !Ret)
390 Out << "a" << Alignment.value();
391 // FIXME: Try to canonicalize Arm64Ty more thoroughly?
392 if (TypeSize == 1 || TypeSize == 2 || TypeSize == 4 || TypeSize == 8) {
393 // Pass directly in an integer register
394 return bitcast(T, TypeSize);
395 } else {
396 // Passed directly on Arm64, but indirectly on X64.
397 return pointerIndirection(T);
398 }
399}
400
401// This function builds the "exit thunk", a function which translates
402// arguments and return values when calling x64 code from AArch64 code.
403Function *AArch64Arm64ECCallLowering::buildExitThunk(FunctionType *FT,
404 AttributeList Attrs) {
405 SmallString<256> ExitThunkName;
406 llvm::raw_svector_ostream ExitThunkStream(ExitThunkName);
407 FunctionType *Arm64Ty, *X64Ty;
408 SmallVector<ThunkArgTranslation> ArgTranslations;
409 getThunkType(FT, Attrs, Arm64ECThunkType::Exit, ExitThunkStream, Arm64Ty,
410 X64Ty, ArgTranslations);
411 if (Function *F = M->getFunction(ExitThunkName))
412 return F;
413
415 ExitThunkName, M);
416 F->setCallingConv(CallingConv::ARM64EC_Thunk_Native);
417 F->setSection(".wowthk$aa");
418 F->setComdat(M->getOrInsertComdat(ExitThunkName));
419 // Copy MSVC, and always set up a frame pointer. (Maybe this isn't necessary.)
420 F->addFnAttr("frame-pointer", "all");
421 // Only copy sret from the first argument. For C++ instance methods, clang can
422 // stick an sret marking on a later argument, but it doesn't actually affect
423 // the ABI, so we can omit it. This avoids triggering a verifier assertion.
424 if (FT->getNumParams()) {
425 auto SRet = Attrs.getParamAttr(0, Attribute::StructRet);
426 auto InReg = Attrs.getParamAttr(0, Attribute::InReg);
427 if (SRet.isValid() && !InReg.isValid())
428 F->addParamAttr(1, SRet);
429 }
430 // FIXME: Copy anything other than sret? Shouldn't be necessary for normal
431 // C ABI, but might show up in other cases.
432 BasicBlock *BB = BasicBlock::Create(M->getContext(), "", F);
433 IRBuilder<> IRB(BB);
434 Value *CalleePtr =
435 M->getOrInsertGlobal("__os_arm64x_dispatch_call_no_redirect", PtrTy);
436 Value *Callee = IRB.CreateLoad(PtrTy, CalleePtr);
437 auto &DL = M->getDataLayout();
439
440 // Pass the called function in x9.
441 auto X64TyOffset = 1;
442 Args.push_back(F->arg_begin());
443
444 Type *RetTy = Arm64Ty->getReturnType();
445 if (RetTy != X64Ty->getReturnType()) {
446 // If the return type is an array or struct, translate it. Values of size
447 // 8 or less go into RAX; bigger values go into memory, and we pass a
448 // pointer.
449 if (DL.getTypeStoreSize(RetTy) > 8) {
450 Args.push_back(IRB.CreateAlloca(RetTy));
451 X64TyOffset++;
452 }
453 }
454
455 for (auto [Arg, X64ArgType, ArgTranslation] : llvm::zip_equal(
456 make_range(F->arg_begin() + 1, F->arg_end()),
457 make_range(X64Ty->param_begin() + X64TyOffset, X64Ty->param_end()),
458 ArgTranslations)) {
459 // Translate arguments from AArch64 calling convention to x86 calling
460 // convention.
461 //
462 // For simple types, we don't need to do any translation: they're
463 // represented the same way. (Implicit sign extension is not part of
464 // either convention.)
465 //
466 // The big thing we have to worry about is struct types... but
467 // fortunately AArch64 clang is pretty friendly here: the cases that need
468 // translation are always passed as a struct or array. (If we run into
469 // some cases where this doesn't work, we can teach clang to mark it up
470 // with an attribute.)
471 //
472 // The first argument is the called function, stored in x9.
473 if (ArgTranslation != ThunkArgTranslation::Direct) {
474 Value *Mem = IRB.CreateAlloca(Arg.getType());
475 IRB.CreateStore(&Arg, Mem);
476 if (ArgTranslation == ThunkArgTranslation::Bitcast) {
477 Type *IntTy = IRB.getIntNTy(DL.getTypeStoreSizeInBits(Arg.getType()));
478 Args.push_back(IRB.CreateLoad(IntTy, Mem));
479 } else {
480 assert(ArgTranslation == ThunkArgTranslation::PointerIndirection);
481 Args.push_back(Mem);
482 }
483 } else {
484 Args.push_back(&Arg);
485 }
486 assert(Args.back()->getType() == X64ArgType);
487 }
488 // FIXME: Transfer necessary attributes? sret? anything else?
489
490 CallInst *Call = IRB.CreateCall(X64Ty, Callee, Args);
491 Call->setCallingConv(CallingConv::ARM64EC_Thunk_X64);
492
493 Value *RetVal = Call;
494 if (RetTy != X64Ty->getReturnType()) {
495 // If we rewrote the return type earlier, convert the return value to
496 // the proper type.
497 if (DL.getTypeStoreSize(RetTy) > 8) {
498 RetVal = IRB.CreateLoad(RetTy, Args[1]);
499 } else {
500 Value *CastAlloca = IRB.CreateAlloca(RetTy);
501 IRB.CreateStore(Call, CastAlloca);
502 RetVal = IRB.CreateLoad(RetTy, CastAlloca);
503 }
504 }
505
506 if (RetTy->isVoidTy())
507 IRB.CreateRetVoid();
508 else
509 IRB.CreateRet(RetVal);
510 return F;
511}
512
513// This function builds the "entry thunk", a function which translates
514// arguments and return values when calling AArch64 code from x64 code.
515Function *AArch64Arm64ECCallLowering::buildEntryThunk(Function *F) {
516 SmallString<256> EntryThunkName;
517 llvm::raw_svector_ostream EntryThunkStream(EntryThunkName);
518 FunctionType *Arm64Ty, *X64Ty;
519 SmallVector<ThunkArgTranslation> ArgTranslations;
520 getThunkType(F->getFunctionType(), F->getAttributes(),
521 Arm64ECThunkType::Entry, EntryThunkStream, Arm64Ty, X64Ty,
522 ArgTranslations);
523 if (Function *F = M->getFunction(EntryThunkName))
524 return F;
525
527 EntryThunkName, M);
528 Thunk->setCallingConv(CallingConv::ARM64EC_Thunk_X64);
529 Thunk->setSection(".wowthk$aa");
530 Thunk->setComdat(M->getOrInsertComdat(EntryThunkName));
531 // Copy MSVC, and always set up a frame pointer. (Maybe this isn't necessary.)
532 Thunk->addFnAttr("frame-pointer", "all");
533
534 BasicBlock *BB = BasicBlock::Create(M->getContext(), "", Thunk);
535 IRBuilder<> IRB(BB);
536
537 Type *RetTy = Arm64Ty->getReturnType();
538 Type *X64RetType = X64Ty->getReturnType();
539
540 bool TransformDirectToSRet = X64RetType->isVoidTy() && !RetTy->isVoidTy();
541 unsigned ThunkArgOffset = TransformDirectToSRet ? 2 : 1;
542 unsigned PassthroughArgSize =
543 (F->isVarArg() ? 5 : Thunk->arg_size()) - ThunkArgOffset;
544 assert(ArgTranslations.size() == (F->isVarArg() ? 5 : PassthroughArgSize));
545
546 // Translate arguments to call.
548 for (unsigned i = 0; i != PassthroughArgSize; ++i) {
549 Value *Arg = Thunk->getArg(i + ThunkArgOffset);
550 Type *ArgTy = Arm64Ty->getParamType(i);
551 ThunkArgTranslation ArgTranslation = ArgTranslations[i];
552 if (ArgTranslation != ThunkArgTranslation::Direct) {
553 // Translate array/struct arguments to the expected type.
554 if (ArgTranslation == ThunkArgTranslation::Bitcast) {
555 Value *CastAlloca = IRB.CreateAlloca(ArgTy);
556 IRB.CreateStore(Arg, CastAlloca);
557 Arg = IRB.CreateLoad(ArgTy, CastAlloca);
558 } else {
559 assert(ArgTranslation == ThunkArgTranslation::PointerIndirection);
560 Arg = IRB.CreateLoad(ArgTy, Arg);
561 }
562 }
563 assert(Arg->getType() == ArgTy);
564 Args.push_back(Arg);
565 }
566
567 if (F->isVarArg()) {
568 // The 5th argument to variadic entry thunks is used to model the x64 sp
569 // which is passed to the thunk in x4, this can be passed to the callee as
570 // the variadic argument start address after skipping over the 32 byte
571 // shadow store.
572
573 // The EC thunk CC will assign any argument marked as InReg to x4.
574 Thunk->addParamAttr(5, Attribute::InReg);
575 Value *Arg = Thunk->getArg(5);
576 Arg = IRB.CreatePtrAdd(Arg, IRB.getInt64(0x20));
577 Args.push_back(Arg);
578
579 // Pass in a zero variadic argument size (in x5).
580 Args.push_back(IRB.getInt64(0));
581 }
582
583 // Call the function passed to the thunk.
584 Value *Callee = Thunk->getArg(0);
585 CallInst *Call = IRB.CreateCall(Arm64Ty, Callee, Args);
586
587 auto SRetAttr = F->getAttributes().getParamAttr(0, Attribute::StructRet);
588 auto InRegAttr = F->getAttributes().getParamAttr(0, Attribute::InReg);
589 if (SRetAttr.isValid() && !InRegAttr.isValid()) {
590 Thunk->addParamAttr(1, SRetAttr);
591 Call->addParamAttr(0, SRetAttr);
592 }
593
594 Value *RetVal = Call;
595 if (TransformDirectToSRet) {
596 IRB.CreateStore(RetVal, Thunk->getArg(1));
597 } else if (X64RetType != RetTy) {
598 Value *CastAlloca = IRB.CreateAlloca(X64RetType);
599 IRB.CreateStore(Call, CastAlloca);
600 RetVal = IRB.CreateLoad(X64RetType, CastAlloca);
601 }
602
603 // Return to the caller. Note that the isel has code to translate this
604 // "ret" to a tail call to __os_arm64x_dispatch_ret. (Alternatively, we
605 // could emit a tail call here, but that would require a dedicated calling
606 // convention, which seems more complicated overall.)
607 if (X64RetType->isVoidTy())
608 IRB.CreateRetVoid();
609 else
610 IRB.CreateRet(RetVal);
611
612 return Thunk;
613}
614
615std::optional<std::string> getArm64ECMangledFunctionName(GlobalValue &GV) {
616 if (!GV.hasName()) {
617 GV.setName("__unnamed");
618 }
619
621}
622
623// Builds the "guest exit thunk", a helper to call a function which may or may
624// not be an exit thunk. (We optimistically assume non-dllimport function
625// declarations refer to functions defined in AArch64 code; if the linker
626// can't prove that, we use this routine instead.)
627Function *AArch64Arm64ECCallLowering::buildGuestExitThunk(Function *F) {
628 llvm::raw_null_ostream NullThunkName;
629 FunctionType *Arm64Ty, *X64Ty;
630 SmallVector<ThunkArgTranslation> ArgTranslations;
631 getThunkType(F->getFunctionType(), F->getAttributes(),
632 Arm64ECThunkType::GuestExit, NullThunkName, Arm64Ty, X64Ty,
633 ArgTranslations);
634 auto MangledName = getArm64ECMangledFunctionName(*F);
635 assert(MangledName && "Can't guest exit to function that's already native");
636 std::string ThunkName = *MangledName;
637 if (ThunkName[0] == '?' && ThunkName.find("@") != std::string::npos) {
638 ThunkName.insert(ThunkName.find("@"), "$exit_thunk");
639 } else {
640 ThunkName.append("$exit_thunk");
641 }
643 Function::Create(Arm64Ty, GlobalValue::WeakODRLinkage, 0, ThunkName, M);
644 GuestExit->setComdat(M->getOrInsertComdat(ThunkName));
645 GuestExit->setSection(".wowthk$aa");
646 GuestExit->addMetadata(
647 "arm64ec_unmangled_name",
649 MDString::get(M->getContext(), F->getName())));
650 GuestExit->setMetadata(
651 "arm64ec_ecmangled_name",
653 MDString::get(M->getContext(), *MangledName)));
654 F->setMetadata("arm64ec_hasguestexit", MDNode::get(M->getContext(), {}));
656 IRBuilder<> B(BB);
657
658 // Create new call instruction. The call check should always be a call,
659 // even if the original CallBase is an Invoke or CallBr instructio.
660 // This is treated as a direct call, so do not use GuardFnCFGlobal.
661 LoadInst *GuardCheckLoad = B.CreateLoad(PtrTy, GuardFnGlobal);
662 Function *Thunk = buildExitThunk(F->getFunctionType(), F->getAttributes());
663 CallInst *GuardCheck = B.CreateCall(
664 GuardFnType, GuardCheckLoad, {F, Thunk});
665 Value *GuardCheckDest = B.CreateExtractValue(GuardCheck, 0);
666 Value *GuardFinalDest = B.CreateExtractValue(GuardCheck, 1);
667
668 // Ensure that the first argument is passed in the correct register.
669 GuardCheck->setCallingConv(CallingConv::CFGuard_Check);
670
672 OperandBundleDef OB("cfguardtarget", GuardFinalDest);
673 CallInst *Call = B.CreateCall(Arm64Ty, GuardCheckDest, Args, OB);
675
676 if (Call->getType()->isVoidTy())
677 B.CreateRetVoid();
678 else
679 B.CreateRet(Call);
680
681 auto SRetAttr = F->getAttributes().getParamAttr(0, Attribute::StructRet);
682 auto InRegAttr = F->getAttributes().getParamAttr(0, Attribute::InReg);
683 if (SRetAttr.isValid() && !InRegAttr.isValid()) {
684 GuestExit->addParamAttr(0, SRetAttr);
685 Call->addParamAttr(0, SRetAttr);
686 }
687
688 return GuestExit;
689}
690
691Function *
692AArch64Arm64ECCallLowering::buildPatchableThunk(GlobalAlias *UnmangledAlias,
693 GlobalAlias *MangledAlias) {
694 llvm::raw_null_ostream NullThunkName;
695 FunctionType *Arm64Ty, *X64Ty;
696 Function *F = cast<Function>(MangledAlias->getAliasee());
697 SmallVector<ThunkArgTranslation> ArgTranslations;
698 getThunkType(F->getFunctionType(), F->getAttributes(),
699 Arm64ECThunkType::GuestExit, NullThunkName, Arm64Ty, X64Ty,
700 ArgTranslations);
701 std::string ThunkName(MangledAlias->getName());
702 if (ThunkName[0] == '?' && ThunkName.find("@") != std::string::npos) {
703 ThunkName.insert(ThunkName.find("@"), "$hybpatch_thunk");
704 } else {
705 ThunkName.append("$hybpatch_thunk");
706 }
707
709 Function::Create(Arm64Ty, GlobalValue::WeakODRLinkage, 0, ThunkName, M);
710 GuestExit->setComdat(M->getOrInsertComdat(ThunkName));
711 GuestExit->setSection(".wowthk$aa");
713 IRBuilder<> B(BB);
714
715 // Load the global symbol as a pointer to the check function.
716 LoadInst *DispatchLoad = B.CreateLoad(PtrTy, DispatchFnGlobal);
717
718 // Create new dispatch call instruction.
719 Function *ExitThunk =
720 buildExitThunk(F->getFunctionType(), F->getAttributes());
721 CallInst *Dispatch =
722 B.CreateCall(DispatchFnType, DispatchLoad,
723 {UnmangledAlias, ExitThunk, UnmangledAlias->getAliasee()});
724
725 // Ensure that the first arguments are passed in the correct registers.
726 Dispatch->setCallingConv(CallingConv::CFGuard_Check);
727
729 CallInst *Call = B.CreateCall(Arm64Ty, Dispatch, Args);
731
732 if (Call->getType()->isVoidTy())
733 B.CreateRetVoid();
734 else
735 B.CreateRet(Call);
736
737 auto SRetAttr = F->getAttributes().getParamAttr(0, Attribute::StructRet);
738 auto InRegAttr = F->getAttributes().getParamAttr(0, Attribute::InReg);
739 if (SRetAttr.isValid() && !InRegAttr.isValid()) {
740 GuestExit->addParamAttr(0, SRetAttr);
741 Call->addParamAttr(0, SRetAttr);
742 }
743
744 MangledAlias->setAliasee(GuestExit);
745 return GuestExit;
746}
747
748// Lower an indirect call with inline code.
749void AArch64Arm64ECCallLowering::lowerCall(CallBase *CB) {
750 IRBuilder<> B(CB);
751 Value *CalledOperand = CB->getCalledOperand();
752
753 // If the indirect call is called within catchpad or cleanuppad,
754 // we need to copy "funclet" bundle of the call.
756 if (auto Bundle = CB->getOperandBundle(LLVMContext::OB_funclet))
757 Bundles.push_back(OperandBundleDef(*Bundle));
758
759 // Load the global symbol as a pointer to the check function.
760 Value *GuardFn;
761 if ((CFGuardModuleFlag == ControlFlowGuardMode::Enabled) &&
762 !CB->hasFnAttr("guard_nocf"))
763 GuardFn = GuardFnCFGlobal;
764 else
765 GuardFn = GuardFnGlobal;
766 LoadInst *GuardCheckLoad = B.CreateLoad(PtrTy, GuardFn);
767
768 // Create new call instruction. The CFGuard check should always be a call,
769 // even if the original CallBase is an Invoke or CallBr instruction.
770 Function *Thunk = buildExitThunk(CB->getFunctionType(), CB->getAttributes());
771 CallInst *GuardCheck =
772 B.CreateCall(GuardFnType, GuardCheckLoad, {CalledOperand, Thunk},
773 Bundles);
774 Value *GuardCheckDest = B.CreateExtractValue(GuardCheck, 0);
775 Value *GuardFinalDest = B.CreateExtractValue(GuardCheck, 1);
776
777 // Ensure that the first argument is passed in the correct register.
778 GuardCheck->setCallingConv(CallingConv::CFGuard_Check);
779
780 // Update the call: set the callee, and add a bundle with the final
781 // destination,
782 CB->setCalledOperand(GuardCheckDest);
783 OperandBundleDef OB("cfguardtarget", GuardFinalDest);
785 OB, CB->getIterator());
786 NewCall->copyMetadata(*CB);
787 CB->replaceAllUsesWith(NewCall);
788 CB->eraseFromParent();
789}
790
791bool AArch64Arm64ECCallLowering::runOnModule(Module &Mod) {
792 if (!GenerateThunks)
793 return false;
794
795 M = &Mod;
796
797 // Check if this module has the cfguard flag and read its value.
798 CFGuardModuleFlag = M->getControlFlowGuardMode();
799
800 PtrTy = PointerType::getUnqual(M->getContext());
801 I64Ty = Type::getInt64Ty(M->getContext());
802 VoidTy = Type::getVoidTy(M->getContext());
803
804 GuardFnType =
805 FunctionType::get(StructType::get(PtrTy, PtrTy), {PtrTy, PtrTy}, false);
806 DispatchFnType = FunctionType::get(PtrTy, {PtrTy, PtrTy, PtrTy}, false);
807 GuardFnCFGlobal = M->getOrInsertGlobal("__os_arm64x_check_icall_cfg", PtrTy);
808 GuardFnGlobal = M->getOrInsertGlobal("__os_arm64x_check_icall", PtrTy);
809 DispatchFnGlobal = M->getOrInsertGlobal("__os_arm64x_dispatch_call", PtrTy);
810
811 // Mangle names of function aliases and add the alias name to
812 // arm64ec_unmangled_name metadata to ensure a weak anti-dependency symbol is
813 // emitted for the alias as well. Do this early, before handling
814 // hybrid_patchable functions, to avoid mangling their aliases.
815 for (GlobalAlias &A : Mod.aliases()) {
816 auto F = dyn_cast_or_null<Function>(A.getAliaseeObject());
817 if (!F)
818 continue;
819 if (std::optional<std::string> MangledName =
821 F->addMetadata("arm64ec_unmangled_name",
823 MDString::get(M->getContext(), A.getName())));
824 A.setName(MangledName.value());
825 }
826 }
827
828 DenseMap<GlobalAlias *, GlobalAlias *> FnsMap;
829 SetVector<GlobalAlias *> PatchableFns;
830
831 for (Function &F : Mod) {
832 if (F.hasPersonalityFn()) {
833 GlobalValue *PersFn =
834 cast<GlobalValue>(F.getPersonalityFn()->stripPointerCasts());
835 if (PersFn->getValueType() && PersFn->getValueType()->isFunctionTy()) {
836 if (std::optional<std::string> MangledName =
838 PersFn->setName(MangledName.value());
839 }
840 }
841 }
842
843 if (!F.hasFnAttribute(Attribute::HybridPatchable) ||
844 F.isDeclarationForLinker() || F.hasLocalLinkage() ||
845 F.getName().ends_with(HybridPatchableTargetSuffix))
846 continue;
847
848 // Rename hybrid patchable functions and change callers to use a global
849 // alias instead.
850 if (std::optional<std::string> MangledName =
852 std::string OrigName(F.getName());
853 F.setName(MangledName.value() + HybridPatchableTargetSuffix);
854
855 // The unmangled symbol is a weak alias to an undefined symbol with the
856 // "EXP+" prefix. This undefined symbol is resolved by the linker by
857 // creating an x86 thunk that jumps back to the actual EC target. Since we
858 // can't represent that in IR, we create an alias to the target instead.
859 // The "EXP+" symbol is set as metadata, which is then used by
860 // emitGlobalAlias to emit the right alias.
861 auto *A =
864 MangledName.value(), &F);
865 F.replaceUsesWithIf(AM,
866 [](Use &U) { return isa<GlobalAlias>(U.getUser()); });
867 F.replaceAllUsesWith(A);
868 F.setMetadata("arm64ec_exp_name",
871 "EXP+" + MangledName.value())));
872 A->setAliasee(&F);
873 AM->setAliasee(&F);
874
875 if (F.hasDLLExportStorageClass()) {
876 A->setDLLStorageClass(GlobalValue::DLLExportStorageClass);
877 F.setDLLStorageClass(GlobalValue::DefaultStorageClass);
878 }
879
880 FnsMap[A] = AM;
881 PatchableFns.insert(A);
882 }
883 }
884
885 SetVector<GlobalValue *> DirectCalledFns;
886 for (Function &F : Mod)
887 if (!F.isDeclarationForLinker() &&
888 F.getCallingConv() != CallingConv::ARM64EC_Thunk_Native &&
889 F.getCallingConv() != CallingConv::ARM64EC_Thunk_X64)
890 processFunction(F, DirectCalledFns, FnsMap);
891
892 struct ThunkInfo {
893 Constant *Src;
894 Constant *Dst;
896 };
897 SmallVector<ThunkInfo> ThunkMapping;
898 for (Function &F : Mod) {
899 if (!F.isDeclarationForLinker() &&
900 (!F.hasLocalLinkage() || F.hasAddressTaken()) &&
901 F.getCallingConv() != CallingConv::ARM64EC_Thunk_Native &&
902 F.getCallingConv() != CallingConv::ARM64EC_Thunk_X64) {
903 if (!F.hasComdat())
904 F.setComdat(Mod.getOrInsertComdat(F.getName()));
905 ThunkMapping.push_back(
906 {&F, buildEntryThunk(&F), Arm64ECThunkType::Entry});
907 }
908 }
909 for (GlobalValue *O : DirectCalledFns) {
910 auto GA = dyn_cast<GlobalAlias>(O);
911 auto F = dyn_cast<Function>(GA ? GA->getAliasee() : O);
912 ThunkMapping.push_back(
913 {O, buildExitThunk(F->getFunctionType(), F->getAttributes()),
914 Arm64ECThunkType::Exit});
915 if (!GA && !F->hasDLLImportStorageClass())
916 ThunkMapping.push_back(
917 {buildGuestExitThunk(F), F, Arm64ECThunkType::GuestExit});
918 }
919 for (GlobalAlias *A : PatchableFns) {
920 Function *Thunk = buildPatchableThunk(A, FnsMap[A]);
921 ThunkMapping.push_back({Thunk, A, Arm64ECThunkType::GuestExit});
922 }
923
924 if (!ThunkMapping.empty()) {
925 SmallVector<Constant *> ThunkMappingArrayElems;
926 for (ThunkInfo &Thunk : ThunkMapping) {
927 ThunkMappingArrayElems.push_back(ConstantStruct::getAnon(
928 {Thunk.Src, Thunk.Dst,
929 ConstantInt::get(M->getContext(), APInt(32, uint8_t(Thunk.Kind)))}));
930 }
931 Constant *ThunkMappingArray = ConstantArray::get(
932 llvm::ArrayType::get(ThunkMappingArrayElems[0]->getType(),
933 ThunkMappingArrayElems.size()),
934 ThunkMappingArrayElems);
935 new GlobalVariable(Mod, ThunkMappingArray->getType(), /*isConstant*/ false,
936 GlobalValue::ExternalLinkage, ThunkMappingArray,
937 "llvm.arm64ec.symbolmap");
938 }
939
940 return true;
941}
942
943bool AArch64Arm64ECCallLowering::processFunction(
944 Function &F, SetVector<GlobalValue *> &DirectCalledFns,
945 DenseMap<GlobalAlias *, GlobalAlias *> &FnsMap) {
946 SmallVector<CallBase *, 8> IndirectCalls;
947
948 // For ARM64EC targets, a function definition's name is mangled differently
949 // from the normal symbol. We currently have no representation of this sort
950 // of symbol in IR, so we change the name to the mangled name, then store
951 // the unmangled name as metadata. Later passes that need the unmangled
952 // name (emitting the definition) can grab it from the metadata.
953 //
954 // FIXME: Handle functions with weak linkage?
955 if (!F.hasLocalLinkage() || F.hasAddressTaken()) {
956 if (std::optional<std::string> MangledName =
958 F.addMetadata("arm64ec_unmangled_name",
960 MDString::get(M->getContext(), F.getName())));
961 if (F.hasComdat() && F.getComdat()->getName() == F.getName()) {
962 Comdat *MangledComdat = M->getOrInsertComdat(MangledName.value());
963 SmallVector<GlobalObject *> ComdatUsers =
964 to_vector(F.getComdat()->getUsers());
965 for (GlobalObject *User : ComdatUsers)
966 User->setComdat(MangledComdat);
967 }
968 F.setName(MangledName.value());
969 }
970 }
971
972 // Iterate over the instructions to find all indirect call/invoke/callbr
973 // instructions. Make a separate list of pointers to indirect
974 // call/invoke/callbr instructions because the original instructions will be
975 // deleted as the checks are added.
976 for (BasicBlock &BB : F) {
977 for (Instruction &I : BB) {
978 auto *CB = dyn_cast<CallBase>(&I);
979 if (!CB || CB->getCallingConv() == CallingConv::ARM64EC_Thunk_X64 ||
980 CB->isInlineAsm())
981 continue;
982
983 // We need to instrument any call that isn't directly calling an
984 // ARM64 function.
985 //
986 // FIXME: getCalledFunction() fails if there's a bitcast (e.g.
987 // unprototyped functions in C)
988 if (Function *F = CB->getCalledFunction()) {
989 if (!LowerDirectToIndirect || F->hasLocalLinkage() ||
990 F->isIntrinsic() || !F->isDeclarationForLinker())
991 continue;
992
993 DirectCalledFns.insert(F);
994 continue;
995 }
996
997 // Use mangled global alias for direct calls to patchable functions.
998 if (GlobalAlias *A = dyn_cast<GlobalAlias>(CB->getCalledOperand())) {
999 auto I = FnsMap.find(A);
1000 if (I != FnsMap.end()) {
1001 CB->setCalledOperand(I->second);
1002 DirectCalledFns.insert(I->first);
1003 continue;
1004 }
1005 }
1006
1007 IndirectCalls.push_back(CB);
1008 ++Arm64ECCallsLowered;
1009 }
1010 }
1011
1012 if (IndirectCalls.empty())
1013 return false;
1014
1015 for (CallBase *CB : IndirectCalls)
1016 lowerCall(CB);
1017
1018 return true;
1019}
1020
1021char AArch64Arm64ECCallLowering::ID = 0;
1022INITIALIZE_PASS(AArch64Arm64ECCallLowering, "Arm64ECCallLowering",
1023 "AArch64Arm64ECCallLowering", false, false)
1024
1026 return new AArch64Arm64ECCallLowering;
1027}
static cl::opt< bool > LowerDirectToIndirect("arm64ec-lower-direct-to-indirect", cl::Hidden, cl::init(true))
static cl::opt< bool > GenerateThunks("arm64ec-generate-thunks", cl::Hidden, cl::init(true))
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Module.h This file contains the declarations for the Module class.
lazy value info
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
#define T
static bool processFunction(Function &F, NVPTXTargetMachine &TM)
if(auto Err=PB.parsePassPipeline(MPM, Passes)) return wrap(std MPM run * Mod
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallString class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM_ABI Type * getValueAsType() const
Return the attribute's value as a Type.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
bool isInlineAsm() const
Check if this call is an inline asm statement.
void setCallingConv(CallingConv::ID CC)
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
CallingConv::ID getCallingConv() const
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
Value * getCalledOperand() const
FunctionType * getFunctionType() const
void setCalledOperand(Value *V)
AttributeList getAttributes() const
Return the attributes for this call.
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
void setTailCallKind(TailCallKind TCK)
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static Constant * getAnon(ArrayRef< Constant * > V, bool Packed=false)
Return an anonymous struct that has the specified elements.
Definition Constants.h:491
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
iterator end()
Definition DenseMap.h:81
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:166
LLVM_ABI void setAliasee(Constant *Aliasee)
These methods retrieve and set alias target.
Definition Globals.cpp:639
const Constant * getAliasee() const
Definition GlobalAlias.h:87
static LLVM_ABI GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition Globals.cpp:607
@ DLLExportStorageClass
Function to be accessible from DLL.
Definition GlobalValue.h:77
@ WeakODRLinkage
Same, but only replaced by something equivalent.
Definition GlobalValue.h:58
@ ExternalLinkage
Externally visible function.
Definition GlobalValue.h:53
@ LinkOnceODRLinkage
Same, but only replaced by something equivalent.
Definition GlobalValue.h:56
Type * getValueType() const
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1569
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Definition Metadata.cpp:614
ModulePass class - This class is used to implement unstructured interprocedural optimizations and ana...
Definition Pass.h:255
LLVMContext & getContext() const
Get the global data context.
Definition Module.h:285
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
Definition Module.cpp:235
ControlFlowGuardMode getControlFlowGuardMode() const
Gets the Control Flow Guard mode.
Definition Module.cpp:944
Comdat * getOrInsertComdat(StringRef Name)
Return the Comdat in the module with the specified name.
Definition Module.cpp:621
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition Module.h:278
GlobalVariable * getOrInsertGlobal(StringRef Name, Type *Ty, function_ref< GlobalVariable *()> CreateGlobalCallback)
Look up the specified global in the module symbol table.
Definition Module.cpp:262
A container for an operand bundle being viewed as a set of values rather than a set of uses.
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition Type.cpp:413
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Definition Type.h:153
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
Definition Type.h:142
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
Definition Type.h:156
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition Type.h:258
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition Type.cpp:300
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:397
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
self_iterator getIterator()
Definition ilist_node.h:123
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
Arm64ECThunkType
Definition COFF.h:860
@ GuestExit
Definition COFF.h:861
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
@ OB
OB - OneByte - Set if this instruction has a one byte opcode.
initializer< Ty > init(const Ty &Val)
@ User
could "use" a pointer
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI std::optional< std::string > getArm64ECMangledFunctionName(StringRef Name)
Returns the ARM64EC mangled function name unless the input is already mangled.
Definition Mangler.cpp:292
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
Definition STLExtras.h:839
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
ControlFlowGuardMode
Definition CodeGen.h:176
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
ModulePass * createAArch64Arm64ECCallLoweringPass()
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vecto...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
constexpr std::string_view HybridPatchableTargetSuffix
Definition Mangler.h:37
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
OperandBundleDefT< Value * > OperandBundleDef
Definition AutoUpgrade.h:34
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:368
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77