Bug Summary

File: build/source/llvm/include/llvm/Analysis/ObjCARCUtil.h
Warning: line 44, column 12
Called C++ object pointer is null
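
For orientation: this warning means that, on the execution path reconstructed in the annotations below, a C++ member function is invoked through a pointer the analyzer has assumed to be null. The path runs through AArch64CallLowering::lowerCall (step 2 records the value assigned to field 'CB', step 18 assumes 'CB' is null) and ends at line 44 of ObjCARCUtil.h, a header this file includes and calls into. As a minimal, self-contained sketch of the pattern behind a "Called C++ object pointer is null" report, the snippet below uses placeholder types and a hypothetical helper, not the actual LLVM code:

#include <iostream>

struct CallBase {
  bool hasBundle() const { return true; }
};

struct CallLoweringInfo {
  const CallBase *CB = nullptr; // may legitimately be null for some call sites
};

// Hypothetical helper: it dereferences CB unconditionally, so it is only safe
// to call when the caller has already proven CB is non-null.
static bool hasAttachedBundle(const CallBase *CB) {
  return CB->hasBundle(); // "Called C++ object pointer is null" if CB == nullptr
}

int main() {
  CallLoweringInfo Info;                      // Info.CB is null here
  if (Info.CB && hasAttachedBundle(Info.CB))  // short-circuit guards this call
    std::cout << "has bundle\n";
  // An unguarded call such as hasAttachedBundle(Info.CB) on a path where the
  // analyzer assumes Info.CB is null is what produces this kind of report.
  return 0;
}

The short-circuit check in the sketch is the usual way such reports are addressed: every call that can see a null CB must be guarded before the member call is made.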

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name AArch64CallLowering.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-16/lib/clang/16 -I lib/Target/AArch64 -I /build/source/llvm/lib/Target/AArch64 -I include -I /build/source/llvm/include -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/source/= -fcoverage-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/source/= -source-date-epoch 1670969321 -O2 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/= -ferror-limit 19 -fvisibility=hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-12-14-010809-15973-1 -x c++ /build/source/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp

/build/source/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp

1//===--- AArch64CallLowering.cpp - Call lowering --------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements the lowering of LLVM calls to machine code calls for
11/// GlobalISel.
12///
13//===----------------------------------------------------------------------===//
14
15#include "AArch64CallLowering.h"
16#include "AArch64ISelLowering.h"
17#include "AArch64MachineFunctionInfo.h"
18#include "AArch64Subtarget.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/SmallVector.h"
21#include "llvm/Analysis/ObjCARCUtil.h"
22#include "llvm/CodeGen/Analysis.h"
23#include "llvm/CodeGen/CallingConvLower.h"
24#include "llvm/CodeGen/FunctionLoweringInfo.h"
25#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26#include "llvm/CodeGen/GlobalISel/Utils.h"
27#include "llvm/CodeGen/LowLevelType.h"
28#include "llvm/CodeGen/MachineBasicBlock.h"
29#include "llvm/CodeGen/MachineFrameInfo.h"
30#include "llvm/CodeGen/MachineFunction.h"
31#include "llvm/CodeGen/MachineInstrBuilder.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/MachineOperand.h"
34#include "llvm/CodeGen/MachineRegisterInfo.h"
35#include "llvm/CodeGen/TargetRegisterInfo.h"
36#include "llvm/CodeGen/TargetSubtargetInfo.h"
37#include "llvm/CodeGen/ValueTypes.h"
38#include "llvm/IR/Argument.h"
39#include "llvm/IR/Attributes.h"
40#include "llvm/IR/Function.h"
41#include "llvm/IR/Type.h"
42#include "llvm/IR/Value.h"
43#include "llvm/Support/MachineValueType.h"
44#include <algorithm>
45#include <cassert>
46#include <cstdint>
47#include <iterator>
48
49#define DEBUG_TYPE "aarch64-call-lowering"
50
51using namespace llvm;
52
53AArch64CallLowering::AArch64CallLowering(const AArch64TargetLowering &TLI)
54 : CallLowering(&TLI) {}
55
56static void applyStackPassedSmallTypeDAGHack(EVT OrigVT, MVT &ValVT,
57 MVT &LocVT) {
58 // If ValVT is i1/i8/i16, we should set LocVT to i8/i8/i16. This is a legacy
59 // hack because the DAG calls the assignment function with pre-legalized
60 // register typed values, not the raw type.
61 //
62 // This hack is not applied to return values which are not passed on the
63 // stack.
64 if (OrigVT == MVT::i1 || OrigVT == MVT::i8)
65 ValVT = LocVT = MVT::i8;
66 else if (OrigVT == MVT::i16)
67 ValVT = LocVT = MVT::i16;
68}
69
70// Account for i1/i8/i16 stack passed value hack
71static LLT getStackValueStoreTypeHack(const CCValAssign &VA) {
72 const MVT ValVT = VA.getValVT();
73 return (ValVT == MVT::i8 || ValVT == MVT::i16) ? LLT(ValVT)
74 : LLT(VA.getLocVT());
75}
76
77namespace {
78
79struct AArch64IncomingValueAssigner
80 : public CallLowering::IncomingValueAssigner {
81 AArch64IncomingValueAssigner(CCAssignFn *AssignFn_,
82 CCAssignFn *AssignFnVarArg_)
83 : IncomingValueAssigner(AssignFn_, AssignFnVarArg_) {}
84
85 bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
86 CCValAssign::LocInfo LocInfo,
87 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
88 CCState &State) override {
89 applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
90 return IncomingValueAssigner::assignArg(ValNo, OrigVT, ValVT, LocVT,
91 LocInfo, Info, Flags, State);
92 }
93};
94
95struct AArch64OutgoingValueAssigner
96 : public CallLowering::OutgoingValueAssigner {
97 const AArch64Subtarget &Subtarget;
98
99 /// Track if this is used for a return instead of function argument
100 /// passing. We apply a hack to i1/i8/i16 stack passed values, but do not use
101 /// stack passed returns for them and cannot apply the type adjustment.
102 bool IsReturn;
103
104 AArch64OutgoingValueAssigner(CCAssignFn *AssignFn_,
105 CCAssignFn *AssignFnVarArg_,
106 const AArch64Subtarget &Subtarget_,
107 bool IsReturn)
108 : OutgoingValueAssigner(AssignFn_, AssignFnVarArg_),
109 Subtarget(Subtarget_), IsReturn(IsReturn) {}
110
111 bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
112 CCValAssign::LocInfo LocInfo,
113 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
114 CCState &State) override {
115 bool IsCalleeWin = Subtarget.isCallingConvWin64(State.getCallingConv());
116 bool UseVarArgsCCForFixed = IsCalleeWin && State.isVarArg();
117
118 if (!State.isVarArg() && !UseVarArgsCCForFixed && !IsReturn)
119 applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
120
121 bool Res;
122 if (Info.IsFixed && !UseVarArgsCCForFixed)
123 Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
124 else
125 Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);
126
127 StackOffset = State.getNextStackOffset();
128 return Res;
129 }
130};
131
132struct IncomingArgHandler : public CallLowering::IncomingValueHandler {
133 IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
134 : IncomingValueHandler(MIRBuilder, MRI) {}
135
136 Register getStackAddress(uint64_t Size, int64_t Offset,
137 MachinePointerInfo &MPO,
138 ISD::ArgFlagsTy Flags) override {
139 auto &MFI = MIRBuilder.getMF().getFrameInfo();
140
141 // Byval is assumed to be writable memory, but other stack passed arguments
142 // are not.
143 const bool IsImmutable = !Flags.isByVal();
144
145 int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
146 MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
147 auto AddrReg = MIRBuilder.buildFrameIndex(LLT::pointer(0, 64), FI);
148 return AddrReg.getReg(0);
149 }
150
151 LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA,
152 ISD::ArgFlagsTy Flags) const override {
153 // For pointers, we just need to fixup the integer types reported in the
154 // CCValAssign.
155 if (Flags.isPointer())
156 return CallLowering::ValueHandler::getStackValueStoreType(DL, VA, Flags);
157 return getStackValueStoreTypeHack(VA);
158 }
159
160 void assignValueToReg(Register ValVReg, Register PhysReg,
161 CCValAssign VA) override {
162 markPhysRegUsed(PhysReg);
163 IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
164 }
165
166 void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
167 MachinePointerInfo &MPO, CCValAssign &VA) override {
168 MachineFunction &MF = MIRBuilder.getMF();
169
170 LLT ValTy(VA.getValVT());
171 LLT LocTy(VA.getLocVT());
172
173 // Fixup the types for the DAG compatibility hack.
174 if (VA.getValVT() == MVT::i8 || VA.getValVT() == MVT::i16)
175 std::swap(ValTy, LocTy);
176 else {
177 // The calling code knows if this is a pointer or not, we're only touching
178 // the LocTy for the i8/i16 hack.
179 assert(LocTy.getSizeInBits() == MemTy.getSizeInBits());
180 LocTy = MemTy;
181 }
182
183 auto MMO = MF.getMachineMemOperand(
184 MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, LocTy,
185 inferAlignFromPtrInfo(MF, MPO));
186
187 switch (VA.getLocInfo()) {
188 case CCValAssign::LocInfo::ZExt:
189 MIRBuilder.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, ValVReg, Addr, *MMO);
190 return;
191 case CCValAssign::LocInfo::SExt:
192 MIRBuilder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, ValVReg, Addr, *MMO);
193 return;
194 default:
195 MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
196 return;
197 }
198 }
199
200 /// How the physical register gets marked varies between formal
201 /// parameters (it's a basic-block live-in), and a call instruction
202 /// (it's an implicit-def of the BL).
203 virtual void markPhysRegUsed(MCRegister PhysReg) = 0;
204};
205
206struct FormalArgHandler : public IncomingArgHandler {
207 FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
208 : IncomingArgHandler(MIRBuilder, MRI) {}
209
210 void markPhysRegUsed(MCRegister PhysReg) override {
211 MIRBuilder.getMRI()->addLiveIn(PhysReg);
212 MIRBuilder.getMBB().addLiveIn(PhysReg);
213 }
214};
215
216struct CallReturnHandler : public IncomingArgHandler {
217 CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
218 MachineInstrBuilder MIB)
219 : IncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}
220
221 void markPhysRegUsed(MCRegister PhysReg) override {
222 MIB.addDef(PhysReg, RegState::Implicit);
223 }
224
225 MachineInstrBuilder MIB;
226};
227
228/// A special return arg handler for "returned" attribute arg calls.
229struct ReturnedArgCallReturnHandler : public CallReturnHandler {
230 ReturnedArgCallReturnHandler(MachineIRBuilder &MIRBuilder,
231 MachineRegisterInfo &MRI,
232 MachineInstrBuilder MIB)
233 : CallReturnHandler(MIRBuilder, MRI, MIB) {}
234
235 void markPhysRegUsed(MCRegister PhysReg) override {}
236};
237
238struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
239 OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
240 MachineInstrBuilder MIB, bool IsTailCall = false,
241 int FPDiff = 0)
242 : OutgoingValueHandler(MIRBuilder, MRI), MIB(MIB), IsTailCall(IsTailCall),
243 FPDiff(FPDiff),
244 Subtarget(MIRBuilder.getMF().getSubtarget<AArch64Subtarget>()) {}
245
246 Register getStackAddress(uint64_t Size, int64_t Offset,
247 MachinePointerInfo &MPO,
248 ISD::ArgFlagsTy Flags) override {
249 MachineFunction &MF = MIRBuilder.getMF();
250 LLT p0 = LLT::pointer(0, 64);
251 LLT s64 = LLT::scalar(64);
252
253 if (IsTailCall) {
254 assert(!Flags.isByVal() && "byval unhandled with tail calls");
255
256 Offset += FPDiff;
257 int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
258 auto FIReg = MIRBuilder.buildFrameIndex(p0, FI);
259 MPO = MachinePointerInfo::getFixedStack(MF, FI);
260 return FIReg.getReg(0);
261 }
262
263 if (!SPReg)
264 SPReg = MIRBuilder.buildCopy(p0, Register(AArch64::SP)).getReg(0);
265
266 auto OffsetReg = MIRBuilder.buildConstant(s64, Offset);
267
268 auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);
269
270 MPO = MachinePointerInfo::getStack(MF, Offset);
271 return AddrReg.getReg(0);
272 }
273
274 /// We need to fixup the reported store size for certain value types because
275 /// we invert the interpretation of ValVT and LocVT in certain cases. This is
276 /// for compatibility with the DAG call lowering implementation, which we're
277 /// currently building on top of.
278 LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA,
279 ISD::ArgFlagsTy Flags) const override {
280 if (Flags.isPointer())
281 return CallLowering::ValueHandler::getStackValueStoreType(DL, VA, Flags);
282 return getStackValueStoreTypeHack(VA);
283 }
284
285 void assignValueToReg(Register ValVReg, Register PhysReg,
286 CCValAssign VA) override {
287 MIB.addUse(PhysReg, RegState::Implicit);
288 Register ExtReg = extendRegister(ValVReg, VA);
289 MIRBuilder.buildCopy(PhysReg, ExtReg);
290 }
291
292 void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
293 MachinePointerInfo &MPO, CCValAssign &VA) override {
294 MachineFunction &MF = MIRBuilder.getMF();
295 auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, MemTy,
296 inferAlignFromPtrInfo(MF, MPO));
297 MIRBuilder.buildStore(ValVReg, Addr, *MMO);
298 }
299
300 void assignValueToAddress(const CallLowering::ArgInfo &Arg, unsigned RegIndex,
301 Register Addr, LLT MemTy, MachinePointerInfo &MPO,
302 CCValAssign &VA) override {
303 unsigned MaxSize = MemTy.getSizeInBytes() * 8;
304 // For varargs, we always want to extend them to 8 bytes, in which case
305 // we disable setting a max.
306 if (!Arg.IsFixed)
307 MaxSize = 0;
308
309 Register ValVReg = Arg.Regs[RegIndex];
310 if (VA.getLocInfo() != CCValAssign::LocInfo::FPExt) {
311 MVT LocVT = VA.getLocVT();
312 MVT ValVT = VA.getValVT();
313
314 if (VA.getValVT() == MVT::i8 || VA.getValVT() == MVT::i16) {
315 std::swap(ValVT, LocVT);
316 MemTy = LLT(VA.getValVT());
317 }
318
319 ValVReg = extendRegister(ValVReg, VA, MaxSize);
320 } else {
321 // The store does not cover the full allocated stack slot.
322 MemTy = LLT(VA.getValVT());
323 }
324
325 assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA);
326 }
327
328 MachineInstrBuilder MIB;
329
330 bool IsTailCall;
331
332 /// For tail calls, the byte offset of the call's argument area from the
333 /// callee's. Unused elsewhere.
334 int FPDiff;
335
336 // Cache the SP register vreg if we need it more than once in this call site.
337 Register SPReg;
338
339 const AArch64Subtarget &Subtarget;
340};
341} // namespace
342
343static bool doesCalleeRestoreStack(CallingConv::ID CallConv, bool TailCallOpt) {
344 return (CallConv == CallingConv::Fast && TailCallOpt) ||
345 CallConv == CallingConv::Tail || CallConv == CallingConv::SwiftTail;
346}
347
348bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
349 const Value *Val,
350 ArrayRef<Register> VRegs,
351 FunctionLoweringInfo &FLI,
352 Register SwiftErrorVReg) const {
353 auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
354 assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
355 "Return value without a vreg");
356
357 bool Success = true;
358 if (!FLI.CanLowerReturn) {
359 insertSRetStores(MIRBuilder, Val->getType(), VRegs, FLI.DemoteRegister);
360 } else if (!VRegs.empty()) {
361 MachineFunction &MF = MIRBuilder.getMF();
362 const Function &F = MF.getFunction();
363 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
364
365 MachineRegisterInfo &MRI = MF.getRegInfo();
366 const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
367 CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
368 auto &DL = F.getParent()->getDataLayout();
369 LLVMContext &Ctx = Val->getType()->getContext();
370
371 SmallVector<EVT, 4> SplitEVTs;
372 ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
373 assert(VRegs.size() == SplitEVTs.size() &&
374 "For each split Type there should be exactly one VReg.");
375
376 SmallVector<ArgInfo, 8> SplitArgs;
377 CallingConv::ID CC = F.getCallingConv();
378
379 for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
380 Register CurVReg = VRegs[i];
381 ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx), 0};
382 setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
383
384 // i1 is a special case because SDAG i1 true is naturally zero extended
385 // when widened using ANYEXT. We need to do it explicitly here.
386 auto &Flags = CurArgInfo.Flags[0];
387 if (MRI.getType(CurVReg).getSizeInBits() == 1 && !Flags.isSExt() &&
388 !Flags.isZExt()) {
389 CurVReg = MIRBuilder.buildZExt(LLT::scalar(8), CurVReg).getReg(0);
390 } else if (TLI.getNumRegistersForCallingConv(Ctx, CC, SplitEVTs[i]) ==
391 1) {
392 // Some types will need extending as specified by the CC.
393 MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, SplitEVTs[i]);
394 if (EVT(NewVT) != SplitEVTs[i]) {
395 unsigned ExtendOp = TargetOpcode::G_ANYEXT;
396 if (F.getAttributes().hasRetAttr(Attribute::SExt))
397 ExtendOp = TargetOpcode::G_SEXT;
398 else if (F.getAttributes().hasRetAttr(Attribute::ZExt))
399 ExtendOp = TargetOpcode::G_ZEXT;
400
401 LLT NewLLT(NewVT);
402 LLT OldLLT(MVT::getVT(CurArgInfo.Ty));
403 CurArgInfo.Ty = EVT(NewVT).getTypeForEVT(Ctx);
404 // Instead of an extend, we might have a vector type which needs
405 // padding with more elements, e.g. <2 x half> -> <4 x half>.
406 if (NewVT.isVector()) {
407 if (OldLLT.isVector()) {
408 if (NewLLT.getNumElements() > OldLLT.getNumElements()) {
409 // We don't handle VA types which are not exactly twice the
410 // size, but can easily be done in future.
411 if (NewLLT.getNumElements() != OldLLT.getNumElements() * 2) {
412 LLVM_DEBUG(dbgs() << "Outgoing vector ret has too many elts");
413 return false;
414 }
415 auto Undef = MIRBuilder.buildUndef({OldLLT});
416 CurVReg =
417 MIRBuilder.buildMerge({NewLLT}, {CurVReg, Undef}).getReg(0);
418 } else {
419 // Just do a vector extend.
420 CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
421 .getReg(0);
422 }
423 } else if (NewLLT.getNumElements() == 2) {
424 // We need to pad a <1 x S> type to <2 x S>. Since we don't have
425 // <1 x S> vector types in GISel we use a build_vector instead
426 // of a vector merge/concat.
427 auto Undef = MIRBuilder.buildUndef({OldLLT});
428 CurVReg =
429 MIRBuilder
430 .buildBuildVector({NewLLT}, {CurVReg, Undef.getReg(0)})
431 .getReg(0);
432 } else {
433 LLVM_DEBUG(dbgs() << "Could not handle ret ty\n");
434 return false;
435 }
436 } else {
437 // If the split EVT was a <1 x T> vector, and NewVT is T, then we
438 // don't have to do anything since we don't distinguish between the
439 // two.
440 if (NewLLT != MRI.getType(CurVReg)) {
441 // A scalar extend.
442 CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
443 .getReg(0);
444 }
445 }
446 }
447 }
448 if (CurVReg != CurArgInfo.Regs[0]) {
449 CurArgInfo.Regs[0] = CurVReg;
450 // Reset the arg flags after modifying CurVReg.
451 setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
452 }
453 splitToValueTypes(CurArgInfo, SplitArgs, DL, CC);
454 }
455
456 AArch64OutgoingValueAssigner Assigner(AssignFn, AssignFn, Subtarget,
457 /*IsReturn*/ true);
458 OutgoingArgHandler Handler(MIRBuilder, MRI, MIB);
459 Success = determineAndHandleAssignments(Handler, Assigner, SplitArgs,
460 MIRBuilder, CC, F.isVarArg());
461 }
462
463 if (SwiftErrorVReg) {
464 MIB.addUse(AArch64::X21, RegState::Implicit);
465 MIRBuilder.buildCopy(AArch64::X21, SwiftErrorVReg);
466 }
467
468 MIRBuilder.insertInstr(MIB);
469 return Success;
470}
471
472bool AArch64CallLowering::canLowerReturn(MachineFunction &MF,
473 CallingConv::ID CallConv,
474 SmallVectorImpl<BaseArgInfo> &Outs,
475 bool IsVarArg) const {
476 SmallVector<CCValAssign, 16> ArgLocs;
477 const auto &TLI = *getTLI<AArch64TargetLowering>();
478 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
479 MF.getFunction().getContext());
480
481 return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv));
482}
483
484/// Helper function to compute forwarded registers for musttail calls. Computes
485/// the forwarded registers, sets MBB liveness, and emits COPY instructions that
486/// can be used to save + restore registers later.
487static void handleMustTailForwardedRegisters(MachineIRBuilder &MIRBuilder,
488 CCAssignFn *AssignFn) {
489 MachineBasicBlock &MBB = MIRBuilder.getMBB();
490 MachineFunction &MF = MIRBuilder.getMF();
491 MachineFrameInfo &MFI = MF.getFrameInfo();
492
493 if (!MFI.hasMustTailInVarArgFunc())
494 return;
495
496 AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
497 const Function &F = MF.getFunction();
498 assert(F.isVarArg() && "Expected F to be vararg?");
499
500 // Compute the set of forwarded registers. The rest are scratch.
501 SmallVector<CCValAssign, 16> ArgLocs;
502 CCState CCInfo(F.getCallingConv(), /*IsVarArg=*/true, MF, ArgLocs,
503 F.getContext());
504 SmallVector<MVT, 2> RegParmTypes;
505 RegParmTypes.push_back(MVT::i64);
506 RegParmTypes.push_back(MVT::f128);
507
508 // Later on, we can use this vector to restore the registers if necessary.
509 SmallVectorImpl<ForwardedRegister> &Forwards =
510 FuncInfo->getForwardedMustTailRegParms();
511 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, AssignFn);
512
513 // Conservatively forward X8, since it might be used for an aggregate
514 // return.
515 if (!CCInfo.isAllocated(AArch64::X8)) {
516 Register X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
517 Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64));
518 }
519
520 // Add the forwards to the MachineBasicBlock and MachineFunction.
521 for (const auto &F : Forwards) {
522 MBB.addLiveIn(F.PReg);
523 MIRBuilder.buildCopy(Register(F.VReg), Register(F.PReg));
524 }
525}
526
527bool AArch64CallLowering::fallBackToDAGISel(const MachineFunction &MF) const {
528 auto &F = MF.getFunction();
529 if (isa<ScalableVectorType>(F.getReturnType()))
530 return true;
531 if (llvm::any_of(F.args(), [](const Argument &A) {
532 return isa<ScalableVectorType>(A.getType());
533 }))
534 return true;
535 const auto &ST = MF.getSubtarget<AArch64Subtarget>();
536 if (!ST.hasNEON() || !ST.hasFPARMv8()) {
537 LLVM_DEBUG(dbgs() << "Falling back to SDAG because we don't support no-NEON\n");
538 return true;
539 }
540
541 SMEAttrs Attrs(F);
542 if (Attrs.hasNewZAInterface() ||
543 (!Attrs.hasStreamingInterface() && Attrs.hasStreamingBody()))
544 return true;
545
546 return false;
547}
548
549bool AArch64CallLowering::lowerFormalArguments(
550 MachineIRBuilder &MIRBuilder, const Function &F,
551 ArrayRef<ArrayRef<Register>> VRegs, FunctionLoweringInfo &FLI) const {
552 MachineFunction &MF = MIRBuilder.getMF();
553 MachineBasicBlock &MBB = MIRBuilder.getMBB();
554 MachineRegisterInfo &MRI = MF.getRegInfo();
555 auto &DL = F.getParent()->getDataLayout();
556
557 SmallVector<ArgInfo, 8> SplitArgs;
558 SmallVector<std::pair<Register, Register>> BoolArgs;
559
560 // Insert the hidden sret parameter if the return value won't fit in the
561 // return registers.
562 if (!FLI.CanLowerReturn)
563 insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);
564
565 unsigned i = 0;
566 for (auto &Arg : F.args()) {
567 if (DL.getTypeStoreSize(Arg.getType()).isZero())
568 continue;
569
570 ArgInfo OrigArg{VRegs[i], Arg, i};
571 setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);
572
573 // i1 arguments are zero-extended to i8 by the caller. Emit a
574 // hint to reflect this.
575 if (OrigArg.Ty->isIntegerTy(1)) {
576 assert(OrigArg.Regs.size() == 1 &&
577 MRI.getType(OrigArg.Regs[0]).getSizeInBits() == 1 &&
578 "Unexpected registers used for i1 arg");
579
580 auto &Flags = OrigArg.Flags[0];
581 if (!Flags.isZExt() && !Flags.isSExt()) {
582 // Lower i1 argument as i8, and insert AssertZExt + Trunc later.
583 Register OrigReg = OrigArg.Regs[0];
584 Register WideReg = MRI.createGenericVirtualRegister(LLT::scalar(8));
585 OrigArg.Regs[0] = WideReg;
586 BoolArgs.push_back({OrigReg, WideReg});
587 }
588 }
589
590 if (Arg.hasAttribute(Attribute::SwiftAsync))
591 MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);
592
593 splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv());
594 ++i;
595 }
596
597 if (!MBB.empty())
598 MIRBuilder.setInstr(*MBB.begin());
599
600 const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
601 CCAssignFn *AssignFn =
602 TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
603
604 AArch64IncomingValueAssigner Assigner(AssignFn, AssignFn);
605 FormalArgHandler Handler(MIRBuilder, MRI);
606 if (!determineAndHandleAssignments(Handler, Assigner, SplitArgs, MIRBuilder,
607 F.getCallingConv(), F.isVarArg()))
608 return false;
609
610 if (!BoolArgs.empty()) {
611 for (auto &KV : BoolArgs) {
612 Register OrigReg = KV.first;
613 Register WideReg = KV.second;
614 LLT WideTy = MRI.getType(WideReg);
615 assert(MRI.getType(OrigReg).getScalarSizeInBits() == 1 &&
616 "Unexpected bit size of a bool arg");
617 MIRBuilder.buildTrunc(
618 OrigReg, MIRBuilder.buildAssertZExt(WideTy, WideReg, 1).getReg(0));
619 }
620 }
621
622 AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
623 uint64_t StackOffset = Assigner.StackOffset;
624 if (F.isVarArg()) {
625 auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
626 if (!Subtarget.isTargetDarwin()) {
627 // FIXME: we need to reimplement saveVarArgsRegisters from
628 // AArch64ISelLowering.
629 return false;
630 }
631
632 // We currently pass all varargs at 8-byte alignment, or 4 in ILP32.
633 StackOffset =
634 alignTo(Assigner.StackOffset, Subtarget.isTargetILP32() ? 4 : 8);
635
636 auto &MFI = MIRBuilder.getMF().getFrameInfo();
637 FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
638 }
639
640 if (doesCalleeRestoreStack(F.getCallingConv(),
641 MF.getTarget().Options.GuaranteedTailCallOpt)) {
642 // We have a non-standard ABI, so why not make full use of the stack that
643 // we're going to pop? It must be aligned to 16 B in any case.
644 StackOffset = alignTo(StackOffset, 16);
645
646 // If we're expected to restore the stack (e.g. fastcc), then we'll be
647 // adding a multiple of 16.
648 FuncInfo->setArgumentStackToRestore(StackOffset);
649
650 // Our own callers will guarantee that the space is free by giving an
651 // aligned value to CALLSEQ_START.
652 }
653
654 // When we tail call, we need to check if the callee's arguments
655 // will fit on the caller's stack. So, whenever we lower formal arguments,
656 // we should keep track of this information, since we might lower a tail call
657 // in this function later.
658 FuncInfo->setBytesInStackArgArea(StackOffset);
659
660 auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
661 if (Subtarget.hasCustomCallingConv())
662 Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);
663
664 handleMustTailForwardedRegisters(MIRBuilder, AssignFn);
665
666 // Move back to the end of the basic block.
667 MIRBuilder.setMBB(MBB);
668
669 return true;
670}
671
672/// Return true if the calling convention is one that we can guarantee TCO for.
673static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) {
674 return (CC == CallingConv::Fast && GuaranteeTailCalls) ||
675 CC == CallingConv::Tail || CC == CallingConv::SwiftTail;
676}
677
678/// Return true if we might ever do TCO for calls with this calling convention.
679static bool mayTailCallThisCC(CallingConv::ID CC) {
680 switch (CC) {
681 case CallingConv::C:
682 case CallingConv::PreserveMost:
683 case CallingConv::Swift:
684 case CallingConv::SwiftTail:
685 case CallingConv::Tail:
686 case CallingConv::Fast:
687 return true;
688 default:
689 return false;
690 }
691}
692
693/// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for
694/// CC.
695static std::pair<CCAssignFn *, CCAssignFn *>
696getAssignFnsForCC(CallingConv::ID CC, const AArch64TargetLowering &TLI) {
697 return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
698}
699
700bool AArch64CallLowering::doCallerAndCalleePassArgsTheSameWay(
701 CallLoweringInfo &Info, MachineFunction &MF,
702 SmallVectorImpl<ArgInfo> &InArgs) const {
703 const Function &CallerF = MF.getFunction();
704 CallingConv::ID CalleeCC = Info.CallConv;
705 CallingConv::ID CallerCC = CallerF.getCallingConv();
706
707 // If the calling conventions match, then everything must be the same.
708 if (CalleeCC == CallerCC)
709 return true;
710
711 // Check if the caller and callee will handle arguments in the same way.
712 const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
713 CCAssignFn *CalleeAssignFnFixed;
714 CCAssignFn *CalleeAssignFnVarArg;
715 std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
716 getAssignFnsForCC(CalleeCC, TLI);
717
718 CCAssignFn *CallerAssignFnFixed;
719 CCAssignFn *CallerAssignFnVarArg;
720 std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
721 getAssignFnsForCC(CallerCC, TLI);
722
723 AArch64IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,
724 CalleeAssignFnVarArg);
725 AArch64IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,
726 CallerAssignFnVarArg);
727
728 if (!resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner))
729 return false;
730
731 // Make sure that the caller and callee preserve all of the same registers.
732 auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
733 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
734 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
735 if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv()) {
736 TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
737 TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
738 }
739
740 return TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved);
741}
742
743bool AArch64CallLowering::areCalleeOutgoingArgsTailCallable(
744 CallLoweringInfo &Info, MachineFunction &MF,
745 SmallVectorImpl<ArgInfo> &OutArgs) const {
746 // If there are no outgoing arguments, then we are done.
747 if (OutArgs.empty())
748 return true;
749
750 const Function &CallerF = MF.getFunction();
751 LLVMContext &Ctx = CallerF.getContext();
752 CallingConv::ID CalleeCC = Info.CallConv;
753 CallingConv::ID CallerCC = CallerF.getCallingConv();
754 const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
755 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
756
757 CCAssignFn *AssignFnFixed;
758 CCAssignFn *AssignFnVarArg;
759 std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);
760
761 // We have outgoing arguments. Make sure that we can tail call with them.
762 SmallVector<CCValAssign, 16> OutLocs;
763 CCState OutInfo(CalleeCC, false, MF, OutLocs, Ctx);
764
765 AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
766 Subtarget, /*IsReturn*/ false);
767 if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo)) {
768 LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
770 }
771
772 // Make sure that they can fit on the caller's stack.
773 const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
774 if (OutInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) {
775 LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
776 return false;
777 }
778
779 // Verify that the parameters in callee-saved registers match.
780 // TODO: Port this over to CallLowering as general code once swiftself is
781 // supported.
782 auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
783 const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
784 MachineRegisterInfo &MRI = MF.getRegInfo();
785
786 if (Info.IsVarArg) {
787 // Be conservative and disallow variadic memory operands to match SDAG's
788 // behaviour.
789 // FIXME: If the caller's calling convention is C, then we can
790 // potentially use its argument area. However, for cases like fastcc,
791 // we can't do anything.
792 for (unsigned i = 0; i < OutLocs.size(); ++i) {
793 auto &ArgLoc = OutLocs[i];
794 if (ArgLoc.isRegLoc())
795 continue;
796
797 LLVM_DEBUG(
798 dbgs()
799 << "... Cannot tail call vararg function with stack arguments\n");
800 return false;
801 }
802 }
803
804 return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs);
805}
806
807bool AArch64CallLowering::isEligibleForTailCallOptimization(
808 MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
809 SmallVectorImpl<ArgInfo> &InArgs,
810 SmallVectorImpl<ArgInfo> &OutArgs) const {
811
812 // Must pass all target-independent checks in order to tail call optimize.
813 if (!Info.IsTailCall)
6. Assuming field 'IsTailCall' is true
7. Taking false branch
814 return false;
815
816 CallingConv::ID CalleeCC = Info.CallConv;
817 MachineFunction &MF = MIRBuilder.getMF();
818 const Function &CallerF = MF.getFunction();
819
820 LLVM_DEBUG(dbgs() << "Attempting to lower call as tail call\n");
8. Assuming 'DebugFlag' is false
9. Loop condition is false. Exiting loop
821
822 if (Info.SwiftErrorVReg) {
10. Assuming the condition is false
11. Taking false branch
823 // TODO: We should handle this.
824 // Note that this is also handled by the check for no outgoing arguments.
825 // Proactively disabling this though, because the swifterror handling in
826 // lowerCall inserts a COPY *after* the location of the call.
827 LLVM_DEBUG(dbgs() << "... Cannot handle tail calls with swifterror yet.\n");
828 return false;
829 }
830
831 if (!mayTailCallThisCC(CalleeCC)) {
832 LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
12. Taking true branch
13. Loop condition is false. Exiting loop
833 return false;
14. Returning zero, which participates in a condition later
834 }
835
836 // Byval parameters hand the function a pointer directly into the stack area
837 // we want to reuse during a tail call. Working around this *is* possible (see
838 // X86).
839 //
840 // FIXME: In AArch64ISelLowering, this isn't worked around. Can/should we try
841 // it?
842 //
843 // On Windows, "inreg" attributes signify non-aggregate indirect returns.
844 // In this case, it is necessary to save/restore X0 in the callee. Tail
845 // call opt interferes with this. So we disable tail call opt when the
846 // caller has an argument with "inreg" attribute.
847 //
848 // FIXME: Check whether the callee also has an "inreg" argument.
849 //
850 // When the caller has a swifterror argument, we don't want to tail call
851 // because we would have to move into the swifterror register before the
852 // tail call.
853 if (any_of(CallerF.args(), [](const Argument &A) {
854 return A.hasByValAttr() || A.hasInRegAttr() || A.hasSwiftErrorAttr();
855 })) {
856 LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval, "
857 "inreg, or swifterror arguments\n");
858 return false;
859 }
860
861 // Externally-defined functions with weak linkage should not be
862 // tail-called on AArch64 when the OS does not support dynamic
863 // pre-emption of symbols, as the AAELF spec requires normal calls
864 // to undefined weak functions to be replaced with a NOP or jump to the
865 // next instruction. The behaviour of branch instructions in this
866 // situation (as used for tail calls) is implementation-defined, so we
867 // cannot rely on the linker replacing the tail call with a return.
868 if (Info.Callee.isGlobal()) {
869 const GlobalValue *GV = Info.Callee.getGlobal();
870 const Triple &TT = MF.getTarget().getTargetTriple();
871 if (GV->hasExternalWeakLinkage() &&
872 (!TT.isOSWindows() || TT.isOSBinFormatELF() ||
873 TT.isOSBinFormatMachO())) {
874 LLVM_DEBUG(dbgs() << "... Cannot tail call externally-defined function "
875 "with weak linkage for this OS.\n");
876 return false;
877 }
878 }
879
880 // If we have -tailcallopt, then we're done.
881 if (canGuaranteeTCO(CalleeCC, MF.getTarget().Options.GuaranteedTailCallOpt))
882 return CalleeCC == CallerF.getCallingConv();
883
884 // We don't have -tailcallopt, so we're allowed to change the ABI (sibcall).
885 // Try to find cases where we can do that.
886
887 // I want anyone implementing a new calling convention to think long and hard
888 // about this assert.
889 assert((!Info.IsVarArg || CalleeCC == CallingConv::C) &&
890 "Unexpected variadic calling convention");
891
892 // Verify that the incoming and outgoing arguments from the callee are
893 // safe to tail call.
894 if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
895 LLVM_DEBUG(
896 dbgs()
897 << "... Caller and callee have incompatible calling conventions.\n");
898 return false;
899 }
900
901 if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
902 return false;
903
904 LLVM_DEBUG(
905 dbgs() << "... Call is eligible for tail call optimization.\n");
906 return true;
907}
908
909static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
910 bool IsTailCall) {
911 if (!IsTailCall)
912 return IsIndirect ? getBLRCallOpcode(CallerF) : (unsigned)AArch64::BL;
913
914 if (!IsIndirect)
915 return AArch64::TCRETURNdi;
916
917 // When BTI is enabled, we need to use TCRETURNriBTI to make sure that we use
918 // x16 or x17.
919 if (CallerF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement())
920 return AArch64::TCRETURNriBTI;
921
922 return AArch64::TCRETURNri;
923}
924
925static const uint32_t *
926getMaskForArgs(SmallVectorImpl<AArch64CallLowering::ArgInfo> &OutArgs,
927 AArch64CallLowering::CallLoweringInfo &Info,
928 const AArch64RegisterInfo &TRI, MachineFunction &MF) {
929 const uint32_t *Mask;
930 if (!OutArgs.empty() && OutArgs[0].Flags[0].isReturned()) {
931 // For 'this' returns, use the X0-preserving mask if applicable
932 Mask = TRI.getThisReturnPreservedMask(MF, Info.CallConv);
933 if (!Mask) {
934 OutArgs[0].Flags[0].setReturned(false);
935 Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
936 }
937 } else {
938 Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
939 }
940 return Mask;
941}
942
943bool AArch64CallLowering::lowerTailCall(
944 MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
945 SmallVectorImpl<ArgInfo> &OutArgs) const {
946 MachineFunction &MF = MIRBuilder.getMF();
947 const Function &F = MF.getFunction();
948 MachineRegisterInfo &MRI = MF.getRegInfo();
949 const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
950 AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
951
952 // True when we're tail calling, but without -tailcallopt.
953 bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt &&
954 Info.CallConv != CallingConv::Tail &&
955 Info.CallConv != CallingConv::SwiftTail;
956
957 // TODO: Right now, regbankselect doesn't know how to handle the rtcGPR64
958 // register class. Until we can do that, we should fall back here.
959 if (MF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement()) {
960 LLVM_DEBUG(
961 dbgs() << "Cannot lower indirect tail calls with BTI enabled yet.\n");
962 return false;
963 }
964
965 // Find out which ABI gets to decide where things go.
966 CallingConv::ID CalleeCC = Info.CallConv;
967 CCAssignFn *AssignFnFixed;
968 CCAssignFn *AssignFnVarArg;
969 std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);
970
971 MachineInstrBuilder CallSeqStart;
972 if (!IsSibCall)
973 CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);
974
975 unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true);
976 auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
977 MIB.add(Info.Callee);
978
979 // Byte offset for the tail call. When we are sibcalling, this will always
980 // be 0.
981 MIB.addImm(0);
982
983 // Tell the call which registers are clobbered.
984 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
985 auto TRI = Subtarget.getRegisterInfo();
986 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
987 if (Subtarget.hasCustomCallingConv())
988 TRI->UpdateCustomCallPreservedMask(MF, &Mask);
989 MIB.addRegMask(Mask);
990
991 if (Info.CFIType)
992 MIB->setCFIType(MF, Info.CFIType->getZExtValue());
993
994 if (TRI->isAnyArgRegReserved(MF))
995 TRI->emitReservedArgRegCallError(MF);
996
997 // FPDiff is the byte offset of the call's argument area from the callee's.
998 // Stores to callee stack arguments will be placed in FixedStackSlots offset
999 // by this amount for a tail call. In a sibling call it must be 0 because the
1000 // caller will deallocate the entire stack and the callee still expects its
1001 // arguments to begin at SP+0.
1002 int FPDiff = 0;
1003
1004 // This will be 0 for sibcalls, potentially nonzero for tail calls produced
1005 // by -tailcallopt. For sibcalls, the memory operands for the call are
1006 // already available in the caller's incoming argument space.
1007 unsigned NumBytes = 0;
1008 if (!IsSibCall) {
1009 // We aren't sibcalling, so we need to compute FPDiff. We need to do this
1010 // before handling assignments, because FPDiff must be known for memory
1011 // arguments.
1012 unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
1013 SmallVector<CCValAssign, 16> OutLocs;
1014 CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());
1015
1016 AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
1017 Subtarget, /*IsReturn*/ false);
1018 if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo))
1019 return false;
1020
1021 // The callee will pop the argument stack as a tail call. Thus, we must
1022 // keep it 16-byte aligned.
1023 NumBytes = alignTo(OutInfo.getNextStackOffset(), 16);
1024
1025 // FPDiff will be negative if this tail call requires more space than we
1026 // would automatically have in our incoming argument space. Positive if we
1027 // actually shrink the stack.
1028 FPDiff = NumReusableBytes - NumBytes;
1029
1030 // Update the required reserved area if this is the tail call requiring the
1031 // most argument stack space.
1032 if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (unsigned)-FPDiff)
1033 FuncInfo->setTailCallReservedStack(-FPDiff);
1034
1035 // The stack pointer must be 16-byte aligned at all times it's used for a
1036 // memory operation, which in practice means at *all* times and in
1037 // particular across call boundaries. Therefore our own arguments started at
1038 // a 16-byte aligned SP and the delta applied for the tail call should
1039 // satisfy the same constraint.
1040 assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
1041 }
1042
1043 const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();
1044
1045 AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
1046 Subtarget, /*IsReturn*/ false);
1047
1048 // Do the actual argument marshalling.
1049 OutgoingArgHandler Handler(MIRBuilder, MRI, MIB,
1050 /*IsTailCall*/ true, FPDiff);
1051 if (!determineAndHandleAssignments(Handler, Assigner, OutArgs, MIRBuilder,
1052 CalleeCC, Info.IsVarArg))
1053 return false;
1054
1055 Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);
1056
1057 if (Info.IsVarArg && Info.IsMustTailCall) {
1058 // Now we know what's being passed to the function. Add uses to the call for
1059 // the forwarded registers that we *aren't* passing as parameters. This will
1060 // preserve the copies we build earlier.
1061 for (const auto &F : Forwards) {
1062 Register ForwardedReg = F.PReg;
1063 // If the register is already passed, or aliases a register which is
1064 // already being passed, then skip it.
1065 if (any_of(MIB->uses(), [&ForwardedReg, &TRI](const MachineOperand &Use) {
1066 if (!Use.isReg())
1067 return false;
1068 return TRI->regsOverlap(Use.getReg(), ForwardedReg);
1069 }))
1070 continue;
1071
1072 // We aren't passing it already, so we should add it to the call.
1073 MIRBuilder.buildCopy(ForwardedReg, Register(F.VReg));
1074 MIB.addReg(ForwardedReg, RegState::Implicit);
1075 }
1076 }
1077
1078 // If we have -tailcallopt, we need to adjust the stack. We'll do the call
1079 // sequence start and end here.
1080 if (!IsSibCall) {
1081 MIB->getOperand(1).setImm(FPDiff);
1082 CallSeqStart.addImm(0).addImm(0);
1083 // End the call sequence *before* emitting the call. Normally, we would
1084 // tidy the frame up after the call. However, here, we've laid out the
1085 // parameters so that when SP is reset, they will be in the correct
1086 // location.
1087 MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP).addImm(0).addImm(0);
1088 }
1089
1090 // Now we can add the actual call instruction to the correct basic block.
1091 MIRBuilder.insertInstr(MIB);
1092
1093 // If Callee is a reg, since it is used by a target specific instruction,
1094 // it must have a register class matching the constraint of that instruction.
1095 if (MIB->getOperand(0).isReg())
1096 constrainOperandRegClass(MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
1097 *MF.getSubtarget().getRegBankInfo(), *MIB,
1098 MIB->getDesc(), MIB->getOperand(0), 0);
1099
1100 MF.getFrameInfo().setHasTailCall();
1101 Info.LoweredTailCall = true;
1102 return true;
1103}
1104
1105bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
1106 CallLoweringInfo &Info) const {
1107 MachineFunction &MF = MIRBuilder.getMF();
1108 const Function &F = MF.getFunction();
1109 MachineRegisterInfo &MRI = MF.getRegInfo();
1110 auto &DL = F.getParent()->getDataLayout();
1111 const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
1112 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
1113
1114 // Arm64EC has extra requirements for varargs calls; bail out for now.
1115 if (Info.IsVarArg && Subtarget.isWindowsArm64EC())
1. Assuming field 'IsVarArg' is false
1116 return false;
1117
1118 SmallVector<ArgInfo, 8> OutArgs;
1119 for (auto &OrigArg : Info.OrigArgs) {
2. Value assigned to field 'CB'
3. Assuming '__begin1' is equal to '__end1'
1120 splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv);
1121 // AAPCS requires that we zero-extend i1 to 8 bits by the caller.
1122 auto &Flags = OrigArg.Flags[0];
1123 if (OrigArg.Ty->isIntegerTy(1) && !Flags.isSExt() && !Flags.isZExt()) {
1124 ArgInfo &OutArg = OutArgs.back();
1125 assert(OutArg.Regs.size() == 1 &&
1126 MRI.getType(OutArg.Regs[0]).getSizeInBits() == 1 &&
1127 "Unexpected registers used for i1 arg");
1128
1129 // We cannot use a ZExt ArgInfo flag here, because it will
1130 // zero-extend the argument to i32 instead of just i8.
1131 OutArg.Regs[0] =
1132 MIRBuilder.buildZExt(LLT::scalar(8), OutArg.Regs[0]).getReg(0);
1133 LLVMContext &Ctx = MF.getFunction().getContext();
1134 OutArg.Ty = Type::getInt8Ty(Ctx);
1135 }
1136 }
1137
1138 SmallVector<ArgInfo, 8> InArgs;
1139 if (!Info.OrigRet.Ty->isVoidTy())
4) Taking true branch
1140 splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv);
1141
1142 // If we can lower as a tail call, do that instead.
1143 bool CanTailCallOpt =
1144 isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);
5) Calling 'AArch64CallLowering::isEligibleForTailCallOptimization'
15) Returning from 'AArch64CallLowering::isEligibleForTailCallOptimization'
1145
1146 // We must emit a tail call if we have musttail.
1147 if (Info.IsMustTailCall && !CanTailCallOpt) {
16) Assuming field 'IsMustTailCall' is false
1148 // There are types of incoming/outgoing arguments we can't handle yet, so
1149 // it doesn't make sense to actually die here like in ISelLowering. Instead,
1150 // fall back to SelectionDAG and let it try to handle this.
1151 LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
1152 return false;
1153 }
1154
1155 Info.IsTailCall = CanTailCallOpt;
1156 if (CanTailCallOpt)
16.1) 'CanTailCallOpt' is false
17) Taking false branch
1157 return lowerTailCall(MIRBuilder, Info, OutArgs);
1158
1159 // Find out which ABI gets to decide where things go.
1160 CCAssignFn *AssignFnFixed;
1161 CCAssignFn *AssignFnVarArg;
1162 std::tie(AssignFnFixed, AssignFnVarArg) =
1163 getAssignFnsForCC(Info.CallConv, TLI);
1164
1165 MachineInstrBuilder CallSeqStart;
1166 CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);
1167
1168 // Create a temporarily-floating call instruction so we can add the implicit
1169 // uses of arg registers.
1170
1171 unsigned Opc = 0;
1172 // Calls with operand bundle "clang.arc.attachedcall" are special. They should
1173 // be expanded to the call, directly followed by a special marker sequence and
1174 // a call to an ObjC library function.
1175 if (Info.CB && objcarc::hasAttachedCallOpBundle(Info.CB))
18) Assuming field 'CB' is null
1176 Opc = AArch64::BLR_RVMARKER;
1177 // A call to a returns twice function like setjmp must be followed by a bti
1178 // instruction.
1179 else if (Info.CB &&
18.1) Field 'CB' is null
19) Taking false branch
1180 Info.CB->getAttributes().hasFnAttr(Attribute::ReturnsTwice) &&
1181 !Subtarget.noBTIAtReturnTwice() &&
1182 MF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement())
1183 Opc = AArch64::BLR_BTI;
1184 else
1185 Opc = getCallOpcode(MF, Info.Callee.isReg(), false);
1186
1187 auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
1188 unsigned CalleeOpNo = 0;
1189
1190 if (Opc == AArch64::BLR_RVMARKER) {
20) Assuming 'Opc' is equal to BLR_RVMARKER
21) Taking true branch
1191 // Add a target global address for the retainRV/claimRV runtime function
1192 // just before the call target.
1193 Function *ARCFn = *objcarc::getAttachedARCFunction(Info.CB);
22) Passing null pointer value via 1st parameter 'CB'
23) Calling 'getAttachedARCFunction'
1194 MIB.addGlobalAddress(ARCFn);
1195 ++CalleeOpNo;
1196 } else if (Info.CFIType) {
1197 MIB->setCFIType(MF, Info.CFIType->getZExtValue());
1198 }
1199
1200 MIB.add(Info.Callee);
1201
1202 // Tell the call which registers are clobbered.
1203 const uint32_t *Mask;
1204 const auto *TRI = Subtarget.getRegisterInfo();
1205
1206 AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
1207 Subtarget, /*IsReturn*/ false);
1208 // Do the actual argument marshalling.
1209 OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, /*IsReturn*/ false);
1210 if (!determineAndHandleAssignments(Handler, Assigner, OutArgs, MIRBuilder,
1211 Info.CallConv, Info.IsVarArg))
1212 return false;
1213
1214 Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);
1215
1216 if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv())
1217 TRI->UpdateCustomCallPreservedMask(MF, &Mask);
1218 MIB.addRegMask(Mask);
1219
1220 if (TRI->isAnyArgRegReserved(MF))
1221 TRI->emitReservedArgRegCallError(MF);
1222
1223 // Now we can add the actual call instruction to the correct basic block.
1224 MIRBuilder.insertInstr(MIB);
1225
1226 // If Callee is a reg, since it is used by a target specific
1227 // instruction, it must have a register class matching the
1228 // constraint of that instruction.
1229 if (MIB->getOperand(CalleeOpNo).isReg())
1230 constrainOperandRegClass(MF, *TRI, MRI, *Subtarget.getInstrInfo(),
1231 *Subtarget.getRegBankInfo(), *MIB, MIB->getDesc(),
1232 MIB->getOperand(CalleeOpNo), CalleeOpNo);
1233
1234 // Finally we can copy the returned value back into its virtual-register. In
1235 // symmetry with the arguments, the physical register must be an
1236 // implicit-define of the call instruction.
1237 if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
1238 CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv);
1239 CallReturnHandler Handler(MIRBuilder, MRI, MIB);
1240 bool UsingReturnedArg =
1241 !OutArgs.empty() && OutArgs[0].Flags[0].isReturned();
1242
1243 AArch64OutgoingValueAssigner Assigner(RetAssignFn, RetAssignFn, Subtarget,
1244 /*IsReturn*/ false);
1245 ReturnedArgCallReturnHandler ReturnedArgHandler(MIRBuilder, MRI, MIB);
1246 if (!determineAndHandleAssignments(
1247 UsingReturnedArg ? ReturnedArgHandler : Handler, Assigner, InArgs,
1248 MIRBuilder, Info.CallConv, Info.IsVarArg,
1249 UsingReturnedArg ? makeArrayRef(OutArgs[0].Regs) : std::nullopt))
1250 return false;
1251 }
1252
1253 if (Info.SwiftErrorVReg) {
1254 MIB.addDef(AArch64::X21, RegState::Implicit);
1255 MIRBuilder.buildCopy(Info.SwiftErrorVReg, Register(AArch64::X21));
1256 }
1257
1258 uint64_t CalleePopBytes =
1259 doesCalleeRestoreStack(Info.CallConv,
1260 MF.getTarget().Options.GuaranteedTailCallOpt)
1261 ? alignTo(Assigner.StackOffset, 16)
1262 : 0;
1263
1264 CallSeqStart.addImm(Assigner.StackOffset).addImm(0);
1265 MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
1266 .addImm(Assigner.StackOffset)
1267 .addImm(CalleePopBytes);
1268
1269 if (!Info.CanLowerReturn) {
1270 insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
1271 Info.DemoteRegister, Info.DemoteStackIndex);
1272 }
1273 return true;
1274}
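
On the path reported here, step 18 assumes Info.CB is null at line 1175, while step 20 takes Opc to be BLR_RVMARKER, so line 1193 passes that null pointer into getAttachedARCFunction. Purely as an illustration of a branch shape that would satisfy the analyzer (an assumption, not the in-tree code), the BLR_RVMARKER case could re-check the pointer:

    // Hypothetical variant of the branch at line 1190; the extra Info.CB test
    // is an assumption for illustration only.
    if (Opc == AArch64::BLR_RVMARKER && Info.CB) {
      Function *ARCFn = *objcarc::getAttachedARCFunction(Info.CB);
      MIB.addGlobalAddress(ARCFn);
      ++CalleeOpNo;
    } else if (Info.CFIType) {
      MIB->setCFIType(MF, Info.CFIType->getZExtValue());
    }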
1275
1276bool AArch64CallLowering::isTypeIsValidForThisReturn(EVT Ty) const {
1277 return Ty.getSizeInBits() == 64;
1278}

/build/source/llvm/include/llvm/Analysis/ObjCARCUtil.h

1//===- ObjCARCUtil.h - ObjC ARC Utility Functions ---------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file defines ARC utility functions which are used by various parts of
10/// the compiler.
11///
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ANALYSIS_OBJCARCUTIL_H
15#define LLVM_ANALYSIS_OBJCARCUTIL_H
16
17#include "llvm/Analysis/ObjCARCInstKind.h"
18#include "llvm/IR/Function.h"
19#include "llvm/IR/InstrTypes.h"
20#include "llvm/IR/LLVMContext.h"
21
22namespace llvm {
23namespace objcarc {
24
25inline const char *getRVMarkerModuleFlagStr() {
26 return "clang.arc.retainAutoreleasedReturnValueMarker";
27}
28
29inline bool hasAttachedCallOpBundle(const CallBase *CB) {
30 // Ignore the bundle if the return type is void. Global optimization passes
31 // can turn the called function's return type to void. That should happen only
32 // if the call doesn't return and the call to @llvm.objc.clang.arc.noop.use
33 // no longer consumes the function return or is deleted. In that case, it's
34 // not necessary to emit the marker instruction or calls to the ARC runtime
35 // functions.
36 return !CB->getFunctionType()->getReturnType()->isVoidTy() &&
37 CB->getOperandBundle(LLVMContext::OB_clang_arc_attachedcall)
38 .has_value();
39}
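
hasAttachedCallOpBundle dereferences CB without a null check, so callers guard the pointer first, as AArch64CallLowering.cpp does at line 1175 (Info.CB && ...). A minimal sketch of that caller-side pattern, where CB is a hypothetical, possibly-null CallBase pointer:

    // Guarded query; CB is a hypothetical, possibly-null CallBase *.
    bool WantsRVMarker = CB && objcarc::hasAttachedCallOpBundle(CB);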
40
41/// This function returns operand bundle clang_arc_attachedcall's argument,
42/// which is the address of the ARC runtime function.
43inline Optional<Function *> getAttachedARCFunction(const CallBase *CB) {
44 auto B = CB->getOperandBundle(LLVMContext::OB_clang_arc_attachedcall);
24) Called C++ object pointer is null
45 if (!B)
46 return std::nullopt;
47
48 return cast<Function>(B->Inputs[0]);
49}
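
Line 44 is the reported dereference: CB is used before any null test, and the check on line 45 only covers a missing bundle, not a null pointer. For illustration only, a null-tolerant wrapper could look like the sketch below; the name getAttachedARCFunctionOrNone is an assumption and does not exist in this header:

    // Hypothetical helper, not part of ObjCARCUtil.h: additionally returns
    // std::nullopt when the call-base pointer itself is null.
    inline Optional<Function *> getAttachedARCFunctionOrNone(const CallBase *CB) {
      if (!CB)
        return std::nullopt;
      return getAttachedARCFunction(CB);
    }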
50
51/// Check whether the function is retainRV/unsafeClaimRV.
52inline bool isRetainOrClaimRV(ARCInstKind Kind) {
53 return Kind == ARCInstKind::RetainRV || Kind == ARCInstKind::UnsafeClaimRV;
54}
55
56/// This function returns the ARCInstKind of the function attached to operand
57/// bundle clang_arc_attachedcall. It returns std::nullopt if the call doesn't
58/// have the operand bundle or the operand is null. Otherwise it returns either
59/// RetainRV or UnsafeClaimRV.
60inline ARCInstKind getAttachedARCFunctionKind(const CallBase *CB) {
61 Optional<Function *> Fn = getAttachedARCFunction(CB);
62 if (!Fn)
63 return ARCInstKind::None;
64 auto FnClass = GetFunctionClass(*Fn);
65 assert(isRetainOrClaimRV(FnClass) && "unexpected ARC runtime function");
66 return FnClass;
67}
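
A short usage sketch of getAttachedARCFunctionKind, assuming a hypothetical non-null CallBase *Call; the branch bodies are placeholders:

    // Illustrative dispatch only.
    switch (objcarc::getAttachedARCFunctionKind(Call)) {
    case objcarc::ARCInstKind::RetainRV:
      // handle a retainRV-style attached call
      break;
    case objcarc::ARCInstKind::UnsafeClaimRV:
      // handle an unsafeClaimRV-style attached call
      break;
    default:
      break; // ARCInstKind::None: no clang.arc.attachedcall bundle
    }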
68
69} // end namespace objcarc
70} // end namespace llvm
71
72#endif