Bug Summary

File: /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/include/llvm/Analysis/ObjCARCUtil.h
Warning: line 44, column 12
Called C++ object pointer is null
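
This diagnostic (produced by the core checkers enabled in the command below, typically core.CallAndMessage) means the analyzer found an execution path on which a C++ member function is invoked through a pointer it believes is null; here the call site is inside ObjCARCUtil.h and the path runs through AArch64CallLowering::lowerCall via the Info.CB field, as traced in the annotated source. The following is only a minimal sketch of that pattern; every identifier in it is invented for illustration and is not taken from ObjCARCUtil.h or AArch64CallLowering.cpp.

// Hedged illustration of the "Called C++ object pointer is null" pattern.
// All names are hypothetical; analyze with: clang --analyze sketch.cpp
struct CallBase {
  bool hasOperandBundle() const { return true; }
};

// The warning is reported at the member call below when the analyzer can
// construct a path on which CB is null at this point.
bool hasAttachedCallLike(const CallBase *CB) {
  return CB->hasOperandBundle();
}

bool lowerLike(const CallBase *CB) {
  if (CB)                            // "Assuming 'CB' is null" on the false branch
    return hasAttachedCallLike(CB);  // guarded use: no report here
  return hasAttachedCallLike(CB);    // unguarded use on the null path -> report
}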

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name AArch64CallLowering.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/AArch64 -I /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Target/AArch64 -I include -I /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -O2 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -ferror-limit 19 -fvisibility=hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o 
/tmp/scan-build-2022-09-04-125545-48738-1 -x c++ /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp

/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp

1//===--- AArch64CallLowering.cpp - Call lowering --------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements the lowering of LLVM calls to machine code calls for
11/// GlobalISel.
12///
13//===----------------------------------------------------------------------===//
14
15#include "AArch64CallLowering.h"
16#include "AArch64ISelLowering.h"
17#include "AArch64MachineFunctionInfo.h"
18#include "AArch64Subtarget.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/SmallVector.h"
21#include "llvm/Analysis/ObjCARCUtil.h"
22#include "llvm/CodeGen/Analysis.h"
23#include "llvm/CodeGen/CallingConvLower.h"
24#include "llvm/CodeGen/FunctionLoweringInfo.h"
25#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26#include "llvm/CodeGen/GlobalISel/Utils.h"
27#include "llvm/CodeGen/LowLevelType.h"
28#include "llvm/CodeGen/MachineBasicBlock.h"
29#include "llvm/CodeGen/MachineFrameInfo.h"
30#include "llvm/CodeGen/MachineFunction.h"
31#include "llvm/CodeGen/MachineInstrBuilder.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/MachineOperand.h"
34#include "llvm/CodeGen/MachineRegisterInfo.h"
35#include "llvm/CodeGen/TargetRegisterInfo.h"
36#include "llvm/CodeGen/TargetSubtargetInfo.h"
37#include "llvm/CodeGen/ValueTypes.h"
38#include "llvm/IR/Argument.h"
39#include "llvm/IR/Attributes.h"
40#include "llvm/IR/Function.h"
41#include "llvm/IR/Type.h"
42#include "llvm/IR/Value.h"
43#include "llvm/Support/MachineValueType.h"
44#include <algorithm>
45#include <cassert>
46#include <cstdint>
47#include <iterator>
48
49#define DEBUG_TYPE "aarch64-call-lowering"
50
51using namespace llvm;
52
53AArch64CallLowering::AArch64CallLowering(const AArch64TargetLowering &TLI)
54 : CallLowering(&TLI) {}
55
56static void applyStackPassedSmallTypeDAGHack(EVT OrigVT, MVT &ValVT,
57 MVT &LocVT) {
58 // If ValVT is i1/i8/i16, we should set LocVT to i8/i8/i16. This is a legacy
59 // hack because the DAG calls the assignment function with pre-legalized
60 // register typed values, not the raw type.
61 //
62 // This hack is not applied to return values which are not passed on the
63 // stack.
64 if (OrigVT == MVT::i1 || OrigVT == MVT::i8)
65 ValVT = LocVT = MVT::i8;
66 else if (OrigVT == MVT::i16)
67 ValVT = LocVT = MVT::i16;
68}
69
70// Account for i1/i8/i16 stack passed value hack
71static LLT getStackValueStoreTypeHack(const CCValAssign &VA) {
72 const MVT ValVT = VA.getValVT();
73 return (ValVT == MVT::i8 || ValVT == MVT::i16) ? LLT(ValVT)
74 : LLT(VA.getLocVT());
75}
76
77namespace {
78
79struct AArch64IncomingValueAssigner
80 : public CallLowering::IncomingValueAssigner {
81 AArch64IncomingValueAssigner(CCAssignFn *AssignFn_,
82 CCAssignFn *AssignFnVarArg_)
83 : IncomingValueAssigner(AssignFn_, AssignFnVarArg_) {}
84
85 bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
86 CCValAssign::LocInfo LocInfo,
87 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
88 CCState &State) override {
89 applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
90 return IncomingValueAssigner::assignArg(ValNo, OrigVT, ValVT, LocVT,
91 LocInfo, Info, Flags, State);
92 }
93};
94
95struct AArch64OutgoingValueAssigner
96 : public CallLowering::OutgoingValueAssigner {
97 const AArch64Subtarget &Subtarget;
98
99 /// Track if this is used for a return instead of function argument
100 /// passing. We apply a hack to i1/i8/i16 stack passed values, but do not use
101 /// stack passed returns for them and cannot apply the type adjustment.
102 bool IsReturn;
103
104 AArch64OutgoingValueAssigner(CCAssignFn *AssignFn_,
105 CCAssignFn *AssignFnVarArg_,
106 const AArch64Subtarget &Subtarget_,
107 bool IsReturn)
108 : OutgoingValueAssigner(AssignFn_, AssignFnVarArg_),
109 Subtarget(Subtarget_), IsReturn(IsReturn) {}
110
111 bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
112 CCValAssign::LocInfo LocInfo,
113 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
114 CCState &State) override {
115 bool IsCalleeWin = Subtarget.isCallingConvWin64(State.getCallingConv());
116 bool UseVarArgsCCForFixed = IsCalleeWin && State.isVarArg();
117
118 if (!State.isVarArg() && !UseVarArgsCCForFixed && !IsReturn)
119 applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
120
121 bool Res;
122 if (Info.IsFixed && !UseVarArgsCCForFixed)
123 Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
124 else
125 Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);
126
127 StackOffset = State.getNextStackOffset();
128 return Res;
129 }
130};
131
132struct IncomingArgHandler : public CallLowering::IncomingValueHandler {
133 IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
134 : IncomingValueHandler(MIRBuilder, MRI) {}
135
136 Register getStackAddress(uint64_t Size, int64_t Offset,
137 MachinePointerInfo &MPO,
138 ISD::ArgFlagsTy Flags) override {
139 auto &MFI = MIRBuilder.getMF().getFrameInfo();
140
141 // Byval is assumed to be writable memory, but other stack passed arguments
142 // are not.
143 const bool IsImmutable = !Flags.isByVal();
144
145 int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
146 MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
147 auto AddrReg = MIRBuilder.buildFrameIndex(LLT::pointer(0, 64), FI);
148 return AddrReg.getReg(0);
149 }
150
151 LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA,
152 ISD::ArgFlagsTy Flags) const override {
153 // For pointers, we just need to fixup the integer types reported in the
154 // CCValAssign.
155 if (Flags.isPointer())
156 return CallLowering::ValueHandler::getStackValueStoreType(DL, VA, Flags);
157 return getStackValueStoreTypeHack(VA);
158 }
159
160 void assignValueToReg(Register ValVReg, Register PhysReg,
161 CCValAssign VA) override {
162 markPhysRegUsed(PhysReg);
163 IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
164 }
165
166 void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
167 MachinePointerInfo &MPO, CCValAssign &VA) override {
168 MachineFunction &MF = MIRBuilder.getMF();
169
170 LLT ValTy(VA.getValVT());
171 LLT LocTy(VA.getLocVT());
172
173 // Fixup the types for the DAG compatibility hack.
174 if (VA.getValVT() == MVT::i8 || VA.getValVT() == MVT::i16)
175 std::swap(ValTy, LocTy);
176 else {
177 // The calling code knows if this is a pointer or not, we're only touching
178 // the LocTy for the i8/i16 hack.
179 assert(LocTy.getSizeInBits() == MemTy.getSizeInBits());
180 LocTy = MemTy;
181 }
182
183 auto MMO = MF.getMachineMemOperand(
184 MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, LocTy,
185 inferAlignFromPtrInfo(MF, MPO));
186
187 switch (VA.getLocInfo()) {
188 case CCValAssign::LocInfo::ZExt:
189 MIRBuilder.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, ValVReg, Addr, *MMO);
190 return;
191 case CCValAssign::LocInfo::SExt:
192 MIRBuilder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, ValVReg, Addr, *MMO);
193 return;
194 default:
195 MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
196 return;
197 }
198 }
199
200 /// How the physical register gets marked varies between formal
201 /// parameters (it's a basic-block live-in), and a call instruction
202 /// (it's an implicit-def of the BL).
203 virtual void markPhysRegUsed(MCRegister PhysReg) = 0;
204};
205
206struct FormalArgHandler : public IncomingArgHandler {
207 FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
208 : IncomingArgHandler(MIRBuilder, MRI) {}
209
210 void markPhysRegUsed(MCRegister PhysReg) override {
211 MIRBuilder.getMRI()->addLiveIn(PhysReg);
212 MIRBuilder.getMBB().addLiveIn(PhysReg);
213 }
214};
215
216struct CallReturnHandler : public IncomingArgHandler {
217 CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
218 MachineInstrBuilder MIB)
219 : IncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}
220
221 void markPhysRegUsed(MCRegister PhysReg) override {
222 MIB.addDef(PhysReg, RegState::Implicit);
223 }
224
225 MachineInstrBuilder MIB;
226};
227
228/// A special return arg handler for "returned" attribute arg calls.
229struct ReturnedArgCallReturnHandler : public CallReturnHandler {
230 ReturnedArgCallReturnHandler(MachineIRBuilder &MIRBuilder,
231 MachineRegisterInfo &MRI,
232 MachineInstrBuilder MIB)
233 : CallReturnHandler(MIRBuilder, MRI, MIB) {}
234
235 void markPhysRegUsed(MCRegister PhysReg) override {}
236};
237
238struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
239 OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
240 MachineInstrBuilder MIB, bool IsTailCall = false,
241 int FPDiff = 0)
242 : OutgoingValueHandler(MIRBuilder, MRI), MIB(MIB), IsTailCall(IsTailCall),
243 FPDiff(FPDiff),
244 Subtarget(MIRBuilder.getMF().getSubtarget<AArch64Subtarget>()) {}
245
246 Register getStackAddress(uint64_t Size, int64_t Offset,
247 MachinePointerInfo &MPO,
248 ISD::ArgFlagsTy Flags) override {
249 MachineFunction &MF = MIRBuilder.getMF();
250 LLT p0 = LLT::pointer(0, 64);
251 LLT s64 = LLT::scalar(64);
252
253 if (IsTailCall) {
254 assert(!Flags.isByVal() && "byval unhandled with tail calls");
255
256 Offset += FPDiff;
257 int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
258 auto FIReg = MIRBuilder.buildFrameIndex(p0, FI);
259 MPO = MachinePointerInfo::getFixedStack(MF, FI);
260 return FIReg.getReg(0);
261 }
262
263 if (!SPReg)
264 SPReg = MIRBuilder.buildCopy(p0, Register(AArch64::SP)).getReg(0);
265
266 auto OffsetReg = MIRBuilder.buildConstant(s64, Offset);
267
268 auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);
269
270 MPO = MachinePointerInfo::getStack(MF, Offset);
271 return AddrReg.getReg(0);
272 }
273
274 /// We need to fixup the reported store size for certain value types because
275 /// we invert the interpretation of ValVT and LocVT in certain cases. This is
276/// for compatibility with the DAG call lowering implementation, which we're
277 /// currently building on top of.
278 LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA,
279 ISD::ArgFlagsTy Flags) const override {
280 if (Flags.isPointer())
281 return CallLowering::ValueHandler::getStackValueStoreType(DL, VA, Flags);
282 return getStackValueStoreTypeHack(VA);
283 }
284
285 void assignValueToReg(Register ValVReg, Register PhysReg,
286 CCValAssign VA) override {
287 MIB.addUse(PhysReg, RegState::Implicit);
288 Register ExtReg = extendRegister(ValVReg, VA);
289 MIRBuilder.buildCopy(PhysReg, ExtReg);
290 }
291
292 void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
293 MachinePointerInfo &MPO, CCValAssign &VA) override {
294 MachineFunction &MF = MIRBuilder.getMF();
295 auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, MemTy,
296 inferAlignFromPtrInfo(MF, MPO));
297 MIRBuilder.buildStore(ValVReg, Addr, *MMO);
298 }
299
300 void assignValueToAddress(const CallLowering::ArgInfo &Arg, unsigned RegIndex,
301 Register Addr, LLT MemTy, MachinePointerInfo &MPO,
302 CCValAssign &VA) override {
303 unsigned MaxSize = MemTy.getSizeInBytes() * 8;
304 // For varargs, we always want to extend them to 8 bytes, in which case
305 // we disable setting a max.
306 if (!Arg.IsFixed)
307 MaxSize = 0;
308
309 Register ValVReg = Arg.Regs[RegIndex];
310 if (VA.getLocInfo() != CCValAssign::LocInfo::FPExt) {
311 MVT LocVT = VA.getLocVT();
312 MVT ValVT = VA.getValVT();
313
314 if (VA.getValVT() == MVT::i8 || VA.getValVT() == MVT::i16) {
315 std::swap(ValVT, LocVT);
316 MemTy = LLT(VA.getValVT());
317 }
318
319 ValVReg = extendRegister(ValVReg, VA, MaxSize);
320 } else {
321 // The store does not cover the full allocated stack slot.
322 MemTy = LLT(VA.getValVT());
323 }
324
325 assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA);
326 }
327
328 MachineInstrBuilder MIB;
329
330 bool IsTailCall;
331
332 /// For tail calls, the byte offset of the call's argument area from the
333 /// callee's. Unused elsewhere.
334 int FPDiff;
335
336 // Cache the SP register vreg if we need it more than once in this call site.
337 Register SPReg;
338
339 const AArch64Subtarget &Subtarget;
340};
341} // namespace
342
343static bool doesCalleeRestoreStack(CallingConv::ID CallConv, bool TailCallOpt) {
344 return (CallConv == CallingConv::Fast && TailCallOpt) ||
345 CallConv == CallingConv::Tail || CallConv == CallingConv::SwiftTail;
346}
347
348bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
349 const Value *Val,
350 ArrayRef<Register> VRegs,
351 FunctionLoweringInfo &FLI,
352 Register SwiftErrorVReg) const {
353 auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
354 assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
355 "Return value without a vreg");
356
357 bool Success = true;
358 if (!FLI.CanLowerReturn) {
359 insertSRetStores(MIRBuilder, Val->getType(), VRegs, FLI.DemoteRegister);
360 } else if (!VRegs.empty()) {
361 MachineFunction &MF = MIRBuilder.getMF();
362 const Function &F = MF.getFunction();
363 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
364
365 MachineRegisterInfo &MRI = MF.getRegInfo();
366 const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
367 CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
368 auto &DL = F.getParent()->getDataLayout();
369 LLVMContext &Ctx = Val->getType()->getContext();
370
371 SmallVector<EVT, 4> SplitEVTs;
372 ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
373 assert(VRegs.size() == SplitEVTs.size() &&
374 "For each split Type there should be exactly one VReg.");
375
376 SmallVector<ArgInfo, 8> SplitArgs;
377 CallingConv::ID CC = F.getCallingConv();
378
379 for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
380 Register CurVReg = VRegs[i];
381 ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx), 0};
382 setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
383
384 // i1 is a special case because SDAG i1 true is naturally zero extended
385 // when widened using ANYEXT. We need to do it explicitly here.
386 if (MRI.getType(CurVReg).getSizeInBits() == 1) {
387 CurVReg = MIRBuilder.buildZExt(LLT::scalar(8), CurVReg).getReg(0);
388 } else if (TLI.getNumRegistersForCallingConv(Ctx, CC, SplitEVTs[i]) ==
389 1) {
390 // Some types will need extending as specified by the CC.
391 MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, SplitEVTs[i]);
392 if (EVT(NewVT) != SplitEVTs[i]) {
393 unsigned ExtendOp = TargetOpcode::G_ANYEXT;
394 if (F.getAttributes().hasRetAttr(Attribute::SExt))
395 ExtendOp = TargetOpcode::G_SEXT;
396 else if (F.getAttributes().hasRetAttr(Attribute::ZExt))
397 ExtendOp = TargetOpcode::G_ZEXT;
398
399 LLT NewLLT(NewVT);
400 LLT OldLLT(MVT::getVT(CurArgInfo.Ty));
401 CurArgInfo.Ty = EVT(NewVT).getTypeForEVT(Ctx);
402 // Instead of an extend, we might have a vector type which needs
403 // padding with more elements, e.g. <2 x half> -> <4 x half>.
404 if (NewVT.isVector()) {
405 if (OldLLT.isVector()) {
406 if (NewLLT.getNumElements() > OldLLT.getNumElements()) {
407 // We don't handle VA types which are not exactly twice the
408 // size, but can easily be done in future.
409 if (NewLLT.getNumElements() != OldLLT.getNumElements() * 2) {
410 LLVM_DEBUG(dbgs() << "Outgoing vector ret has too many elts");
411 return false;
412 }
413 auto Undef = MIRBuilder.buildUndef({OldLLT});
414 CurVReg =
415 MIRBuilder.buildMerge({NewLLT}, {CurVReg, Undef}).getReg(0);
416 } else {
417 // Just do a vector extend.
418 CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
419 .getReg(0);
420 }
421 } else if (NewLLT.getNumElements() == 2) {
422 // We need to pad a <1 x S> type to <2 x S>. Since we don't have
423 // <1 x S> vector types in GISel we use a build_vector instead
424 // of a vector merge/concat.
425 auto Undef = MIRBuilder.buildUndef({OldLLT});
426 CurVReg =
427 MIRBuilder
428 .buildBuildVector({NewLLT}, {CurVReg, Undef.getReg(0)})
429 .getReg(0);
430 } else {
431 LLVM_DEBUG(dbgs() << "Could not handle ret ty\n");
432 return false;
433 }
434 } else {
435 // If the split EVT was a <1 x T> vector, and NewVT is T, then we
436 // don't have to do anything since we don't distinguish between the
437 // two.
438 if (NewLLT != MRI.getType(CurVReg)) {
439 // A scalar extend.
440 CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
441 .getReg(0);
442 }
443 }
444 }
445 }
446 if (CurVReg != CurArgInfo.Regs[0]) {
447 CurArgInfo.Regs[0] = CurVReg;
448 // Reset the arg flags after modifying CurVReg.
449 setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
450 }
451 splitToValueTypes(CurArgInfo, SplitArgs, DL, CC);
452 }
453
454 AArch64OutgoingValueAssigner Assigner(AssignFn, AssignFn, Subtarget,
455 /*IsReturn*/ true);
456 OutgoingArgHandler Handler(MIRBuilder, MRI, MIB);
457 Success = determineAndHandleAssignments(Handler, Assigner, SplitArgs,
458 MIRBuilder, CC, F.isVarArg());
459 }
460
461 if (SwiftErrorVReg) {
462 MIB.addUse(AArch64::X21, RegState::Implicit);
463 MIRBuilder.buildCopy(AArch64::X21, SwiftErrorVReg);
464 }
465
466 MIRBuilder.insertInstr(MIB);
467 return Success;
468}
469
470bool AArch64CallLowering::canLowerReturn(MachineFunction &MF,
471 CallingConv::ID CallConv,
472 SmallVectorImpl<BaseArgInfo> &Outs,
473 bool IsVarArg) const {
474 SmallVector<CCValAssign, 16> ArgLocs;
475 const auto &TLI = *getTLI<AArch64TargetLowering>();
476 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
477 MF.getFunction().getContext());
478
479 return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv));
480}
481
482/// Helper function to compute forwarded registers for musttail calls. Computes
483/// the forwarded registers, sets MBB liveness, and emits COPY instructions that
484/// can be used to save + restore registers later.
485static void handleMustTailForwardedRegisters(MachineIRBuilder &MIRBuilder,
486 CCAssignFn *AssignFn) {
487 MachineBasicBlock &MBB = MIRBuilder.getMBB();
488 MachineFunction &MF = MIRBuilder.getMF();
489 MachineFrameInfo &MFI = MF.getFrameInfo();
490
491 if (!MFI.hasMustTailInVarArgFunc())
492 return;
493
494 AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
495 const Function &F = MF.getFunction();
496 assert(F.isVarArg() && "Expected F to be vararg?");
497
498 // Compute the set of forwarded registers. The rest are scratch.
499 SmallVector<CCValAssign, 16> ArgLocs;
500 CCState CCInfo(F.getCallingConv(), /*IsVarArg=*/true, MF, ArgLocs,
501 F.getContext());
502 SmallVector<MVT, 2> RegParmTypes;
503 RegParmTypes.push_back(MVT::i64);
504 RegParmTypes.push_back(MVT::f128);
505
506 // Later on, we can use this vector to restore the registers if necessary.
507 SmallVectorImpl<ForwardedRegister> &Forwards =
508 FuncInfo->getForwardedMustTailRegParms();
509 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, AssignFn);
510
511 // Conservatively forward X8, since it might be used for an aggregate
512 // return.
513 if (!CCInfo.isAllocated(AArch64::X8)) {
514 Register X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
515 Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64));
516 }
517
518 // Add the forwards to the MachineBasicBlock and MachineFunction.
519 for (const auto &F : Forwards) {
520 MBB.addLiveIn(F.PReg);
521 MIRBuilder.buildCopy(Register(F.VReg), Register(F.PReg));
522 }
523}
524
525bool AArch64CallLowering::fallBackToDAGISel(const MachineFunction &MF) const {
526 auto &F = MF.getFunction();
527 if (isa<ScalableVectorType>(F.getReturnType()))
528 return true;
529 if (llvm::any_of(F.args(), [](const Argument &A) {
530 return isa<ScalableVectorType>(A.getType());
531 }))
532 return true;
533 const auto &ST = MF.getSubtarget<AArch64Subtarget>();
534 if (!ST.hasNEON() || !ST.hasFPARMv8()) {
535 LLVM_DEBUG(dbgs() << "Falling back to SDAG because we don't support no-NEON\n");
536 return true;
537 }
538 return false;
539}
540
541bool AArch64CallLowering::lowerFormalArguments(
542 MachineIRBuilder &MIRBuilder, const Function &F,
543 ArrayRef<ArrayRef<Register>> VRegs, FunctionLoweringInfo &FLI) const {
544 MachineFunction &MF = MIRBuilder.getMF();
545 MachineBasicBlock &MBB = MIRBuilder.getMBB();
546 MachineRegisterInfo &MRI = MF.getRegInfo();
547 auto &DL = F.getParent()->getDataLayout();
548
549 SmallVector<ArgInfo, 8> SplitArgs;
550 SmallVector<std::pair<Register, Register>> BoolArgs;
551
552 // Insert the hidden sret parameter if the return value won't fit in the
553 // return registers.
554 if (!FLI.CanLowerReturn)
555 insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);
556
557 unsigned i = 0;
558 for (auto &Arg : F.args()) {
559 if (DL.getTypeStoreSize(Arg.getType()).isZero())
560 continue;
561
562 ArgInfo OrigArg{VRegs[i], Arg, i};
563 setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);
564
565 // i1 arguments are zero-extended to i8 by the caller. Emit a
566 // hint to reflect this.
567 if (OrigArg.Ty->isIntegerTy(1)) {
568 assert(OrigArg.Regs.size() == 1 &&
569 MRI.getType(OrigArg.Regs[0]).getSizeInBits() == 1 &&
570 "Unexpected registers used for i1 arg");
571
572 if (!OrigArg.Flags[0].isZExt()) {
573 // Lower i1 argument as i8, and insert AssertZExt + Trunc later.
574 Register OrigReg = OrigArg.Regs[0];
575 Register WideReg = MRI.createGenericVirtualRegister(LLT::scalar(8));
576 OrigArg.Regs[0] = WideReg;
577 BoolArgs.push_back({OrigReg, WideReg});
578 }
579 }
580
581 if (Arg.hasAttribute(Attribute::SwiftAsync))
582 MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);
583
584 splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv());
585 ++i;
586 }
587
588 if (!MBB.empty())
589 MIRBuilder.setInstr(*MBB.begin());
590
591 const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
592 CCAssignFn *AssignFn =
593 TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
594
595 AArch64IncomingValueAssigner Assigner(AssignFn, AssignFn);
596 FormalArgHandler Handler(MIRBuilder, MRI);
597 if (!determineAndHandleAssignments(Handler, Assigner, SplitArgs, MIRBuilder,
598 F.getCallingConv(), F.isVarArg()))
599 return false;
600
601 if (!BoolArgs.empty()) {
602 for (auto &KV : BoolArgs) {
603 Register OrigReg = KV.first;
604 Register WideReg = KV.second;
605 LLT WideTy = MRI.getType(WideReg);
606 assert(MRI.getType(OrigReg).getScalarSizeInBits() == 1 &&
607 "Unexpected bit size of a bool arg");
608 MIRBuilder.buildTrunc(
609 OrigReg, MIRBuilder.buildAssertZExt(WideTy, WideReg, 1).getReg(0));
610 }
611 }
612
613 AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
614 uint64_t StackOffset = Assigner.StackOffset;
615 if (F.isVarArg()) {
616 auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
617 if (!Subtarget.isTargetDarwin()) {
618 // FIXME: we need to reimplement saveVarArgsRegisters from
619 // AArch64ISelLowering.
620 return false;
621 }
622
623 // We currently pass all varargs at 8-byte alignment, or 4 in ILP32.
624 StackOffset =
625 alignTo(Assigner.StackOffset, Subtarget.isTargetILP32() ? 4 : 8);
626
627 auto &MFI = MIRBuilder.getMF().getFrameInfo();
628 FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
629 }
630
631 if (doesCalleeRestoreStack(F.getCallingConv(),
632 MF.getTarget().Options.GuaranteedTailCallOpt)) {
633 // We have a non-standard ABI, so why not make full use of the stack that
634 // we're going to pop? It must be aligned to 16 B in any case.
635 StackOffset = alignTo(StackOffset, 16);
636
637 // If we're expected to restore the stack (e.g. fastcc), then we'll be
638 // adding a multiple of 16.
639 FuncInfo->setArgumentStackToRestore(StackOffset);
640
641 // Our own callers will guarantee that the space is free by giving an
642 // aligned value to CALLSEQ_START.
643 }
644
645 // When we tail call, we need to check if the callee's arguments
646 // will fit on the caller's stack. So, whenever we lower formal arguments,
647 // we should keep track of this information, since we might lower a tail call
648 // in this function later.
649 FuncInfo->setBytesInStackArgArea(StackOffset);
650
651 auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
652 if (Subtarget.hasCustomCallingConv())
653 Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);
654
655 handleMustTailForwardedRegisters(MIRBuilder, AssignFn);
656
657 // Move back to the end of the basic block.
658 MIRBuilder.setMBB(MBB);
659
660 return true;
661}
662
663/// Return true if the calling convention is one that we can guarantee TCO for.
664static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) {
665 return (CC == CallingConv::Fast && GuaranteeTailCalls) ||
666 CC == CallingConv::Tail || CC == CallingConv::SwiftTail;
667}
668
669/// Return true if we might ever do TCO for calls with this calling convention.
670static bool mayTailCallThisCC(CallingConv::ID CC) {
671 switch (CC) {
672 case CallingConv::C:
673 case CallingConv::PreserveMost:
674 case CallingConv::Swift:
675 case CallingConv::SwiftTail:
676 case CallingConv::Tail:
677 case CallingConv::Fast:
678 return true;
679 default:
680 return false;
681 }
682}
683
684/// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for
685/// CC.
686static std::pair<CCAssignFn *, CCAssignFn *>
687getAssignFnsForCC(CallingConv::ID CC, const AArch64TargetLowering &TLI) {
688 return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
689}
690
691bool AArch64CallLowering::doCallerAndCalleePassArgsTheSameWay(
692 CallLoweringInfo &Info, MachineFunction &MF,
693 SmallVectorImpl<ArgInfo> &InArgs) const {
694 const Function &CallerF = MF.getFunction();
695 CallingConv::ID CalleeCC = Info.CallConv;
696 CallingConv::ID CallerCC = CallerF.getCallingConv();
697
698 // If the calling conventions match, then everything must be the same.
699 if (CalleeCC == CallerCC)
700 return true;
701
702 // Check if the caller and callee will handle arguments in the same way.
703 const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
704 CCAssignFn *CalleeAssignFnFixed;
705 CCAssignFn *CalleeAssignFnVarArg;
706 std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
707 getAssignFnsForCC(CalleeCC, TLI);
708
709 CCAssignFn *CallerAssignFnFixed;
710 CCAssignFn *CallerAssignFnVarArg;
711 std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
712 getAssignFnsForCC(CallerCC, TLI);
713
714 AArch64IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,
715 CalleeAssignFnVarArg);
716 AArch64IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,
717 CallerAssignFnVarArg);
718
719 if (!resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner))
720 return false;
721
722 // Make sure that the caller and callee preserve all of the same registers.
723 auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
724 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
725 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
726 if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv()) {
727 TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
728 TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
729 }
730
731 return TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved);
732}
733
734bool AArch64CallLowering::areCalleeOutgoingArgsTailCallable(
735 CallLoweringInfo &Info, MachineFunction &MF,
736 SmallVectorImpl<ArgInfo> &OutArgs) const {
737 // If there are no outgoing arguments, then we are done.
738 if (OutArgs.empty())
739 return true;
740
741 const Function &CallerF = MF.getFunction();
742 LLVMContext &Ctx = CallerF.getContext();
743 CallingConv::ID CalleeCC = Info.CallConv;
744 CallingConv::ID CallerCC = CallerF.getCallingConv();
745 const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
746 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
747
748 CCAssignFn *AssignFnFixed;
749 CCAssignFn *AssignFnVarArg;
750 std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);
751
752 // We have outgoing arguments. Make sure that we can tail call with them.
753 SmallVector<CCValAssign, 16> OutLocs;
754 CCState OutInfo(CalleeCC, false, MF, OutLocs, Ctx);
755
756 AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
757 Subtarget, /*IsReturn*/ false);
758 if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo)) {
759 LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
760 return false;
761 }
762
763 // Make sure that they can fit on the caller's stack.
764 const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
765 if (OutInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) {
766 LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
767 return false;
768 }
769
770 // Verify that the parameters in callee-saved registers match.
771 // TODO: Port this over to CallLowering as general code once swiftself is
772 // supported.
773 auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
774 const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
775 MachineRegisterInfo &MRI = MF.getRegInfo();
776
777 if (Info.IsVarArg) {
778 // Be conservative and disallow variadic memory operands to match SDAG's
779 // behaviour.
780 // FIXME: If the caller's calling convention is C, then we can
781 // potentially use its argument area. However, for cases like fastcc,
782 // we can't do anything.
783 for (unsigned i = 0; i < OutLocs.size(); ++i) {
784 auto &ArgLoc = OutLocs[i];
785 if (ArgLoc.isRegLoc())
786 continue;
787
788 LLVM_DEBUG(
789 dbgs()
790 << "... Cannot tail call vararg function with stack arguments\n");
791 return false;
792 }
793 }
794
795 return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs);
796}
797
798bool AArch64CallLowering::isEligibleForTailCallOptimization(
799 MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
800 SmallVectorImpl<ArgInfo> &InArgs,
801 SmallVectorImpl<ArgInfo> &OutArgs) const {
802
803 // Must pass all target-independent checks in order to tail call optimize.
804 if (!Info.IsTailCall)
5
Assuming field 'IsTailCall' is true
6
Taking false branch
805 return false;
806
807 CallingConv::ID CalleeCC = Info.CallConv;
808 MachineFunction &MF = MIRBuilder.getMF();
809 const Function &CallerF = MF.getFunction();
810
811 LLVM_DEBUG(dbgs() << "Attempting to lower call as tail call\n");
7
Assuming 'DebugFlag' is false
8
Loop condition is false. Exiting loop
812
813 if (Info.SwiftErrorVReg) {
9
Assuming the condition is false
10
Taking false branch
814 // TODO: We should handle this.
815 // Note that this is also handled by the check for no outgoing arguments.
816 // Proactively disabling this though, because the swifterror handling in
817 // lowerCall inserts a COPY *after* the location of the call.
818 LLVM_DEBUG(dbgs() << "... Cannot handle tail calls with swifterror yet.\n");
819 return false;
820 }
821
822 if (!mayTailCallThisCC(CalleeCC)) {
823 LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
11
Taking true branch
12
Loop condition is false. Exiting loop
824 return false;
13
Returning without writing to 'Info.IsMustTailCall', which participates in a condition later
14
Returning without writing to 'Info.CB'
15
Returning zero, which participates in a condition later
825 }
826
827 // Byval parameters hand the function a pointer directly into the stack area
828 // we want to reuse during a tail call. Working around this *is* possible (see
829 // X86).
830 //
831 // FIXME: In AArch64ISelLowering, this isn't worked around. Can/should we try
832 // it?
833 //
834 // On Windows, "inreg" attributes signify non-aggregate indirect returns.
835 // In this case, it is necessary to save/restore X0 in the callee. Tail
836 // call opt interferes with this. So we disable tail call opt when the
837 // caller has an argument with "inreg" attribute.
838 //
839 // FIXME: Check whether the callee also has an "inreg" argument.
840 //
841 // When the caller has a swifterror argument, we don't want to tail call
842 // because we would have to move into the swifterror register before the
843 // tail call.
844 if (any_of(CallerF.args(), [](const Argument &A) {
845 return A.hasByValAttr() || A.hasInRegAttr() || A.hasSwiftErrorAttr();
846 })) {
847 LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval, "
848 "inreg, or swifterror arguments\n");
849 return false;
850 }
851
852 // Externally-defined functions with weak linkage should not be
853 // tail-called on AArch64 when the OS does not support dynamic
854 // pre-emption of symbols, as the AAELF spec requires normal calls
855 // to undefined weak functions to be replaced with a NOP or jump to the
856 // next instruction. The behaviour of branch instructions in this
857 // situation (as used for tail calls) is implementation-defined, so we
858 // cannot rely on the linker replacing the tail call with a return.
859 if (Info.Callee.isGlobal()) {
860 const GlobalValue *GV = Info.Callee.getGlobal();
861 const Triple &TT = MF.getTarget().getTargetTriple();
862 if (GV->hasExternalWeakLinkage() &&
863 (!TT.isOSWindows() || TT.isOSBinFormatELF() ||
864 TT.isOSBinFormatMachO())) {
865 LLVM_DEBUG(dbgs() << "... Cannot tail call externally-defined function "
866 "with weak linkage for this OS.\n");
867 return false;
868 }
869 }
870
871 // If we have -tailcallopt, then we're done.
872 if (canGuaranteeTCO(CalleeCC, MF.getTarget().Options.GuaranteedTailCallOpt))
873 return CalleeCC == CallerF.getCallingConv();
874
875 // We don't have -tailcallopt, so we're allowed to change the ABI (sibcall).
876 // Try to find cases where we can do that.
877
878 // I want anyone implementing a new calling convention to think long and hard
879 // about this assert.
880 assert((!Info.IsVarArg || CalleeCC == CallingConv::C) &&
881 "Unexpected variadic calling convention");
882
883 // Verify that the incoming and outgoing arguments from the callee are
884 // safe to tail call.
885 if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
886 LLVM_DEBUG(
887 dbgs()
888 << "... Caller and callee have incompatible calling conventions.\n");
889 return false;
890 }
891
892 if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
893 return false;
894
895 LLVM_DEBUG(
896 dbgs() << "... Call is eligible for tail call optimization.\n");
897 return true;
898}
899
900static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
901 bool IsTailCall) {
902 if (!IsTailCall)
903 return IsIndirect ? getBLRCallOpcode(CallerF) : (unsigned)AArch64::BL;
904
905 if (!IsIndirect)
906 return AArch64::TCRETURNdi;
907
908 // When BTI is enabled, we need to use TCRETURNriBTI to make sure that we use
909 // x16 or x17.
910 if (CallerF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement())
911 return AArch64::TCRETURNriBTI;
912
913 return AArch64::TCRETURNri;
914}
915
916static const uint32_t *
917getMaskForArgs(SmallVectorImpl<AArch64CallLowering::ArgInfo> &OutArgs,
918 AArch64CallLowering::CallLoweringInfo &Info,
919 const AArch64RegisterInfo &TRI, MachineFunction &MF) {
920 const uint32_t *Mask;
921 if (!OutArgs.empty() && OutArgs[0].Flags[0].isReturned()) {
922 // For 'this' returns, use the X0-preserving mask if applicable
923 Mask = TRI.getThisReturnPreservedMask(MF, Info.CallConv);
924 if (!Mask) {
925 OutArgs[0].Flags[0].setReturned(false);
926 Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
927 }
928 } else {
929 Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
930 }
931 return Mask;
932}
933
934bool AArch64CallLowering::lowerTailCall(
935 MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
936 SmallVectorImpl<ArgInfo> &OutArgs) const {
937 MachineFunction &MF = MIRBuilder.getMF();
938 const Function &F = MF.getFunction();
939 MachineRegisterInfo &MRI = MF.getRegInfo();
940 const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
941 AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
942
943 // True when we're tail calling, but without -tailcallopt.
944 bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt &&
945 Info.CallConv != CallingConv::Tail &&
946 Info.CallConv != CallingConv::SwiftTail;
947
948 // TODO: Right now, regbankselect doesn't know how to handle the rtcGPR64
949 // register class. Until we can do that, we should fall back here.
950 if (MF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement()) {
951 LLVM_DEBUG(
952 dbgs() << "Cannot lower indirect tail calls with BTI enabled yet.\n");
953 return false;
954 }
955
956 // Find out which ABI gets to decide where things go.
957 CallingConv::ID CalleeCC = Info.CallConv;
958 CCAssignFn *AssignFnFixed;
959 CCAssignFn *AssignFnVarArg;
960 std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);
961
962 MachineInstrBuilder CallSeqStart;
963 if (!IsSibCall)
964 CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);
965
966 unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true);
967 auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
968 MIB.add(Info.Callee);
969
970 // Byte offset for the tail call. When we are sibcalling, this will always
971 // be 0.
972 MIB.addImm(0);
973
974 // Tell the call which registers are clobbered.
975 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
976 auto TRI = Subtarget.getRegisterInfo();
977 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
978 if (Subtarget.hasCustomCallingConv())
979 TRI->UpdateCustomCallPreservedMask(MF, &Mask);
980 MIB.addRegMask(Mask);
981
982 if (Info.CFIType)
983 MIB->setCFIType(MF, Info.CFIType->getZExtValue());
984
985 if (TRI->isAnyArgRegReserved(MF))
986 TRI->emitReservedArgRegCallError(MF);
987
988 // FPDiff is the byte offset of the call's argument area from the callee's.
989 // Stores to callee stack arguments will be placed in FixedStackSlots offset
990 // by this amount for a tail call. In a sibling call it must be 0 because the
991 // caller will deallocate the entire stack and the callee still expects its
992 // arguments to begin at SP+0.
993 int FPDiff = 0;
994
995 // This will be 0 for sibcalls, potentially nonzero for tail calls produced
996 // by -tailcallopt. For sibcalls, the memory operands for the call are
997 // already available in the caller's incoming argument space.
998 unsigned NumBytes = 0;
999 if (!IsSibCall) {
1000 // We aren't sibcalling, so we need to compute FPDiff. We need to do this
1001 // before handling assignments, because FPDiff must be known for memory
1002 // arguments.
1003 unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
1004 SmallVector<CCValAssign, 16> OutLocs;
1005 CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());
1006
1007 AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
1008 Subtarget, /*IsReturn*/ false);
1009 if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo))
1010 return false;
1011
1012 // The callee will pop the argument stack as a tail call. Thus, we must
1013 // keep it 16-byte aligned.
1014 NumBytes = alignTo(OutInfo.getNextStackOffset(), 16);
1015
1016 // FPDiff will be negative if this tail call requires more space than we
1017 // would automatically have in our incoming argument space. Positive if we
1018 // actually shrink the stack.
1019 FPDiff = NumReusableBytes - NumBytes;
1020
1021 // Update the required reserved area if this is the tail call requiring the
1022 // most argument stack space.
1023 if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (unsigned)-FPDiff)
1024 FuncInfo->setTailCallReservedStack(-FPDiff);
1025
1026 // The stack pointer must be 16-byte aligned at all times it's used for a
1027 // memory operation, which in practice means at *all* times and in
1028 // particular across call boundaries. Therefore our own arguments started at
1029 // a 16-byte aligned SP and the delta applied for the tail call should
1030 // satisfy the same constraint.
1031 assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
1032 }
1033
1034 const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();
1035
1036 AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
1037 Subtarget, /*IsReturn*/ false);
1038
1039 // Do the actual argument marshalling.
1040 OutgoingArgHandler Handler(MIRBuilder, MRI, MIB,
1041 /*IsTailCall*/ true, FPDiff);
1042 if (!determineAndHandleAssignments(Handler, Assigner, OutArgs, MIRBuilder,
1043 CalleeCC, Info.IsVarArg))
1044 return false;
1045
1046 Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);
1047
1048 if (Info.IsVarArg && Info.IsMustTailCall) {
1049 // Now we know what's being passed to the function. Add uses to the call for
1050 // the forwarded registers that we *aren't* passing as parameters. This will
1051 // preserve the copies we build earlier.
1052 for (const auto &F : Forwards) {
1053 Register ForwardedReg = F.PReg;
1054 // If the register is already passed, or aliases a register which is
1055 // already being passed, then skip it.
1056 if (any_of(MIB->uses(), [&ForwardedReg, &TRI](const MachineOperand &Use) {
1057 if (!Use.isReg())
1058 return false;
1059 return TRI->regsOverlap(Use.getReg(), ForwardedReg);
1060 }))
1061 continue;
1062
1063 // We aren't passing it already, so we should add it to the call.
1064 MIRBuilder.buildCopy(ForwardedReg, Register(F.VReg));
1065 MIB.addReg(ForwardedReg, RegState::Implicit);
1066 }
1067 }
1068
1069 // If we have -tailcallopt, we need to adjust the stack. We'll do the call
1070 // sequence start and end here.
1071 if (!IsSibCall) {
1072 MIB->getOperand(1).setImm(FPDiff);
1073 CallSeqStart.addImm(0).addImm(0);
1074 // End the call sequence *before* emitting the call. Normally, we would
1075 // tidy the frame up after the call. However, here, we've laid out the
1076 // parameters so that when SP is reset, they will be in the correct
1077 // location.
1078 MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP).addImm(0).addImm(0);
1079 }
1080
1081 // Now we can add the actual call instruction to the correct basic block.
1082 MIRBuilder.insertInstr(MIB);
1083
1084 // If Callee is a reg, since it is used by a target specific instruction,
1085 // it must have a register class matching the constraint of that instruction.
1086 if (MIB->getOperand(0).isReg())
1087 constrainOperandRegClass(MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
1088 *MF.getSubtarget().getRegBankInfo(), *MIB,
1089 MIB->getDesc(), MIB->getOperand(0), 0);
1090
1091 MF.getFrameInfo().setHasTailCall();
1092 Info.LoweredTailCall = true;
1093 return true;
1094}
1095
1096bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
1097 CallLoweringInfo &Info) const {
1098 MachineFunction &MF = MIRBuilder.getMF();
1099 const Function &F = MF.getFunction();
1100 MachineRegisterInfo &MRI = MF.getRegInfo();
1101 auto &DL = F.getParent()->getDataLayout();
1102 const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
1103
1104 SmallVector<ArgInfo, 8> OutArgs;
1105 for (auto &OrigArg : Info.OrigArgs) {
1
Value assigned to field 'CB'
2
Assuming '__begin1' is equal to '__end1'
1106 splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv);
1107 // AAPCS requires that we zero-extend i1 to 8 bits by the caller.
1108 if (OrigArg.Ty->isIntegerTy(1)) {
1109 ArgInfo &OutArg = OutArgs.back();
1110 assert(OutArg.Regs.size() == 1 &&
1111 MRI.getType(OutArg.Regs[0]).getSizeInBits() == 1 &&
1112 "Unexpected registers used for i1 arg");
1113
1114 // We cannot use a ZExt ArgInfo flag here, because it will
1115 // zero-extend the argument to i32 instead of just i8.
1116 OutArg.Regs[0] =
1117 MIRBuilder.buildZExt(LLT::scalar(8), OutArg.Regs[0]).getReg(0);
1118 LLVMContext &Ctx = MF.getFunction().getContext();
1119 OutArg.Ty = Type::getInt8Ty(Ctx);
1120 }
1121 }
1122
1123 SmallVector<ArgInfo, 8> InArgs;
1124 if (!Info.OrigRet.Ty->isVoidTy())
3. Taking true branch
1125 splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv);
1126
1127 // If we can lower as a tail call, do that instead.
1128 bool CanTailCallOpt =
1129 isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);
4. Calling 'AArch64CallLowering::isEligibleForTailCallOptimization'
16. Returning from 'AArch64CallLowering::isEligibleForTailCallOptimization'
1130
1131 // We must emit a tail call if we have musttail.
1132 if (Info.IsMustTailCall && !CanTailCallOpt) {
17. Assuming field 'IsMustTailCall' is false
1133 // There are types of incoming/outgoing arguments we can't handle yet, so
1134 // it doesn't make sense to actually die here like in ISelLowering. Instead,
1135 // fall back to SelectionDAG and let it try to handle this.
1136 LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
1137 return false;
1138 }
1139
1140 Info.IsTailCall = CanTailCallOpt;
1141 if (CanTailCallOpt)
17.1. 'CanTailCallOpt' is false
18. Taking false branch
1142 return lowerTailCall(MIRBuilder, Info, OutArgs);
1143
1144 // Find out which ABI gets to decide where things go.
1145 CCAssignFn *AssignFnFixed;
1146 CCAssignFn *AssignFnVarArg;
1147 std::tie(AssignFnFixed, AssignFnVarArg) =
1148 getAssignFnsForCC(Info.CallConv, TLI);
1149
1150 MachineInstrBuilder CallSeqStart;
1151 CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);
1152
1153 // Create a temporarily-floating call instruction so we can add the implicit
1154 // uses of arg registers.
1155
1156 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
1157 unsigned Opc = 0;
1158 // Calls with operand bundle "clang.arc.attachedcall" are special. They should
1159 // be expanded to the call, directly followed by a special marker sequence and
1160 // a call to an ObjC library function.
1161 if (Info.CB && objcarc::hasAttachedCallOpBundle(Info.CB))
19. Assuming field 'CB' is null
1162 Opc = AArch64::BLR_RVMARKER;
1163 // A call to a returns twice function like setjmp must be followed by a bti
1164 // instruction.
1165 else if (Info.CB &&
19.1. Field 'CB' is null
20. Taking false branch
1166 Info.CB->getAttributes().hasFnAttr(Attribute::ReturnsTwice) &&
1167 !Subtarget.noBTIAtReturnTwice() &&
1168 MF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement())
1169 Opc = AArch64::BLR_BTI;
1170 else
1171 Opc = getCallOpcode(MF, Info.Callee.isReg(), false);
1172
1173 auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
1174 unsigned CalleeOpNo = 0;
1175
1176 if (Opc == AArch64::BLR_RVMARKER) {
21. Assuming 'Opc' is equal to BLR_RVMARKER
22. Taking true branch
1177 // Add a target global address for the retainRV/claimRV runtime function
1178 // just before the call target.
1179 Function *ARCFn = *objcarc::getAttachedARCFunction(Info.CB);
23. Passing null pointer value via 1st parameter 'CB'
24. Calling 'getAttachedARCFunction'
1180 MIB.addGlobalAddress(ARCFn);
1181 ++CalleeOpNo;
1182 } else if (Info.CFIType) {
1183 MIB->setCFIType(MF, Info.CFIType->getZExtValue());
1184 }
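Note: the path above needs two assumptions at once: step 19 takes 'Info.CB' to be null, while steps 21-22 take 'Opc' to be BLR_RVMARKER, even though Opc only becomes BLR_RVMARKER (line 1162) under the 'Info.CB &&' guard on line 1161. The analyzer does not correlate these two facts across the intervening statements, which is how the null 'Info.CB' reaches getAttachedARCFunction on line 1179 and the dereference reported at ObjCARCUtil.h:44. Restating the invariant locally, for example with an assert on Info.CB at the top of the BLR_RVMARKER branch, would be one way to make it visible to the analyzer (a suggestion, not the upstream code).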
1185
1186 MIB.add(Info.Callee);
1187
1188 // Tell the call which registers are clobbered.
1189 const uint32_t *Mask;
1190 const auto *TRI = Subtarget.getRegisterInfo();
1191
1192 AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
1193 Subtarget, /*IsReturn*/ false);
1194 // Do the actual argument marshalling.
1195 OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, /*IsReturn*/ false);
1196 if (!determineAndHandleAssignments(Handler, Assigner, OutArgs, MIRBuilder,
1197 Info.CallConv, Info.IsVarArg))
1198 return false;
1199
1200 Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);
1201
1202 if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv())
1203 TRI->UpdateCustomCallPreservedMask(MF, &Mask);
1204 MIB.addRegMask(Mask);
1205
1206 if (TRI->isAnyArgRegReserved(MF))
1207 TRI->emitReservedArgRegCallError(MF);
1208
1209 // Now we can add the actual call instruction to the correct basic block.
1210 MIRBuilder.insertInstr(MIB);
1211
1212 // If Callee is a reg, since it is used by a target specific
1213 // instruction, it must have a register class matching the
1214 // constraint of that instruction.
1215 if (MIB->getOperand(CalleeOpNo).isReg())
1216 constrainOperandRegClass(MF, *TRI, MRI, *Subtarget.getInstrInfo(),
1217 *Subtarget.getRegBankInfo(), *MIB, MIB->getDesc(),
1218 MIB->getOperand(CalleeOpNo), CalleeOpNo);
1219
1220 // Finally we can copy the returned value back into its virtual-register. In
1221 // symmetry with the arguments, the physical register must be an
1222 // implicit-define of the call instruction.
1223 if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
1224 CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv);
1225 CallReturnHandler Handler(MIRBuilder, MRI, MIB);
1226 bool UsingReturnedArg =
1227 !OutArgs.empty() && OutArgs[0].Flags[0].isReturned();
1228
1229 AArch64OutgoingValueAssigner Assigner(RetAssignFn, RetAssignFn, Subtarget,
1230 /*IsReturn*/ false);
1231 ReturnedArgCallReturnHandler ReturnedArgHandler(MIRBuilder, MRI, MIB);
1232 if (!determineAndHandleAssignments(
1233 UsingReturnedArg ? ReturnedArgHandler : Handler, Assigner, InArgs,
1234 MIRBuilder, Info.CallConv, Info.IsVarArg,
1235 UsingReturnedArg ? makeArrayRef(OutArgs[0].Regs) : None))
1236 return false;
1237 }
1238
1239 if (Info.SwiftErrorVReg) {
1240 MIB.addDef(AArch64::X21, RegState::Implicit);
1241 MIRBuilder.buildCopy(Info.SwiftErrorVReg, Register(AArch64::X21));
1242 }
1243
1244 uint64_t CalleePopBytes =
1245 doesCalleeRestoreStack(Info.CallConv,
1246 MF.getTarget().Options.GuaranteedTailCallOpt)
1247 ? alignTo(Assigner.StackOffset, 16)
1248 : 0;
1249
1250 CallSeqStart.addImm(Assigner.StackOffset).addImm(0);
1251 MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
1252 .addImm(Assigner.StackOffset)
1253 .addImm(CalleePopBytes);
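As a quick check of the CalleePopBytes arithmetic above, here is a minimal standalone sketch; the offsets 0 and 24 are made up for illustration, and only llvm::alignTo is the real API (the 16-byte rounding matches the stack alignment the code above assumes):

#include "llvm/Support/MathExtras.h"
#include <cassert>

int main() {
  // The callee-pop amount is the assigned stack-argument size rounded up to
  // 16 bytes, or zero when the callee does not restore the stack.
  assert(llvm::alignTo(0, 16) == 0);
  assert(llvm::alignTo(24, 16) == 32);
  return 0;
}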
1254
1255 if (!Info.CanLowerReturn) {
1256 insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
1257 Info.DemoteRegister, Info.DemoteStackIndex);
1258 }
1259 return true;
1260}
1261
1262bool AArch64CallLowering::isTypeIsValidForThisReturn(EVT Ty) const {
1263 return Ty.getSizeInBits() == 64;
1264}
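For readers cross-checking the diagnostic against the header below, the following is a minimal null-tolerant sketch built only from the utilities reproduced in ObjCARCUtil.h; the helper name getAttachedARCFunctionOrNull is ours for illustration and is not an LLVM API. It simply restates the 'Info.CB && hasAttachedCallOpBundle(Info.CB)' guard from line 1161 next to the lookup:

#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"

// Returns the attached ARC runtime function, or nullptr when the call site is
// missing or carries no "clang.arc.attachedcall" operand bundle.
static llvm::Function *getAttachedARCFunctionOrNull(const llvm::CallBase *CB) {
  if (!CB || !llvm::objcarc::hasAttachedCallOpBundle(CB))
    return nullptr;
  auto Fn = llvm::objcarc::getAttachedARCFunction(CB);
  return Fn ? *Fn : nullptr;
}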

/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/include/llvm/Analysis/ObjCARCUtil.h

1//===- ObjCARCUtil.h - ObjC ARC Utility Functions ---------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file defines ARC utility functions which are used by various parts of
10/// the compiler.
11///
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ANALYSIS_OBJCARCUTIL_H
15#define LLVM_ANALYSIS_OBJCARCUTIL_H
16
17#include "llvm/Analysis/ObjCARCInstKind.h"
18#include "llvm/IR/Function.h"
19#include "llvm/IR/InstrTypes.h"
20#include "llvm/IR/LLVMContext.h"
21
22namespace llvm {
23namespace objcarc {
24
25inline const char *getRVMarkerModuleFlagStr() {
26 return "clang.arc.retainAutoreleasedReturnValueMarker";
27}
28
29inline bool hasAttachedCallOpBundle(const CallBase *CB) {
30 // Ignore the bundle if the return type is void. Global optimization passes
31 // can turn the called function's return type to void. That should happen only
32 // if the call doesn't return and the call to @llvm.objc.clang.arc.noop.use
33 // no longer consumes the function return or is deleted. In that case, it's
34 // not necessary to emit the marker instruction or calls to the ARC runtime
35 // functions.
36 return !CB->getFunctionType()->getReturnType()->isVoidTy() &&
37 CB->getOperandBundle(LLVMContext::OB_clang_arc_attachedcall)
38 .has_value();
39}
40
41/// This function returns operand bundle clang_arc_attachedcall's argument,
42/// which is the address of the ARC runtime function.
43inline Optional<Function *> getAttachedARCFunction(const CallBase *CB) {
44 auto B = CB->getOperandBundle(LLVMContext::OB_clang_arc_attachedcall);
25. Called C++ object pointer is null
45 if (!B)
46 return None;
47
48 return cast<Function>(B->Inputs[0]);
49}
50
51/// Check whether the function is retainRV/unsafeClaimRV.
52inline bool isRetainOrClaimRV(ARCInstKind Kind) {
53 return Kind == ARCInstKind::RetainRV || Kind == ARCInstKind::UnsafeClaimRV;
54}
55
56/// This function returns the ARCInstKind of the function attached to operand
57/// bundle clang_arc_attachedcall. It returns None if the call doesn't have the
58/// operand bundle or the operand is null. Otherwise it returns either RetainRV
59/// or UnsafeClaimRV.
60inline ARCInstKind getAttachedARCFunctionKind(const CallBase *CB) {
61 Optional<Function *> Fn = getAttachedARCFunction(CB);
62 if (!Fn)
63 return ARCInstKind::None;
64 auto FnClass = GetFunctionClass(*Fn);
65 assert(isRetainOrClaimRV(FnClass) && "unexpected ARC runtime function");
66 return FnClass;
67}
68
69} // end namespace objcarc
70} // end namespace llvm
71
72#endif
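To make the intended use of these helpers concrete, here is a small self-contained example; the IR string and the names @callee and @caller are invented for illustration, while the bundle tag and the llvm.objc.retainAutoreleasedReturnValue intrinsic are the names the header above works with:

#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  const char *IR = R"IR(
    declare ptr @llvm.objc.retainAutoreleasedReturnValue(ptr)
    declare ptr @callee()
    define ptr @caller() {
      %r = call ptr @callee() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
      ret ptr %r
    }
  )IR";

  llvm::LLVMContext Ctx;
  llvm::SMDiagnostic Err;
  std::unique_ptr<llvm::Module> M = llvm::parseAssemblyString(IR, Err, Ctx);
  if (!M) {
    Err.print("attachedcall-example", llvm::errs());
    return 1;
  }

  // The call to @callee is the first instruction of @caller's entry block.
  auto *CB = llvm::cast<llvm::CallBase>(
      &M->getFunction("caller")->getEntryBlock().front());

  llvm::outs() << "hasAttachedCallOpBundle: "
               << (llvm::objcarc::hasAttachedCallOpBundle(CB) ? "yes" : "no")
               << "\n";
  llvm::outs() << "attached function kind is RetainRV: "
               << (llvm::objcarc::getAttachedARCFunctionKind(CB) ==
                           llvm::objcarc::ARCInstKind::RetainRV
                       ? "yes"
                       : "no")
               << "\n";
  return 0;
}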