Bug Summary

File: /build/source/clang/lib/CodeGen/CGCall.cpp
Warning: line 981, column 27
The left operand of '*' is a garbage value

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name CGCall.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -relaxed-aliasing -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/source/build-llvm -resource-dir /usr/lib/llvm-16/lib/clang/16 -I tools/clang/lib/CodeGen -I /build/source/clang/lib/CodeGen -I /build/source/clang/include -I tools/clang/include -I include -I /build/source/llvm/include -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/source/build-llvm=build-llvm -fmacro-prefix-map=/build/source/= -fcoverage-prefix-map=/build/source/build-llvm=build-llvm -fcoverage-prefix-map=/build/source/= -source-date-epoch 1670191760 -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/source/build-llvm -fdebug-prefix-map=/build/source/build-llvm=build-llvm -fdebug-prefix-map=/build/source/= -fdebug-prefix-map=/build/source/build-llvm=build-llvm -fdebug-prefix-map=/build/source/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-12-05-012027-15999-1 -x c++ /build/source/clang/lib/CodeGen/CGCall.cpp

/build/source/clang/lib/CodeGen/CGCall.cpp

1//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// These classes wrap the information about a call or function
10// definition used to handle ABI compliancy.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CGCall.h"
15#include "ABIInfo.h"
16#include "CGBlocks.h"
17#include "CGCXXABI.h"
18#include "CGCleanup.h"
19#include "CGRecordLayout.h"
20#include "CodeGenFunction.h"
21#include "CodeGenModule.h"
22#include "TargetInfo.h"
23#include "clang/AST/Attr.h"
24#include "clang/AST/Decl.h"
25#include "clang/AST/DeclCXX.h"
26#include "clang/AST/DeclObjC.h"
27#include "clang/Basic/CodeGenOptions.h"
28#include "clang/Basic/TargetBuiltins.h"
29#include "clang/Basic/TargetInfo.h"
30#include "clang/CodeGen/CGFunctionInfo.h"
31#include "clang/CodeGen/SwiftCallingConv.h"
32#include "llvm/ADT/StringExtras.h"
33#include "llvm/Analysis/ValueTracking.h"
34#include "llvm/IR/Assumptions.h"
35#include "llvm/IR/Attributes.h"
36#include "llvm/IR/CallingConv.h"
37#include "llvm/IR/DataLayout.h"
38#include "llvm/IR/InlineAsm.h"
39#include "llvm/IR/IntrinsicInst.h"
40#include "llvm/IR/Intrinsics.h"
41#include "llvm/IR/Type.h"
42#include "llvm/Transforms/Utils/Local.h"
43#include <optional>
44using namespace clang;
45using namespace CodeGen;
46
47/***/
48
49unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
50 switch (CC) {
51 default: return llvm::CallingConv::C;
52 case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
53 case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
54 case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
55 case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
56 case CC_Win64: return llvm::CallingConv::Win64;
57 case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
58 case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
59 case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
60 case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
61 // TODO: Add support for __pascal to LLVM.
62 case CC_X86Pascal: return llvm::CallingConv::C;
63 // TODO: Add support for __vectorcall to LLVM.
64 case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
65 case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
66 case CC_AArch64SVEPCS: return llvm::CallingConv::AArch64_SVE_VectorCall;
67 case CC_AMDGPUKernelCall: return llvm::CallingConv::AMDGPU_KERNEL;
68 case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
69 case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
70 case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
71 case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
72 case CC_Swift: return llvm::CallingConv::Swift;
73 case CC_SwiftAsync: return llvm::CallingConv::SwiftTail;
74 }
75}
76
77/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
78/// qualification. Either or both of RD and MD may be null. A null RD indicates
79/// that there is no meaningful 'this' type, and a null MD can occur when
80/// calling a method pointer.
81CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
82 const CXXMethodDecl *MD) {
83 QualType RecTy;
84 if (RD)
85 RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
86 else
87 RecTy = Context.VoidTy;
88
89 if (MD)
90 RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace());
91 return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
92}
93
94/// Returns the canonical formal type of the given C++ method.
95static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
96 return MD->getType()->getCanonicalTypeUnqualified()
97 .getAs<FunctionProtoType>();
98}
99
100/// Returns the "extra-canonicalized" return type, which discards
101/// qualifiers on the return type. Codegen doesn't care about them,
102/// and it makes ABI code a little easier to be able to assume that
103/// all parameter and return types are top-level unqualified.
104static CanQualType GetReturnType(QualType RetTy) {
105 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
106}
107
108/// Arrange the argument and result information for a value of the given
109/// unprototyped freestanding function type.
110const CGFunctionInfo &
111CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
112 // When translating an unprototyped function type, always use a
113 // variadic type.
114 return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
115 /*instanceMethod=*/false,
116 /*chainCall=*/false, std::nullopt,
117 FTNP->getExtInfo(), {}, RequiredArgs(0));
118}
119
120static void addExtParameterInfosForCall(
121 llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
122 const FunctionProtoType *proto,
123 unsigned prefixArgs,
124 unsigned totalArgs) {
125 assert(proto->hasExtParameterInfos());
126 assert(paramInfos.size() <= prefixArgs);
127 assert(proto->getNumParams() + prefixArgs <= totalArgs);
128
129 paramInfos.reserve(totalArgs);
130
131 // Add default infos for any prefix args that don't already have infos.
132 paramInfos.resize(prefixArgs);
133
134 // Add infos for the prototype.
135 for (const auto &ParamInfo : proto->getExtParameterInfos()) {
136 paramInfos.push_back(ParamInfo);
137 // pass_object_size params have no parameter info.
138 if (ParamInfo.hasPassObjectSize())
139 paramInfos.emplace_back();
140 }
141
142 assert(paramInfos.size() <= totalArgs &&
143        "Did we forget to insert pass_object_size args?");
144 // Add default infos for the variadic and/or suffix arguments.
145 paramInfos.resize(totalArgs);
146}
147
148/// Adds the formal parameters in FPT to the given prefix. If any parameter in
149/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
150static void appendParameterTypes(const CodeGenTypes &CGT,
151 SmallVectorImpl<CanQualType> &prefix,
152 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
153 CanQual<FunctionProtoType> FPT) {
154 // Fast path: don't touch param info if we don't need to.
155 if (!FPT->hasExtParameterInfos()) {
156 assert(paramInfos.empty() &&
157        "We have paramInfos, but the prototype doesn't?");
158 prefix.append(FPT->param_type_begin(), FPT->param_type_end());
159 return;
160 }
161
162 unsigned PrefixSize = prefix.size();
163 // In the vast majority of cases, we'll have precisely FPT->getNumParams()
164 // parameters; the only thing that can change this is the presence of
165 // pass_object_size. So, we preallocate for the common case.
166 prefix.reserve(prefix.size() + FPT->getNumParams());
167
168 auto ExtInfos = FPT->getExtParameterInfos();
169 assert(ExtInfos.size() == FPT->getNumParams());
170 for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
171 prefix.push_back(FPT->getParamType(I));
172 if (ExtInfos[I].hasPassObjectSize())
173 prefix.push_back(CGT.getContext().getSizeType());
174 }
175
176 addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
177 prefix.size());
178}
179
180/// Arrange the LLVM function layout for a value of the given function
181/// type, on top of any implicit parameters already stored.
182static const CGFunctionInfo &
183arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
184 SmallVectorImpl<CanQualType> &prefix,
185 CanQual<FunctionProtoType> FTP) {
186 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
187 RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
188 // FIXME: Kill copy.
189 appendParameterTypes(CGT, prefix, paramInfos, FTP);
190 CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
191
192 return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
193 /*chainCall=*/false, prefix,
194 FTP->getExtInfo(), paramInfos,
195 Required);
196}
197
198/// Arrange the argument and result information for a value of the
199/// given freestanding function type.
200const CGFunctionInfo &
201CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
202 SmallVector<CanQualType, 16> argTypes;
203 return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
204 FTP);
205}
206
207static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
208 bool IsWindows) {
209 // Set the appropriate calling convention for the Function.
210 if (D->hasAttr<StdCallAttr>())
211 return CC_X86StdCall;
212
213 if (D->hasAttr<FastCallAttr>())
214 return CC_X86FastCall;
215
216 if (D->hasAttr<RegCallAttr>())
217 return CC_X86RegCall;
218
219 if (D->hasAttr<ThisCallAttr>())
220 return CC_X86ThisCall;
221
222 if (D->hasAttr<VectorCallAttr>())
223 return CC_X86VectorCall;
224
225 if (D->hasAttr<PascalAttr>())
226 return CC_X86Pascal;
227
228 if (PcsAttr *PCS = D->getAttr<PcsAttr>())
229 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
230
231 if (D->hasAttr<AArch64VectorPcsAttr>())
232 return CC_AArch64VectorCall;
233
234 if (D->hasAttr<AArch64SVEPcsAttr>())
235 return CC_AArch64SVEPCS;
236
237 if (D->hasAttr<AMDGPUKernelCallAttr>())
238 return CC_AMDGPUKernelCall;
239
240 if (D->hasAttr<IntelOclBiccAttr>())
241 return CC_IntelOclBicc;
242
243 if (D->hasAttr<MSABIAttr>())
244 return IsWindows ? CC_C : CC_Win64;
245
246 if (D->hasAttr<SysVABIAttr>())
247 return IsWindows ? CC_X86_64SysV : CC_C;
248
249 if (D->hasAttr<PreserveMostAttr>())
250 return CC_PreserveMost;
251
252 if (D->hasAttr<PreserveAllAttr>())
253 return CC_PreserveAll;
254
255 return CC_C;
256}
257
258/// Arrange the argument and result information for a call to an
259/// unknown C++ non-static member function of the given abstract type.
260/// (A null RD means we don't have any meaningful "this" argument type,
261/// so fall back to a generic pointer type).
262/// The member function must be an ordinary function, i.e. not a
263/// constructor or destructor.
264const CGFunctionInfo &
265CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
266 const FunctionProtoType *FTP,
267 const CXXMethodDecl *MD) {
268 SmallVector<CanQualType, 16> argTypes;
269
270 // Add the 'this' pointer.
271 argTypes.push_back(DeriveThisType(RD, MD));
272
273 return ::arrangeLLVMFunctionInfo(
274 *this, true, argTypes,
275 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
276}
277
278/// Set calling convention for CUDA/HIP kernel.
279static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
280 const FunctionDecl *FD) {
281 if (FD->hasAttr<CUDAGlobalAttr>()) {
282 const FunctionType *FT = FTy->getAs<FunctionType>();
283 CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
284 FTy = FT->getCanonicalTypeUnqualified();
285 }
286}
287
288/// Arrange the argument and result information for a declaration or
289/// definition of the given C++ non-static member function. The
290/// member function must be an ordinary function, i.e. not a
291/// constructor or destructor.
292const CGFunctionInfo &
293CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
294 assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
295 assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
296
297 CanQualType FT = GetFormalType(MD).getAs<Type>();
298 setCUDAKernelCallingConvention(FT, CGM, MD);
299 auto prototype = FT.getAs<FunctionProtoType>();
300
301 if (MD->isInstance()) {
302 // The abstract case is perfectly fine.
303 const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
304 return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
305 }
306
307 return arrangeFreeFunctionType(prototype);
308}
309
310bool CodeGenTypes::inheritingCtorHasParams(
311 const InheritedConstructor &Inherited, CXXCtorType Type) {
312 // Parameters are unnecessary if we're constructing a base class subobject
313 // and the inherited constructor lives in a virtual base.
314 return Type == Ctor_Complete ||
315 !Inherited.getShadowDecl()->constructsVirtualBase() ||
316 !Target.getCXXABI().hasConstructorVariants();
317}
318
319const CGFunctionInfo &
320CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
321 auto *MD = cast<CXXMethodDecl>(GD.getDecl());
322
323 SmallVector<CanQualType, 16> argTypes;
324 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
325 argTypes.push_back(DeriveThisType(MD->getParent(), MD));
326
327 bool PassParams = true;
328
329 if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
330 // A base class inheriting constructor doesn't get forwarded arguments
331 // needed to construct a virtual base (or base class thereof).
332 if (auto Inherited = CD->getInheritedConstructor())
333 PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
334 }
335
336 CanQual<FunctionProtoType> FTP = GetFormalType(MD);
337
338 // Add the formal parameters.
339 if (PassParams)
340 appendParameterTypes(*this, argTypes, paramInfos, FTP);
341
342 CGCXXABI::AddedStructorArgCounts AddedArgs =
343 TheCXXABI.buildStructorSignature(GD, argTypes);
344 if (!paramInfos.empty()) {
345 // Note: prefix implies after the first param.
346 if (AddedArgs.Prefix)
347 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
348 FunctionProtoType::ExtParameterInfo{});
349 if (AddedArgs.Suffix)
350 paramInfos.append(AddedArgs.Suffix,
351 FunctionProtoType::ExtParameterInfo{});
352 }
353
354 RequiredArgs required =
355 (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
356 : RequiredArgs::All);
357
358 FunctionType::ExtInfo extInfo = FTP->getExtInfo();
359 CanQualType resultType = TheCXXABI.HasThisReturn(GD)
360 ? argTypes.front()
361 : TheCXXABI.hasMostDerivedReturn(GD)
362 ? CGM.getContext().VoidPtrTy
363 : Context.VoidTy;
364 return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
365 /*chainCall=*/false, argTypes, extInfo,
366 paramInfos, required);
367}
368
369static SmallVector<CanQualType, 16>
370getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
371 SmallVector<CanQualType, 16> argTypes;
372 for (auto &arg : args)
373 argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
374 return argTypes;
375}
376
377static SmallVector<CanQualType, 16>
378getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
379 SmallVector<CanQualType, 16> argTypes;
380 for (auto &arg : args)
381 argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
382 return argTypes;
383}
384
385static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
386getExtParameterInfosForCall(const FunctionProtoType *proto,
387 unsigned prefixArgs, unsigned totalArgs) {
388 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
389 if (proto->hasExtParameterInfos()) {
390 addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
391 }
392 return result;
393}
394
395/// Arrange a call to a C++ method, passing the given arguments.
396///
397/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
398/// parameter.
399/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
400/// args.
401/// PassProtoArgs indicates whether `args` has args for the parameters in the
402/// given CXXConstructorDecl.
403const CGFunctionInfo &
404CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
405 const CXXConstructorDecl *D,
406 CXXCtorType CtorKind,
407 unsigned ExtraPrefixArgs,
408 unsigned ExtraSuffixArgs,
409 bool PassProtoArgs) {
410 // FIXME: Kill copy.
411 SmallVector<CanQualType, 16> ArgTypes;
412 for (const auto &Arg : args)
413 ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
414
415 // +1 for implicit this, which should always be args[0].
416 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
417
418 CanQual<FunctionProtoType> FPT = GetFormalType(D);
419 RequiredArgs Required = PassProtoArgs
420 ? RequiredArgs::forPrototypePlus(
421 FPT, TotalPrefixArgs + ExtraSuffixArgs)
422 : RequiredArgs::All;
423
424 GlobalDecl GD(D, CtorKind);
425 CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
426 ? ArgTypes.front()
427 : TheCXXABI.hasMostDerivedReturn(GD)
428 ? CGM.getContext().VoidPtrTy
429 : Context.VoidTy;
430
431 FunctionType::ExtInfo Info = FPT->getExtInfo();
432 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
433 // If the prototype args are elided, we should only have ABI-specific args,
434 // which never have param info.
435 if (PassProtoArgs && FPT->hasExtParameterInfos()) {
436 // ABI-specific suffix arguments are treated the same as variadic arguments.
437 addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
438 ArgTypes.size());
439 }
440 return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
441 /*chainCall=*/false, ArgTypes, Info,
442 ParamInfos, Required);
443}
444
445/// Arrange the argument and result information for the declaration or
446/// definition of the given function.
447const CGFunctionInfo &
448CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
449 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
450 if (MD->isInstance())
451 return arrangeCXXMethodDeclaration(MD);
452
453 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
454
455 assert(isa<FunctionType>(FTy));
456 setCUDAKernelCallingConvention(FTy, CGM, FD);
457
458 // When declaring a function without a prototype, always use a
459 // non-variadic type.
460 if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
461 return arrangeLLVMFunctionInfo(
462 noProto->getReturnType(), /*instanceMethod=*/false,
463 /*chainCall=*/false, std::nullopt, noProto->getExtInfo(), {},
464 RequiredArgs::All);
465 }
466
467 return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
468}
469
470/// Arrange the argument and result information for the declaration or
471/// definition of an Objective-C method.
472const CGFunctionInfo &
473CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
474 // It happens that this is the same as a call with no optional
475 // arguments, except also using the formal 'self' type.
476 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
477}
478
479/// Arrange the argument and result information for the function type
480/// through which to perform a send to the given Objective-C method,
481/// using the given receiver type. The receiver type is not always
482/// the 'self' type of the method or even an Objective-C pointer type.
483/// This is *not* the right method for actually performing such a
484/// message send, due to the possibility of optional arguments.
485const CGFunctionInfo &
486CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
487 QualType receiverType) {
488 SmallVector<CanQualType, 16> argTys;
489 SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(
490 MD->isDirectMethod() ? 1 : 2);
491 argTys.push_back(Context.getCanonicalParamType(receiverType));
492 if (!MD->isDirectMethod())
493 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
494 // FIXME: Kill copy?
495 for (const auto *I : MD->parameters()) {
496 argTys.push_back(Context.getCanonicalParamType(I->getType()));
497 auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
498 I->hasAttr<NoEscapeAttr>());
499 extParamInfos.push_back(extParamInfo);
500 }
501
502 FunctionType::ExtInfo einfo;
503 bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
504 einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
505
506 if (getContext().getLangOpts().ObjCAutoRefCount &&
507 MD->hasAttr<NSReturnsRetainedAttr>())
508 einfo = einfo.withProducesResult(true);
509
510 RequiredArgs required =
511 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
512
513 return arrangeLLVMFunctionInfo(
514 GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
515 /*chainCall=*/false, argTys, einfo, extParamInfos, required);
516}
517
518const CGFunctionInfo &
519CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
520 const CallArgList &args) {
521 auto argTypes = getArgTypesForCall(Context, args);
522 FunctionType::ExtInfo einfo;
523
524 return arrangeLLVMFunctionInfo(
525 GetReturnType(returnType), /*instanceMethod=*/false,
526 /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
527}
528
529const CGFunctionInfo &
530CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
531 // FIXME: Do we need to handle ObjCMethodDecl?
532 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
533
534 if (isa<CXXConstructorDecl>(GD.getDecl()) ||
535 isa<CXXDestructorDecl>(GD.getDecl()))
536 return arrangeCXXStructorDeclaration(GD);
537
538 return arrangeFunctionDeclaration(FD);
539}
540
541/// Arrange a thunk that takes 'this' as the first parameter followed by
542/// varargs. Return a void pointer, regardless of the actual return type.
543/// The body of the thunk will end in a musttail call to a function of the
544/// correct type, and the caller will bitcast the function to the correct
545/// prototype.
546const CGFunctionInfo &
547CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
548 assert(MD->isVirtual() && "only methods have thunks");
549 CanQual<FunctionProtoType> FTP = GetFormalType(MD);
550 CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
551 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
552 /*chainCall=*/false, ArgTys,
553 FTP->getExtInfo(), {}, RequiredArgs(1));
554}
555
556const CGFunctionInfo &
557CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
558 CXXCtorType CT) {
559 assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
560
561 CanQual<FunctionProtoType> FTP = GetFormalType(CD);
562 SmallVector<CanQualType, 2> ArgTys;
563 const CXXRecordDecl *RD = CD->getParent();
564 ArgTys.push_back(DeriveThisType(RD, CD));
565 if (CT == Ctor_CopyingClosure)
566 ArgTys.push_back(*FTP->param_type_begin());
567 if (RD->getNumVBases() > 0)
568 ArgTys.push_back(Context.IntTy);
569 CallingConv CC = Context.getDefaultCallingConvention(
570 /*IsVariadic=*/false, /*IsCXXMethod=*/true);
571 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
572 /*chainCall=*/false, ArgTys,
573 FunctionType::ExtInfo(CC), {},
574 RequiredArgs::All);
575}
576
577/// Arrange a call as unto a free function, except possibly with an
578/// additional number of formal parameters considered required.
579static const CGFunctionInfo &
580arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
581 CodeGenModule &CGM,
582 const CallArgList &args,
583 const FunctionType *fnType,
584 unsigned numExtraRequiredArgs,
585 bool chainCall) {
586 assert(args.size() >= numExtraRequiredArgs);
587
588 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
589
590 // In most cases, there are no optional arguments.
591 RequiredArgs required = RequiredArgs::All;
592
593 // If we have a variadic prototype, the required arguments are the
594 // extra prefix plus the arguments in the prototype.
595 if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
596 if (proto->isVariadic())
597 required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);
598
599 if (proto->hasExtParameterInfos())
600 addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
601 args.size());
602
603 // If we don't have a prototype at all, but we're supposed to
604 // explicitly use the variadic convention for unprototyped calls,
605 // treat all of the arguments as required but preserve the nominal
606 // possibility of variadics.
607 } else if (CGM.getTargetCodeGenInfo()
608 .isNoProtoCallVariadic(args,
609 cast<FunctionNoProtoType>(fnType))) {
610 required = RequiredArgs(args.size());
611 }
612
613 // FIXME: Kill copy.
614 SmallVector<CanQualType, 16> argTypes;
615 for (const auto &arg : args)
616 argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
617 return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
618 /*instanceMethod=*/false, chainCall,
619 argTypes, fnType->getExtInfo(), paramInfos,
620 required);
621}
622
623/// Figure out the rules for calling a function with the given formal
624/// type using the given arguments. The arguments are necessary
625/// because the function might be unprototyped, in which case it's
626/// target-dependent in crazy ways.
627const CGFunctionInfo &
628CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
629 const FunctionType *fnType,
630 bool chainCall) {
631 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
632 chainCall ? 1 : 0, chainCall);
633}
634
635/// A block function is essentially a free function with an
636/// extra implicit argument.
637const CGFunctionInfo &
638CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
639 const FunctionType *fnType) {
640 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
641 /*chainCall=*/false);
642}
643
644const CGFunctionInfo &
645CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
646 const FunctionArgList &params) {
647 auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
648 auto argTypes = getArgTypesForDeclaration(Context, params);
649
650 return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
651 /*instanceMethod*/ false, /*chainCall*/ false,
652 argTypes, proto->getExtInfo(), paramInfos,
653 RequiredArgs::forPrototypePlus(proto, 1));
654}
655
656const CGFunctionInfo &
657CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
658 const CallArgList &args) {
659 // FIXME: Kill copy.
660 SmallVector<CanQualType, 16> argTypes;
661 for (const auto &Arg : args)
662 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
663 return arrangeLLVMFunctionInfo(
664 GetReturnType(resultType), /*instanceMethod=*/false,
665 /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
666 /*paramInfos=*/ {}, RequiredArgs::All);
667}
668
669const CGFunctionInfo &
670CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
671 const FunctionArgList &args) {
672 auto argTypes = getArgTypesForDeclaration(Context, args);
673
674 return arrangeLLVMFunctionInfo(
675 GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
676 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
677}
678
679const CGFunctionInfo &
680CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
681 ArrayRef<CanQualType> argTypes) {
682 return arrangeLLVMFunctionInfo(
683 resultType, /*instanceMethod=*/false, /*chainCall=*/false,
684 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
685}
686
687/// Arrange a call to a C++ method, passing the given arguments.
688///
689/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
690/// does not count `this`.
691const CGFunctionInfo &
692CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
693 const FunctionProtoType *proto,
694 RequiredArgs required,
695 unsigned numPrefixArgs) {
696 assert(numPrefixArgs + 1 <= args.size() &&
697        "Emitting a call with less args than the required prefix?");
698 // Add one to account for `this`. It's a bit awkward here, but we don't count
699 // `this` in similar places elsewhere.
700 auto paramInfos =
701 getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
702
703 // FIXME: Kill copy.
704 auto argTypes = getArgTypesForCall(Context, args);
705
706 FunctionType::ExtInfo info = proto->getExtInfo();
707 return arrangeLLVMFunctionInfo(
708 GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
709 /*chainCall=*/false, argTypes, info, paramInfos, required);
710}
711
712const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
713 return arrangeLLVMFunctionInfo(
714 getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
715 std::nullopt, FunctionType::ExtInfo(), {}, RequiredArgs::All);
716}
717
718const CGFunctionInfo &
719CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
720 const CallArgList &args) {
721 assert(signature.arg_size() <= args.size());
722 if (signature.arg_size() == args.size())
723 return signature;
724
725 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
726 auto sigParamInfos = signature.getExtParameterInfos();
727 if (!sigParamInfos.empty()) {
728 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
729 paramInfos.resize(args.size());
730 }
731
732 auto argTypes = getArgTypesForCall(Context, args);
733
734 assert(signature.getRequiredArgs().allowsOptionalArgs());
735 return arrangeLLVMFunctionInfo(signature.getReturnType(),
736 signature.isInstanceMethod(),
737 signature.isChainCall(),
738 argTypes,
739 signature.getExtInfo(),
740 paramInfos,
741 signature.getRequiredArgs());
742}
743
744namespace clang {
745namespace CodeGen {
746void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
747}
748}
749
750/// Arrange the argument and result information for an abstract value
751/// of a given function type. This is the method which all of the
752/// above functions ultimately defer to.
753const CGFunctionInfo &
754CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
755 bool instanceMethod,
756 bool chainCall,
757 ArrayRef<CanQualType> argTypes,
758 FunctionType::ExtInfo info,
759 ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
760 RequiredArgs required) {
761 assert(llvm::all_of(argTypes,
762                      [](CanQualType T) { return T.isCanonicalAsParam(); }));
763
764 // Lookup or create unique function info.
765 llvm::FoldingSetNodeID ID;
766 CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
767 required, resultType, argTypes);
768
769 void *insertPos = nullptr;
770 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
771 if (FI)
772 return *FI;
773
774 unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
775
776 // Construct the function info. We co-allocate the ArgInfos.
777 FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
778 paramInfos, resultType, argTypes, required);
779 FunctionInfos.InsertNode(FI, insertPos);
780
781 bool inserted = FunctionsBeingProcessed.insert(FI).second;
782 (void)inserted;
783 assert(inserted && "Recursively being processed?");
784
785 // Compute ABI information.
786 if (CC == llvm::CallingConv::SPIR_KERNEL) {
787 // Force target independent argument handling for the host visible
788 // kernel functions.
789 computeSPIRKernelABIInfo(CGM, *FI);
790 } else if (info.getCC() == CC_Swift || info.getCC() == CC_SwiftAsync) {
791 swiftcall::computeABIInfo(CGM, *FI);
792 } else {
793 getABIInfo().computeInfo(*FI);
794 }
795
796 // Loop over all of the computed argument and return value info. If any of
797 // them are direct or extend without a specified coerce type, specify the
798 // default now.
799 ABIArgInfo &retInfo = FI->getReturnInfo();
800 if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
801 retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
802
803 for (auto &I : FI->arguments())
804 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
805 I.info.setCoerceToType(ConvertType(I.type));
806
807 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
808 assert(erased && "Not in set?");
809
810 return *FI;
811}
812
813CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
814 bool instanceMethod,
815 bool chainCall,
816 const FunctionType::ExtInfo &info,
817 ArrayRef<ExtParameterInfo> paramInfos,
818 CanQualType resultType,
819 ArrayRef<CanQualType> argTypes,
820 RequiredArgs required) {
821 assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
822 assert(!required.allowsOptionalArgs() ||
823        required.getNumRequiredArgs() <= argTypes.size());
824
825 void *buffer =
826 operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
827 argTypes.size() + 1, paramInfos.size()));
828
829 CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
830 FI->CallingConvention = llvmCC;
831 FI->EffectiveCallingConvention = llvmCC;
832 FI->ASTCallingConvention = info.getCC();
833 FI->InstanceMethod = instanceMethod;
834 FI->ChainCall = chainCall;
835 FI->CmseNSCall = info.getCmseNSCall();
836 FI->NoReturn = info.getNoReturn();
837 FI->ReturnsRetained = info.getProducesResult();
838 FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
839 FI->NoCfCheck = info.getNoCfCheck();
840 FI->Required = required;
841 FI->HasRegParm = info.getHasRegParm();
842 FI->RegParm = info.getRegParm();
843 FI->ArgStruct = nullptr;
844 FI->ArgStructAlign = 0;
845 FI->NumArgs = argTypes.size();
846 FI->HasExtParameterInfos = !paramInfos.empty();
847 FI->getArgsBuffer()[0].type = resultType;
848 FI->MaxVectorWidth = 0;
849 for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
850 FI->getArgsBuffer()[i + 1].type = argTypes[i];
851 for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
852 FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
853 return FI;
854}
855
856/***/
857
858namespace {
859// ABIArgInfo::Expand implementation.
860
861// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
862struct TypeExpansion {
863 enum TypeExpansionKind {
864 // Elements of constant arrays are expanded recursively.
865 TEK_ConstantArray,
866 // Record fields are expanded recursively (but if record is a union, only
867 // the field with the largest size is expanded).
868 TEK_Record,
869 // For complex types, real and imaginary parts are expanded recursively.
870 TEK_Complex,
871 // All other types are not expandable.
872 TEK_None
873 };
874
875 const TypeExpansionKind Kind;
876
877 TypeExpansion(TypeExpansionKind K) : Kind(K) {}
878 virtual ~TypeExpansion() {}
879};
880
881struct ConstantArrayExpansion : TypeExpansion {
882 QualType EltTy;
883 uint64_t NumElts;
884
885 ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
886 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
887 static bool classof(const TypeExpansion *TE) {
888 return TE->Kind == TEK_ConstantArray;
889 }
890};
891
892struct RecordExpansion : TypeExpansion {
893 SmallVector<const CXXBaseSpecifier *, 1> Bases;
894
895 SmallVector<const FieldDecl *, 1> Fields;
896
897 RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
898 SmallVector<const FieldDecl *, 1> &&Fields)
899 : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
900 Fields(std::move(Fields)) {}
901 static bool classof(const TypeExpansion *TE) {
902 return TE->Kind == TEK_Record;
903 }
904};
905
906struct ComplexExpansion : TypeExpansion {
907 QualType EltTy;
908
909 ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
910 static bool classof(const TypeExpansion *TE) {
911 return TE->Kind == TEK_Complex;
912 }
913};
914
915struct NoExpansion : TypeExpansion {
916 NoExpansion() : TypeExpansion(TEK_None) {}
917 static bool classof(const TypeExpansion *TE) {
918 return TE->Kind == TEK_None;
919 }
920};
921} // namespace
922
923static std::unique_ptr<TypeExpansion>
924getTypeExpansion(QualType Ty, const ASTContext &Context) {
925 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
        17.1: 'AT' is null
        18: Taking false branch
926 return std::make_unique<ConstantArrayExpansion>(
927 AT->getElementType(), AT->getSize().getZExtValue());
928 }
929 if (const RecordType *RT = Ty->getAs<RecordType>()) {
        19: Assuming the object is not a 'const RecordType *'
        19.1: 'RT' is null
        20: Taking false branch
930 SmallVector<const CXXBaseSpecifier *, 1> Bases;
931 SmallVector<const FieldDecl *, 1> Fields;
932 const RecordDecl *RD = RT->getDecl();
933 assert(!RD->hasFlexibleArrayMember() &&
934        "Cannot expand structure with flexible array.");
935 if (RD->isUnion()) {
936 // Unions can be here only in degenerative cases - all the fields are same
937 // after flattening. Thus we have to use the "largest" field.
938 const FieldDecl *LargestFD = nullptr;
939 CharUnits UnionSize = CharUnits::Zero();
940
941 for (const auto *FD : RD->fields()) {
942 if (FD->isZeroLengthBitField(Context))
943 continue;
944 assert(!FD->isBitField() &&
945        "Cannot expand structure with bit-field members.");
946 CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
947 if (UnionSize < FieldSize) {
948 UnionSize = FieldSize;
949 LargestFD = FD;
950 }
951 }
952 if (LargestFD)
953 Fields.push_back(LargestFD);
954 } else {
955 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
956 assert(!CXXRD->isDynamicClass() &&
957        "cannot expand vtable pointers in dynamic classes");
958 llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
959 }
960
961 for (const auto *FD : RD->fields()) {
962 if (FD->isZeroLengthBitField(Context))
963 continue;
964 assert(!FD->isBitField() &&
965        "Cannot expand structure with bit-field members.");
966 Fields.push_back(FD);
967 }
968 }
969 return std::make_unique<RecordExpansion>(std::move(Bases),
970 std::move(Fields));
971 }
972 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
        21: Assuming the object is not a 'const class clang::ComplexType *'
        21.1: 'CT' is null
        22: Taking false branch
973 return std::make_unique<ComplexExpansion>(CT->getElementType());
974 }
975 return std::make_unique<NoExpansion>();
        23: Calling 'make_unique<(anonymous namespace)::NoExpansion, >'
        31: Returning from 'make_unique<(anonymous namespace)::NoExpansion, >'
        32: Calling constructor for 'unique_ptr<(anonymous namespace)::TypeExpansion, std::default_delete<(anonymous namespace)::TypeExpansion>>'
        37: Returning from constructor for 'unique_ptr<(anonymous namespace)::TypeExpansion, std::default_delete<(anonymous namespace)::TypeExpansion>>'
976}
977
978static int getExpansionSize(QualType Ty, const ASTContext &Context) {
979 auto Exp = getTypeExpansion(Ty, Context);
        17: Calling 'getTypeExpansion'
        38: Returning from 'getTypeExpansion'
980 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
        39: Assuming the object is a 'CastReturnType'
        39.1: 'CAExp' is non-null
        40: Taking true branch
981 return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
        41: The left operand of '*' is a garbage value
982 }
983 if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
984 int Res = 0;
985 for (auto BS : RExp->Bases)
986 Res += getExpansionSize(BS->getType(), Context);
987 for (auto FD : RExp->Fields)
988 Res += getExpansionSize(FD->getType(), Context);
989 return Res;
990 }
991 if (isa<ComplexExpansion>(Exp.get()))
992 return 2;
993 assert(isa<NoExpansion>(Exp.get()));
994 return 1;
995}
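
The events above (17-41) describe the path the analyzer followed through getTypeExpansion and getExpansionSize: on the reported path none of the ConstantArrayType, RecordType, or ComplexType branches is taken and a NoExpansion is constructed (events 23-37), yet at line 980 the dyn_cast to ConstantArrayExpansion is assumed to succeed (event 39), so the NumElts read at line 981 belongs to an object that was never constructed as a ConstantArrayExpansion and is modeled as uninitialized (event 41). The reduced, self-contained sketch below reproduces that shape with hypothetical names; it illustrates the reported path only and is not code from CGCall.cpp, nor a claim that the real dyn_cast/classof pair can behave this way.

// --- Reduced sketch of the reported path (hypothetical names, not CGCall.cpp) ---
#include <cstdint>
#include <memory>

struct TypeExpansionSketch {
  enum Kind { TEK_ConstantArray, TEK_None };
  const Kind K;
  explicit TypeExpansionSketch(Kind K) : K(K) {}
  virtual ~TypeExpansionSketch() = default;
};

struct ConstantArrayExpansionSketch : TypeExpansionSketch {
  std::uint64_t NumElts; // written only by this constructor
  explicit ConstantArrayExpansionSketch(std::uint64_t N)
      : TypeExpansionSketch(TEK_ConstantArray), NumElts(N) {}
  static bool classof(const TypeExpansionSketch *TE) {
    return TE->K == TEK_ConstantArray;
  }
};

struct NoExpansionSketch : TypeExpansionSketch {
  NoExpansionSketch() : TypeExpansionSketch(TEK_None) {}
};

// Stand-in for llvm::dyn_cast: succeeds only when classof() reports a match.
template <typename T> static T *dynCastSketch(TypeExpansionSketch *E) {
  return T::classof(E) ? static_cast<T *>(E) : nullptr;
}

// Mirrors getTypeExpansion(): on the path of interest every type check fails
// and a NoExpansionSketch is returned (events 23-37 in the report above).
static std::unique_ptr<TypeExpansionSketch> getExpansionSketch(bool IsArray) {
  if (IsArray)
    return std::make_unique<ConstantArrayExpansionSketch>(4);
  return std::make_unique<NoExpansionSketch>();
}

// Mirrors getExpansionSize(): the report assumes the cast succeeds (event 39)
// while also following the NoExpansionSketch construction; on that combined
// path NumElts was never written, which is the "left operand of '*' is a
// garbage value" diagnostic (event 41).
static int getExpansionSizeSketch(bool IsArray) {
  auto Exp = getExpansionSketch(IsArray);
  if (auto *CAExp = dynCastSketch<ConstantArrayExpansionSketch>(Exp.get()))
    return static_cast<int>(CAExp->NumElts) * getExpansionSizeSketch(false);
  return 1;
}
// --- End of sketch ---

Because ConstantArrayExpansion::classof checks the Kind tag set by its constructor (lines 885-889 above), the two assumptions in the reported path (a NoExpansion was constructed, and the cast to ConstantArrayExpansion succeeded) are mutually exclusive in the real code; whether line 981 is actionable therefore depends on how precisely the analyzer tracks that tag across std::make_unique and std::unique_ptr, which is worth confirming before treating it as a defect.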
996
997void
998CodeGenTypes::getExpandedTypes(QualType Ty,
999 SmallVectorImpl<llvm::Type *>::iterator &TI) {
1000 auto Exp = getTypeExpansion(Ty, Context);
1001 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1002 for (int i = 0, n = CAExp->NumElts; i < n; i++) {
1003 getExpandedTypes(CAExp->EltTy, TI);
1004 }
1005 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1006 for (auto BS : RExp->Bases)
1007 getExpandedTypes(BS->getType(), TI);
1008 for (auto FD : RExp->Fields)
1009 getExpandedTypes(FD->getType(), TI);
1010 } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
1011 llvm::Type *EltTy = ConvertType(CExp->EltTy);
1012 *TI++ = EltTy;
1013 *TI++ = EltTy;
1014 } else {
1015 assert(isa<NoExpansion>(Exp.get()));
1016 *TI++ = ConvertType(Ty);
1017 }
1018}
1019
1020static void forConstantArrayExpansion(CodeGenFunction &CGF,
1021 ConstantArrayExpansion *CAE,
1022 Address BaseAddr,
1023 llvm::function_ref<void(Address)> Fn) {
1024 CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
1025 CharUnits EltAlign =
1026 BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
1027 llvm::Type *EltTy = CGF.ConvertTypeForMem(CAE->EltTy);
1028
1029 for (int i = 0, n = CAE->NumElts; i < n; i++) {
1030 llvm::Value *EltAddr = CGF.Builder.CreateConstGEP2_32(
1031 BaseAddr.getElementType(), BaseAddr.getPointer(), 0, i);
1032 Fn(Address(EltAddr, EltTy, EltAlign));
1033 }
1034}
1035
1036void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
1037 llvm::Function::arg_iterator &AI) {
1038 assert(LV.isSimple() &&
1039        "Unexpected non-simple lvalue during struct expansion.");
1040
1041 auto Exp = getTypeExpansion(Ty, getContext());
1042 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1043 forConstantArrayExpansion(
1044 *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
1045 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1046 ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1047 });
1048 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1049 Address This = LV.getAddress(*this);
1050 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1051 // Perform a single step derived-to-base conversion.
1052 Address Base =
1053 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1054 /*NullCheckValue=*/false, SourceLocation());
1055 LValue SubLV = MakeAddrLValue(Base, BS->getType());
1056
1057 // Recurse onto bases.
1058 ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1059 }
1060 for (auto FD : RExp->Fields) {
1061 // FIXME: What are the right qualifiers here?
1062 LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1063 ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1064 }
1065 } else if (isa<ComplexExpansion>(Exp.get())) {
1066 auto realValue = &*AI++;
1067 auto imagValue = &*AI++;
1068 EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1069 } else {
1070 // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
1071 // primitive store.
1072 assert(isa<NoExpansion>(Exp.get()));
1073 llvm::Value *Arg = &*AI++;
1074 if (LV.isBitField()) {
1075 EmitStoreThroughLValue(RValue::get(Arg), LV);
1076 } else {
1077 // TODO: currently there are some places are inconsistent in what LLVM
1078 // pointer type they use (see D118744). Once clang uses opaque pointers
1079 // all LLVM pointer types will be the same and we can remove this check.
1080 if (Arg->getType()->isPointerTy()) {
1081 Address Addr = LV.getAddress(*this);
1082 Arg = Builder.CreateBitCast(Arg, Addr.getElementType());
1083 }
1084 EmitStoreOfScalar(Arg, LV);
1085 }
1086 }
1087}
1088
1089void CodeGenFunction::ExpandTypeToArgs(
1090 QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1091 SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1092 auto Exp = getTypeExpansion(Ty, getContext());
1093 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1094 Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
1095 : Arg.getKnownRValue().getAggregateAddress();
1096 forConstantArrayExpansion(
1097 *this, CAExp, Addr, [&](Address EltAddr) {
1098 CallArg EltArg = CallArg(
1099 convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1100 CAExp->EltTy);
1101 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1102 IRCallArgPos);
1103 });
1104 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1105 Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
1106 : Arg.getKnownRValue().getAggregateAddress();
1107 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1108 // Perform a single step derived-to-base conversion.
1109 Address Base =
1110 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1111 /*NullCheckValue=*/false, SourceLocation());
1112 CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1113
1114 // Recurse onto bases.
1115 ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1116 IRCallArgPos);
1117 }
1118
1119 LValue LV = MakeAddrLValue(This, Ty);
1120 for (auto FD : RExp->Fields) {
1121 CallArg FldArg =
1122 CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1123 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1124 IRCallArgPos);
1125 }
1126 } else if (isa<ComplexExpansion>(Exp.get())) {
1127 ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
1128 IRCallArgs[IRCallArgPos++] = CV.first;
1129 IRCallArgs[IRCallArgPos++] = CV.second;
1130 } else {
1131 assert(isa<NoExpansion>(Exp.get()));
1132 auto RV = Arg.getKnownRValue();
1133 assert(RV.isScalar() &&
1134 "Unexpected non-scalar rvalue during struct expansion.");
1135
1136 // Insert a bitcast as needed.
1137 llvm::Value *V = RV.getScalarVal();
1138 if (IRCallArgPos < IRFuncTy->getNumParams() &&
1139 V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1140 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1141
1142 IRCallArgs[IRCallArgPos++] = V;
1143 }
1144}
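// Illustration (not part of CGCall.cpp; the exact lowering is ABI-dependent):
// for a hypothetical argument of type _Complex double passed with the Expand
// ABI kind, ExpandTypeToArgs above emits two scalar IR arguments, roughly:
//
//   // C source:        void takes_complex(_Complex double z);
//   // Expanded call:    call void @takes_complex(double %z.real, double %z.imag)
//
// ExpandTypeFromArgs is the mirror image in the callee prologue: it consumes
// the same two scalars and stores them back into the local complex lvalue.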
1145
1146/// Create a temporary allocation for the purposes of coercion.
1147static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1148 CharUnits MinAlign,
1149 const Twine &Name = "tmp") {
1150 // Don't use an alignment that's worse than what LLVM would prefer.
1151 auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1152 CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1153
1154 return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
1155}
1156
1157/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1158/// accessing some number of bytes out of it, try to gep into the struct to get
1159/// at its inner goodness. Dive as deep as possible without entering an element
1160/// with an in-memory size smaller than DstSize.
1161static Address
1162EnterStructPointerForCoercedAccess(Address SrcPtr,
1163 llvm::StructType *SrcSTy,
1164 uint64_t DstSize, CodeGenFunction &CGF) {
1165 // We can't dive into a zero-element struct.
1166 if (SrcSTy->getNumElements() == 0) return SrcPtr;
1167
1168 llvm::Type *FirstElt = SrcSTy->getElementType(0);
1169
1170 // If the first elt is at least as large as what we're looking for, or if the
1171 // first element is the same size as the whole struct, we can enter it. The
1172 // comparison must be made on the store size and not the alloca size. Using
1173 // the alloca size may overstate the size of the load.
1174 uint64_t FirstEltSize =
1175 CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1176 if (FirstEltSize < DstSize &&
1177 FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1178 return SrcPtr;
1179
1180 // GEP into the first element.
1181 SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");
1182
1183 // If the first element is a struct, recurse.
1184 llvm::Type *SrcTy = SrcPtr.getElementType();
1185 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1186 return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1187
1188 return SrcPtr;
1189}
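// Illustration (not part of CGCall.cpp; hypothetical types): given
//
//   %struct.Inner = type { i64 }
//   %struct.Outer = type { %struct.Inner, i8 }
//
// and a coerced access of DstSize == 8 bytes through an Outer pointer, the
// recursion above GEPs through Outer's first element and then Inner's first
// element (the "coerce.dive" GEPs), returning an Address whose element type
// is i64, because each first element's store size (8) is not smaller than
// DstSize.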
1190
1191/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1192/// are either integers or pointers. This does a truncation of the value if it
1193/// is too large or a zero extension if it is too small.
1194///
1195/// This behaves as if the value were coerced through memory, so on big-endian
1196/// targets the high bits are preserved in a truncation, while little-endian
1197/// targets preserve the low bits.
1198static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1199 llvm::Type *Ty,
1200 CodeGenFunction &CGF) {
1201 if (Val->getType() == Ty)
1202 return Val;
1203
1204 if (isa<llvm::PointerType>(Val->getType())) {
1205 // If this is Pointer->Pointer avoid conversion to and from int.
1206 if (isa<llvm::PointerType>(Ty))
1207 return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1208
1209 // Convert the pointer to an integer so we can play with its width.
1210 Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1211 }
1212
1213 llvm::Type *DestIntTy = Ty;
1214 if (isa<llvm::PointerType>(DestIntTy))
1215 DestIntTy = CGF.IntPtrTy;
1216
1217 if (Val->getType() != DestIntTy) {
1218 const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1219 if (DL.isBigEndian()) {
1220 // Preserve the high bits on big-endian targets.
1221 // That is what memory coercion does.
1222 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1223 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1224
1225 if (SrcSize > DstSize) {
1226 Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1227 Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1228 } else {
1229 Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1230 Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1231 }
1232 } else {
1233 // Little-endian targets preserve the low bits. No shifts required.
1234 Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1235 }
1236 }
1237
1238 if (isa<llvm::PointerType>(Ty))
1239 Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1240 return Val;
1241}
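// Illustration (not part of CGCall.cpp): the "as if through memory" rule
// above, restated on plain integers. A minimal sketch, assuming a 64-bit
// source and a 32-bit destination:
//
//   #include <cstdint>
//   static uint32_t coerceAsIfThroughMemory(uint64_t V, bool IsBigEndian) {
//     // Big-endian keeps the high bits, little-endian keeps the low bits,
//     // exactly as a store of V followed by a 32-bit load would.
//     return IsBigEndian ? uint32_t(V >> 32) : uint32_t(V);
//   }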
1242
1243
1244
1245/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1246/// a pointer to an object of type \arg Ty, known to be aligned to
1247/// \arg SrcAlign bytes.
1248///
1249/// This safely handles the case when the src type is smaller than the
1250/// destination type; in this situation the values of bits which are not
1251/// present in the src are undefined.
1252static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1253 CodeGenFunction &CGF) {
1254 llvm::Type *SrcTy = Src.getElementType();
1255
1256 // If SrcTy and Ty are the same, just do a load.
1257 if (SrcTy == Ty)
1258 return CGF.Builder.CreateLoad(Src);
1259
1260 llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1261
1262 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1263 Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
1264 DstSize.getFixedSize(), CGF);
1265 SrcTy = Src.getElementType();
1266 }
1267
1268 llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1269
1270 // If the source and destination are integer or pointer types, just do an
1271 // extension or truncation to the desired type.
1272 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1273 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1274 llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1275 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1276 }
1277
1278 // If load is legal, just bitcast the src pointer.
1279 if (!SrcSize.isScalable() && !DstSize.isScalable() &&
1280 SrcSize.getFixedSize() >= DstSize.getFixedSize()) {
1281 // Generally SrcSize is never greater than DstSize, since this means we are
1282 // losing bits. However, this can happen in cases where the structure has
1283 // additional padding, for example due to a user specified alignment.
1284 //
1285 // FIXME: Assert that we aren't truncating non-padding bits when we have access
1286 // to that information.
1287 Src = CGF.Builder.CreateElementBitCast(Src, Ty);
1288 return CGF.Builder.CreateLoad(Src);
1289 }
1290
1291 // If coercing a fixed vector to a scalable vector for ABI compatibility, and
1292 // the types match, use the llvm.vector.insert intrinsic to perform the
1293 // conversion.
1294 if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
1295 if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
1296 // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
1297 // vector, use a vector insert and bitcast the result.
1298 bool NeedsBitcast = false;
1299 auto PredType =
1300 llvm::ScalableVectorType::get(CGF.Builder.getInt1Ty(), 16);
1301 llvm::Type *OrigType = Ty;
1302 if (ScalableDst == PredType &&
1303 FixedSrc->getElementType() == CGF.Builder.getInt8Ty()) {
1304 ScalableDst = llvm::ScalableVectorType::get(CGF.Builder.getInt8Ty(), 2);
1305 NeedsBitcast = true;
1306 }
1307 if (ScalableDst->getElementType() == FixedSrc->getElementType()) {
1308 auto *Load = CGF.Builder.CreateLoad(Src);
1309 auto *UndefVec = llvm::UndefValue::get(ScalableDst);
1310 auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
1311 llvm::Value *Result = CGF.Builder.CreateInsertVector(
1312 ScalableDst, UndefVec, Load, Zero, "castScalableSve");
1313 if (NeedsBitcast)
1314 Result = CGF.Builder.CreateBitCast(Result, OrigType);
1315 return Result;
1316 }
1317 }
1318 }
1319
1320 // Otherwise do coercion through memory. This is stupid, but simple.
1321 Address Tmp =
1322 CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
1323 CGF.Builder.CreateMemCpy(
1324 Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
1325 Src.getAlignment().getAsAlign(),
1326 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize()));
1327 return CGF.Builder.CreateLoad(Tmp);
1328}
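// Illustration (not part of CGCall.cpp; hypothetical types): coercing a
// 12-byte source struct { i32, i32, i32 } into a 16-byte destination such as
// [2 x i64] cannot use the bitcast-and-load path, because the source is
// smaller than the destination. The fallback above memcpy's the 12 known
// bytes into a [2 x i64]-sized temporary and loads that; the remaining 4
// bytes are undefined, as the function comment states.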
1329
1330// Function to store a first-class aggregate into memory. We prefer to
1331// store the elements rather than the aggregate to be more friendly to
1332// fast-isel.
1333// FIXME: Do we need to recurse here?
1334void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
1335 bool DestIsVolatile) {
1336 // Prefer scalar stores to first-class aggregate stores.
1337 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
1338 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1339 Address EltPtr = Builder.CreateStructGEP(Dest, i);
1340 llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
1341 Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1342 }
1343 } else {
1344 Builder.CreateStore(Val, Dest, DestIsVolatile);
1345 }
1346}
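// Illustration (not part of CGCall.cpp): for a first-class aggregate value of
// type { i32, float }, the loop above emits roughly
//
//   %f0 = extractvalue { i32, float } %val, 0
//   store i32 %f0, ptr %dest.0
//   %f1 = extractvalue { i32, float } %val, 1
//   store float %f1, ptr %dest.1
//
// instead of a single aggregate store, which fast-isel handles better.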
1347
1348/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1349/// where the source and destination may have different types. The
1350/// destination is known to be aligned to \arg DstAlign bytes.
1351///
1352/// This safely handles the case when the src type is larger than the
1353/// destination type; the upper bits of the src will be lost.
1354static void CreateCoercedStore(llvm::Value *Src,
1355 Address Dst,
1356 bool DstIsVolatile,
1357 CodeGenFunction &CGF) {
1358 llvm::Type *SrcTy = Src->getType();
1359 llvm::Type *DstTy = Dst.getElementType();
1360 if (SrcTy == DstTy) {
1361 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1362 return;
1363 }
1364
1365 llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1366
1367 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1368 Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
1369 SrcSize.getFixedSize(), CGF);
1370 DstTy = Dst.getElementType();
1371 }
1372
1373 llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
1374 llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
1375 if (SrcPtrTy && DstPtrTy &&
1376 SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
1377 Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
1378 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1379 return;
1380 }
1381
1382 // If the source and destination are integer or pointer types, just do an
1383 // extension or truncation to the desired type.
1384 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1385 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1386 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1387 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1388 return;
1389 }
1390
1391 llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1392
1393 // If store is legal, just bitcast the src pointer.
1394 if (isa<llvm::ScalableVectorType>(SrcTy) ||
1395 isa<llvm::ScalableVectorType>(DstTy) ||
1396 SrcSize.getFixedSize() <= DstSize.getFixedSize()) {
1397 Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1398 CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
1399 } else {
1400 // Otherwise do coercion through memory. This is stupid, but
1401 // simple.
1402
1403 // Generally SrcSize is never greater than DstSize, since this means we are
1404 // losing bits. However, this can happen in cases where the structure has
1405 // additional padding, for example due to a user specified alignment.
1406 //
1407 // FIXME: Assert that we aren't truncating non-padding bits when we have access
1408 // to that information.
1409 Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1410 CGF.Builder.CreateStore(Src, Tmp);
1411 CGF.Builder.CreateMemCpy(
1412 Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
1413 Tmp.getAlignment().getAsAlign(),
1414 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize()));
1415 }
1416}
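// Illustration (not part of CGCall.cpp; hypothetical types): storing an i64
// coerced value into a destination of type { float } can use neither a simple
// element bitcast nor an int<->int conversion, so the fallback above spills
// the i64 to an 8-byte temporary and memcpy's only DstSize (4) bytes into the
// destination, dropping the upper bits as the function comment allows.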
1417
1418static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1419 const ABIArgInfo &info) {
1420 if (unsigned offset = info.getDirectOffset()) {
1421 addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1422 addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1423 CharUnits::fromQuantity(offset));
1424 addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1425 }
1426 return addr;
1427}
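// Illustration (not part of CGCall.cpp; hypothetical ABIArgInfo): with a
// direct offset of 8 and a coerce-to type of i64, the address is first recast
// to an i8 element type, advanced by 8 bytes with an inbounds GEP, and then
// recast to the coerced element type, yielding the slot the coerced value
// lives in.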
1428
1429namespace {
1430
1431/// Encapsulates information about the way function arguments from
1432/// CGFunctionInfo should be passed to the actual LLVM IR function.
1433class ClangToLLVMArgMapping {
1434 static const unsigned InvalidIndex = ~0U;
1435 unsigned InallocaArgNo;
1436 unsigned SRetArgNo;
1437 unsigned TotalIRArgs;
1438
1439 /// Arguments of the LLVM IR function corresponding to a single Clang argument.
1440 struct IRArgs {
1441 unsigned PaddingArgIndex;
1442 // Argument is expanded to IR arguments at positions
1443 // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1444 unsigned FirstArgIndex;
1445 unsigned NumberOfArgs;
1446
1447 IRArgs()
1448 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1449 NumberOfArgs(0) {}
1450 };
1451
1452 SmallVector<IRArgs, 8> ArgInfo;
1453
1454public:
1455 ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1456 bool OnlyRequiredArgs = false)
1457 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1458 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
4.1: 'OnlyRequiredArgs' is false
5: '?' condition is false
1459 construct(Context, FI, OnlyRequiredArgs);
6: Calling 'ClangToLLVMArgMapping::construct'
1460 }
1461
1462 bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1463 unsigned getInallocaArgNo() const {
1464 assert(hasInallocaArg());
1465 return InallocaArgNo;
1466 }
1467
1468 bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1469 unsigned getSRetArgNo() const {
1470 assert(hasSRetArg());
1471 return SRetArgNo;
1472 }
1473
1474 unsigned totalIRArgs() const { return TotalIRArgs; }
1475
1476 bool hasPaddingArg(unsigned ArgNo) const {
1477 assert(ArgNo < ArgInfo.size());
1478 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1479 }
1480 unsigned getPaddingArgNo(unsigned ArgNo) const {
1481 assert(hasPaddingArg(ArgNo));
1482 return ArgInfo[ArgNo].PaddingArgIndex;
1483 }
1484
1485 /// Returns the index of the first IR argument corresponding to ArgNo, and
1486 /// the number of IR arguments it expands to.
1487 std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1488 assert(ArgNo < ArgInfo.size());
1489 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1490 ArgInfo[ArgNo].NumberOfArgs);
1491 }
1492
1493private:
1494 void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1495 bool OnlyRequiredArgs);
1496};
1497
1498void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1499 const CGFunctionInfo &FI,
1500 bool OnlyRequiredArgs) {
1501 unsigned IRArgNo = 0;
1502 bool SwapThisWithSRet = false;
1503 const ABIArgInfo &RetAI = FI.getReturnInfo();
1504
1505 if (RetAI.getKind() == ABIArgInfo::Indirect) {
7: Assuming the condition is false
8: Taking false branch
1506 SwapThisWithSRet = RetAI.isSRetAfterThis();
1507 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1508 }
1509
1510 unsigned ArgNo = 0;
1511 unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
8.1: 'OnlyRequiredArgs' is false
9: '?' condition is false
1512 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
10: Assuming 'ArgNo' is < 'NumArgs'
11: Loop condition is true. Entering loop body
1513 ++I, ++ArgNo) {
1514 assert(I != FI.arg_end());
12: '?' condition is true
1515 QualType ArgType = I->type;
1516 const ABIArgInfo &AI = I->info;
1517 // Collect data about IR arguments corresponding to Clang argument ArgNo.
1518 auto &IRArgs = ArgInfo[ArgNo];
1519
1520 if (AI.getPaddingType())
13: Assuming the condition is false
14: Taking false branch
1521 IRArgs.PaddingArgIndex = IRArgNo++;
1522
1523 switch (AI.getKind()) {
15: Control jumps to 'case Expand:' at line 1547
1524 case ABIArgInfo::Extend:
1525 case ABIArgInfo::Direct: {
1526 // FIXME: handle sseregparm someday...
1527 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1528 if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1529 IRArgs.NumberOfArgs = STy->getNumElements();
1530 } else {
1531 IRArgs.NumberOfArgs = 1;
1532 }
1533 break;
1534 }
1535 case ABIArgInfo::Indirect:
1536 case ABIArgInfo::IndirectAliased:
1537 IRArgs.NumberOfArgs = 1;
1538 break;
1539 case ABIArgInfo::Ignore:
1540 case ABIArgInfo::InAlloca:
1541 // ignore and inalloca don't have matching LLVM parameters.
1542 IRArgs.NumberOfArgs = 0;
1543 break;
1544 case ABIArgInfo::CoerceAndExpand:
1545 IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1546 break;
1547 case ABIArgInfo::Expand:
1548 IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
16: Calling 'getExpansionSize'
1549 break;
1550 }
1551
1552 if (IRArgs.NumberOfArgs > 0) {
1553 IRArgs.FirstArgIndex = IRArgNo;
1554 IRArgNo += IRArgs.NumberOfArgs;
1555 }
1556
1557 // Skip over the sret parameter when it comes second. We already handled it
1558 // above.
1559 if (IRArgNo == 1 && SwapThisWithSRet)
1560 IRArgNo++;
1561 }
1562 assert(ArgNo == ArgInfo.size());
1563
1564 if (FI.usesInAlloca())
1565 InallocaArgNo = IRArgNo++;
1566
1567 TotalIRArgs = IRArgNo;
1568}
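// Illustration (not part of CGCall.cpp; the exact mapping is ABI-dependent):
// for a hypothetical signature lowered as
//
//   Clang arg 0: int                -> Direct             -> 1 IR arg
//   Clang arg 1: struct { i64, i64 } -> Direct, flattened  -> 2 IR args
//   Clang arg 2: _Complex float     -> Expand             -> 2 IR args
//
// construct() records FirstArgIndex/NumberOfArgs as (0,1), (1,2), (3,2) and
// TotalIRArgs == 5, which GetFunctionType and ConstructAttributeList use
// below to line Clang-level arguments up with IR-level parameters.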
1569} // namespace
1570
1571/***/
1572
1573bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1574 const auto &RI = FI.getReturnInfo();
1575 return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1576}
1577
1578bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1579 return ReturnTypeUsesSRet(FI) &&
1580 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1581}
1582
1583bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1584 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1585 switch (BT->getKind()) {
1586 default:
1587 return false;
1588 case BuiltinType::Float:
1589 return getTarget().useObjCFPRetForRealType(FloatModeKind::Float);
1590 case BuiltinType::Double:
1591 return getTarget().useObjCFPRetForRealType(FloatModeKind::Double);
1592 case BuiltinType::LongDouble:
1593 return getTarget().useObjCFPRetForRealType(FloatModeKind::LongDouble);
1594 }
1595 }
1596
1597 return false;
1598}
1599
1600bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1601 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1602 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1603 if (BT->getKind() == BuiltinType::LongDouble)
1604 return getTarget().useObjCFP2RetForComplexLongDouble();
1605 }
1606 }
1607
1608 return false;
1609}
1610
1611llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1612 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1613 return GetFunctionType(FI);
1614}
1615
1616llvm::FunctionType *
1617CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1618
1619 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1620 (void)Inserted;
1621 assert(Inserted && "Recursively being processed?");
1622
1623 llvm::Type *resultType = nullptr;
1624 const ABIArgInfo &retAI = FI.getReturnInfo();
1625 switch (retAI.getKind()) {
1626 case ABIArgInfo::Expand:
1627 case ABIArgInfo::IndirectAliased:
1628 llvm_unreachable("Invalid ABI kind for return argument");
1629
1630 case ABIArgInfo::Extend:
1631 case ABIArgInfo::Direct:
1632 resultType = retAI.getCoerceToType();
1633 break;
1634
1635 case ABIArgInfo::InAlloca:
1636 if (retAI.getInAllocaSRet()) {
1637 // sret things on win32 aren't void, they return the sret pointer.
1638 QualType ret = FI.getReturnType();
1639 llvm::Type *ty = ConvertType(ret);
1640 unsigned addressSpace = CGM.getTypes().getTargetAddressSpace(ret);
1641 resultType = llvm::PointerType::get(ty, addressSpace);
1642 } else {
1643 resultType = llvm::Type::getVoidTy(getLLVMContext());
1644 }
1645 break;
1646
1647 case ABIArgInfo::Indirect:
1648 case ABIArgInfo::Ignore:
1649 resultType = llvm::Type::getVoidTy(getLLVMContext());
1650 break;
1651
1652 case ABIArgInfo::CoerceAndExpand:
1653 resultType = retAI.getUnpaddedCoerceAndExpandType();
1654 break;
1655 }
1656
1657 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1658 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1659
1660 // Add type for sret argument.
1661 if (IRFunctionArgs.hasSRetArg()) {
1662 QualType Ret = FI.getReturnType();
1663 llvm::Type *Ty = ConvertType(Ret);
1664 unsigned AddressSpace = CGM.getTypes().getTargetAddressSpace(Ret);
1665 ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1666 llvm::PointerType::get(Ty, AddressSpace);
1667 }
1668
1669 // Add type for inalloca argument.
1670 if (IRFunctionArgs.hasInallocaArg()) {
1671 auto ArgStruct = FI.getArgStruct();
1672 assert(ArgStruct);
1673 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1674 }
1675
1676 // Add in all of the required arguments.
1677 unsigned ArgNo = 0;
1678 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1679 ie = it + FI.getNumRequiredArgs();
1680 for (; it != ie; ++it, ++ArgNo) {
1681 const ABIArgInfo &ArgInfo = it->info;
1682
1683 // Insert a padding type to ensure proper alignment.
1684 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1685 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1686 ArgInfo.getPaddingType();
1687
1688 unsigned FirstIRArg, NumIRArgs;
1689 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1690
1691 switch (ArgInfo.getKind()) {
1692 case ABIArgInfo::Ignore:
1693 case ABIArgInfo::InAlloca:
1694 assert(NumIRArgs == 0);
1695 break;
1696
1697 case ABIArgInfo::Indirect: {
1698 assert(NumIRArgs == 1);
1699 // indirect arguments are always on the stack, which is alloca addr space.
1700 llvm::Type *LTy = ConvertTypeForMem(it->type);
1701 ArgTypes[FirstIRArg] = LTy->getPointerTo(
1702 CGM.getDataLayout().getAllocaAddrSpace());
1703 break;
1704 }
1705 case ABIArgInfo::IndirectAliased: {
1706 assert(NumIRArgs == 1);
1707 llvm::Type *LTy = ConvertTypeForMem(it->type);
1708 ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace());
1709 break;
1710 }
1711 case ABIArgInfo::Extend:
1712 case ABIArgInfo::Direct: {
1713 // Fast-isel and the optimizer generally like scalar values better than
1714 // FCAs, so we flatten them if this is safe to do for this argument.
1715 llvm::Type *argType = ArgInfo.getCoerceToType();
1716 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1717 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1718 assert(NumIRArgs == st->getNumElements());
1719 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1720 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1721 } else {
1722 assert(NumIRArgs == 1);
1723 ArgTypes[FirstIRArg] = argType;
1724 }
1725 break;
1726 }
1727
1728 case ABIArgInfo::CoerceAndExpand: {
1729 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1730 for (auto *EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1731 *ArgTypesIter++ = EltTy;
1732 }
1733 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1734 break;
1735 }
1736
1737 case ABIArgInfo::Expand:
1738 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1739 getExpandedTypes(it->type, ArgTypesIter);
1740 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1741 break;
1742 }
1743 }
1744
1745 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1746 assert(Erased && "Not in set?");
1747
1748 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1749}
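// Illustration (not part of CGCall.cpp; the exact lowering is ABI-dependent):
// for a hypothetical CGFunctionInfo whose return is Indirect (sret) and whose
// single argument is also passed Indirect, the loop above produces an IR
// function type along the lines of
//
//   void (ptr, ptr)   ; sret slot first, then the indirect argument
//
// with the sret pointer occupying ArgTypes[getSRetArgNo()] and the argument's
// pointer placed at its FirstIRArg position.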
1750
1751llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1752 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1753 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1754
1755 if (!isFuncTypeConvertible(FPT))
1756 return llvm::StructType::get(getLLVMContext());
1757
1758 return GetFunctionType(GD);
1759}
1760
1761static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1762 llvm::AttrBuilder &FuncAttrs,
1763 const FunctionProtoType *FPT) {
1764 if (!FPT)
1765 return;
1766
1767 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1768 FPT->isNothrow())
1769 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1770}
1771
1772static void AddAttributesFromAssumes(llvm::AttrBuilder &FuncAttrs,
1773 const Decl *Callee) {
1774 if (!Callee)
1775 return;
1776
1777 SmallVector<StringRef, 4> Attrs;
1778
1779 for (const AssumptionAttr *AA : Callee->specific_attrs<AssumptionAttr>())
1780 AA->getAssumption().split(Attrs, ",");
1781
1782 if (!Attrs.empty())
1783 FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
1784 llvm::join(Attrs.begin(), Attrs.end(), ","));
1785}
1786
1787bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context,
1788 QualType ReturnType) {
1789 // We can't just discard the return value for a record type with a
1790 // complex destructor or a non-trivially copyable type.
1791 if (const RecordType *RT =
1792 ReturnType.getCanonicalType()->getAs<RecordType>()) {
1793 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1794 return ClassDecl->hasTrivialDestructor();
1795 }
1796 return ReturnType.isTriviallyCopyableType(Context);
1797}
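// Illustration (not part of CGCall.cpp; hypothetical types): a return value
// of type std::string may not be silently dropped because its class has a
// non-trivial destructor, whereas a plain int or a POD struct is trivially
// copyable and so may be dropped.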
1798
1799void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
1800 bool HasOptnone,
1801 bool AttrOnCallSite,
1802 llvm::AttrBuilder &FuncAttrs) {
1803 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1804 if (!HasOptnone) {
1805 if (CodeGenOpts.OptimizeSize)
1806 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1807 if (CodeGenOpts.OptimizeSize == 2)
1808 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1809 }
1810
1811 if (CodeGenOpts.DisableRedZone)
1812 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1813 if (CodeGenOpts.IndirectTlsSegRefs)
1814 FuncAttrs.addAttribute("indirect-tls-seg-refs");
1815 if (CodeGenOpts.NoImplicitFloat)
1816 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1817
1818 if (AttrOnCallSite) {
1819 // Attributes that should go on the call site only.
1820 // FIXME: Look for 'BuiltinAttr' on the function rather than re-checking
1821 // the -fno-builtin-foo list.
1822 if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name))
1823 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1824 if (!CodeGenOpts.TrapFuncName.empty())
1825 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1826 } else {
1827 StringRef FpKind;
1828 switch (CodeGenOpts.getFramePointer()) {
1829 case CodeGenOptions::FramePointerKind::None:
1830 FpKind = "none";
1831 break;
1832 case CodeGenOptions::FramePointerKind::NonLeaf:
1833 FpKind = "non-leaf";
1834 break;
1835 case CodeGenOptions::FramePointerKind::All:
1836 FpKind = "all";
1837 break;
1838 }
1839 FuncAttrs.addAttribute("frame-pointer", FpKind);
1840
1841 if (CodeGenOpts.LessPreciseFPMAD)
1842 FuncAttrs.addAttribute("less-precise-fpmad", "true");
1843
1844 if (CodeGenOpts.NullPointerIsValid)
1845 FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
1846
1847 if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE())
1848 FuncAttrs.addAttribute("denormal-fp-math",
1849 CodeGenOpts.FPDenormalMode.str());
1850 if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) {
1851 FuncAttrs.addAttribute(
1852 "denormal-fp-math-f32",
1853 CodeGenOpts.FP32DenormalMode.str());
1854 }
1855
1856 if (LangOpts.getDefaultExceptionMode() == LangOptions::FPE_Ignore)
1857 FuncAttrs.addAttribute("no-trapping-math", "true");
1858
1859 // TODO: Are these all needed?
1860 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1861 if (LangOpts.NoHonorInfs)
1862 FuncAttrs.addAttribute("no-infs-fp-math", "true");
1863 if (LangOpts.NoHonorNaNs)
1864 FuncAttrs.addAttribute("no-nans-fp-math", "true");
1865 if (LangOpts.ApproxFunc)
1866 FuncAttrs.addAttribute("approx-func-fp-math", "true");
1867 if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
1868 LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
1869 (LangOpts.getDefaultFPContractMode() ==
1870 LangOptions::FPModeKind::FPM_Fast ||
1871 LangOpts.getDefaultFPContractMode() ==
1872 LangOptions::FPModeKind::FPM_FastHonorPragmas))
1873 FuncAttrs.addAttribute("unsafe-fp-math", "true");
1874 if (CodeGenOpts.SoftFloat)
1875 FuncAttrs.addAttribute("use-soft-float", "true");
1876 FuncAttrs.addAttribute("stack-protector-buffer-size",
1877 llvm::utostr(CodeGenOpts.SSPBufferSize));
1878 if (LangOpts.NoSignedZero)
1879 FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");
1880
1881 // TODO: Reciprocal estimate codegen options should apply to instructions?
1882 const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1883 if (!Recips.empty())
1884 FuncAttrs.addAttribute("reciprocal-estimates",
1885 llvm::join(Recips, ","));
1886
1887 if (!CodeGenOpts.PreferVectorWidth.empty() &&
1888 CodeGenOpts.PreferVectorWidth != "none")
1889 FuncAttrs.addAttribute("prefer-vector-width",
1890 CodeGenOpts.PreferVectorWidth);
1891
1892 if (CodeGenOpts.StackRealignment)
1893 FuncAttrs.addAttribute("stackrealign");
1894 if (CodeGenOpts.Backchain)
1895 FuncAttrs.addAttribute("backchain");
1896 if (CodeGenOpts.EnableSegmentedStacks)
1897 FuncAttrs.addAttribute("split-stack");
1898
1899 if (CodeGenOpts.SpeculativeLoadHardening)
1900 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1901
1902 // Add zero-call-used-regs attribute.
1903 switch (CodeGenOpts.getZeroCallUsedRegs()) {
1904 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
1905 FuncAttrs.removeAttribute("zero-call-used-regs");
1906 break;
1907 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
1908 FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr-arg");
1909 break;
1910 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
1911 FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr");
1912 break;
1913 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
1914 FuncAttrs.addAttribute("zero-call-used-regs", "used-arg");
1915 break;
1916 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
1917 FuncAttrs.addAttribute("zero-call-used-regs", "used");
1918 break;
1919 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
1920 FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr-arg");
1921 break;
1922 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
1923 FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr");
1924 break;
1925 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
1926 FuncAttrs.addAttribute("zero-call-used-regs", "all-arg");
1927 break;
1928 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
1929 FuncAttrs.addAttribute("zero-call-used-regs", "all");
1930 break;
1931 }
1932 }
1933
1934 if (getLangOpts().assumeFunctionsAreConvergent()) {
1935 // Conservatively, mark all functions and calls in CUDA and OpenCL as
1936 // convergent (meaning, they may call an intrinsically convergent op, such
1937 // as __syncthreads() / barrier(), and so can't have certain optimizations
1938 // applied around them). LLVM will remove this attribute where it safely
1939 // can.
1940 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1941 }
1942
1943 // TODO: NoUnwind attribute should be added for other GPU modes OpenCL, HIP,
1944 // SYCL, OpenMP offload. AFAIK, none of them support exceptions in device
1945 // code.
1946 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1947 // Exceptions aren't supported in CUDA device code.
1948 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1949 }
1950
1951 for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1952 StringRef Var, Value;
1953 std::tie(Var, Value) = Attr.split('=');
1954 FuncAttrs.addAttribute(Var, Value);
1955 }
1956}
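// Illustration (not part of CGCall.cpp; hypothetical flags): when compiling
// with -mframe-pointer=non-leaf and the default SSPBufferSize of 8, the
// builder above ends up carrying string attributes such as
// "frame-pointer"="non-leaf" and "stack-protector-buffer-size"="8" on every
// emitted function, unless AttrOnCallSite restricts it to call-site-only
// attributes such as "trap-func-name".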
1957
1958void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
1959 llvm::AttrBuilder FuncAttrs(F.getContext());
1960 getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
1961 /* AttrOnCallSite = */ false, FuncAttrs);
1962 // TODO: call GetCPUAndFeaturesAttributes?
1963 F.addFnAttrs(FuncAttrs);
1964}
1965
1966void CodeGenModule::addDefaultFunctionDefinitionAttributes(
1967 llvm::AttrBuilder &attrs) {
1968 getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
1969 /*for call*/ false, attrs);
1970 GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
1971}
1972
1973static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
1974 const LangOptions &LangOpts,
1975 const NoBuiltinAttr *NBA = nullptr) {
1976 auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
1977 SmallString<32> AttributeName;
1978 AttributeName += "no-builtin-";
1979 AttributeName += BuiltinName;
1980 FuncAttrs.addAttribute(AttributeName);
1981 };
1982
1983 // First, handle the language options passed through -fno-builtin.
1984 if (LangOpts.NoBuiltin) {
1985 // -fno-builtin disables them all.
1986 FuncAttrs.addAttribute("no-builtins");
1987 return;
1988 }
1989
1990 // Then, add attributes for builtins specified through -fno-builtin-<name>.
1991 llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);
1992
1993 // Now, let's check the __attribute__((no_builtin("..."))) attribute added to
1994 // the source.
1995 if (!NBA)
1996 return;
1997
1998 // If there is a wildcard in the builtin names specified through the
1999 // attribute, disable them all.
2000 if (llvm::is_contained(NBA->builtinNames(), "*")) {
2001 FuncAttrs.addAttribute("no-builtins");
2002 return;
2003 }
2004
2005 // And last, add the rest of the builtin names.
2006 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
2007}
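// Illustration (not part of CGCall.cpp; hypothetical user code):
//
//   // -fno-builtin            -> "no-builtins"
//   // -fno-builtin-memcpy     -> "no-builtin-memcpy"
//   void f(void) __attribute__((no_builtin("memcpy")));  // -> "no-builtin-memcpy"
//   void g(void) __attribute__((no_builtin("*")));        // -> "no-builtins"
//
// The wildcard form short-circuits the per-builtin attributes, matching the
// early return above.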
2008
2009static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types,
2010 const llvm::DataLayout &DL, const ABIArgInfo &AI,
2011 bool CheckCoerce = true) {
2012 llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
2013 if (AI.getKind() == ABIArgInfo::Indirect)
2014 return true;
2015 if (AI.getKind() == ABIArgInfo::Extend)
2016 return true;
2017 if (!DL.typeSizeEqualsStoreSize(Ty))
2018 // TODO: This will result in a modest amount of values not marked noundef
2019 // when they could be. We care about values that *invisibly* contain undef
2020 // bits from the perspective of LLVM IR.
2021 return false;
2022 if (CheckCoerce && AI.canHaveCoerceToType()) {
2023 llvm::Type *CoerceTy = AI.getCoerceToType();
2024 if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
2025 DL.getTypeSizeInBits(Ty)))
2026 // If we're coercing to a type with a greater size than the canonical one,
2027 // we're introducing new undef bits.
2028 // Coercing to a type of smaller or equal size is ok, as we know that
2029 // there's no internal padding (typeSizeEqualsStoreSize).
2030 return false;
2031 }
2032 if (QTy->isBitIntType())
2033 return true;
2034 if (QTy->isReferenceType())
2035 return true;
2036 if (QTy->isNullPtrType())
2037 return false;
2038 if (QTy->isMemberPointerType())
2039 // TODO: Some member pointers are `noundef`, but it depends on the ABI. For
2040 // now, never mark them.
2041 return false;
2042 if (QTy->isScalarType()) {
2043 if (const ComplexType *Complex = dyn_cast<ComplexType>(QTy))
2044 return DetermineNoUndef(Complex->getElementType(), Types, DL, AI, false);
2045 return true;
2046 }
2047 if (const VectorType *Vector = dyn_cast<VectorType>(QTy))
2048 return DetermineNoUndef(Vector->getElementType(), Types, DL, AI, false);
2049 if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
2050 return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false);
2051 if (const ArrayType *Array = dyn_cast<ArrayType>(QTy))
2052 return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false);
2053
2054 // TODO: Some structs may be `noundef`, in specific situations.
2055 return false;
2056}
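// Illustration (not part of CGCall.cpp; hypothetical types, assuming a Direct
// ABI kind): under this predicate an 'int' or 'int &' parameter is a
// candidate for 'noundef', a 'nullptr_t' or member-pointer parameter is not,
// and an aggregate struct is conservatively left unmarked because its padding
// may invisibly contain undef bits.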
2057
2058/// Check if the argument of a function has maybe_undef attribute.
2059static bool IsArgumentMaybeUndef(const Decl *TargetDecl,
2060 unsigned NumRequiredArgs, unsigned ArgNo) {
2061 const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
2062 if (!FD)
2063 return false;
2064
2065 // Assume variadic arguments do not have maybe_undef attribute.
2066 if (ArgNo >= NumRequiredArgs)
2067 return false;
2068
2069 // Check if argument has maybe_undef attribute.
2070 if (ArgNo < FD->getNumParams()) {
2071 const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
2072 if (Param && Param->hasAttr<MaybeUndefAttr>())
2073 return true;
2074 }
2075
2076 return false;
2077}
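// Illustration (not part of CGCall.cpp; hypothetical user code): given
//
//   void f(int a, __attribute__((maybe_undef)) int b, ...);
//
// IsArgumentMaybeUndef returns true only for ArgNo == 1; variadic arguments
// past the required ones are assumed not to carry the attribute.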
2078
2079/// Construct the IR attribute list of a function or call.
2080///
2081/// When adding an attribute, please consider where it should be handled:
2082///
2083/// - getDefaultFunctionAttributes is for attributes that are essentially
2084/// part of the global target configuration (but perhaps can be
2085/// overridden on a per-function basis). Adding attributes there
2086/// will cause them to also be set in frontends that build on Clang's
2087/// target-configuration logic, as well as for code defined in library
2088/// modules such as CUDA's libdevice.
2089///
2090/// - ConstructAttributeList builds on top of getDefaultFunctionAttributes
2091/// and adds declaration-specific, convention-specific, and
2092/// frontend-specific logic. The last is of particular importance:
2093/// attributes that restrict how the frontend generates code must be
2094/// added here rather than getDefaultFunctionAttributes.
2095///
2096void CodeGenModule::ConstructAttributeList(StringRef Name,
2097 const CGFunctionInfo &FI,
2098 CGCalleeInfo CalleeInfo,
2099 llvm::AttributeList &AttrList,
2100 unsigned &CallingConv,
2101 bool AttrOnCallSite, bool IsThunk) {
2102 llvm::AttrBuilder FuncAttrs(getLLVMContext());
2103 llvm::AttrBuilder RetAttrs(getLLVMContext());
2104
2105 // Collect function IR attributes from the CC lowering.
2106 // We'll collect the parameter and result attributes later.
2107 CallingConv = FI.getEffectiveCallingConvention();
2108 if (FI.isNoReturn())
2109 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2110 if (FI.isCmseNSCall())
2111 FuncAttrs.addAttribute("cmse_nonsecure_call");
2112
2113 // Collect function IR attributes from the callee prototype if we have one.
2114 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
2115 CalleeInfo.getCalleeFunctionProtoType());
2116
2117 const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
2118
2119 // Attach assumption attributes to the declaration. If this is a call
2120 // site, attach assumptions from the caller to the call as well.
2121 AddAttributesFromAssumes(FuncAttrs, TargetDecl);
2122
2123 bool HasOptnone = false;
2124 // The NoBuiltinAttr attached to the target FunctionDecl.
2125 const NoBuiltinAttr *NBA = nullptr;
2126
2127 // Some ABIs may result in additional accesses to arguments that may
2128 // otherwise not be present.
2129 auto AddPotentialArgAccess = [&]() {
2130 llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);
2131 if (A.isValid())
2132 FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
2133 llvm::MemoryEffects::argMemOnly());
2134 };
2135
2136 // Collect function IR attributes based on declaration-specific
2137 // information.
2138 // FIXME: handle sseregparm someday...
2139 if (TargetDecl) {
2140 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
2141 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2142 if (TargetDecl->hasAttr<NoThrowAttr>())
2143 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2144 if (TargetDecl->hasAttr<NoReturnAttr>())
2145 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2146 if (TargetDecl->hasAttr<ColdAttr>())
2147 FuncAttrs.addAttribute(llvm::Attribute::Cold);
2148 if (TargetDecl->hasAttr<HotAttr>())
2149 FuncAttrs.addAttribute(llvm::Attribute::Hot);
2150 if (TargetDecl->hasAttr<NoDuplicateAttr>())
2151 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
2152 if (TargetDecl->hasAttr<ConvergentAttr>())
2153 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2154
2155 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2156 AddAttributesFromFunctionProtoType(
2157 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
2158 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2159 // A sane operator new returns a non-aliasing pointer.
2160 auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2161 if (getCodeGenOpts().AssumeSaneOperatorNew &&
2162 (Kind == OO_New || Kind == OO_Array_New))
2163 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2164 }
2165 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
2166 const bool IsVirtualCall = MD && MD->isVirtual();
2167 // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
2168 // virtual function. These attributes are not inherited by overloads.
2169 if (!(AttrOnCallSite && IsVirtualCall)) {
2170 if (Fn->isNoReturn())
2171 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2172 NBA = Fn->getAttr<NoBuiltinAttr>();
2173 }
2174 // Only place nomerge attribute on call sites, never functions. This
2175 // allows it to work on indirect virtual function calls.
2176 if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
2177 FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
2178 }
2179
2180 // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
2181 if (TargetDecl->hasAttr<ConstAttr>()) {
2182 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
2183 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2184 // gcc specifies that 'const' functions have greater restrictions than
2185 // 'pure' functions, so they also cannot have infinite loops.
2186 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2187 } else if (TargetDecl->hasAttr<PureAttr>()) {
2188 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
2189 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2190 // gcc specifies that 'pure' functions cannot have infinite loops.
2191 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2192 } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
2193 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::argMemOnly());
2194 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2195 }
2196 if (TargetDecl->hasAttr<RestrictAttr>())
2197 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2198 if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
2199 !CodeGenOpts.NullPointerIsValid)
2200 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2201 if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2202 FuncAttrs.addAttribute("no_caller_saved_registers");
2203 if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
2204 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2205 if (TargetDecl->hasAttr<LeafAttr>())
2206 FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2207
2208 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
2209 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
2210 std::optional<unsigned> NumElemsParam;
2211 if (AllocSize->getNumElemsParam().isValid())
2212 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2213 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2214 NumElemsParam);
2215 }
2216
2217 if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
2218 if (getLangOpts().OpenCLVersion <= 120) {
2219 // OpenCL v1.2 Work groups are always uniform
2220 FuncAttrs.addAttribute("uniform-work-group-size", "true");
2221 } else {
2222 // In OpenCL v2.0 work groups may or may not be uniform.
2223 // The '-cl-uniform-work-group-size' compile option gives a hint
2224 // to the compiler that the global work-size is a multiple of
2225 // the work-group size specified to clEnqueueNDRangeKernel
2226 // (i.e. work groups are uniform).
2227 FuncAttrs.addAttribute("uniform-work-group-size",
2228 llvm::toStringRef(CodeGenOpts.UniformWGSize));
2229 }
2230 }
2231 }
2232
2233 // Attach "no-builtins" attributes to:
2234 // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
2235 // * definitions: "no-builtins" or "no-builtin-<name>" only.
2236 // The attributes can come from:
2237 // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
2238 // * FunctionDecl attributes: __attribute__((no_builtin(...)))
2239 addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);
2240
2241 // Collect function IR attributes based on global settings.
2242 getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2243
2244 // Override some default IR attributes based on declaration-specific
2245 // information.
2246 if (TargetDecl) {
2247 if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
2248 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2249 if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
2250 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2251 if (TargetDecl->hasAttr<NoSplitStackAttr>())
2252 FuncAttrs.removeAttribute("split-stack");
2253 if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) {
2254 // A function "__attribute__((...))" overrides the command-line flag.
2255 auto Kind =
2256 TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
2257 FuncAttrs.removeAttribute("zero-call-used-regs");
2258 FuncAttrs.addAttribute(
2259 "zero-call-used-regs",
2260 ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));
2261 }
2262
2263 // Add NonLazyBind attribute to function declarations when -fno-plt
2264 // is used.
2265 // FIXME: what if we just haven't processed the function definition
2266 // yet, or if it's an external definition like C99 inline?
2267 if (CodeGenOpts.NoPLT) {
2268 if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2269 if (!Fn->isDefined() && !AttrOnCallSite) {
2270 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2271 }
2272 }
2273 }
2274 }
2275
2276 // Add "sample-profile-suffix-elision-policy" attribute for internal linkage
2277 // functions with -funique-internal-linkage-names.
2278 if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
2279 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
2280 if (!FD->isExternallyVisible())
2281 FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",
2282 "selected");
2283 }
2284 }
2285
2286 // Collect non-call-site function IR attributes from declaration-specific
2287 // information.
2288 if (!AttrOnCallSite) {
2289 if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
2290 FuncAttrs.addAttribute("cmse_nonsecure_entry");
2291
2292 // Whether tail calls are enabled.
2293 auto shouldDisableTailCalls = [&] {
2294 // Should this be honored in getDefaultFunctionAttributes?
2295 if (CodeGenOpts.DisableTailCalls)
2296 return true;
2297
2298 if (!TargetDecl)
2299 return false;
2300
2301 if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
2302 TargetDecl->hasAttr<AnyX86InterruptAttr>())
2303 return true;
2304
2305 if (CodeGenOpts.NoEscapingBlockTailCalls) {
2306 if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2307 if (!BD->doesNotEscape())
2308 return true;
2309 }
2310
2311 return false;
2312 };
2313 if (shouldDisableTailCalls())
2314 FuncAttrs.addAttribute("disable-tail-calls", "true");
2315
2316 // CPU/feature overrides. addDefaultFunctionDefinitionAttributes
2317 // handles these separately to set them based on the global defaults.
2318 GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
2319 }
2320
2321 // Collect attributes from arguments and return values.
2322 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
2323
2324 QualType RetTy = FI.getReturnType();
2325 const ABIArgInfo &RetAI = FI.getReturnInfo();
2326 const llvm::DataLayout &DL = getDataLayout();
2327
2328 // C++ explicitly makes returning undefined values UB. C's rule only applies
2329 // to used values, so we never mark them noundef for now.
2330 bool HasStrictReturn = getLangOpts().CPlusPlus;
2331 if (TargetDecl && HasStrictReturn) {
2332 if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl))
2333 HasStrictReturn &= !FDecl->isExternC();
2334 else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl))
2335 // Function pointer
2336 HasStrictReturn &= !VDecl->isExternC();
2337 }
2338
2339 // We don't want to be too aggressive with the return checking, unless
2340 // it's explicit in the code opts or we're using an appropriate sanitizer.
2341 // Try to respect what the programmer intended.
2342 HasStrictReturn &= getCodeGenOpts().StrictReturn ||
2343 !MayDropFunctionReturn(getContext(), RetTy) ||
2344 getLangOpts().Sanitize.has(SanitizerKind::Memory) ||
2345 getLangOpts().Sanitize.has(SanitizerKind::Return);
2346
2347 // Determine if the return type could be partially undef
2348 if (CodeGenOpts.EnableNoundefAttrs && HasStrictReturn) {
2349 if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect &&
2350 DetermineNoUndef(RetTy, getTypes(), DL, RetAI))
2351 RetAttrs.addAttribute(llvm::Attribute::NoUndef);
2352 }
2353
2354 switch (RetAI.getKind()) {
2355 case ABIArgInfo::Extend:
2356 if (RetAI.isSignExt())
2357 RetAttrs.addAttribute(llvm::Attribute::SExt);
2358 else
2359 RetAttrs.addAttribute(llvm::Attribute::ZExt);
2360 [[fallthrough]];
2361 case ABIArgInfo::Direct:
2362 if (RetAI.getInReg())
2363 RetAttrs.addAttribute(llvm::Attribute::InReg);
2364 break;
2365 case ABIArgInfo::Ignore:
2366 break;
2367
2368 case ABIArgInfo::InAlloca:
2369 case ABIArgInfo::Indirect: {
2370 // inalloca and sret disable readnone and readonly
2371 AddPotentialArgAccess();
2372 break;
2373 }
2374
2375 case ABIArgInfo::CoerceAndExpand:
2376 break;
2377
2378 case ABIArgInfo::Expand:
2379 case ABIArgInfo::IndirectAliased:
2380 llvm_unreachable("Invalid ABI kind for return argument");
2381 }
2382
2383 if (!IsThunk) {
2384 // FIXME: fix this properly, https://reviews.llvm.org/D100388
2385 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2386 QualType PTy = RefTy->getPointeeType();
2387 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2388 RetAttrs.addDereferenceableAttr(
2389 getMinimumObjectSize(PTy).getQuantity());
2390 if (getTypes().getTargetAddressSpace(PTy) == 0 &&
2391 !CodeGenOpts.NullPointerIsValid)
2392 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2393 if (PTy->isObjectType()) {
2394 llvm::Align Alignment =
2395 getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
2396 RetAttrs.addAlignmentAttr(Alignment);
2397 }
2398 }
2399 }
2400
2401 bool hasUsedSRet = false;
2402 SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2403
2404 // Attach attributes to sret.
2405 if (IRFunctionArgs.hasSRetArg()) {
2406 llvm::AttrBuilder SRETAttrs(getLLVMContext());
2407 SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
2408 hasUsedSRet = true;
2409 if (RetAI.getInReg())
2410 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2411 SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
2412 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2413 llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2414 }
2415
2416 // Attach attributes to inalloca argument.
2417 if (IRFunctionArgs.hasInallocaArg()) {
2418 llvm::AttrBuilder Attrs(getLLVMContext());
2419 Attrs.addInAllocaAttr(FI.getArgStruct());
2420 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2421 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2422 }
2423
2424 // Apply `nonnull`, `dereferencable(N)` and `align N` to the `this` argument,
2425 // unless this is a thunk function.
2426 // FIXME: fix this properly, https://reviews.llvm.org/D100388
2427 if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() &&
2428 !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) {
2429 auto IRArgs = IRFunctionArgs.getIRArgs(0);
2430
2431 assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");
2432
2433 llvm::AttrBuilder Attrs(getLLVMContext());
2434
2435 QualType ThisTy =
2436 FI.arg_begin()->type.castAs<PointerType>()->getPointeeType();
2437
2438 if (!CodeGenOpts.NullPointerIsValid &&
2439 getTypes().getTargetAddressSpace(FI.arg_begin()->type) == 0) {
2440 Attrs.addAttribute(llvm::Attribute::NonNull);
2441 Attrs.addDereferenceableAttr(getMinimumObjectSize(ThisTy).getQuantity());
2442 } else {
2443 // FIXME dereferenceable should be correct here, regardless of
2444 // NullPointerIsValid. However, dereferenceable currently does not always
2445 // respect NullPointerIsValid and may imply nonnull and break the program.
2446 // See https://reviews.llvm.org/D66618 for discussions.
2447 Attrs.addDereferenceableOrNullAttr(
2448 getMinimumObjectSize(
2449 FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
2450 .getQuantity());
2451 }
2452
2453 llvm::Align Alignment =
2454 getNaturalTypeAlignment(ThisTy, /*BaseInfo=*/nullptr,
2455 /*TBAAInfo=*/nullptr, /*forPointeeType=*/true)
2456 .getAsAlign();
2457 Attrs.addAlignmentAttr(Alignment);
2458
2459 ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);
2460 }
2461
2462 unsigned ArgNo = 0;
2463 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2464 E = FI.arg_end();
2465 I != E; ++I, ++ArgNo) {
2466 QualType ParamType = I->type;
2467 const ABIArgInfo &AI = I->info;
2468 llvm::AttrBuilder Attrs(getLLVMContext());
2469
2470 // Add attribute for padding argument, if necessary.
2471 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2472 if (AI.getPaddingInReg()) {
2473 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2474 llvm::AttributeSet::get(
2475 getLLVMContext(),
2476 llvm::AttrBuilder(getLLVMContext()).addAttribute(llvm::Attribute::InReg));
2477 }
2478 }
2479
2480 // Decide whether the argument we're handling could be partially undef
2481 if (CodeGenOpts.EnableNoundefAttrs &&
2482 DetermineNoUndef(ParamType, getTypes(), DL, AI)) {
2483 Attrs.addAttribute(llvm::Attribute::NoUndef);
2484 }
2485
2486 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2487 // have the corresponding parameter variable. It doesn't make
2488 // sense to do it here because parameters are so messed up.
2489 switch (AI.getKind()) {
2490 case ABIArgInfo::Extend:
2491 if (AI.isSignExt())
2492 Attrs.addAttribute(llvm::Attribute::SExt);
2493 else
2494 Attrs.addAttribute(llvm::Attribute::ZExt);
2495 [[fallthrough]];
2496 case ABIArgInfo::Direct:
2497 if (ArgNo == 0 && FI.isChainCall())
2498 Attrs.addAttribute(llvm::Attribute::Nest);
2499 else if (AI.getInReg())
2500 Attrs.addAttribute(llvm::Attribute::InReg);
2501 Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign()));
2502 break;
2503
2504 case ABIArgInfo::Indirect: {
2505 if (AI.getInReg())
2506 Attrs.addAttribute(llvm::Attribute::InReg);
2507
2508 if (AI.getIndirectByVal())
2509 Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
2510
2511 auto *Decl = ParamType->getAsRecordDecl();
2512 if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
2513 Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs)
2514 // When calling the function, the pointer passed in will be the only
2515 // reference to the underlying object. Mark it accordingly.
2516 Attrs.addAttribute(llvm::Attribute::NoAlias);
2517
2518 // TODO: We could add the byref attribute if not byval, but it would
2519 // require updating many testcases.
2520
2521 CharUnits Align = AI.getIndirectAlign();
2522
2523 // In a byval argument, it is important that the required
2524 // alignment of the type is honored, as LLVM might be creating a
2525 // *new* stack object, and needs to know what alignment to give
2526 // it. (Sometimes it can deduce a sensible alignment on its own,
2527 // but not if clang decides it must emit a packed struct, or the
2528 // user specifies increased alignment requirements.)
2529 //
2530 // This is different from indirect *not* byval, where the object
2531 // exists already, and the align attribute is purely
2532 // informative.
2533 assert(!Align.isZero());
2534
2535 // For now, only add this when we have a byval argument.
2536 // TODO: be less lazy about updating test cases.
2537 if (AI.getIndirectByVal())
2538 Attrs.addAlignmentAttr(Align.getQuantity());
2539
2540 // byval disables readnone and readonly.
2541 AddPotentialArgAccess();
2542 break;
2543 }
2544 case ABIArgInfo::IndirectAliased: {
2545 CharUnits Align = AI.getIndirectAlign();
2546 Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));
2547 Attrs.addAlignmentAttr(Align.getQuantity());
2548 break;
2549 }
2550 case ABIArgInfo::Ignore:
2551 case ABIArgInfo::Expand:
2552 case ABIArgInfo::CoerceAndExpand:
2553 break;
2554
2555 case ABIArgInfo::InAlloca:
2556 // inalloca disables readnone and readonly.
2557 AddPotentialArgAccess();
2558 continue;
2559 }
2560
2561 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2562 QualType PTy = RefTy->getPointeeType();
2563 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2564 Attrs.addDereferenceableAttr(
2565 getMinimumObjectSize(PTy).getQuantity());
2566 if (getTypes().getTargetAddressSpace(PTy) == 0 &&
2567 !CodeGenOpts.NullPointerIsValid)
2568 Attrs.addAttribute(llvm::Attribute::NonNull);
2569 if (PTy->isObjectType()) {
2570 llvm::Align Alignment =
2571 getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
2572 Attrs.addAlignmentAttr(Alignment);
2573 }
2574 }
2575
2576 // From OpenCL spec v3.0.10 section 6.3.5 Alignment of Types:
2577 // > For arguments to a __kernel function declared to be a pointer to a
2578 // > data type, the OpenCL compiler can assume that the pointee is always
2579 // > appropriately aligned as required by the data type.
2580 if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>() &&
2581 ParamType->isPointerType()) {
2582 QualType PTy = ParamType->getPointeeType();
2583 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2584 llvm::Align Alignment =
2585 getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
2586 Attrs.addAlignmentAttr(Alignment);
2587 }
2588 }
2589
2590 switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2591 case ParameterABI::Ordinary:
2592 break;
2593
2594 case ParameterABI::SwiftIndirectResult: {
2595 // Add 'sret' if we haven't already used it for something, but
2596 // only if the result is void.
2597 if (!hasUsedSRet && RetTy->isVoidType()) {
2598 Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));
2599 hasUsedSRet = true;
2600 }
2601
2602 // Add 'noalias' in either case.
2603 Attrs.addAttribute(llvm::Attribute::NoAlias);
2604
2605 // Add 'dereferenceable' and 'alignment'.
2606 auto PTy = ParamType->getPointeeType();
2607 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2608 auto info = getContext().getTypeInfoInChars(PTy);
2609 Attrs.addDereferenceableAttr(info.Width.getQuantity());
2610 Attrs.addAlignmentAttr(info.Align.getAsAlign());
2611 }
2612 break;
2613 }
2614
2615 case ParameterABI::SwiftErrorResult:
2616 Attrs.addAttribute(llvm::Attribute::SwiftError);
2617 break;
2618
2619 case ParameterABI::SwiftContext:
2620 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2621 break;
2622
2623 case ParameterABI::SwiftAsyncContext:
2624 Attrs.addAttribute(llvm::Attribute::SwiftAsync);
2625 break;
2626 }
2627
2628 if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2629 Attrs.addAttribute(llvm::Attribute::NoCapture);
2630
2631 if (Attrs.hasAttributes()) {
2632 unsigned FirstIRArg, NumIRArgs;
2633 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2634 for (unsigned i = 0; i < NumIRArgs; i++)
2635 ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(
2636 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), Attrs));
2637 }
2638 }
2639 assert(ArgNo == FI.arg_size());
2640
2641 AttrList = llvm::AttributeList::get(
2642 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2643 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2644}
2645
2646/// An argument came in as a promoted argument; demote it back to its
2647/// declared type.
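/// For example, a K&R-style 'float' parameter arrives promoted to 'double'
/// and a 'short' arrives as 'int'; the helper below truncates integers and
/// FP-casts floating-point values back to the declared parameter type.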
2648static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2649 const VarDecl *var,
2650 llvm::Value *value) {
2651 llvm::Type *varType = CGF.ConvertType(var->getType());
2652
2653 // This can happen with promotions that actually don't change the
2654 // underlying type, like the enum promotions.
2655 if (value->getType() == varType) return value;
2656
2657 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2658 && "unexpected promotion type");
2659
2660 if (isa<llvm::IntegerType>(varType))
2661 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2662
2663 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2664}
2665
2666/// Returns the attribute (either parameter attribute, or function
2667/// attribute), which declares argument ArgNo to be non-null.
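/// For example, both 'void f(int *p __attribute__((nonnull)));' (attribute on
/// the parameter) and '__attribute__((nonnull(1))) void f(int *p);' (function
/// attribute naming argument 1) are matched by this lookup.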
2668static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2669 QualType ArgType, unsigned ArgNo) {
2670 // FIXME: __attribute__((nonnull)) can also be applied to:
2671 // - references to pointers, where the pointee is known to be
2672 // nonnull (apparently a Clang extension)
2673 // - transparent unions containing pointers
2674 // In the former case, LLVM IR cannot represent the constraint. In
2675 // the latter case, we have no guarantee that the transparent union
2676 // is in fact passed as a pointer.
2677 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2678 return nullptr;
2679 // First, check attribute on parameter itself.
2680 if (PVD) {
2681 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2682 return ParmNNAttr;
2683 }
2684 // Check function attributes.
2685 if (!FD)
2686 return nullptr;
2687 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2688 if (NNAttr->isNonNull(ArgNo))
2689 return NNAttr;
2690 }
2691 return nullptr;
2692}
2693
2694namespace {
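// Cleanup that copies the possibly-updated swifterror value from the local
// temporary back into the caller-visible swifterror argument slot on normal
// function exit; it is pushed by the SwiftErrorResult handling in
// EmitFunctionProlog below.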
2695 struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2696 Address Temp;
2697 Address Arg;
2698 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2699 void Emit(CodeGenFunction &CGF, Flags flags) override {
2700 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2701 CGF.Builder.CreateStore(errorValue, Arg);
2702 }
2703 };
2704}
2705
2706void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2707 llvm::Function *Fn,
2708 const FunctionArgList &Args) {
2709 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
[1] Assuming field 'CurCodeDecl' is null
2710 // Naked functions don't have prologues.
2711 return;
2712
2713 // If this is an implicit-return-zero function, go ahead and
2714 // initialize the return value. TODO: it might be nice to have
2715 // a more general mechanism for this that didn't require synthesized
2716 // return statements.
2717 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
[2] Assuming null pointer is passed into cast
[2.1] 'FD' is null
[3] Taking false branch
2718 if (FD->hasImplicitReturnZero()) {
2719 QualType RetTy = FD->getReturnType().getUnqualifiedType();
2720 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2721 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2722 Builder.CreateStore(Zero, ReturnValue);
2723 }
2724 }
2725
2726 // FIXME: We no longer need the types from FunctionArgList; lift up and
2727 // simplify.
2728
2729 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
[4] Calling constructor for 'ClangToLLVMArgMapping'
2730 assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
2731
2732 // If we're using inalloca, all the memory arguments are GEPs off of the last
2733 // parameter, which is a pointer to the complete memory area.
2734 Address ArgStruct = Address::invalid();
2735 if (IRFunctionArgs.hasInallocaArg()) {
2736 ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
2737 FI.getArgStruct(), FI.getArgStructAlignment());
2738
2739 assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2740 }
2741
2742 // Name the struct return parameter.
2743 if (IRFunctionArgs.hasSRetArg()) {
2744 auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
2745 AI->setName("agg.result");
2746 AI->addAttr(llvm::Attribute::NoAlias);
2747 }
2748
2749 // Track if we received the parameter as a pointer (indirect, byval, or
2750 // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
2751 // into a local alloca for us.
2752 SmallVector<ParamValue, 16> ArgVals;
2753 ArgVals.reserve(Args.size());
2754
2755 // Create a pointer value for every parameter declaration. This usually
2756 // entails copying one or more LLVM IR arguments into an alloca. Don't push
2757 // any cleanups or do anything that might unwind. We do that separately, so
2758 // we can push the cleanups in the correct order for the ABI.
2759 assert(FI.arg_size() == Args.size() &&
2760 "Mismatch between function signature & arguments.");
2761 unsigned ArgNo = 0;
2762 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2763 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2764 i != e; ++i, ++info_it, ++ArgNo) {
2765 const VarDecl *Arg = *i;
2766 const ABIArgInfo &ArgI = info_it->info;
2767
2768 bool isPromoted =
2769 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2770 // We are converting from ABIArgInfo type to VarDecl type directly, unless
2771 // the parameter is promoted. In this case we convert to
2772 // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2773 QualType Ty = isPromoted ? info_it->type : Arg->getType();
2774 assert(hasScalarEvaluationKind(Ty) ==
2775 hasScalarEvaluationKind(Arg->getType()));
2776
2777 unsigned FirstIRArg, NumIRArgs;
2778 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2779
2780 switch (ArgI.getKind()) {
2781 case ABIArgInfo::InAlloca: {
2782 assert(NumIRArgs == 0);
2783 auto FieldIndex = ArgI.getInAllocaFieldIndex();
2784 Address V =
2785 Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2786 if (ArgI.getInAllocaIndirect())
2787 V = Address(Builder.CreateLoad(V), ConvertTypeForMem(Ty),
2788 getContext().getTypeAlignInChars(Ty));
2789 ArgVals.push_back(ParamValue::forIndirect(V));
2790 break;
2791 }
2792
2793 case ABIArgInfo::Indirect:
2794 case ABIArgInfo::IndirectAliased: {
2795 assert(NumIRArgs == 1);
2796 Address ParamAddr = Address(Fn->getArg(FirstIRArg), ConvertTypeForMem(Ty),
2797 ArgI.getIndirectAlign());
2798
2799 if (!hasScalarEvaluationKind(Ty)) {
2800 // Aggregates and complex variables are accessed by reference. All we
2801 // need to do is realign the value, if requested. Also, if the address
2802 // may be aliased, copy it to ensure that the parameter variable is
2803 // mutable and has a unique address, as C requires.
2804 Address V = ParamAddr;
2805 if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
2806 Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2807
2808 // Copy from the incoming argument pointer to the temporary with the
2809 // appropriate alignment.
2810 //
2811 // FIXME: We should have a common utility for generating an aggregate
2812 // copy.
2813 CharUnits Size = getContext().getTypeSizeInChars(Ty);
2814 Builder.CreateMemCpy(
2815 AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
2816 ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
2817 llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
2818 V = AlignedTemp;
2819 }
2820 ArgVals.push_back(ParamValue::forIndirect(V));
2821 } else {
2822 // Load scalar value from indirect argument.
2823 llvm::Value *V =
2824 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2825
2826 if (isPromoted)
2827 V = emitArgumentDemotion(*this, Arg, V);
2828 ArgVals.push_back(ParamValue::forDirect(V));
2829 }
2830 break;
2831 }
2832
2833 case ABIArgInfo::Extend:
2834 case ABIArgInfo::Direct: {
2835 auto AI = Fn->getArg(FirstIRArg);
2836 llvm::Type *LTy = ConvertType(Arg->getType());
2837
2838 // Prepare parameter attributes. So far, only attributes for pointer
2839 // parameters are prepared. See
2840 // http://llvm.org/docs/LangRef.html#paramattrs.
2841 if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
2842 ArgI.getCoerceToType()->isPointerTy()) {
2843 assert(NumIRArgs == 1);
2844
2845 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2846 // Set `nonnull` attribute if any.
2847 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2848 PVD->getFunctionScopeIndex()) &&
2849 !CGM.getCodeGenOpts().NullPointerIsValid)
2850 AI->addAttr(llvm::Attribute::NonNull);
2851
2852 QualType OTy = PVD->getOriginalType();
2853 if (const auto *ArrTy =
2854 getContext().getAsConstantArrayType(OTy)) {
2855 // A C99 array parameter declaration with the static keyword also
2856 // indicates dereferenceability, and if the size is constant we can
2857 // use the dereferenceable attribute (which requires the size in
2858 // bytes).
2859 if (ArrTy->getSizeModifier() == ArrayType::Static) {
2860 QualType ETy = ArrTy->getElementType();
2861 llvm::Align Alignment =
2862 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
2863 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
2864 uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2865 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2866 ArrSize) {
2867 llvm::AttrBuilder Attrs(getLLVMContext());
2868 Attrs.addDereferenceableAttr(
2869 getContext().getTypeSizeInChars(ETy).getQuantity() *
2870 ArrSize);
2871 AI->addAttrs(Attrs);
2872 } else if (getContext().getTargetInfo().getNullPointerValue(
2873 ETy.getAddressSpace()) == 0 &&
2874 !CGM.getCodeGenOpts().NullPointerIsValid) {
2875 AI->addAttr(llvm::Attribute::NonNull);
2876 }
2877 }
2878 } else if (const auto *ArrTy =
2879 getContext().getAsVariableArrayType(OTy)) {
2880 // For C99 VLAs with the static keyword, we don't know the size so
2881 // we can't use the dereferenceable attribute, but in addrspace(0)
2882 // we know that it must be nonnull.
2883 if (ArrTy->getSizeModifier() == VariableArrayType::Static) {
2884 QualType ETy = ArrTy->getElementType();
2885 llvm::Align Alignment =
2886 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
2887 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
2888 if (!getTypes().getTargetAddressSpace(ETy) &&
2889 !CGM.getCodeGenOpts().NullPointerIsValid)
2890 AI->addAttr(llvm::Attribute::NonNull);
2891 }
2892 }
2893
2894 // Set `align` attribute if any.
2895 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2896 if (!AVAttr)
2897 if (const auto *TOTy = OTy->getAs<TypedefType>())
2898 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2899 if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
2900 // If alignment-assumption sanitizer is enabled, we do *not* add
2901 // alignment attribute here, but emit normal alignment assumption,
2902 // so the UBSAN check could function.
2903 llvm::ConstantInt *AlignmentCI =
2904 cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment()));
2905 uint64_t AlignmentInt =
2906 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
2907 if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
2908 AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
2909 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(
2910 llvm::Align(AlignmentInt)));
2911 }
2912 }
2913 }
2914
2915 // Set 'noalias' if an argument type has the `restrict` qualifier.
2916 if (Arg->getType().isRestrictQualified())
2917 AI->addAttr(llvm::Attribute::NoAlias);
2918 }
2919
2920 // Prepare the argument value. If we have the trivial case, handle it
2921 // with no muss and fuss.
2922 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2923 ArgI.getCoerceToType() == ConvertType(Ty) &&
2924 ArgI.getDirectOffset() == 0) {
2925 assert(NumIRArgs == 1);
2926
2927 // LLVM expects swifterror parameters to be used in very restricted
2928 // ways. Copy the value into a less-restricted temporary.
2929 llvm::Value *V = AI;
2930 if (FI.getExtParameterInfo(ArgNo).getABI()
2931 == ParameterABI::SwiftErrorResult) {
2932 QualType pointeeTy = Ty->getPointeeType();
2933 assert(pointeeTy->isPointerType());
2934 Address temp =
2935 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2936 Address arg(V, ConvertTypeForMem(pointeeTy),
2937 getContext().getTypeAlignInChars(pointeeTy));
2938 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2939 Builder.CreateStore(incomingErrorValue, temp);
2940 V = temp.getPointer();
2941
2942 // Push a cleanup to copy the value back at the end of the function.
2943 // The convention does not guarantee that the value will be written
2944 // back if the function exits with an unwind exception.
2945 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2946 }
2947
2948 // Ensure the argument is the correct type.
2949 if (V->getType() != ArgI.getCoerceToType())
2950 V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2951
2952 if (isPromoted)
2953 V = emitArgumentDemotion(*this, Arg, V);
2954
2955 // Because of merging of function types from multiple decls it is
2956 // possible for the type of an argument to not match the corresponding
2957 // type in the function type. Since we are codegening the callee
2958 // in here, add a cast to the argument type.
2959 llvm::Type *LTy = ConvertType(Arg->getType());
2960 if (V->getType() != LTy)
2961 V = Builder.CreateBitCast(V, LTy);
2962
2963 ArgVals.push_back(ParamValue::forDirect(V));
2964 break;
2965 }
2966
2967 // VLST arguments are coerced to VLATs at the function boundary for
2968 // ABI consistency. If this is a VLST that was coerced to
2969 // a VLAT at the function boundary and the types match up, use
2970 // llvm.vector.extract to convert back to the original VLST.
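// For example, an svint32_t declared with
// __attribute__((arm_sve_vector_bits(N))) is passed as a scalable
// <vscale x 4 x i32>; llvm.vector.extract recovers the fixed-length
// <N/32 x i32> value the function body actually uses.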
2971 if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
2972 llvm::Value *Coerced = Fn->getArg(FirstIRArg);
2973 if (auto *VecTyFrom =
2974 dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
2975 // If we are casting a scalable 16 x i1 predicate vector to a fixed i8
2976 // vector, bitcast the source and use a vector extract.
2977 auto PredType =
2978 llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
2979 if (VecTyFrom == PredType &&
2980 VecTyTo->getElementType() == Builder.getInt8Ty()) {
2981 VecTyFrom = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
2982 Coerced = Builder.CreateBitCast(Coerced, VecTyFrom);
2983 }
2984 if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
2985 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2986
2987 assert(NumIRArgs == 1);
2988 Coerced->setName(Arg->getName() + ".coerce");
2989 ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector(
2990 VecTyTo, Coerced, Zero, "castFixedSve")));
2991 break;
2992 }
2993 }
2994 }
2995
2996 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2997 Arg->getName());
2998
2999 // Pointer to store into.
3000 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
3001
3002 // Fast-isel and the optimizer generally like scalar values better than
3003 // FCAs, so we flatten them if this is safe to do for this argument.
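// For instance, an argument coerced to { i64, i64 } may arrive as two
// separate i64 IR arguments rather than one first-class aggregate; each
// piece is stored into the matching field of the temporary below.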
3004 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
3005 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
3006 STy->getNumElements() > 1) {
3007 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
3008 llvm::Type *DstTy = Ptr.getElementType();
3009 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
3010
3011 Address AddrToStoreInto = Address::invalid();
3012 if (SrcSize <= DstSize) {
3013 AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
3014 } else {
3015 AddrToStoreInto =
3016 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
3017 }
3018
3019 assert(STy->getNumElements() == NumIRArgs);
3020 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3021 auto AI = Fn->getArg(FirstIRArg + i);
3022 AI->setName(Arg->getName() + ".coerce" + Twine(i));
3023 Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
3024 Builder.CreateStore(AI, EltPtr);
3025 }
3026
3027 if (SrcSize > DstSize) {
3028 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
3029 }
3030
3031 } else {
3032 // Simple case, just do a coerced store of the argument into the alloca.
3033 assert(NumIRArgs == 1);
3034 auto AI = Fn->getArg(FirstIRArg);
3035 AI->setName(Arg->getName() + ".coerce");
3036 CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
3037 }
3038
3039 // Match to what EmitParmDecl is expecting for this type.
3040 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
3041 llvm::Value *V =
3042 EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
3043 if (isPromoted)
3044 V = emitArgumentDemotion(*this, Arg, V);
3045 ArgVals.push_back(ParamValue::forDirect(V));
3046 } else {
3047 ArgVals.push_back(ParamValue::forIndirect(Alloca));
3048 }
3049 break;
3050 }
3051
3052 case ABIArgInfo::CoerceAndExpand: {
3053 // Reconstruct into a temporary.
3054 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
3055 ArgVals.push_back(ParamValue::forIndirect(alloca));
3056
3057 auto coercionType = ArgI.getCoerceAndExpandType();
3058 alloca = Builder.CreateElementBitCast(alloca, coercionType);
3059
3060 unsigned argIndex = FirstIRArg;
3061 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3062 llvm::Type *eltType = coercionType->getElementType(i);
3063 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
3064 continue;
3065
3066 auto eltAddr = Builder.CreateStructGEP(alloca, i);
3067 auto elt = Fn->getArg(argIndex++);
3068 Builder.CreateStore(elt, eltAddr);
3069 }
3070 assert(argIndex == FirstIRArg + NumIRArgs);
3071 break;
3072 }
3073
3074 case ABIArgInfo::Expand: {
3075 // If this structure was expanded into multiple arguments then
3076 // we need to create a temporary and reconstruct it from the
3077 // arguments.
3078 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
3079 LValue LV = MakeAddrLValue(Alloca, Ty);
3080 ArgVals.push_back(ParamValue::forIndirect(Alloca));
3081
3082 auto FnArgIter = Fn->arg_begin() + FirstIRArg;
3083 ExpandTypeFromArgs(Ty, LV, FnArgIter);
3084 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
3085 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
3086 auto AI = Fn->getArg(FirstIRArg + i);
3087 AI->setName(Arg->getName() + "." + Twine(i));
3088 }
3089 break;
3090 }
3091
3092 case ABIArgInfo::Ignore:
3093 assert(NumIRArgs == 0);
3094 // Initialize the local variable appropriately.
3095 if (!hasScalarEvaluationKind(Ty)) {
3096 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
3097 } else {
3098 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
3099 ArgVals.push_back(ParamValue::forDirect(U));
3100 }
3101 break;
3102 }
3103 }
3104
3105 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3106 for (int I = Args.size() - 1; I >= 0; --I)
3107 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
3108 } else {
3109 for (unsigned I = 0, E = Args.size(); I != E; ++I)
3110 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
3111 }
3112}
3113
3114static void eraseUnusedBitCasts(llvm::Instruction *insn) {
3115 while (insn->use_empty()) {
3116 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
3117 if (!bitcast) return;
3118
3119 // This is "safe" because we would have used a ConstantExpr otherwise.
3120 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
3121 bitcast->eraseFromParent();
3122 }
3123}
3124
3125/// Try to emit a fused autorelease of a return result.
3126static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
3127 llvm::Value *result) {
3128 // We must be immediately followed by the cast.
3129 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
3130 if (BB->empty()) return nullptr;
3131 if (&BB->back() != result) return nullptr;
3132
3133 llvm::Type *resultType = result->getType();
3134
3135 // result is in a BasicBlock and is therefore an Instruction.
3136 llvm::Instruction *generator = cast<llvm::Instruction>(result);
3137
3138 SmallVector<llvm::Instruction *, 4> InstsToKill;
3139
3140 // Look for:
3141 // %generator = bitcast %type1* %generator2 to %type2*
3142 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
3143 // We would have emitted this as a constant if the operand weren't
3144 // an Instruction.
3145 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
3146
3147 // Require the generator to be immediately followed by the cast.
3148 if (generator->getNextNode() != bitcast)
3149 return nullptr;
3150
3151 InstsToKill.push_back(bitcast);
3152 }
3153
3154 // Look for:
3155 // %generator = call i8* @objc_retain(i8* %originalResult)
3156 // or
3157 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
3158 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
3159 if (!call) return nullptr;
3160
3161 bool doRetainAutorelease;
3162
3163 if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
3164 doRetainAutorelease = true;
3165 } else if (call->getCalledOperand() ==
3166 CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
3167 doRetainAutorelease = false;
3168
3169 // If we emitted an assembly marker for this call (and the
3170 // ARCEntrypoints field should have been set if so), go looking
3171 // for that call. If we can't find it, we can't do this
3172 // optimization. But it should always be the immediately previous
3173 // instruction, unless we needed bitcasts around the call.
3174 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
3175 llvm::Instruction *prev = call->getPrevNode();
3176 assert(prev);
3177 if (isa<llvm::BitCastInst>(prev)) {
3178 prev = prev->getPrevNode();
3179 assert(prev);
3180 }
3181 assert(isa<llvm::CallInst>(prev));
3182 assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
3183 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
3184 InstsToKill.push_back(prev);
3185 }
3186 } else {
3187 return nullptr;
3188 }
3189
3190 result = call->getArgOperand(0);
3191 InstsToKill.push_back(call);
3192
3193 // Keep killing bitcasts, for sanity. Note that we no longer care
3194 // about precise ordering as long as there's exactly one use.
3195 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
3196 if (!bitcast->hasOneUse()) break;
3197 InstsToKill.push_back(bitcast);
3198 result = bitcast->getOperand(0);
3199 }
3200
3201 // Delete all the unnecessary instructions, from latest to earliest.
3202 for (auto *I : InstsToKill)
3203 I->eraseFromParent();
3204
3205 // Do the fused retain/autorelease if we were asked to.
3206 if (doRetainAutorelease)
3207 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
3208
3209 // Cast back to the result type.
3210 return CGF.Builder.CreateBitCast(result, resultType);
3211}
3212
3213/// If this is a +1 of the value of an immutable 'self', remove it.
3214static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
3215 llvm::Value *result) {
3216 // This is only applicable to a method with an immutable 'self'.
3217 const ObjCMethodDecl *method =
3218 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
3219 if (!method) return nullptr;
3220 const VarDecl *self = method->getSelfDecl();
3221 if (!self->getType().isConstQualified()) return nullptr;
3222
3223 // Look for a retain call.
3224 llvm::CallInst *retainCall =
3225 dyn_cast<llvm::CallInst>(result->stripPointerCasts());
3226 if (!retainCall || retainCall->getCalledOperand() !=
3227 CGF.CGM.getObjCEntrypoints().objc_retain)
3228 return nullptr;
3229
3230 // Look for an ordinary load of 'self'.
3231 llvm::Value *retainedValue = retainCall->getArgOperand(0);
3232 llvm::LoadInst *load =
3233 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
3234 if (!load || load->isAtomic() || load->isVolatile() ||
3235 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
3236 return nullptr;
3237
3238 // Okay! Burn it all down. This relies for correctness on the
3239 // assumption that the retain is emitted as part of the return and
3240 // that thereafter everything is used "linearly".
3241 llvm::Type *resultType = result->getType();
3242 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
3243 assert(retainCall->use_empty());
3244 retainCall->eraseFromParent();
3245 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
3246
3247 return CGF.Builder.CreateBitCast(load, resultType);
3248}
3249
3250/// Emit an ARC autorelease of the result of a function.
3251///
3252/// \return the value to actually return from the function
3253static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
3254 llvm::Value *result) {
3255 // If we're returning 'self', kill the initial retain. This is a
3256 // heuristic attempt to "encourage correctness" in the really unfortunate
3257 // case where we have a return of self during a dealloc and we desperately
3258 // need to avoid the possible autorelease.
3259 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
3260 return self;
3261
3262 // At -O0, try to emit a fused retain/autorelease.
3263 if (CGF.shouldUseFusedARCCalls())
3264 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
3265 return fused;
3266
3267 return CGF.EmitARCAutoreleaseReturnValue(result);
3268}
3269
3270/// Heuristically search for a dominating store to the return-value slot.
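/// For example, when the body ends with 'return x;' a store of %x into the
/// return-value alloca has already been emitted; if that store dominates the
/// epilogue, the load can be elided, the store deleted, and the alloca
/// usually removed as well.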
3271static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
3272 // Check if a User is a store whose pointer operand is the ReturnValue.
3273 // We are looking for stores to the ReturnValue, not for stores of the
3274 // ReturnValue to some other location.
3275 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
3276 auto *SI = dyn_cast<llvm::StoreInst>(U);
3277 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer() ||
3278 SI->getValueOperand()->getType() != CGF.ReturnValue.getElementType())
3279 return nullptr;
3280 // These aren't actually possible for non-coerced returns, and we
3281 // only care about non-coerced returns on this code path.
3282 assert(!SI->isAtomic() && !SI->isVolatile());
3283 return SI;
3284 };
3285 // If there are multiple uses of the return-value slot, just check
3286 // for something immediately preceding the IP. Sometimes this can
3287 // happen with how we generate implicit-returns; it can also happen
3288 // with noreturn cleanups.
3289 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
3290 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3291 if (IP->empty()) return nullptr;
3292
3293 // Look at directly preceding instruction, skipping bitcasts and lifetime
3294 // markers.
3295 for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) {
3296 if (isa<llvm::BitCastInst>(&I))
3297 continue;
3298 if (auto *II = dyn_cast<llvm::IntrinsicInst>(&I))
3299 if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
3300 continue;
3301
3302 return GetStoreIfValid(&I);
3303 }
3304 return nullptr;
3305 }
3306
3307 llvm::StoreInst *store =
3308 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
3309 if (!store) return nullptr;
3310
3311 // Now do a first-and-dirty dominance check: just walk up the
3312 // single-predecessors chain from the current insertion point.
3313 llvm::BasicBlock *StoreBB = store->getParent();
3314 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3315 while (IP != StoreBB) {
3316 if (!(IP = IP->getSinglePredecessor()))
3317 return nullptr;
3318 }
3319
3320 // Okay, the store's basic block dominates the insertion point; we
3321 // can do our thing.
3322 return store;
3323}
3324
3325// Helper functions for EmitCMSEClearRecord
3326
3327// Set the bits corresponding to a field having width `BitWidth` and located at
3328// offset `BitOffset` (from the least significant bit) within a storage unit of
3329// `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
3330// Use little-endian layout, i.e.`Bits[0]` is the LSB.
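// For example, with CharWidth == 8, BitOffset == 10 and BitWidth == 12 the
// field occupies bits 2..7 of byte 1 and bits 0..5 of byte 2, so 0xFC is
// OR'ed into Bits[1] and 0x3F into Bits[2].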
3331static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset,
3332 int BitWidth, int CharWidth) {
3333 assert(CharWidth <= 64);
3334 assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
3335
3336 int Pos = 0;
3337 if (BitOffset >= CharWidth) {
3338 Pos += BitOffset / CharWidth;
3339 BitOffset = BitOffset % CharWidth;
3340 }
3341
3342 const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
3343 if (BitOffset + BitWidth >= CharWidth) {
3344 Bits[Pos++] |= (Used << BitOffset) & Used;
3345 BitWidth -= CharWidth - BitOffset;
3346 BitOffset = 0;
3347 }
3348
3349 while (BitWidth >= CharWidth) {
3350 Bits[Pos++] = Used;
3351 BitWidth -= CharWidth;
3352 }
3353
3354 if (BitWidth > 0)
3355 Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
3356}
3357
3358// Set the bits corresponding to a field having width `BitWidth` and located at
3359// offset `BitOffset` (from the least significant bit) within a storage unit of
3360// `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of
3361// `Bits` corresponds to one target byte. Use target endian layout.
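// The mask is first built in little-endian order into a temporary and, for
// big-endian targets, reversed byte-by-byte before being OR'ed into `Bits`
// starting at `StorageOffset`.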
3362static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset,
3363 int StorageSize, int BitOffset, int BitWidth,
3364 int CharWidth, bool BigEndian) {
3365
3366 SmallVector<uint64_t, 8> TmpBits(StorageSize);
3367 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
3368
3369 if (BigEndian)
3370 std::reverse(TmpBits.begin(), TmpBits.end());
3371
3372 for (uint64_t V : TmpBits)
3373 Bits[StorageOffset++] |= V;
3374}
3375
3376static void setUsedBits(CodeGenModule &, QualType, int,
3377 SmallVectorImpl<uint64_t> &);
3378
3379// Set the bits in `Bits`, which correspond to the value representations of
3380// the actual members of the record type `RTy`. Note that this function does
3381// not handle base classes, virtual tables, etc, since they cannot happen in
3382// CMSE function arguments or return. The bit mask corresponds to the target
3383// memory layout, i.e. it's endian dependent.
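// For example, for 'struct { char c; int i; }' (assuming a 4-byte, 4-aligned
// int) only bytes 0 and 4..7 are marked as used; the three padding bytes
// after 'c' stay zero and are later cleared by the CMSE helpers below.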
3384static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
3385 SmallVectorImpl<uint64_t> &Bits) {
3386 ASTContext &Context = CGM.getContext();
3387 int CharWidth = Context.getCharWidth();
3388 const RecordDecl *RD = RTy->getDecl()->getDefinition();
3389 const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
3390 const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);
3391
3392 int Idx = 0;
3393 for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
3394 const FieldDecl *F = *I;
3395
3396 if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) ||
3397 F->getType()->isIncompleteArrayType())
3398 continue;
3399
3400 if (F->isBitField()) {
3401 const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F);
3402 setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(),
3403 BFI.StorageSize / CharWidth, BFI.Offset,
3404 BFI.Size, CharWidth,
3405 CGM.getDataLayout().isBigEndian());
3406 continue;
3407 }
3408
3409 setUsedBits(CGM, F->getType(),
3410 Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits);
3411 }
3412}
3413
3414// Set the bits in `Bits`, which correspond to the value representations of
3415// the elements of an array type `ATy`.
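// The per-element bit pattern is computed once and then OR'ed into `Bits` at
// every element offset; e.g. for 'int a[3]' the four used bytes of one 'int'
// are replicated at offsets 0, 4 and 8.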
3416static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
3417 int Offset, SmallVectorImpl<uint64_t> &Bits) {
3418 const ASTContext &Context = CGM.getContext();
3419
3420 QualType ETy = Context.getBaseElementType(ATy);
3421 int Size = Context.getTypeSizeInChars(ETy).getQuantity();
3422 SmallVector<uint64_t, 4> TmpBits(Size);
3423 setUsedBits(CGM, ETy, 0, TmpBits);
3424
3425 for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
3426 auto Src = TmpBits.begin();
3427 auto Dst = Bits.begin() + Offset + I * Size;
3428 for (int J = 0; J < Size; ++J)
3429 *Dst++ |= *Src++;
3430 }
3431}
3432
3433// Set the bits in `Bits`, which correspond to the value representations of
3434// the type `QTy`.
3435static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
3436 SmallVectorImpl<uint64_t> &Bits) {
3437 if (const auto *RTy = QTy->getAs<RecordType>())
3438 return setUsedBits(CGM, RTy, Offset, Bits);
3439
3440 ASTContext &Context = CGM.getContext();
3441 if (const auto *ATy = Context.getAsConstantArrayType(QTy))
3442 return setUsedBits(CGM, ATy, Offset, Bits);
3443
3444 int Size = Context.getTypeSizeInChars(QTy).getQuantity();
3445 if (Size <= 0)
3446 return;
3447
3448 std::fill_n(Bits.begin() + Offset, Size,
3449 (uint64_t(1) << Context.getCharWidth()) - 1);
3450}
3451
3452static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
3453 int Pos, int Size, int CharWidth,
3454 bool BigEndian) {
3455 assert(Size > 0);
3456 uint64_t Mask = 0;
3457 if (BigEndian) {
3458 for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
3459 ++P)
3460 Mask = (Mask << CharWidth) | *P;
3461 } else {
3462 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
3463 do
3464 Mask = (Mask << CharWidth) | *--P;
3465 while (P != End);
3466 }
3467 return Mask;
3468}
3469
3470// Emit code to clear the bits in a record, which aren't a part of any user
3471// declared member, when the record is a function return.
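// For example, a return value of type 'struct { char c; short s; }' coming
// back as an i32 would, on a little-endian target with a 2-aligned short, be
// AND'ed with 0xFFFF00FF so the padding byte between 'c' and 's' cannot leak
// data across the security state boundary.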
3472llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3473 llvm::IntegerType *ITy,
3474 QualType QTy) {
3475 assert(Src->getType() == ITy);
3476 assert(ITy->getScalarSizeInBits() <= 64);
3477
3478 const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3479 int Size = DataLayout.getTypeStoreSize(ITy);
3480 SmallVector<uint64_t, 4> Bits(Size);
3481 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3482
3483 int CharWidth = CGM.getContext().getCharWidth();
3484 uint64_t Mask =
3485 buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());
3486
3487 return Builder.CreateAnd(Src, Mask, "cmse.clear");
3488}
3489
3490// Emit code to clear the bits in a record, which aren't a part of any user
3491// declared member, when the record is a function argument.
3492llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3493 llvm::ArrayType *ATy,
3494 QualType QTy) {
3495 const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3496 int Size = DataLayout.getTypeStoreSize(ATy);
3497 SmallVector<uint64_t, 16> Bits(Size);
3498 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3499
3500 // Clear each element of the LLVM array.
3501 int CharWidth = CGM.getContext().getCharWidth();
3502 int CharsPerElt =
3503 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3504 int MaskIndex = 0;
3505 llvm::Value *R = llvm::PoisonValue::get(ATy);
3506 for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3507 uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
3508 DataLayout.isBigEndian());
3509 MaskIndex += CharsPerElt;
3510 llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
3511 llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
3512 R = Builder.CreateInsertValue(R, T1, I);
3513 }
3514
3515 return R;
3516}
3517
3518void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
3519 bool EmitRetDbgLoc,
3520 SourceLocation EndLoc) {
3521 if (FI.isNoReturn()) {
3522 // Noreturn functions don't return.
3523 EmitUnreachable(EndLoc);
3524 return;
3525 }
3526
3527 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
3528 // Naked functions don't have epilogues.
3529 Builder.CreateUnreachable();
3530 return;
3531 }
3532
3533 // Functions with no result always return void.
3534 if (!ReturnValue.isValid()) {
3535 Builder.CreateRetVoid();
3536 return;
3537 }
3538
3539 llvm::DebugLoc RetDbgLoc;
3540 llvm::Value *RV = nullptr;
3541 QualType RetTy = FI.getReturnType();
3542 const ABIArgInfo &RetAI = FI.getReturnInfo();
3543
3544 switch (RetAI.getKind()) {
3545 case ABIArgInfo::InAlloca:
3546 // Aggregates get evaluated directly into the destination. Sometimes we
3547 // need to return the sret value in a register, though.
3548 assert(hasAggregateEvaluationKind(RetTy));
3549 if (RetAI.getInAllocaSRet()) {
3550 llvm::Function::arg_iterator EI = CurFn->arg_end();
3551 --EI;
3552 llvm::Value *ArgStruct = &*EI;
3553 llvm::Value *SRet = Builder.CreateStructGEP(
3554 FI.getArgStruct(), ArgStruct, RetAI.getInAllocaFieldIndex());
3555 llvm::Type *Ty =
3556 cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
3557 RV = Builder.CreateAlignedLoad(Ty, SRet, getPointerAlign(), "sret");
3558 }
3559 break;
3560
3561 case ABIArgInfo::Indirect: {
3562 auto AI = CurFn->arg_begin();
3563 if (RetAI.isSRetAfterThis())
3564 ++AI;
3565 switch (getEvaluationKind(RetTy)) {
3566 case TEK_Complex: {
3567 ComplexPairTy RT =
3568 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
3569 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
3570 /*isInit*/ true);
3571 break;
3572 }
3573 case TEK_Aggregate:
3574 // Do nothing; aggregates get evaluated directly into the destination.
3575 break;
3576 case TEK_Scalar: {
3577 LValueBaseInfo BaseInfo;
3578 TBAAAccessInfo TBAAInfo;
3579 CharUnits Alignment =
3580 CGM.getNaturalTypeAlignment(RetTy, &BaseInfo, &TBAAInfo);
3581 Address ArgAddr(&*AI, ConvertType(RetTy), Alignment);
3582 LValue ArgVal =
3583 LValue::MakeAddr(ArgAddr, RetTy, getContext(), BaseInfo, TBAAInfo);
3584 EmitStoreOfScalar(
3585 Builder.CreateLoad(ReturnValue), ArgVal, /*isInit*/ true);
3586 break;
3587 }
3588 }
3589 break;
3590 }
3591
3592 case ABIArgInfo::Extend:
3593 case ABIArgInfo::Direct:
3594 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
3595 RetAI.getDirectOffset() == 0) {
3596 // The internal return value temp will always have pointer-to-return-type
3597 // type, just do a load.
3598
3599 // If there is a dominating store to ReturnValue, we can elide
3600 // the load, zap the store, and usually zap the alloca.
3601 if (llvm::StoreInst *SI =
3602 findDominatingStoreToReturnValue(*this)) {
3603 // Reuse the debug location from the store unless there is
3604 // cleanup code to be emitted between the store and return
3605 // instruction.
3606 if (EmitRetDbgLoc && !AutoreleaseResult)
3607 RetDbgLoc = SI->getDebugLoc();
3608 // Get the stored value and nuke the now-dead store.
3609 RV = SI->getValueOperand();
3610 SI->eraseFromParent();
3611
3612 // Otherwise, we have to do a simple load.
3613 } else {
3614 RV = Builder.CreateLoad(ReturnValue);
3615 }
3616 } else {
3617 // If the value is offset in memory, apply the offset now.
3618 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
3619
3620 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
3621 }
3622
3623 // In ARC, end functions that return a retainable type with a call
3624 // to objc_autoreleaseReturnValue.
3625 if (AutoreleaseResult) {
3626#ifndef NDEBUG
3627 // Type::isObjCRetainableType has to be called on a QualType that hasn't
3628 // been stripped of the typedefs, so we cannot use RetTy here. Get the
3629 // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
3630 // CurCodeDecl or BlockInfo.
3631 QualType RT;
3632
3633 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
3634 RT = FD->getReturnType();
3635 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
3636 RT = MD->getReturnType();
3637 else if (isa<BlockDecl>(CurCodeDecl))
3638 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
3639 else
3640 llvm_unreachable("Unexpected function/method type")::llvm::llvm_unreachable_internal("Unexpected function/method type"
, "clang/lib/CodeGen/CGCall.cpp", 3640)
;
3641
3642 assert(getLangOpts().ObjCAutoRefCount &&
3643 !FI.isReturnsRetained() &&
3644 RT->isObjCRetainableType());
3645#endif
3646 RV = emitAutoreleaseOfResult(*this, RV);
3647 }
3648
3649 break;
3650
3651 case ABIArgInfo::Ignore:
3652 break;
3653
3654 case ABIArgInfo::CoerceAndExpand: {
3655 auto coercionType = RetAI.getCoerceAndExpandType();
3656
3657 // Load all of the coerced elements out into results.
3658 llvm::SmallVector<llvm::Value*, 4> results;
3659 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
3660 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3661 auto coercedEltType = coercionType->getElementType(i);
3662 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
3663 continue;
3664
3665 auto eltAddr = Builder.CreateStructGEP(addr, i);
3666 auto elt = Builder.CreateLoad(eltAddr);
3667 results.push_back(elt);
3668 }
3669
3670 // If we have one result, it's the single direct result type.
3671 if (results.size() == 1) {
3672 RV = results[0];
3673
3674 // Otherwise, we need to make a first-class aggregate.
3675 } else {
3676 // Construct a return type that lacks padding elements.
3677 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
3678
3679 RV = llvm::PoisonValue::get(returnType);
3680 for (unsigned i = 0, e = results.size(); i != e; ++i) {
3681 RV = Builder.CreateInsertValue(RV, results[i], i);
3682 }
3683 }
3684 break;
3685 }
3686 case ABIArgInfo::Expand:
3687 case ABIArgInfo::IndirectAliased:
3688 llvm_unreachable("Invalid ABI kind for return argument")::llvm::llvm_unreachable_internal("Invalid ABI kind for return argument"
, "clang/lib/CodeGen/CGCall.cpp", 3688)
;
3689 }
3690
3691 llvm::Instruction *Ret;
3692 if (RV) {
3693 if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
3694 // For certain return types, clear padding bits, as they may reveal
3695 // sensitive information.
3696 // Small struct/union types are passed as integers.
3697 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
3698 if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType()))
3699 RV = EmitCMSEClearRecord(RV, ITy, RetTy);
3700 }
3701 EmitReturnValueCheck(RV);
3702 Ret = Builder.CreateRet(RV);
3703 } else {
3704 Ret = Builder.CreateRetVoid();
3705 }
3706
3707 if (RetDbgLoc)
3708 Ret->setDebugLoc(std::move(RetDbgLoc));
3709}
3710
3711void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
3712 // A current decl may not be available when emitting vtable thunks.
3713 if (!CurCodeDecl)
3714 return;
3715
3716 // If the return block isn't reachable, neither is this check, so don't emit
3717 // it.
3718 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty())
3719 return;
3720
3721 ReturnsNonNullAttr *RetNNAttr = nullptr;
3722 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
3723 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
3724
3725 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
3726 return;
3727
3728 // Prefer the returns_nonnull attribute if it's present.
3729 SourceLocation AttrLoc;
3730 SanitizerMask CheckKind;
3731 SanitizerHandler Handler;
3732 if (RetNNAttr) {
3733 assert(!requiresReturnValueNullabilityCheck() &&
3734 "Cannot check nullability and the nonnull attribute");
3735 AttrLoc = RetNNAttr->getLocation();
3736 CheckKind = SanitizerKind::ReturnsNonnullAttribute;
3737 Handler = SanitizerHandler::NonnullReturn;
3738 } else {
3739 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3740 if (auto *TSI = DD->getTypeSourceInfo())
3741 if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>())
3742 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3743 CheckKind = SanitizerKind::NullabilityReturn;
3744 Handler = SanitizerHandler::NullabilityReturn;
3745 }
3746
3747 SanitizerScope SanScope(this);
3748
3749 // Make sure the "return" source location is valid. If we're checking a
3750 // nullability annotation, make sure the preconditions for the check are met.
3751 llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3752 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3753 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3754 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3755 if (requiresReturnValueNullabilityCheck())
3756 CanNullCheck =
3757 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3758 Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3759 EmitBlock(Check);
3760
3761 // Now do the null check.
3762 llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3763 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3764 llvm::Value *DynamicData[] = {SLocPtr};
3765 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3766
3767 EmitBlock(NoCheck);
3768
3769#ifndef NDEBUG
3770 // The return location should not be used after the check has been emitted.
3771 ReturnLocation = Address::invalid();
3772#endif
3773}
3774
3775static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3776 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3777 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3778}
3779
3780static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3781 QualType Ty) {
3782 // FIXME: Generate IR in one pass, rather than going back and fixing up these
3783 // placeholders.
3784 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3785 llvm::Type *IRPtrTy = IRTy->getPointerTo();
3786 llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy->getPointerTo());
3787
3788 // FIXME: When we generate this IR in one pass, we shouldn't need
3789 // this win32-specific alignment hack.
3790 CharUnits Align = CharUnits::fromQuantity(4);
3791 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3792
3793 return AggValueSlot::forAddr(Address(Placeholder, IRTy, Align),
3794 Ty.getQualifiers(),
3795 AggValueSlot::IsNotDestructed,
3796 AggValueSlot::DoesNotNeedGCBarriers,
3797 AggValueSlot::IsNotAliased,
3798 AggValueSlot::DoesNotOverlap);
3799}
3800
3801void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3802 const VarDecl *param,
3803 SourceLocation loc) {
3804 // StartFunction converted the ABI-lowered parameter(s) into a
3805 // local alloca. We need to turn that into an r-value suitable
3806 // for EmitCall.
3807 Address local = GetAddrOfLocalVar(param);
3808
3809 QualType type = param->getType();
3810
3811 if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3812 CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3813 }
3814
3815 // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3816 // but the argument needs to be the original pointer.
3817 if (type->isReferenceType()) {
3818 args.add(RValue::get(Builder.CreateLoad(local)), type);
3819
3820 // In ARC, move out of consumed arguments so that the release cleanup
3821 // entered by StartFunction doesn't cause an over-release. This isn't
3822 // optimal -O0 code generation, but it should get cleaned up when
3823 // optimization is enabled. This also assumes that delegate calls are
3824 // performed exactly once for a set of arguments, but that should be safe.
3825 } else if (getLangOpts().ObjCAutoRefCount &&
3826 param->hasAttr<NSConsumedAttr>() &&
3827 type->isObjCRetainableType()) {
3828 llvm::Value *ptr = Builder.CreateLoad(local);
3829 auto null =
3830 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3831 Builder.CreateStore(null, local);
3832 args.add(RValue::get(ptr), type);
3833
3834 // For the most part, we just need to load the alloca, except that
3835 // aggregate r-values are actually pointers to temporaries.
3836 } else {
3837 args.add(convertTempToRValue(local, type, loc), type);
3838 }
3839
3840 // Deactivate the cleanup for the callee-destructed param that was pushed.
3841 if (type->isRecordType() && !CurFuncIsThunk &&
3842 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3843 param->needsDestruction(getContext())) {
3844 EHScopeStack::stable_iterator cleanup =
3845 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3846 assert(cleanup.isValid() &&(static_cast <bool> (cleanup.isValid() && "cleanup for callee-destructed param not recorded"
) ? void (0) : __assert_fail ("cleanup.isValid() && \"cleanup for callee-destructed param not recorded\""
, "clang/lib/CodeGen/CGCall.cpp", 3847, __extension__ __PRETTY_FUNCTION__
))
3847 "cleanup for callee-destructed param not recorded")(static_cast <bool> (cleanup.isValid() && "cleanup for callee-destructed param not recorded"
) ? void (0) : __assert_fail ("cleanup.isValid() && \"cleanup for callee-destructed param not recorded\""
, "clang/lib/CodeGen/CGCall.cpp", 3847, __extension__ __PRETTY_FUNCTION__
))
;
3848 // This unreachable is a temporary marker which will be removed later.
3849 llvm::Instruction *isActive = Builder.CreateUnreachable();
3850 args.addArgCleanupDeactivation(cleanup, isActive);
3851 }
3852}
3853
3854static bool isProvablyNull(llvm::Value *addr) {
3855 return isa<llvm::ConstantPointerNull>(addr);
3856}
3857
3858/// Emit the actual writing-back of a writeback.
3859static void emitWriteback(CodeGenFunction &CGF,
3860 const CallArgList::Writeback &writeback) {
3861 const LValue &srcLV = writeback.Source;
3862 Address srcAddr = srcLV.getAddress(CGF);
3863 assert(!isProvablyNull(srcAddr.getPointer()) &&
3864 "shouldn't have writeback for provably null argument");
3865
3866 llvm::BasicBlock *contBB = nullptr;
3867
3868 // If the argument wasn't provably non-null, we need to null check
3869 // before doing the store.
3870 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3871 CGF.CGM.getDataLayout());
3872 if (!provablyNonNull) {
3873 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3874 contBB = CGF.createBasicBlock("icr.done");
3875
3876 llvm::Value *isNull =
3877 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3878 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3879 CGF.EmitBlock(writebackBB);
3880 }
3881
3882 // Load the value to writeback.
3883 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3884
3885 // Cast it back, in case we're writing an id to a Foo* or something.
3886 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3887 "icr.writeback-cast");
3888
3889 // Perform the writeback.
3890
3891 // If we have a "to use" value, it's something we need to emit a use
3892 // of. This has to be carefully threaded in: if it's done after the
3893 // release it's potentially undefined behavior (and the optimizer
3894 // will ignore it), and if it happens before the retain then the
3895 // optimizer could move the release there.
3896 if (writeback.ToUse) {
3897 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3898
3899 // Retain the new value. No need to block-copy here: the block's
3900 // being passed up the stack.
3901 value = CGF.EmitARCRetainNonBlock(value);
3902
3903 // Emit the intrinsic use here.
3904 CGF.EmitARCIntrinsicUse(writeback.ToUse);
3905
3906 // Load the old value (primitively).
3907 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3908
3909 // Put the new value in place (primitively).
3910 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3911
3912 // Release the old value.
3913 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3914
3915 // Otherwise, we can just do a normal lvalue store.
3916 } else {
3917 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3918 }
3919
3920 // Jump to the continuation block.
3921 if (!provablyNonNull)
3922 CGF.EmitBlock(contBB);
3923}
3924
3925static void emitWritebacks(CodeGenFunction &CGF,
3926 const CallArgList &args) {
3927 for (const auto &I : args.writebacks())
3928 emitWriteback(CGF, I);
3929}
3930
3931static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3932 const CallArgList &CallArgs) {
3933 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3934 CallArgs.getCleanupsToDeactivate();
3935 // Iterate in reverse to increase the likelihood of popping the cleanup.
3936 for (const auto &I : llvm::reverse(Cleanups)) {
3937 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3938 I.IsActiveIP->eraseFromParent();
3939 }
3940}
3941
3942static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3943 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3944 if (uop->getOpcode() == UO_AddrOf)
3945 return uop->getSubExpr();
3946 return nullptr;
3947}
3948
3949/// Emit an argument that's being passed call-by-writeback. That is,
3950/// we are passing the address of an __autoreleased temporary; it
3951/// might be copy-initialized with the current value of the given
3952/// address, but it will definitely be copied out of after the call.
3953static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3954 const ObjCIndirectCopyRestoreExpr *CRE) {
3955 LValue srcLV;
3956
3957 // Make an optimistic effort to emit the address as an l-value.
3958 // This can fail if the argument expression is more complicated.
3959 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3960 srcLV = CGF.EmitLValue(lvExpr);
3961
3962 // Otherwise, just emit it as a scalar.
3963 } else {
3964 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3965
3966 QualType srcAddrType =
3967 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3968 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3969 }
3970 Address srcAddr = srcLV.getAddress(CGF);
3971
3972 // The dest and src types don't necessarily match in LLVM terms
3973 // because of the crazy ObjC compatibility rules.
3974
3975 llvm::PointerType *destType =
3976 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3977 llvm::Type *destElemType =
3978 CGF.ConvertTypeForMem(CRE->getType()->getPointeeType());
3979
3980 // If the address is a constant null, just pass the appropriate null.
3981 if (isProvablyNull(srcAddr.getPointer())) {
3982 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3983 CRE->getType());
3984 return;
3985 }
3986
3987 // Create the temporary.
3988 Address temp =
3989 CGF.CreateTempAlloca(destElemType, CGF.getPointerAlign(), "icr.temp");
3990 // Loading an l-value can introduce a cleanup if the l-value is __weak,
3991 // and that cleanup will be conditional if we can't prove that the l-value
3992 // isn't null, so we need to register a dominating point so that the cleanups
3993 // system will make valid IR.
3994 CodeGenFunction::ConditionalEvaluation condEval(CGF);
3995
3996 // Zero-initialize it if we're not doing a copy-initialization.
3997 bool shouldCopy = CRE->shouldCopy();
3998 if (!shouldCopy) {
3999 llvm::Value *null =
4000 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType));
4001 CGF.Builder.CreateStore(null, temp);
4002 }
4003
4004 llvm::BasicBlock *contBB = nullptr;
4005 llvm::BasicBlock *originBB = nullptr;
4006
4007 // If the address is *not* known to be non-null, we need to switch.
4008 llvm::Value *finalArgument;
4009
4010 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
4011 CGF.CGM.getDataLayout());
4012 if (provablyNonNull) {
4013 finalArgument = temp.getPointer();
4014 } else {
4015 llvm::Value *isNull =
4016 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
4017
4018 finalArgument = CGF.Builder.CreateSelect(isNull,
4019 llvm::ConstantPointerNull::get(destType),
4020 temp.getPointer(), "icr.argument");
4021
4022 // If we need to copy, then the load has to be conditional, which
4023 // means we need control flow.
4024 if (shouldCopy) {
4025 originBB = CGF.Builder.GetInsertBlock();
4026 contBB = CGF.createBasicBlock("icr.cont");
4027 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
4028 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
4029 CGF.EmitBlock(copyBB);
4030 condEval.begin(CGF);
4031 }
4032 }
4033
4034 llvm::Value *valueToUse = nullptr;
4035
4036 // Perform a copy if necessary.
4037 if (shouldCopy) {
4038 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
4039 assert(srcRV.isScalar());
4040
4041 llvm::Value *src = srcRV.getScalarVal();
4042 src = CGF.Builder.CreateBitCast(src, destElemType, "icr.cast");
4043
4044 // Use an ordinary store, not a store-to-lvalue.
4045 CGF.Builder.CreateStore(src, temp);
4046
4047 // If optimization is enabled, and the value was held in a
4048 // __strong variable, we need to tell the optimizer that this
4049 // value has to stay alive until we're doing the store back.
4050 // This is because the temporary is effectively unretained,
4051 // and so otherwise we can violate the high-level semantics.
4052 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
4053 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
4054 valueToUse = src;
4055 }
4056 }
4057
4058 // Finish the control flow if we needed it.
4059 if (shouldCopy && !provablyNonNull) {
4060 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
4061 CGF.EmitBlock(contBB);
4062
4063 // Make a phi for the value to intrinsically use.
4064 if (valueToUse) {
4065 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
4066 "icr.to-use");
4067 phiToUse->addIncoming(valueToUse, copyBB);
4068 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
4069 originBB);
4070 valueToUse = phiToUse;
4071 }
4072
4073 condEval.end(CGF);
4074 }
4075
4076 args.addWriteback(srcLV, temp, valueToUse);
4077 args.add(RValue::get(finalArgument), CRE->getType());
4078}
4079
4080void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
4081 assert(!StackBase);
4082
4083 // Save the stack.
4084 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
4085 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
4086}
4087
4088void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
4089 if (StackBase) {
4090 // Restore the stack after the call.
4091 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
4092 CGF.Builder.CreateCall(F, StackBase);
4093 }
4094}
4095
4096void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
4097 SourceLocation ArgLoc,
4098 AbstractCallee AC,
4099 unsigned ParmNum) {
4100 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
4101 SanOpts.has(SanitizerKind::NullabilityArg)))
4102 return;
4103
4104 // The param decl may be missing in a variadic function.
4105 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
4106 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
4107
4108 // Prefer the nonnull attribute if it's present.
4109 const NonNullAttr *NNAttr = nullptr;
4110 if (SanOpts.has(SanitizerKind::NonnullAttribute))
4111 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
4112
4113 bool CanCheckNullability = false;
4114 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
4115 auto Nullability = PVD->getType()->getNullability(getContext());
4116 CanCheckNullability = Nullability &&
4117 *Nullability == NullabilityKind::NonNull &&
4118 PVD->getTypeSourceInfo();
4119 }
4120
4121 if (!NNAttr && !CanCheckNullability)
4122 return;
4123
4124 SourceLocation AttrLoc;
4125 SanitizerMask CheckKind;
4126 SanitizerHandler Handler;
4127 if (NNAttr) {
4128 AttrLoc = NNAttr->getLocation();
4129 CheckKind = SanitizerKind::NonnullAttribute;
4130 Handler = SanitizerHandler::NonnullArg;
4131 } else {
4132 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
4133 CheckKind = SanitizerKind::NullabilityArg;
4134 Handler = SanitizerHandler::NullabilityArg;
4135 }
4136
4137 SanitizerScope SanScope(this);
4138 llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType);
4139 llvm::Constant *StaticData[] = {
4140 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
4141 llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
4142 };
4143 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, std::nullopt);
4144}
4145
4146// Check if the call is going to use the inalloca convention. This needs to
4147// agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged
4148// later, so we can't check it directly.
4149static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC,
4150 ArrayRef<QualType> ArgTypes) {
4151 // The Swift calling conventions don't go through the target-specific
4152 // argument classification, they never use inalloca.
4153 // TODO: Consider limiting inalloca use to only calling conventions supported
4154 // by MSVC.
4155 if (ExplicitCC == CC_Swift || ExplicitCC == CC_SwiftAsync)
4156 return false;
4157 if (!CGM.getTarget().getCXXABI().isMicrosoft())
4158 return false;
4159 return llvm::any_of(ArgTypes, [&](QualType Ty) {
4160 return isInAllocaArgument(CGM.getCXXABI(), Ty);
4161 });
4162}
4163
4164#ifndef NDEBUG
4165// Determine whether the given argument is an Objective-C method
4166// that may have type parameters in its signature.
4167static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
4168 const DeclContext *dc = method->getDeclContext();
4169 if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) {
4170 return classDecl->getTypeParamListAsWritten();
4171 }
4172
4173 if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
4174 return catDecl->getTypeParamList();
4175 }
4176
4177 return false;
4178}
4179#endif
4180
4181/// EmitCallArgs - Emit call arguments for a function.
4182void CodeGenFunction::EmitCallArgs(
4183 CallArgList &Args, PrototypeWrapper Prototype,
4184 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4185 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
4186 SmallVector<QualType, 16> ArgTypes;
4187
4188 assert((ParamsToSkip == 0 || Prototype.P) &&
4189 "Can't skip parameters if type info is not provided");
4190
4191 // This variable only captures *explicitly* written conventions, not those
4192 // applied by default via command line flags or target defaults, such as
4193 // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would
4194 // require knowing if this is a C++ instance method or being able to see
4195 // unprototyped FunctionTypes.
4196 CallingConv ExplicitCC = CC_C;
4197
4198 // First, if a prototype was provided, use those argument types.
4199 bool IsVariadic = false;
4200 if (Prototype.P) {
4201 const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>();
4202 if (MD) {
4203 IsVariadic = MD->isVariadic();
4204 ExplicitCC = getCallingConventionForDecl(
4205 MD, CGM.getTarget().getTriple().isOSWindows());
4206 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4207 MD->param_type_end());
4208 } else {
4209 const auto *FPT = Prototype.P.get<const FunctionProtoType *>();
4210 IsVariadic = FPT->isVariadic();
4211 ExplicitCC = FPT->getExtInfo().getCC();
4212 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4213 FPT->param_type_end());
4214 }
4215
4216#ifndef NDEBUG
4217 // Check that the prototyped types match the argument expression types.
4218 bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD);
4219 CallExpr::const_arg_iterator Arg = ArgRange.begin();
4220 for (QualType Ty : ArgTypes) {
4221 assert(Arg != ArgRange.end() && "Running over edge of argument list!");
4222 assert(
4223 (isGenericMethod || Ty->isVariablyModifiedType() ||
4224 Ty.getNonReferenceType()->isObjCRetainableType() ||
4225 getContext()
4226 .getCanonicalType(Ty.getNonReferenceType())
4227 .getTypePtr() ==
4228 getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
4229 "type mismatch in call argument!");
4230 ++Arg;
4231 }
4232
4233 // Either we've emitted all the call args, or we have a call to variadic
4234 // function.
4235 assert((Arg == ArgRange.end() || IsVariadic) &&
4236 "Extra arguments in non-variadic function!");
4237#endif
4238 }
4239
4240 // If we still have any arguments, emit them using the type of the argument.
4241 for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
4242 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4243 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4244
4245 // We must evaluate arguments from right to left in the MS C++ ABI,
4246 // because arguments are destroyed left to right in the callee. As a special
4247 // case, there are certain language constructs that require left-to-right
4248 // evaluation, and in those cases we consider the evaluation order requirement
4249 // to trump the "destruction order is reverse construction order" guarantee.
4250 bool LeftToRight =
4251 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
4252 ? Order == EvaluationOrder::ForceLeftToRight
4253 : Order != EvaluationOrder::ForceRightToLeft;
4254
4255 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
4256 RValue EmittedArg) {
4257 if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
4258 return;
4259 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
4260 if (PS == nullptr)
4261 return;
4262
4263 const auto &Context = getContext();
4264 auto SizeTy = Context.getSizeType();
4265 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
4266 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
4267 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
4268 EmittedArg.getScalarVal(),
4269 PS->isDynamic());
4270 Args.add(RValue::get(V), SizeTy);
4271 // If we're emitting args in reverse, be sure to do so with
4272 // pass_object_size, as well.
4273 if (!LeftToRight)
4274 std::swap(Args.back(), *(&Args.back() - 1));
4275 };
4276
4277 // Insert a stack save if we're going to need any inalloca args.
4278 if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
4279 assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4280 "inalloca only supported on x86");
4281 Args.allocateArgumentMemory(*this);
4282 }
4283
4284 // Evaluate each argument in the appropriate order.
4285 size_t CallArgsStart = Args.size();
4286 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4287 unsigned Idx = LeftToRight ? I : E - I - 1;
4288 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
4289 unsigned InitialArgSize = Args.size();
4290 // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
4291 // the argument and parameter match or the objc method is parameterized.
4292 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||(static_cast <bool> ((!isa<ObjCIndirectCopyRestoreExpr
>(*Arg) || getContext().hasSameUnqualifiedType((*Arg)->
getType(), ArgTypes[Idx]) || (isa<ObjCMethodDecl>(AC.getDecl
()) && isObjCMethodWithTypeParams(cast<ObjCMethodDecl
>(AC.getDecl())))) && "Argument and parameter types don't match"
) ? void (0) : __assert_fail ("(!isa<ObjCIndirectCopyRestoreExpr>(*Arg) || getContext().hasSameUnqualifiedType((*Arg)->getType(), ArgTypes[Idx]) || (isa<ObjCMethodDecl>(AC.getDecl()) && isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) && \"Argument and parameter types don't match\""
, "clang/lib/CodeGen/CGCall.cpp", 4297, __extension__ __PRETTY_FUNCTION__
))
4293 getContext().hasSameUnqualifiedType((*Arg)->getType(),(static_cast <bool> ((!isa<ObjCIndirectCopyRestoreExpr
>(*Arg) || getContext().hasSameUnqualifiedType((*Arg)->
getType(), ArgTypes[Idx]) || (isa<ObjCMethodDecl>(AC.getDecl
()) && isObjCMethodWithTypeParams(cast<ObjCMethodDecl
>(AC.getDecl())))) && "Argument and parameter types don't match"
) ? void (0) : __assert_fail ("(!isa<ObjCIndirectCopyRestoreExpr>(*Arg) || getContext().hasSameUnqualifiedType((*Arg)->getType(), ArgTypes[Idx]) || (isa<ObjCMethodDecl>(AC.getDecl()) && isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) && \"Argument and parameter types don't match\""
, "clang/lib/CodeGen/CGCall.cpp", 4297, __extension__ __PRETTY_FUNCTION__
))
4294 ArgTypes[Idx]) ||(static_cast <bool> ((!isa<ObjCIndirectCopyRestoreExpr
>(*Arg) || getContext().hasSameUnqualifiedType((*Arg)->
getType(), ArgTypes[Idx]) || (isa<ObjCMethodDecl>(AC.getDecl
()) && isObjCMethodWithTypeParams(cast<ObjCMethodDecl
>(AC.getDecl())))) && "Argument and parameter types don't match"
) ? void (0) : __assert_fail ("(!isa<ObjCIndirectCopyRestoreExpr>(*Arg) || getContext().hasSameUnqualifiedType((*Arg)->getType(), ArgTypes[Idx]) || (isa<ObjCMethodDecl>(AC.getDecl()) && isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) && \"Argument and parameter types don't match\""
, "clang/lib/CodeGen/CGCall.cpp", 4297, __extension__ __PRETTY_FUNCTION__
))
4295 (isa<ObjCMethodDecl>(AC.getDecl()) &&(static_cast <bool> ((!isa<ObjCIndirectCopyRestoreExpr
>(*Arg) || getContext().hasSameUnqualifiedType((*Arg)->
getType(), ArgTypes[Idx]) || (isa<ObjCMethodDecl>(AC.getDecl
()) && isObjCMethodWithTypeParams(cast<ObjCMethodDecl
>(AC.getDecl())))) && "Argument and parameter types don't match"
) ? void (0) : __assert_fail ("(!isa<ObjCIndirectCopyRestoreExpr>(*Arg) || getContext().hasSameUnqualifiedType((*Arg)->getType(), ArgTypes[Idx]) || (isa<ObjCMethodDecl>(AC.getDecl()) && isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) && \"Argument and parameter types don't match\""
, "clang/lib/CodeGen/CGCall.cpp", 4297, __extension__ __PRETTY_FUNCTION__
))
4296 isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&(static_cast <bool> ((!isa<ObjCIndirectCopyRestoreExpr
>(*Arg) || getContext().hasSameUnqualifiedType((*Arg)->
getType(), ArgTypes[Idx]) || (isa<ObjCMethodDecl>(AC.getDecl
()) && isObjCMethodWithTypeParams(cast<ObjCMethodDecl
>(AC.getDecl())))) && "Argument and parameter types don't match"
) ? void (0) : __assert_fail ("(!isa<ObjCIndirectCopyRestoreExpr>(*Arg) || getContext().hasSameUnqualifiedType((*Arg)->getType(), ArgTypes[Idx]) || (isa<ObjCMethodDecl>(AC.getDecl()) && isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) && \"Argument and parameter types don't match\""
, "clang/lib/CodeGen/CGCall.cpp", 4297, __extension__ __PRETTY_FUNCTION__
))
4297 "Argument and parameter types don't match")(static_cast <bool> ((!isa<ObjCIndirectCopyRestoreExpr
>(*Arg) || getContext().hasSameUnqualifiedType((*Arg)->
getType(), ArgTypes[Idx]) || (isa<ObjCMethodDecl>(AC.getDecl
()) && isObjCMethodWithTypeParams(cast<ObjCMethodDecl
>(AC.getDecl())))) && "Argument and parameter types don't match"
) ? void (0) : __assert_fail ("(!isa<ObjCIndirectCopyRestoreExpr>(*Arg) || getContext().hasSameUnqualifiedType((*Arg)->getType(), ArgTypes[Idx]) || (isa<ObjCMethodDecl>(AC.getDecl()) && isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) && \"Argument and parameter types don't match\""
, "clang/lib/CodeGen/CGCall.cpp", 4297, __extension__ __PRETTY_FUNCTION__
))
;
4298 EmitCallArg(Args, *Arg, ArgTypes[Idx]);
4299 // In particular, we depend on it being the last arg in Args, and the
4300 // objectsize bits depend on there only being one arg if !LeftToRight.
4301 assert(InitialArgSize + 1 == Args.size() &&
4302 "The code below depends on only adding one arg per EmitCallArg");
4303 (void)InitialArgSize;
4304 // Since pointer arguments are never emitted as LValues, it is safe to emit
4305 // the non-null argument check for r-values only.
4306 if (!Args.back().hasLValue()) {
4307 RValue RVArg = Args.back().getKnownRValue();
4308 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
4309 ParamsToSkip + Idx);
4310 // @llvm.objectsize should never have side-effects and shouldn't need
4311 // destruction/cleanups, so we can safely "emit" it after its arg,
4312 // regardless of right-to-leftness
4313 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4314 }
4315 }
4316
4317 if (!LeftToRight) {
4318 // Un-reverse the arguments we just evaluated so they match up with the LLVM
4319 // IR function.
4320 std::reverse(Args.begin() + CallArgsStart, Args.end());
4321 }
4322}
4323
4324namespace {
4325
4326struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
4327 DestroyUnpassedArg(Address Addr, QualType Ty)
4328 : Addr(Addr), Ty(Ty) {}
4329
4330 Address Addr;
4331 QualType Ty;
4332
4333 void Emit(CodeGenFunction &CGF, Flags flags) override {
4334 QualType::DestructionKind DtorKind = Ty.isDestructedType();
4335 if (DtorKind == QualType::DK_cxx_destructor) {
4336 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
4337 assert(!Dtor->isTrivial())(static_cast <bool> (!Dtor->isTrivial()) ? void (0) :
__assert_fail ("!Dtor->isTrivial()", "clang/lib/CodeGen/CGCall.cpp"
, 4337, __extension__ __PRETTY_FUNCTION__))
;
4338 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
4339 /*Delegating=*/false, Addr, Ty);
4340 } else {
4341 CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
4342 }
4343 }
4344};
4345
4346struct DisableDebugLocationUpdates {
4347 CodeGenFunction &CGF;
4348 bool disabledDebugInfo;
4349 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
4350 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
4351 CGF.disableDebugInfo();
4352 }
4353 ~DisableDebugLocationUpdates() {
4354 if (disabledDebugInfo)
4355 CGF.enableDebugInfo();
4356 }
4357};
4358
4359} // end anonymous namespace
4360
4361RValue CallArg::getRValue(CodeGenFunction &CGF) const {
4362 if (!HasLV)
4363 return RV;
4364 LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
4365 CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
4366 LV.isVolatile());
4367 IsUsed = true;
4368 return RValue::getAggregate(Copy.getAddress(CGF));
4369}
4370
4371void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
4372 LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
4373 if (!HasLV && RV.isScalar())
4374 CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
4375 else if (!HasLV && RV.isComplex())
4376 CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
4377 else {
4378 auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
4379 LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
4380 // We assume that call args are never copied into subobjects.
4381 CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
4382 HasLV ? LV.isVolatileQualified()
4383 : RV.isVolatileQualified());
4384 }
4385 IsUsed = true;
4386}
4387
4388void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
4389 QualType type) {
4390 DisableDebugLocationUpdates Dis(*this, E);
4391 if (const ObjCIndirectCopyRestoreExpr *CRE
4392 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
4393 assert(getLangOpts().ObjCAutoRefCount);
4394 return emitWritebackArg(*this, args, CRE);
4395 }
4396
4397 assert(type->isReferenceType() == E->isGLValue() &&(static_cast <bool> (type->isReferenceType() == E->
isGLValue() && "reference binding to unmaterialized r-value!"
) ? void (0) : __assert_fail ("type->isReferenceType() == E->isGLValue() && \"reference binding to unmaterialized r-value!\""
, "clang/lib/CodeGen/CGCall.cpp", 4398, __extension__ __PRETTY_FUNCTION__
))
4398 "reference binding to unmaterialized r-value!")(static_cast <bool> (type->isReferenceType() == E->
isGLValue() && "reference binding to unmaterialized r-value!"
) ? void (0) : __assert_fail ("type->isReferenceType() == E->isGLValue() && \"reference binding to unmaterialized r-value!\""
, "clang/lib/CodeGen/CGCall.cpp", 4398, __extension__ __PRETTY_FUNCTION__
))
;
4399
4400 if (E->isGLValue()) {
4401 assert(E->getObjectKind() == OK_Ordinary)(static_cast <bool> (E->getObjectKind() == OK_Ordinary
) ? void (0) : __assert_fail ("E->getObjectKind() == OK_Ordinary"
, "clang/lib/CodeGen/CGCall.cpp", 4401, __extension__ __PRETTY_FUNCTION__
))
;
4402 return args.add(EmitReferenceBindingToExpr(E), type);
4403 }
4404
4405 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
4406
4407 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
4408 // However, we still have to push an EH-only cleanup in case we unwind before
4409 // we make it to the call.
4410 if (type->isRecordType() &&
4411 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
4412 // If we're using inalloca, use the argument memory. Otherwise, use a
4413 // temporary.
4414 AggValueSlot Slot = args.isUsingInAlloca()
4415 ? createPlaceholderSlot(*this, type) : CreateAggTemp(type, "agg.tmp");
4416
4417 bool DestroyedInCallee = true, NeedsEHCleanup = true;
4418 if (const auto *RD = type->getAsCXXRecordDecl())
4419 DestroyedInCallee = RD->hasNonTrivialDestructor();
4420 else
4421 NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
4422
4423 if (DestroyedInCallee)
4424 Slot.setExternallyDestructed();
4425
4426 EmitAggExpr(E, Slot);
4427 RValue RV = Slot.asRValue();
4428 args.add(RV, type);
4429
4430 if (DestroyedInCallee && NeedsEHCleanup) {
4431 // Create a no-op GEP between the placeholder and the cleanup so we can
4432 // RAUW it successfully. It also serves as a marker of the first
4433 // instruction where the cleanup is active.
4434 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
4435 type);
4436 // This unreachable is a temporary marker which will be removed later.
4437 llvm::Instruction *IsActive = Builder.CreateUnreachable();
4438 args.addArgCleanupDeactivation(EHStack.stable_begin(), IsActive);
4439 }
4440 return;
4441 }
4442
4443 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
4444 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
4445 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
4446 assert(L.isSimple());
4447 args.addUncopiedAggregate(L, type);
4448 return;
4449 }
4450
4451 args.add(EmitAnyExprToTemp(E), type);
4452}
4453
4454QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
4455 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
4456 // implicitly widens null pointer constants that are arguments to varargs
4457 // functions to pointer-sized ints.
4458 if (!getTarget().getTriple().isOSWindows())
4459 return Arg->getType();
4460
4461 if (Arg->getType()->isIntegerType() &&
4462 getContext().getTypeSize(Arg->getType()) <
4463 getContext().getTargetInfo().getPointerWidth(LangAS::Default) &&
4464 Arg->isNullPointerConstant(getContext(),
4465 Expr::NPC_ValueDependentIsNotNull)) {
4466 return getContext().getIntPtrType();
4467 }
4468
4469 return Arg->getType();
4470}
4471
4472// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4473// optimizer it can aggressively ignore unwind edges.
4474void
4475CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
4476 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
4477 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
4478 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
4479 CGM.getNoObjCARCExceptionsMetadata());
4480}
4481
4482/// Emits a call to the given no-arguments nounwind runtime function.
4483llvm::CallInst *
4484CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4485 const llvm::Twine &name) {
4486 return EmitNounwindRuntimeCall(callee, std::nullopt, name);
4487}
4488
4489/// Emits a call to the given nounwind runtime function.
4490llvm::CallInst *
4491CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4492 ArrayRef<llvm::Value *> args,
4493 const llvm::Twine &name) {
4494 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
4495 call->setDoesNotThrow();
4496 return call;
4497}
4498
4499/// Emits a simple call (never an invoke) to the given no-arguments
4500/// runtime function.
4501llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4502 const llvm::Twine &name) {
4503 return EmitRuntimeCall(callee, std::nullopt, name);
4504}
4505
4506// Calls which may throw must have operand bundles indicating which funclet
4507// they are nested within.
4508SmallVector<llvm::OperandBundleDef, 1>
4509CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
4510 // There is no need for a funclet operand bundle if we aren't inside a
4511 // funclet.
4512 if (!CurrentFuncletPad)
4513 return (SmallVector<llvm::OperandBundleDef, 1>());
4514
4515 // Skip intrinsics which cannot throw (as long as they don't lower into
4516 // regular function calls in the course of IR transformations).
4517 if (auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
4518 if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
4519 auto IID = CalleeFn->getIntrinsicID();
4520 if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
4521 return (SmallVector<llvm::OperandBundleDef, 1>());
4522 }
4523 }
4524
4525 SmallVector<llvm::OperandBundleDef, 1> BundleList;
4526 BundleList.emplace_back("funclet", CurrentFuncletPad);
4527 return BundleList;
4528}
4529
4530/// Emits a simple call (never an invoke) to the given runtime function.
4531llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4532 ArrayRef<llvm::Value *> args,
4533 const llvm::Twine &name) {
4534 llvm::CallInst *call = Builder.CreateCall(
4535 callee, args, getBundlesForFunclet(callee.getCallee()), name);
4536 call->setCallingConv(getRuntimeCC());
4537 return call;
4538}
4539
4540/// Emits a call or invoke to the given noreturn runtime function.
4541void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
4542 llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
4543 SmallVector<llvm::OperandBundleDef, 1> BundleList =
4544 getBundlesForFunclet(callee.getCallee());
4545
4546 if (getInvokeDest()) {
4547 llvm::InvokeInst *invoke =
4548 Builder.CreateInvoke(callee,
4549 getUnreachableBlock(),
4550 getInvokeDest(),
4551 args,
4552 BundleList);
4553 invoke->setDoesNotReturn();
4554 invoke->setCallingConv(getRuntimeCC());
4555 } else {
4556 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
4557 call->setDoesNotReturn();
4558 call->setCallingConv(getRuntimeCC());
4559 Builder.CreateUnreachable();
4560 }
4561}
4562
4563/// Emits a call or invoke instruction to the given nullary runtime function.
4564llvm::CallBase *
4565CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4566 const Twine &name) {
4567 return EmitRuntimeCallOrInvoke(callee, std::nullopt, name);
4568}
4569
4570/// Emits a call or invoke instruction to the given runtime function.
4571llvm::CallBase *
4572CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4573 ArrayRef<llvm::Value *> args,
4574 const Twine &name) {
4575 llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
4576 call->setCallingConv(getRuntimeCC());
4577 return call;
4578}
4579
4580/// Emits a call or invoke instruction to the given function, depending
4581/// on the current state of the EH stack.
4582llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
4583 ArrayRef<llvm::Value *> Args,
4584 const Twine &Name) {
4585 llvm::BasicBlock *InvokeDest = getInvokeDest();
4586 SmallVector<llvm::OperandBundleDef, 1> BundleList =
4587 getBundlesForFunclet(Callee.getCallee());
4588
4589 llvm::CallBase *Inst;
4590 if (!InvokeDest)
4591 Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
4592 else {
4593 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
4594 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
4595 Name);
4596 EmitBlock(ContBB);
4597 }
4598
4599 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4600 // optimizer it can aggressively ignore unwind edges.
4601 if (CGM.getLangOpts().ObjCAutoRefCount)
4602 AddObjCARCExceptionMetadata(Inst);
4603
4604 return Inst;
4605}
4606
4607void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
4608 llvm::Value *New) {
4609 DeferredReplacements.push_back(
4610 std::make_pair(llvm::WeakTrackingVH(Old), New));
4611}
4612
4613namespace {
4614
4615/// Specify given \p NewAlign as the alignment of return value attribute. If
4616/// such attribute already exists, re-set it to the maximal one of two options.
4617[[nodiscard]] llvm::AttributeList
4618maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
4619 const llvm::AttributeList &Attrs,
4620 llvm::Align NewAlign) {
4621 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
4622 if (CurAlign >= NewAlign)
4623 return Attrs;
4624 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
4625 return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
4626 .addRetAttribute(Ctx, AlignAttr);
4627}
4628
4629template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
4630protected:
4631 CodeGenFunction &CGF;
4632
4633 /// We do nothing if this is, or becomes, nullptr.
4634 const AlignedAttrTy *AA = nullptr;
4635
4636 llvm::Value *Alignment = nullptr; // May or may not be a constant.
4637 llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.
4638
4639 AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4640 : CGF(CGF_) {
4641 if (!FuncDecl)
4642 return;
4643 AA = FuncDecl->getAttr<AlignedAttrTy>();
4644 }
4645
4646public:
4647 /// If we can, materialize the alignment as an attribute on return value.
4648 [[nodiscard]] llvm::AttributeList
4649 TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
4650 if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
4651 return Attrs;
4652 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
4653 if (!AlignmentCI)
4654 return Attrs;
4655 // We may legitimately have non-power-of-2 alignment here.
4656 // If so, this is UB land, emit it via `@llvm.assume` instead.
4657 if (!AlignmentCI->getValue().isPowerOf2())
4658 return Attrs;
4659 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
4660 CGF.getLLVMContext(), Attrs,
4661 llvm::Align(
4662 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
4663 AA = nullptr; // We're done. Disallow doing anything else.
4664 return NewAttrs;
4665 }
4666
4667 /// Emit alignment assumption.
4668 /// This is a general fallback that we take if either there is an offset,
4669 /// or the alignment is variable or we are sanitizing for alignment.
4670 void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
4671 if (!AA)
4672 return;
4673 CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
4674 AA->getLocation(), Alignment, OffsetCI);
4675 AA = nullptr; // We're done. Disallow doing anything else.
4676 }
4677};
4678
4679/// Helper data structure to emit `AssumeAlignedAttr`.
4680class AssumeAlignedAttrEmitter final
4681 : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
4682public:
4683 AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4684 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4685 if (!AA)
4686 return;
4687 // It is guaranteed that the alignment/offset are constants.
4688 Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
4689 if (Expr *Offset = AA->getOffset()) {
4690 OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
4691 if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
4692 OffsetCI = nullptr;
4693 }
4694 }
4695};
4696
4697/// Helper data structure to emit `AllocAlignAttr`.
4698class AllocAlignAttrEmitter final
4699 : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
4700public:
4701 AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
4702 const CallArgList &CallArgs)
4703 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4704 if (!AA)
4705 return;
4706 // Alignment may or may not be a constant, and that is okay.
4707 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
4708 .getRValue(CGF)
4709 .getScalarVal();
4710 }
4711};
4712
4713} // namespace
4714
4715static unsigned getMaxVectorWidth(const llvm::Type *Ty) {
4716 if (auto *VT = dyn_cast<llvm::VectorType>(Ty))
4717 return VT->getPrimitiveSizeInBits().getKnownMinSize();
4718 if (auto *AT = dyn_cast<llvm::ArrayType>(Ty))
4719 return getMaxVectorWidth(AT->getElementType());
4720
4721 unsigned MaxVectorWidth = 0;
4722 if (auto *ST = dyn_cast<llvm::StructType>(Ty))
4723 for (auto *I : ST->elements())
4724 MaxVectorWidth = std::max(MaxVectorWidth, getMaxVectorWidth(I));
4725 return MaxVectorWidth;
4726}
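// Illustrative sketch: for an aggregate such as { <4 x float>, [2 x <8 x i16>] }
// the recursion above returns 128, the widest vector width in bits found
// anywhere in the type; scalable vectors contribute their known minimum size.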
4727
4728RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
4729 const CGCallee &Callee,
4730 ReturnValueSlot ReturnValue,
4731 const CallArgList &CallArgs,
4732 llvm::CallBase **callOrInvoke, bool IsMustTail,
4733 SourceLocation Loc) {
4734 // FIXME: We no longer need the types from CallArgs; lift up and simplify.
4735
4736 assert(Callee.isOrdinary() || Callee.isVirtual());
4737
4738 // Handle struct-return functions by passing a pointer to the
4739 // location that we would like to return into.
4740 QualType RetTy = CallInfo.getReturnType();
4741 const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
4742
4743 llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
4744
4745 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4746 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
4747 // We can only guarantee that a function is called from the correct
4748 // context/function based on the appropriate target attributes,
4749 // so only check in the case where we have both always_inline and target,
4750 // since otherwise we could be making a conditional call after a check for
4751 // the proper CPU features (and it won't cause code generation issues due to
4752 // function-based code generation).
4753 if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
4754 TargetDecl->hasAttr<TargetAttr>())
4755 checkTargetFeatures(Loc, FD);
4756
4757 // Some architectures (such as x86-64) have the ABI changed based on
4758 // attribute-target/features. Give them a chance to diagnose.
4759 CGM.getTargetCodeGenInfo().checkFunctionCallABI(
4760 CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
4761 }
4762
4763#ifndef NDEBUG
4764 if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
4765 // For an inalloca varargs function, we don't expect CallInfo to match the
4766 // function pointer's type, because the inalloca struct will have extra
4767 // fields in it for the varargs parameters. Code later in this function
4768 // bitcasts the function pointer to the type derived from CallInfo.
4769 //
4770 // In other cases, we assert that the types match up (until pointers stop
4771 // having pointee types).
4772 if (Callee.isVirtual())
4773 assert(IRFuncTy == Callee.getVirtualFunctionType());
4774 else {
4775 llvm::PointerType *PtrTy =
4776 llvm::cast<llvm::PointerType>(Callee.getFunctionPointer()->getType());
4777 assert(PtrTy->isOpaqueOrPointeeTypeMatches(IRFuncTy));
4778 }
4779 }
4780#endif
4781
4782 // 1. Set up the arguments.
4783
4784 // If we're using inalloca, insert the allocation after the stack save.
4785 // FIXME: Do this earlier rather than hacking it in here!
4786 Address ArgMemory = Address::invalid();
4787 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
4788 const llvm::DataLayout &DL = CGM.getDataLayout();
4789 llvm::Instruction *IP = CallArgs.getStackBase();
4790 llvm::AllocaInst *AI;
4791 if (IP) {
4792 IP = IP->getNextNode();
4793 AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
4794 "argmem", IP);
4795 } else {
4796 AI = CreateTempAlloca(ArgStruct, "argmem");
4797 }
4798 auto Align = CallInfo.getArgStructAlignment();
4799 AI->setAlignment(Align.getAsAlign());
4800 AI->setUsedWithInAlloca(true);
4801 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
4802 ArgMemory = Address(AI, ArgStruct, Align);
4803 }
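// Rough illustration (assumption about a typical trigger): CallInfo carries
// an argument struct mainly for 32-bit x86 MSVC-style calls that pass a
// non-trivially-copyable C++ object by value; "argmem" is then the inalloca
// slot in which those arguments are constructed in place.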
4804
4805 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
4806 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
4807
4808 // If the call returns a temporary with struct return, create a temporary
4809 // alloca to hold the result, unless one is given to us.
4810 Address SRetPtr = Address::invalid();
4811 Address SRetAlloca = Address::invalid();
4812 llvm::Value *UnusedReturnSizePtr = nullptr;
4813 if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
4814 if (!ReturnValue.isNull()) {
4815 SRetPtr = ReturnValue.getValue();
4816 } else {
4817 SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
4818 if (HaveInsertPoint() && ReturnValue.isUnused()) {
4819 llvm::TypeSize size =
4820 CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
4821 UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
4822 }
4823 }
4824 if (IRFunctionArgs.hasSRetArg()) {
4825 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
4826 } else if (RetAI.isInAlloca()) {
4827 Address Addr =
4828 Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
4829 Builder.CreateStore(SRetPtr.getPointer(), Addr);
4830 }
4831 }
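// Illustrative example (assumed source): for
//
//   struct Big { long a[8]; };
//   Big make_big();
//   Big b = make_big();   // indirect (sret) return on most targets
//
// ReturnValue usually points at 'b', so SRetPtr simply reuses it; when the
// result is unused, a fresh "tmp" alloca is created and wrapped in lifetime
// markers so its stack slot can be reclaimed right after the call.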
4832
4833 Address swiftErrorTemp = Address::invalid();
4834 Address swiftErrorArg = Address::invalid();
4835
4836 // When passing arguments using temporary allocas, we need to add the
4837 // appropriate lifetime markers. This vector keeps track of all the lifetime
4838 // markers that need to be ended right after the call.
4839 SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;
4840
4841 // Translate all of the arguments as necessary to match the IR lowering.
4842 assert(CallInfo.arg_size() == CallArgs.size() &&
4843 "Mismatch between function signature & arguments.");
4844 unsigned ArgNo = 0;
4845 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
4846 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
4847 I != E; ++I, ++info_it, ++ArgNo) {
4848 const ABIArgInfo &ArgInfo = info_it->info;
4849
4850 // Insert a padding argument to ensure proper alignment.
4851 if (IRFunctionArgs.hasPaddingArg(ArgNo))
4852 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
4853 llvm::UndefValue::get(ArgInfo.getPaddingType());
4854
4855 unsigned FirstIRArg, NumIRArgs;
4856 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
4857
4858 bool ArgHasMaybeUndefAttr =
4859 IsArgumentMaybeUndef(TargetDecl, CallInfo.getNumRequiredArgs(), ArgNo);
4860
4861 switch (ArgInfo.getKind()) {
4862 case ABIArgInfo::InAlloca: {
4863 assert(NumIRArgs == 0);
4864 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
4865 if (I->isAggregate()) {
4866 Address Addr = I->hasLValue()
4867 ? I->getKnownLValue().getAddress(*this)
4868 : I->getKnownRValue().getAggregateAddress();
4869 llvm::Instruction *Placeholder =
4870 cast<llvm::Instruction>(Addr.getPointer());
4871
4872 if (!ArgInfo.getInAllocaIndirect()) {
4873 // Replace the placeholder with the appropriate argument slot GEP.
4874 CGBuilderTy::InsertPoint IP = Builder.saveIP();
4875 Builder.SetInsertPoint(Placeholder);
4876 Addr = Builder.CreateStructGEP(ArgMemory,
4877 ArgInfo.getInAllocaFieldIndex());
4878 Builder.restoreIP(IP);
4879 } else {
4880 // For indirect things such as overaligned structs, replace the
4881 // placeholder with a regular aggregate temporary alloca. Store the
4882 // address of this alloca into the struct.
4883 Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
4884 Address ArgSlot = Builder.CreateStructGEP(
4885 ArgMemory, ArgInfo.getInAllocaFieldIndex());
4886 Builder.CreateStore(Addr.getPointer(), ArgSlot);
4887 }
4888 deferPlaceholderReplacement(Placeholder, Addr.getPointer());
4889 } else if (ArgInfo.getInAllocaIndirect()) {
4890 // Make a temporary alloca and store the address of it into the argument
4891 // struct.
4892 Address Addr = CreateMemTempWithoutCast(
4893 I->Ty, getContext().getTypeAlignInChars(I->Ty),
4894 "indirect-arg-temp");
4895 I->copyInto(*this, Addr);
4896 Address ArgSlot =
4897 Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4898 Builder.CreateStore(Addr.getPointer(), ArgSlot);
4899 } else {
4900 // Store the RValue into the argument struct.
4901 Address Addr =
4902 Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4903 // There are some cases where a trivial bitcast is not avoidable. The
4904 // definition of a type later in a translation unit may change its type
4905 // from {}* to (%struct.foo*)*.
4906 Addr = Builder.CreateElementBitCast(Addr, ConvertTypeForMem(I->Ty));
4907 I->copyInto(*this, Addr);
4908 }
4909 break;
4910 }
4911
4912 case ABIArgInfo::Indirect:
4913 case ABIArgInfo::IndirectAliased: {
4914 assert(NumIRArgs == 1);
4915 if (!I->isAggregate()) {
4916 // Make a temporary alloca to pass the argument.
4917 Address Addr = CreateMemTempWithoutCast(
4918 I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
4919
4920 llvm::Value *Val = Addr.getPointer();
4921 if (ArgHasMaybeUndefAttr)
4922 Val = Builder.CreateFreeze(Addr.getPointer());
4923 IRCallArgs[FirstIRArg] = Val;
4924
4925 I->copyInto(*this, Addr);
4926 } else {
4927 // We want to avoid creating an unnecessary temporary+copy here;
4928 // however, we need one in three cases:
4929 // 1. If the argument is not byval, and we are required to copy the
4930 // source. (This case doesn't occur on any common architecture.)
4931 // 2. If the argument is byval, RV is not sufficiently aligned, and
4932 // we cannot force it to be sufficiently aligned.
4933 // 3. If the argument is byval, but RV is not located in default
4934 // or alloca address space.
4935 Address Addr = I->hasLValue()
4936 ? I->getKnownLValue().getAddress(*this)
4937 : I->getKnownRValue().getAggregateAddress();
4938 llvm::Value *V = Addr.getPointer();
4939 CharUnits Align = ArgInfo.getIndirectAlign();
4940 const llvm::DataLayout *TD = &CGM.getDataLayout();
4941
4942 assert((FirstIRArg >= IRFuncTy->getNumParams() ||
4943 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
4944 TD->getAllocaAddrSpace()) &&
4945 "indirect argument must be in alloca address space");
4946
4947 bool NeedCopy = false;
4948
4949 if (Addr.getAlignment() < Align &&
4950 llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
4951 Align.getAsAlign()) {
4952 NeedCopy = true;
4953 } else if (I->hasLValue()) {
4954 auto LV = I->getKnownLValue();
4955 auto AS = LV.getAddressSpace();
4956
4957 if (!ArgInfo.getIndirectByVal() ||
4958 (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
4959 NeedCopy = true;
4960 }
4961 if (!getLangOpts().OpenCL) {
4962 if ((ArgInfo.getIndirectByVal() &&
4963 (AS != LangAS::Default &&
4964 AS != CGM.getASTAllocaAddressSpace()))) {
4965 NeedCopy = true;
4966 }
4967 }
4968 // For OpenCL even if RV is located in default or alloca address space
4969 // we don't want to perform address space cast for it.
4970 else if ((ArgInfo.getIndirectByVal() &&
4971 Addr.getType()->getAddressSpace() != IRFuncTy->
4972 getParamType(FirstIRArg)->getPointerAddressSpace())) {
4973 NeedCopy = true;
4974 }
4975 }
4976
4977 if (NeedCopy) {
4978 // Create an aligned temporary, and copy to it.
4979 Address AI = CreateMemTempWithoutCast(
4980 I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
4981 llvm::Value *Val = AI.getPointer();
4982 if (ArgHasMaybeUndefAttr)
4983 Val = Builder.CreateFreeze(AI.getPointer());
4984 IRCallArgs[FirstIRArg] = Val;
4985
4986 // Emit lifetime markers for the temporary alloca.
4987 llvm::TypeSize ByvalTempElementSize =
4988 CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
4989 llvm::Value *LifetimeSize =
4990 EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
4991
4992 // Add cleanup code to emit the end lifetime marker after the call.
4993 if (LifetimeSize) // In case we disabled lifetime markers.
4994 CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
4995
4996 // Generate the copy.
4997 I->copyInto(*this, AI);
4998 } else {
4999 // Skip the extra memcpy call.
5000 auto *T = llvm::PointerType::getWithSamePointeeType(
5001 cast<llvm::PointerType>(V->getType()),
5002 CGM.getDataLayout().getAllocaAddrSpace());
5003
5004 llvm::Value *Val = getTargetHooks().performAddrSpaceCast(
5005 *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
5006 true);
5007 if (ArgHasMaybeUndefAttr)
5008 Val = Builder.CreateFreeze(Val);
5009 IRCallArgs[FirstIRArg] = Val;
5010 }
5011 }
5012 break;
5013 }
5014
5015 case ABIArgInfo::Ignore:
5016 assert(NumIRArgs == 0);
5017 break;
5018
5019 case ABIArgInfo::Extend:
5020 case ABIArgInfo::Direct: {
5021 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
5022 ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
5023 ArgInfo.getDirectOffset() == 0) {
5024 assert(NumIRArgs == 1);
5025 llvm::Value *V;
5026 if (!I->isAggregate())
5027 V = I->getKnownRValue().getScalarVal();
5028 else
5029 V = Builder.CreateLoad(
5030 I->hasLValue() ? I->getKnownLValue().getAddress(*this)
5031 : I->getKnownRValue().getAggregateAddress());
5032
5033 // Implement swifterror by copying into a new swifterror argument.
5034 // We'll write back in the normal path out of the call.
5035 if (CallInfo.getExtParameterInfo(ArgNo).getABI()
5036 == ParameterABI::SwiftErrorResult) {
5037 assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
5038
5039 QualType pointeeTy = I->Ty->getPointeeType();
5040 swiftErrorArg = Address(V, ConvertTypeForMem(pointeeTy),
5041 getContext().getTypeAlignInChars(pointeeTy));
5042
5043 swiftErrorTemp =
5044 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
5045 V = swiftErrorTemp.getPointer();
5046 cast<llvm::AllocaInst>(V)->setSwiftError(true);
5047
5048 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
5049 Builder.CreateStore(errorValue, swiftErrorTemp);
5050 }
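// Illustrative note (Swift interop): for a swift_error_result pointer
// parameter, the caller's error slot is remembered in swiftErrorArg, a
// dedicated "swifterror.temp" alloca is passed to the callee instead, and
// its value is written back to the original slot after the call (see the
// swifterror writeback further below).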
5051
5052 // We might have to widen integers, but we should never truncate.
5053 if (ArgInfo.getCoerceToType() != V->getType() &&
5054 V->getType()->isIntegerTy())
5055 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
5056
5057 // If the argument doesn't match, perform a bitcast to coerce it. This
5058 // can happen due to trivial type mismatches.
5059 if (FirstIRArg < IRFuncTy->getNumParams() &&
5060 V->getType() != IRFuncTy->getParamType(FirstIRArg))
5061 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
5062
5063 if (ArgHasMaybeUndefAttr)
5064 V = Builder.CreateFreeze(V);
5065 IRCallArgs[FirstIRArg] = V;
5066 break;
5067 }
5068
5069 // FIXME: Avoid the conversion through memory if possible.
5070 Address Src = Address::invalid();
5071 if (!I->isAggregate()) {
5072 Src = CreateMemTemp(I->Ty, "coerce");
5073 I->copyInto(*this, Src);
5074 } else {
5075 Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
5076 : I->getKnownRValue().getAggregateAddress();
5077 }
5078
5079 // If the value is offset in memory, apply the offset now.
5080 Src = emitAddressAtOffset(*this, Src, ArgInfo);
5081
5082 // Fast-isel and the optimizer generally like scalar values better than
5083 // FCAs, so we flatten them if this is safe to do for this argument.
5084 llvm::StructType *STy =
5085 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
5086 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
5087 llvm::Type *SrcTy = Src.getElementType();
5088 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
5089 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
5090
5091 // If the source type is smaller than the destination type of the
5092 // coerce-to logic, copy the source value into a temp alloca the size
5093 // of the destination type to allow loading all of it. The bits past
5094 // the source value are left undef.
5095 if (SrcSize < DstSize) {
5096 Address TempAlloca
5097 = CreateTempAlloca(STy, Src.getAlignment(),
5098 Src.getName() + ".coerce");
5099 Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
5100 Src = TempAlloca;
5101 } else {
5102 Src = Builder.CreateElementBitCast(Src, STy);
5103 }
5104
5105 assert(NumIRArgs == STy->getNumElements());
5106 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5107 Address EltPtr = Builder.CreateStructGEP(Src, i);
5108 llvm::Value *LI = Builder.CreateLoad(EltPtr);
5109 if (ArgHasMaybeUndefAttr)
5110 LI = Builder.CreateFreeze(LI);
5111 IRCallArgs[FirstIRArg + i] = LI;
5112 }
5113 } else {
5114 // In the simple case, just pass the coerced loaded value.
5115 assert(NumIRArgs == 1);
5116 llvm::Value *Load =
5117 CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
5118
5119 if (CallInfo.isCmseNSCall()) {
5120 // For certain parameter types, clear padding bits, as they may reveal
5121 // sensitive information.
5122 // Small struct/union types are passed as integer arrays.
5123 auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
5124 if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
5125 Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
5126 }
5127
5128 if (ArgHasMaybeUndefAttr)
5129 Load = Builder.CreateFreeze(Load);
5130 IRCallArgs[FirstIRArg] = Load;
5131 }
5132
5133 break;
5134 }
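// Illustrative sketch: a small aggregate coerced to { i64, i64 } with
// getCanBeFlattened() goes through the flattening loop above and is passed
// as two separate i64 arguments; when the in-memory source is smaller than
// the coerced type, it is first copied into a wider ".coerce" temporary so
// every element can be loaded without reading past the object.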
5135
5136 case ABIArgInfo::CoerceAndExpand: {
5137 auto coercionType = ArgInfo.getCoerceAndExpandType();
5138 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
5139
5140 llvm::Value *tempSize = nullptr;
5141 Address addr = Address::invalid();
5142 Address AllocaAddr = Address::invalid();
5143 if (I->isAggregate()) {
5144 addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
5145 : I->getKnownRValue().getAggregateAddress();
5146
5147 } else {
5148 RValue RV = I->getKnownRValue();
5149 assert(RV.isScalar()); // complex should always just be direct
5150
5151 llvm::Type *scalarType = RV.getScalarVal()->getType();
5152 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
5153 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
5154
5155 // Materialize to a temporary.
5156 addr =
5157 CreateTempAlloca(RV.getScalarVal()->getType(),
5158 CharUnits::fromQuantity(std::max(
5159 layout->getAlignment().value(), scalarAlign)),
5160 "tmp",
5161 /*ArraySize=*/nullptr, &AllocaAddr);
5162 tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
5163
5164 Builder.CreateStore(RV.getScalarVal(), addr);
5165 }
5166
5167 addr = Builder.CreateElementBitCast(addr, coercionType);
5168
5169 unsigned IRArgPos = FirstIRArg;
5170 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5171 llvm::Type *eltType = coercionType->getElementType(i);
5172 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
5173 Address eltAddr = Builder.CreateStructGEP(addr, i);
5174 llvm::Value *elt = Builder.CreateLoad(eltAddr);
5175 if (ArgHasMaybeUndefAttr)
5176 elt = Builder.CreateFreeze(elt);
5177 IRCallArgs[IRArgPos++] = elt;
5178 }
5179 assert(IRArgPos == FirstIRArg + NumIRArgs);
5180
5181 if (tempSize) {
5182 EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
5183 }
5184
5185 break;
5186 }
5187
5188 case ABIArgInfo::Expand: {
5189 unsigned IRArgPos = FirstIRArg;
5190 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
5191 assert(IRArgPos == FirstIRArg + NumIRArgs);
5192 break;
5193 }
5194 }
5195 }
5196
5197 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
5198 llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
5199
5200 // If we're using inalloca, set up that argument.
5201 if (ArgMemory.isValid()) {
5202 llvm::Value *Arg = ArgMemory.getPointer();
5203 if (CallInfo.isVariadic()) {
5204 // When passing non-POD arguments by value to variadic functions, we will
5205 // end up with a variadic prototype and an inalloca call site. In such
5206 // cases, we can't do any parameter mismatch checks. Give up and bitcast
5207 // the callee.
5208 unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
5209 CalleePtr =
5210 Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
5211 } else {
5212 llvm::Type *LastParamTy =
5213 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
5214 if (Arg->getType() != LastParamTy) {
5215#ifndef NDEBUG
5216 // Assert that these structs have equivalent element types.
5217 llvm::StructType *FullTy = CallInfo.getArgStruct();
5218 if (!LastParamTy->isOpaquePointerTy()) {
5219 llvm::StructType *DeclaredTy = cast<llvm::StructType>(
5220 LastParamTy->getNonOpaquePointerElementType());
5221 assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
5222 for (auto DI = DeclaredTy->element_begin(),
5223 DE = DeclaredTy->element_end(),
5224 FI = FullTy->element_begin();
5225 DI != DE; ++DI, ++FI)
5226 assert(*DI == *FI);
5227 }
5228#endif
5229 Arg = Builder.CreateBitCast(Arg, LastParamTy);
5230 }
5231 }
5232 assert(IRFunctionArgs.hasInallocaArg());
5233 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
5234 }
5235
5236 // 2. Prepare the function pointer.
5237
5238 // If the callee is a bitcast of a non-variadic function to have a
5239 // variadic function pointer type, check to see if we can remove the
5240 // bitcast. This comes up with unprototyped functions.
5241 //
5242 // This makes the IR nicer, but more importantly it ensures that we
5243 // can inline the function at -O0 if it is marked always_inline.
5244 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
5245 llvm::Value *Ptr) -> llvm::Function * {
5246 if (!CalleeFT->isVarArg())
5247 return nullptr;
5248
5249 // Get underlying value if it's a bitcast
5250 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
5251 if (CE->getOpcode() == llvm::Instruction::BitCast)
5252 Ptr = CE->getOperand(0);
5253 }
5254
5255 llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
5256 if (!OrigFn)
5257 return nullptr;
5258
5259 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
5260
5261 // If the original type is variadic, or if any of the component types
5262 // disagree, we cannot remove the cast.
5263 if (OrigFT->isVarArg() ||
5264 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
5265 OrigFT->getReturnType() != CalleeFT->getReturnType())
5266 return nullptr;
5267
5268 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
5269 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
5270 return nullptr;
5271
5272 return OrigFn;
5273 };
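// Illustrative note: this typically fires for C code that calls through an
// old-style, unprototyped declaration (e.g. `void work();`) of a function
// whose definition has a fixed signature; the call is then formed through a
// variadic function-pointer bitcast, which this lambda peels away so the
// callee can still be inlined at -O0 when marked always_inline.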
5274
5275 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
5276 CalleePtr = OrigFn;
5277 IRFuncTy = OrigFn->getFunctionType();
5278 }
5279
5280 // 3. Perform the actual call.
5281
5282 // Deactivate any cleanups that we're supposed to do immediately before
5283 // the call.
5284 if (!CallArgs.getCleanupsToDeactivate().empty())
5285 deactivateArgCleanupsBeforeCall(*this, CallArgs);
5286
5287 // Assert that the arguments we computed match up. The IR verifier
5288 // will catch this, but this is a common enough source of problems
5289 // during IRGen changes that it's way better for debugging to catch
5290 // it ourselves here.
5291#ifndef NDEBUG
5292 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
5293 for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
5294 // Inalloca argument can have a different type.
5295 if (IRFunctionArgs.hasInallocaArg() &&
5296 i == IRFunctionArgs.getInallocaArgNo())
5297 continue;
5298 if (i < IRFuncTy->getNumParams())
5299 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
5300 }
5301#endif
5302
5303 // Update the largest vector width if any arguments have vector types.
5304 for (unsigned i = 0; i < IRCallArgs.size(); ++i)
5305 LargestVectorWidth = std::max(LargestVectorWidth,
5306 getMaxVectorWidth(IRCallArgs[i]->getType()));
5307
5308 // Compute the calling convention and attributes.
5309 unsigned CallingConv;
5310 llvm::AttributeList Attrs;
5311 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
5312 Callee.getAbstractInfo(), Attrs, CallingConv,
5313 /*AttrOnCallSite=*/true,
5314 /*IsThunk=*/false);
5315
5316 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
5317 if (FD->hasAttr<StrictFPAttr>())
5318 // All calls within a strictfp function are marked strictfp
5319 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
5320
5321 // Add call-site nomerge attribute if it exists.
5322 if (InNoMergeAttributedStmt)
5323 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoMerge);
5324
5325 // Add call-site noinline attribute if it exists.
5326 if (InNoInlineAttributedStmt)
5327 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
5328
5329 // Add call-site always_inline attribute if it exists.
5330 if (InAlwaysInlineAttributedStmt)
5331 Attrs =
5332 Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
5333
5334 // Apply some call-site-specific attributes.
5335 // TODO: work this into building the attribute set.
5336
5337 // Apply always_inline to all calls within flatten functions.
5338 // FIXME: should this really take priority over __try, below?
5339 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
5340 !InNoInlineAttributedStmt &&
5341 !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
5342 Attrs =
5343 Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
5344 }
5345
5346 // Disable inlining inside SEH __try blocks.
5347 if (isSEHTryScope()) {
5348 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
5349 }
5350
5351 // Decide whether to use a call or an invoke.
5352 bool CannotThrow;
5353 if (currentFunctionUsesSEHTry()) {
5354 // SEH cares about asynchronous exceptions, so everything can "throw."
5355 CannotThrow = false;
5356 } else if (isCleanupPadScope() &&
5357 EHPersonality::get(*this).isMSVCXXPersonality()) {
5358 // The MSVC++ personality will implicitly terminate the program if an
5359 // exception is thrown during a cleanup outside of a try/catch.
5360 // We don't need to model anything in IR to get this behavior.
5361 CannotThrow = true;
5362 } else {
5363 // Otherwise, nounwind call sites will never throw.
5364 CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
5365
5366 if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
5367 if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
5368 CannotThrow = true;
5369 }
5370
5371 // If we made a temporary, be sure to clean up after ourselves. Note that we
5372 // can't depend on being inside of an ExprWithCleanups, so we need to manually
5373 // pop this cleanup later on. Being eager about this is OK, since this
5374 // temporary is 'invisible' outside of the callee.
5375 if (UnusedReturnSizePtr)
5376 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
5377 UnusedReturnSizePtr);
5378
5379 llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
5380
5381 SmallVector<llvm::OperandBundleDef, 1> BundleList =
5382 getBundlesForFunclet(CalleePtr);
5383
5384 if (SanOpts.has(SanitizerKind::KCFI) &&
5385 !isa_and_nonnull<FunctionDecl>(TargetDecl))
5386 EmitKCFIOperandBundle(ConcreteCallee, BundleList);
5387
5388 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
5389 if (FD->hasAttr<StrictFPAttr>())
5390 // All calls within a strictfp function are marked strictfp
5391 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
5392
5393 AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
5394 Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5395
5396 AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
5397 Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5398
5399 // Emit the actual call/invoke instruction.
5400 llvm::CallBase *CI;
5401 if (!InvokeDest) {
5402 CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
5403 } else {
5404 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
5405 CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
5406 BundleList);
5407 EmitBlock(Cont);
5408 }
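// Illustrative consequence: inside a scope with live cleanups or an
// enclosing try, InvokeDest is non-null and the call is emitted as an
// `invoke` whose normal edge continues in "invoke.cont"; for nounwind
// callees a plain `call` is emitted instead.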
5409 if (callOrInvoke)
5410 *callOrInvoke = CI;
5411
5412 // If this is within a function that has the guard(nocf) attribute and is an
5413 // indirect call, add the "guard_nocf" attribute to this call to indicate that
5414 // Control Flow Guard checks should not be added, even if the call is inlined.
5415 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
5416 if (const auto *A = FD->getAttr<CFGuardAttr>()) {
5417 if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
5418 Attrs = Attrs.addFnAttribute(getLLVMContext(), "guard_nocf");
5419 }
5420 }
5421
5422 // Apply the attributes and calling convention.
5423 CI->setAttributes(Attrs);
5424 CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
5425
5426 // Apply various metadata.
5427
5428 if (!CI->getType()->isVoidTy())
5429 CI->setName("call");
5430
5431 // Update largest vector width from the return type.
5432 LargestVectorWidth =
5433 std::max(LargestVectorWidth, getMaxVectorWidth(CI->getType()));
5434
5435 // Insert instrumentation or attach profile metadata at indirect call sites.
5436 // For more details, see the comment before the definition of
5437 // IPVK_IndirectCallTarget in InstrProfData.inc.
5438 if (!CI->getCalledFunction())
5439 PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
5440 CI, CalleePtr);
5441
5442 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
5443 // optimizer it can aggressively ignore unwind edges.
5444 if (CGM.getLangOpts().ObjCAutoRefCount)
5445 AddObjCARCExceptionMetadata(CI);
5446
5447 // Set tail call kind if necessary.
5448 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
5449 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
5450 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
5451 else if (IsMustTail)
5452 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
5453 }
5454
5455 // Add metadata for calls to MSAllocator functions
5456 if (getDebugInfo() && TargetDecl &&
5457 TargetDecl->hasAttr<MSAllocatorAttr>())
5458 getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);
5459
5460 // Add metadata if calling an __attribute__((error(""))) or warning fn.
5461 if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) {
5462 llvm::ConstantInt *Line =
5463 llvm::ConstantInt::get(Int32Ty, Loc.getRawEncoding());
5464 llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(Line);
5465 llvm::MDTuple *MDT = llvm::MDNode::get(getLLVMContext(), {MD});
5466 CI->setMetadata("srcloc", MDT);
5467 }
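// Illustrative example (assumed source): a declaration such as
//
//   void must_not_be_called(void) __attribute__((error("do not call")));
//
// makes every emitted call carry "srcloc" metadata with the raw source
// location, which the backend later uses to point its error or warning
// diagnostic at the offending call site.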
5468
5469 // 4. Finish the call.
5470
5471 // If the call doesn't return, finish the basic block and clear the
5472 // insertion point; this allows the rest of IRGen to discard
5473 // unreachable code.
5474 if (CI->doesNotReturn()) {
5475 if (UnusedReturnSizePtr)
5476 PopCleanupBlock();
5477
5478 // Strip away the noreturn attribute to better diagnose unreachable UB.
5479 if (SanOpts.has(SanitizerKind::Unreachable)) {
5480 // Also remove from function since CallBase::hasFnAttr additionally checks
5481 // attributes of the called function.
5482 if (auto *F = CI->getCalledFunction())
5483 F->removeFnAttr(llvm::Attribute::NoReturn);
5484 CI->removeFnAttr(llvm::Attribute::NoReturn);
5485
5486 // Avoid incompatibility with ASan which relies on the `noreturn`
5487 // attribute to insert handler calls.
5488 if (SanOpts.hasOneOf(SanitizerKind::Address |
5489 SanitizerKind::KernelAddress)) {
5490 SanitizerScope SanScope(this);
5491 llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
5492 Builder.SetInsertPoint(CI);
5493 auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
5494 llvm::FunctionCallee Fn =
5495 CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
5496 EmitNounwindRuntimeCall(Fn);
5497 }
5498 }
5499
5500 EmitUnreachable(Loc);
5501 Builder.ClearInsertionPoint();
5502
5503 // FIXME: For now, emit a dummy basic block because expr emitters in
5504 // general are not ready to handle emitting expressions at unreachable
5505 // points.
5506 EnsureInsertPoint();
5507
5508 // Return a reasonable RValue.
5509 return GetUndefRValue(RetTy);
5510 }
5511
5512 // If this is a musttail call, return immediately. We do not branch to the
5513 // epilogue in this case.
5514 if (IsMustTail) {
5515 for (auto it = EHStack.find(CurrentCleanupScopeDepth); it != EHStack.end();
5516 ++it) {
5517 EHCleanupScope *Cleanup = dyn_cast<EHCleanupScope>(&*it);
5518 if (!(Cleanup && Cleanup->getCleanup()->isRedundantBeforeReturn()))
5519 CGM.ErrorUnsupported(MustTailCall, "tail call skipping over cleanups");
5520 }
5521 if (CI->getType()->isVoidTy())
5522 Builder.CreateRetVoid();
5523 else
5524 Builder.CreateRet(CI);
5525 Builder.ClearInsertionPoint();
5526 EnsureInsertPoint();
5527 return GetUndefRValue(RetTy);
5528 }
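// Illustrative example (assumed source): a statement written as
//
//   [[clang::musttail]] return callee(a, b);
//
// reaches this block with IsMustTail set, so the `ret` is emitted directly
// after the marked call and no branch to the normal epilogue is generated;
// cleanups that cannot be skipped are reported as unsupported above.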
5529
5530 // Perform the swifterror writeback.
5531 if (swiftErrorTemp.isValid()) {
5532 llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
5533 Builder.CreateStore(errorResult, swiftErrorArg);
5534 }
5535
5536 // Emit any call-associated writebacks immediately. Arguably this
5537 // should happen after any return-value munging.
5538 if (CallArgs.hasWritebacks())
5539 emitWritebacks(*this, CallArgs);
5540
5541 // The stack cleanup for inalloca arguments has to run out of the normal
5542 // lexical order, so deactivate it and run it manually here.
5543 CallArgs.freeArgumentMemory(*this);
5544
5545 // Extract the return value.
5546 RValue Ret = [&] {
5547 switch (RetAI.getKind()) {
5548 case ABIArgInfo::CoerceAndExpand: {
5549 auto coercionType = RetAI.getCoerceAndExpandType();
5550
5551 Address addr = SRetPtr;
5552 addr = Builder.CreateElementBitCast(addr, coercionType);
5553
5554 assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
5555 bool requiresExtract = isa<llvm::StructType>(CI->getType());
5556
5557 unsigned unpaddedIndex = 0;
5558 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5559 llvm::Type *eltType = coercionType->getElementType(i);
5560 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
5561 Address eltAddr = Builder.CreateStructGEP(addr, i);
5562 llvm::Value *elt = CI;
5563 if (requiresExtract)
5564 elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
5565 else
5566 assert(unpaddedIndex == 0);
5567 Builder.CreateStore(elt, eltAddr);
5568 }
5569 // FALLTHROUGH
5570 [[fallthrough]];
5571 }
5572
5573 case ABIArgInfo::InAlloca:
5574 case ABIArgInfo::Indirect: {
5575 RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
5576 if (UnusedReturnSizePtr)
5577 PopCleanupBlock();
5578 return ret;
5579 }
5580
5581 case ABIArgInfo::Ignore:
5582 // If we are ignoring an argument that had a result, make sure to
5583 // construct the appropriate return value for our caller.
5584 return GetUndefRValue(RetTy);
5585
5586 case ABIArgInfo::Extend:
5587 case ABIArgInfo::Direct: {
5588 llvm::Type *RetIRTy = ConvertType(RetTy);
5589 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
5590 switch (getEvaluationKind(RetTy)) {
5591 case TEK_Complex: {
5592 llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
5593 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
5594 return RValue::getComplex(std::make_pair(Real, Imag));
5595 }
5596 case TEK_Aggregate: {
5597 Address DestPtr = ReturnValue.getValue();
5598 bool DestIsVolatile = ReturnValue.isVolatile();
5599
5600 if (!DestPtr.isValid()) {
5601 DestPtr = CreateMemTemp(RetTy, "agg.tmp");
5602 DestIsVolatile = false;
5603 }
5604 EmitAggregateStore(CI, DestPtr, DestIsVolatile);
5605 return RValue::getAggregate(DestPtr);
5606 }
5607 case TEK_Scalar: {
5608 // If the argument doesn't match, perform a bitcast to coerce it. This
5609 // can happen due to trivial type mismatches.
5610 llvm::Value *V = CI;
5611 if (V->getType() != RetIRTy)
5612 V = Builder.CreateBitCast(V, RetIRTy);
5613 return RValue::get(V);
5614 }
5615 }
5616 llvm_unreachable("bad evaluation kind");
5617 }
5618
5619 Address DestPtr = ReturnValue.getValue();
5620 bool DestIsVolatile = ReturnValue.isVolatile();
5621
5622 if (!DestPtr.isValid()) {
5623 DestPtr = CreateMemTemp(RetTy, "coerce");
5624 DestIsVolatile = false;
5625 }
5626
5627 // If the value is offset in memory, apply the offset now.
5628 Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
5629 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
5630
5631 return convertTempToRValue(DestPtr, RetTy, SourceLocation());
5632 }
5633
5634 case ABIArgInfo::Expand:
5635 case ABIArgInfo::IndirectAliased:
5636 llvm_unreachable("Invalid ABI kind for return argument");
5637 }
5638
5639 llvm_unreachable("Unhandled ABIArgInfo::Kind");
5640 } ();
5641
5642 // Emit the assume_aligned check on the return value.
5643 if (Ret.isScalar() && TargetDecl) {
5644 AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5645 AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5646 }
5647
5648 // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
5649 // we can't use the full cleanup mechanism.
5650 for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
5651 LifetimeEnd.Emit(*this, /*Flags=*/{});
5652
5653 if (!ReturnValue.isExternallyDestructed() &&
5654 RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
5655 pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),