Bug Summary

File: tools/clang/lib/CodeGen/CGCall.cpp
Warning: line 3095, column 7
Called C++ object pointer is null
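
For context, this category of warning means the analyzer found an execution
path on which a non-static member function is called through a pointer that
is null on that path. A minimal hypothetical sketch of the pattern
(illustrative only, not the actual code at line 3095):

  struct Widget {
    void draw();                // any non-static member function
  };

  void render(Widget *W, bool &Drawn) {
    if (!W)
      Drawn = false;            // feasible path on which W == nullptr
    W->draw();                  // flagged: "Called C++ object pointer is null"
  }

The usual fix is to return early (or assert) on the null path before the call.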

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGCall.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -relaxed-aliasing -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D CLANG_VENDOR="Debian " -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~svn373517/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-10~svn373517/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-10~svn373517/tools/clang/include -I /build/llvm-toolchain-snapshot-10~svn373517/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-10~svn373517/build-llvm/include -I /build/llvm-toolchain-snapshot-10~svn373517/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~svn373517/build-llvm/tools/clang/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~svn373517=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fobjc-runtime=gcc -fno-common -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2019-10-02-234743-9763-1 -x c++ /build/llvm-toolchain-snapshot-10~svn373517/tools/clang/lib/CodeGen/CGCall.cpp
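
(Reports of this kind are typically generated by running the build under
scan-build, e.g. roughly:

  scan-build -o /tmp/scan-build-output make

which wraps the compiler and passes analyzer options like those shown above
down to clang -cc1.)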

/build/llvm-toolchain-snapshot-10~svn373517/tools/clang/lib/CodeGen/CGCall.cpp

1//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// These classes wrap the information about a call or function
10// definition used to handle ABI compliancy.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CGCall.h"
15#include "ABIInfo.h"
16#include "CGBlocks.h"
17#include "CGCXXABI.h"
18#include "CGCleanup.h"
19#include "CodeGenFunction.h"
20#include "CodeGenModule.h"
21#include "TargetInfo.h"
22#include "clang/AST/Decl.h"
23#include "clang/AST/DeclCXX.h"
24#include "clang/AST/DeclObjC.h"
25#include "clang/Basic/CodeGenOptions.h"
26#include "clang/Basic/TargetBuiltins.h"
27#include "clang/Basic/TargetInfo.h"
28#include "clang/CodeGen/CGFunctionInfo.h"
29#include "clang/CodeGen/SwiftCallingConv.h"
30#include "llvm/ADT/StringExtras.h"
31#include "llvm/Transforms/Utils/Local.h"
32#include "llvm/Analysis/ValueTracking.h"
33#include "llvm/IR/Attributes.h"
34#include "llvm/IR/CallingConv.h"
35#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/InlineAsm.h"
37#include "llvm/IR/IntrinsicInst.h"
38#include "llvm/IR/Intrinsics.h"
39using namespace clang;
40using namespace CodeGen;
41
42/***/
43
44unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
45 switch (CC) {
46 default: return llvm::CallingConv::C;
47 case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
48 case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
49 case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
50 case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
51 case CC_Win64: return llvm::CallingConv::Win64;
52 case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
53 case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
54 case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
55 case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
56 // TODO: Add support for __pascal to LLVM.
57 case CC_X86Pascal: return llvm::CallingConv::C;
58 // TODO: Add support for __vectorcall to LLVM.
59 case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
60 case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
61 case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
62 case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
63 case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
64 case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
65 case CC_Swift: return llvm::CallingConv::Swift;
66 }
67}
68
69/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
70/// qualification. Either or both of RD and MD may be null. A null RD indicates
71/// that there is no meaningful 'this' type, and a null MD can occur when
72/// calling a method pointer.
73CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
74 const CXXMethodDecl *MD) {
75 QualType RecTy;
76 if (RD)
77 RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
78 else
79 RecTy = Context.VoidTy;
80
81 if (MD)
82 RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace());
83 return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
84}
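// Illustrative sketch (editorial addition, not part of CGCall.cpp): for a
// hypothetical
//   struct S { void f() const volatile; };
// DeriveThisType(S, f) yields 'S *' rather than 'const volatile S *': the
// method's CVR qualifiers are dropped, and only an address-space qualifier
// from getMethodQualifiers() survives onto the pointee type.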
85
86/// Returns the canonical formal type of the given C++ method.
87static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
88 return MD->getType()->getCanonicalTypeUnqualified()
89 .getAs<FunctionProtoType>();
90}
91
92/// Returns the "extra-canonicalized" return type, which discards
93/// qualifiers on the return type. Codegen doesn't care about them,
94/// and it makes ABI code a little easier to be able to assume that
95/// all parameter and return types are top-level unqualified.
96static CanQualType GetReturnType(QualType RetTy) {
97 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
98}
99
100/// Arrange the argument and result information for a value of the given
101/// unprototyped freestanding function type.
102const CGFunctionInfo &
103CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
104 // When translating an unprototyped function type, always use a
105 // variadic type.
106 return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
107 /*instanceMethod=*/false,
108 /*chainCall=*/false, None,
109 FTNP->getExtInfo(), {}, RequiredArgs(0));
110}
111
112static void addExtParameterInfosForCall(
113 llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
114 const FunctionProtoType *proto,
115 unsigned prefixArgs,
116 unsigned totalArgs) {
117 assert(proto->hasExtParameterInfos());
118 assert(paramInfos.size() <= prefixArgs);
119 assert(proto->getNumParams() + prefixArgs <= totalArgs);
120
121 paramInfos.reserve(totalArgs);
122
123 // Add default infos for any prefix args that don't already have infos.
124 paramInfos.resize(prefixArgs);
125
126 // Add infos for the prototype.
127 for (const auto &ParamInfo : proto->getExtParameterInfos()) {
128 paramInfos.push_back(ParamInfo);
129 // pass_object_size params have no parameter info.
130 if (ParamInfo.hasPassObjectSize())
131 paramInfos.emplace_back();
132 }
133
134 assert(paramInfos.size() <= totalArgs &&
135 "Did we forget to insert pass_object_size args?");
136 // Add default infos for the variadic and/or suffix arguments.
137 paramInfos.resize(totalArgs);
138}
139
140/// Adds the formal parameters in FPT to the given prefix. If any parameter in
141/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
142static void appendParameterTypes(const CodeGenTypes &CGT,
143 SmallVectorImpl<CanQualType> &prefix,
144 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
145 CanQual<FunctionProtoType> FPT) {
146 // Fast path: don't touch param info if we don't need to.
147 if (!FPT->hasExtParameterInfos()) {
148 assert(paramInfos.empty() &&
149 "We have paramInfos, but the prototype doesn't?");
150 prefix.append(FPT->param_type_begin(), FPT->param_type_end());
151 return;
152 }
153
154 unsigned PrefixSize = prefix.size();
155 // In the vast majority of cases, we'll have precisely FPT->getNumParams()
156 // parameters; the only thing that can change this is the presence of
157 // pass_object_size. So, we preallocate for the common case.
158 prefix.reserve(prefix.size() + FPT->getNumParams());
159
160 auto ExtInfos = FPT->getExtParameterInfos();
161 assert(ExtInfos.size() == FPT->getNumParams());
162 for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
163 prefix.push_back(FPT->getParamType(I));
164 if (ExtInfos[I].hasPassObjectSize())
165 prefix.push_back(CGT.getContext().getSizeType());
166 }
167
168 addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
169 prefix.size());
170}
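// Illustrative sketch (editorial addition, not part of CGCall.cpp): a
// parameter declared as
//   void f(void *p __attribute__((pass_object_size(0))));
// lowers to two IR arguments, the pointer plus an implicit size_t, which is
// why the loop above pushes getSizeType() after each such parameter and why
// addExtParameterInfosForCall() appends an extra blank ExtParameterInfo.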
171
172/// Arrange the LLVM function layout for a value of the given function
173/// type, on top of any implicit parameters already stored.
174static const CGFunctionInfo &
175arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
176 SmallVectorImpl<CanQualType> &prefix,
177 CanQual<FunctionProtoType> FTP) {
178 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
179 RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
180 // FIXME: Kill copy.
181 appendParameterTypes(CGT, prefix, paramInfos, FTP);
182 CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
183
184 return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
185 /*chainCall=*/false, prefix,
186 FTP->getExtInfo(), paramInfos,
187 Required);
188}
189
190/// Arrange the argument and result information for a value of the
191/// given freestanding function type.
192const CGFunctionInfo &
193CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
194 SmallVector<CanQualType, 16> argTypes;
195 return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
196 FTP);
197}
198
199static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
200 // Set the appropriate calling convention for the Function.
201 if (D->hasAttr<StdCallAttr>())
202 return CC_X86StdCall;
203
204 if (D->hasAttr<FastCallAttr>())
205 return CC_X86FastCall;
206
207 if (D->hasAttr<RegCallAttr>())
208 return CC_X86RegCall;
209
210 if (D->hasAttr<ThisCallAttr>())
211 return CC_X86ThisCall;
212
213 if (D->hasAttr<VectorCallAttr>())
214 return CC_X86VectorCall;
215
216 if (D->hasAttr<PascalAttr>())
217 return CC_X86Pascal;
218
219 if (PcsAttr *PCS = D->getAttr<PcsAttr>())
220 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
221
222 if (D->hasAttr<AArch64VectorPcsAttr>())
223 return CC_AArch64VectorCall;
224
225 if (D->hasAttr<IntelOclBiccAttr>())
226 return CC_IntelOclBicc;
227
228 if (D->hasAttr<MSABIAttr>())
229 return IsWindows ? CC_C : CC_Win64;
230
231 if (D->hasAttr<SysVABIAttr>())
232 return IsWindows ? CC_X86_64SysV : CC_C;
233
234 if (D->hasAttr<PreserveMostAttr>())
235 return CC_PreserveMost;
236
237 if (D->hasAttr<PreserveAllAttr>())
238 return CC_PreserveAll;
239
240 return CC_C;
241}
242
243/// Arrange the argument and result information for a call to an
244/// unknown C++ non-static member function of the given abstract type.
245/// (A null RD means we don't have any meaningful "this" argument type,
246/// so fall back to a generic pointer type).
247/// The member function must be an ordinary function, i.e. not a
248/// constructor or destructor.
249const CGFunctionInfo &
250CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
251 const FunctionProtoType *FTP,
252 const CXXMethodDecl *MD) {
253 SmallVector<CanQualType, 16> argTypes;
254
255 // Add the 'this' pointer.
256 argTypes.push_back(DeriveThisType(RD, MD));
257
258 return ::arrangeLLVMFunctionInfo(
259 *this, true, argTypes,
260 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
261}
262
263/// Set calling convention for CUDA/HIP kernel.
264static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
265 const FunctionDecl *FD) {
266 if (FD->hasAttr<CUDAGlobalAttr>()) {
267 const FunctionType *FT = FTy->getAs<FunctionType>();
268 CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
269 FTy = FT->getCanonicalTypeUnqualified();
270 }
271}
272
273/// Arrange the argument and result information for a declaration or
274/// definition of the given C++ non-static member function. The
275/// member function must be an ordinary function, i.e. not a
276/// constructor or destructor.
277const CGFunctionInfo &
278CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
279 assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
280 assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
281
282 CanQualType FT = GetFormalType(MD).getAs<Type>();
283 setCUDAKernelCallingConvention(FT, CGM, MD);
284 auto prototype = FT.getAs<FunctionProtoType>();
285
286 if (MD->isInstance()) {
287 // The abstract case is perfectly fine.
288 const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
289 return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
290 }
291
292 return arrangeFreeFunctionType(prototype);
293}
294
295bool CodeGenTypes::inheritingCtorHasParams(
296 const InheritedConstructor &Inherited, CXXCtorType Type) {
297 // Parameters are unnecessary if we're constructing a base class subobject
298 // and the inherited constructor lives in a virtual base.
299 return Type == Ctor_Complete ||
300 !Inherited.getShadowDecl()->constructsVirtualBase() ||
301 !Target.getCXXABI().hasConstructorVariants();
302}
303
304const CGFunctionInfo &
305CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
306 auto *MD = cast<CXXMethodDecl>(GD.getDecl());
307
308 SmallVector<CanQualType, 16> argTypes;
309 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
310 argTypes.push_back(DeriveThisType(MD->getParent(), MD));
311
312 bool PassParams = true;
313
314 if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
315 // A base class inheriting constructor doesn't get forwarded arguments
316 // needed to construct a virtual base (or base class thereof).
317 if (auto Inherited = CD->getInheritedConstructor())
318 PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
319 }
320
321 CanQual<FunctionProtoType> FTP = GetFormalType(MD);
322
323 // Add the formal parameters.
324 if (PassParams)
325 appendParameterTypes(*this, argTypes, paramInfos, FTP);
326
327 CGCXXABI::AddedStructorArgs AddedArgs =
328 TheCXXABI.buildStructorSignature(GD, argTypes);
329 if (!paramInfos.empty()) {
330 // Note: prefix implies after the first param.
331 if (AddedArgs.Prefix)
332 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
333 FunctionProtoType::ExtParameterInfo{});
334 if (AddedArgs.Suffix)
335 paramInfos.append(AddedArgs.Suffix,
336 FunctionProtoType::ExtParameterInfo{});
337 }
338
339 RequiredArgs required =
340 (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
341 : RequiredArgs::All);
342
343 FunctionType::ExtInfo extInfo = FTP->getExtInfo();
344 CanQualType resultType = TheCXXABI.HasThisReturn(GD)
345 ? argTypes.front()
346 : TheCXXABI.hasMostDerivedReturn(GD)
347 ? CGM.getContext().VoidPtrTy
348 : Context.VoidTy;
349 return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
350 /*chainCall=*/false, argTypes, extInfo,
351 paramInfos, required);
352}
353
354static SmallVector<CanQualType, 16>
355getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
356 SmallVector<CanQualType, 16> argTypes;
357 for (auto &arg : args)
358 argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
359 return argTypes;
360}
361
362static SmallVector<CanQualType, 16>
363getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
364 SmallVector<CanQualType, 16> argTypes;
365 for (auto &arg : args)
366 argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
367 return argTypes;
368}
369
370static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
371getExtParameterInfosForCall(const FunctionProtoType *proto,
372 unsigned prefixArgs, unsigned totalArgs) {
373 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
374 if (proto->hasExtParameterInfos()) {
375 addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
376 }
377 return result;
378}
379
380/// Arrange a call to a C++ method, passing the given arguments.
381///
382/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
383/// parameter.
384/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
385/// args.
386/// PassProtoArgs indicates whether `args` has args for the parameters in the
387/// given CXXConstructorDecl.
388const CGFunctionInfo &
389CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
390 const CXXConstructorDecl *D,
391 CXXCtorType CtorKind,
392 unsigned ExtraPrefixArgs,
393 unsigned ExtraSuffixArgs,
394 bool PassProtoArgs) {
395 // FIXME: Kill copy.
396 SmallVector<CanQualType, 16> ArgTypes;
397 for (const auto &Arg : args)
398 ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
399
400 // +1 for implicit this, which should always be args[0].
401 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
402
403 CanQual<FunctionProtoType> FPT = GetFormalType(D);
404 RequiredArgs Required = PassProtoArgs
405 ? RequiredArgs::forPrototypePlus(
406 FPT, TotalPrefixArgs + ExtraSuffixArgs)
407 : RequiredArgs::All;
408
409 GlobalDecl GD(D, CtorKind);
410 CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
411 ? ArgTypes.front()
412 : TheCXXABI.hasMostDerivedReturn(GD)
413 ? CGM.getContext().VoidPtrTy
414 : Context.VoidTy;
415
416 FunctionType::ExtInfo Info = FPT->getExtInfo();
417 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
418 // If the prototype args are elided, we should only have ABI-specific args,
419 // which never have param info.
420 if (PassProtoArgs && FPT->hasExtParameterInfos()) {
421 // ABI-specific suffix arguments are treated the same as variadic arguments.
422 addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
423 ArgTypes.size());
424 }
425 return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
426 /*chainCall=*/false, ArgTypes, Info,
427 ParamInfos, Required);
428}
429
430/// Arrange the argument and result information for the declaration or
431/// definition of the given function.
432const CGFunctionInfo &
433CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
434 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
435 if (MD->isInstance())
436 return arrangeCXXMethodDeclaration(MD);
437
438 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
439
440 assert(isa<FunctionType>(FTy));
441 setCUDAKernelCallingConvention(FTy, CGM, FD);
442
443 // When declaring a function without a prototype, always use a
444 // non-variadic type.
445 if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
446 return arrangeLLVMFunctionInfo(
447 noProto->getReturnType(), /*instanceMethod=*/false,
448 /*chainCall=*/false, None, noProto->getExtInfo(), {}, RequiredArgs::All);
449 }
450
451 return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
452}
453
454/// Arrange the argument and result information for the declaration or
455/// definition of an Objective-C method.
456const CGFunctionInfo &
457CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
458 // It happens that this is the same as a call with no optional
459 // arguments, except also using the formal 'self' type.
460 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
461}
462
463/// Arrange the argument and result information for the function type
464/// through which to perform a send to the given Objective-C method,
465/// using the given receiver type. The receiver type is not always
466/// the 'self' type of the method or even an Objective-C pointer type.
467/// This is *not* the right method for actually performing such a
468/// message send, due to the possibility of optional arguments.
469const CGFunctionInfo &
470CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
471 QualType receiverType) {
472 SmallVector<CanQualType, 16> argTys;
473 SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
474 argTys.push_back(Context.getCanonicalParamType(receiverType));
475 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
476 // FIXME: Kill copy?
477 for (const auto *I : MD->parameters()) {
478 argTys.push_back(Context.getCanonicalParamType(I->getType()));
479 auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
480 I->hasAttr<NoEscapeAttr>());
481 extParamInfos.push_back(extParamInfo);
482 }
483
484 FunctionType::ExtInfo einfo;
485 bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
486 einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
487
488 if (getContext().getLangOpts().ObjCAutoRefCount &&
489 MD->hasAttr<NSReturnsRetainedAttr>())
490 einfo = einfo.withProducesResult(true);
491
492 RequiredArgs required =
493 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
494
495 return arrangeLLVMFunctionInfo(
496 GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
497 /*chainCall=*/false, argTys, einfo, extParamInfos, required);
498}
499
500const CGFunctionInfo &
501CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
502 const CallArgList &args) {
503 auto argTypes = getArgTypesForCall(Context, args);
504 FunctionType::ExtInfo einfo;
505
506 return arrangeLLVMFunctionInfo(
507 GetReturnType(returnType), /*instanceMethod=*/false,
508 /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
509}
510
511const CGFunctionInfo &
512CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
513 // FIXME: Do we need to handle ObjCMethodDecl?
514 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
515
516 if (isa<CXXConstructorDecl>(GD.getDecl()) ||
517 isa<CXXDestructorDecl>(GD.getDecl()))
518 return arrangeCXXStructorDeclaration(GD);
519
520 return arrangeFunctionDeclaration(FD);
521}
522
523/// Arrange a thunk that takes 'this' as the first parameter followed by
524/// varargs. Return a void pointer, regardless of the actual return type.
525/// The body of the thunk will end in a musttail call to a function of the
526/// correct type, and the caller will bitcast the function to the correct
527/// prototype.
528const CGFunctionInfo &
529CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
530 assert(MD->isVirtual() && "only methods have thunks");
531 CanQual<FunctionProtoType> FTP = GetFormalType(MD);
532 CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
533 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
534 /*chainCall=*/false, ArgTys,
535 FTP->getExtInfo(), {}, RequiredArgs(1));
536}
537
538const CGFunctionInfo &
539CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
540 CXXCtorType CT) {
541 assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
542
543 CanQual<FunctionProtoType> FTP = GetFormalType(CD);
544 SmallVector<CanQualType, 2> ArgTys;
545 const CXXRecordDecl *RD = CD->getParent();
546 ArgTys.push_back(DeriveThisType(RD, CD));
547 if (CT == Ctor_CopyingClosure)
548 ArgTys.push_back(*FTP->param_type_begin());
549 if (RD->getNumVBases() > 0)
550 ArgTys.push_back(Context.IntTy);
551 CallingConv CC = Context.getDefaultCallingConvention(
552 /*IsVariadic=*/false, /*IsCXXMethod=*/true);
553 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
554 /*chainCall=*/false, ArgTys,
555 FunctionType::ExtInfo(CC), {},
556 RequiredArgs::All);
557}
558
559/// Arrange a call as unto a free function, except possibly with an
560/// additional number of formal parameters considered required.
561static const CGFunctionInfo &
562arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
563 CodeGenModule &CGM,
564 const CallArgList &args,
565 const FunctionType *fnType,
566 unsigned numExtraRequiredArgs,
567 bool chainCall) {
568 assert(args.size() >= numExtraRequiredArgs);
569
570 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
571
572 // In most cases, there are no optional arguments.
573 RequiredArgs required = RequiredArgs::All;
574
575 // If we have a variadic prototype, the required arguments are the
576 // extra prefix plus the arguments in the prototype.
577 if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
578 if (proto->isVariadic())
579 required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);
580
581 if (proto->hasExtParameterInfos())
582 addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
583 args.size());
584
585 // If we don't have a prototype at all, but we're supposed to
586 // explicitly use the variadic convention for unprototyped calls,
587 // treat all of the arguments as required but preserve the nominal
588 // possibility of variadics.
589 } else if (CGM.getTargetCodeGenInfo()
590 .isNoProtoCallVariadic(args,
591 cast<FunctionNoProtoType>(fnType))) {
592 required = RequiredArgs(args.size());
593 }
594
595 // FIXME: Kill copy.
596 SmallVector<CanQualType, 16> argTypes;
597 for (const auto &arg : args)
598 argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
599 return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
600 /*instanceMethod=*/false, chainCall,
601 argTypes, fnType->getExtInfo(), paramInfos,
602 required);
603}
604
605/// Figure out the rules for calling a function with the given formal
606/// type using the given arguments. The arguments are necessary
607/// because the function might be unprototyped, in which case it's
608/// target-dependent in crazy ways.
609const CGFunctionInfo &
610CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
611 const FunctionType *fnType,
612 bool chainCall) {
613 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
614 chainCall ? 1 : 0, chainCall);
615}
616
617/// A block function is essentially a free function with an
618/// extra implicit argument.
619const CGFunctionInfo &
620CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
621 const FunctionType *fnType) {
622 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
623 /*chainCall=*/false);
624}
625
626const CGFunctionInfo &
627CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
628 const FunctionArgList &params) {
629 auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
630 auto argTypes = getArgTypesForDeclaration(Context, params);
631
632 return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
633 /*instanceMethod*/ false, /*chainCall*/ false,
634 argTypes, proto->getExtInfo(), paramInfos,
635 RequiredArgs::forPrototypePlus(proto, 1));
636}
637
638const CGFunctionInfo &
639CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
640 const CallArgList &args) {
641 // FIXME: Kill copy.
642 SmallVector<CanQualType, 16> argTypes;
643 for (const auto &Arg : args)
644 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
645 return arrangeLLVMFunctionInfo(
646 GetReturnType(resultType), /*instanceMethod=*/false,
647 /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
648 /*paramInfos=*/ {}, RequiredArgs::All);
649}
650
651const CGFunctionInfo &
652CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
653 const FunctionArgList &args) {
654 auto argTypes = getArgTypesForDeclaration(Context, args);
655
656 return arrangeLLVMFunctionInfo(
657 GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
658 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
659}
660
661const CGFunctionInfo &
662CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
663 ArrayRef<CanQualType> argTypes) {
664 return arrangeLLVMFunctionInfo(
665 resultType, /*instanceMethod=*/false, /*chainCall=*/false,
666 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
667}
668
669/// Arrange a call to a C++ method, passing the given arguments.
670///
671/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
672/// does not count `this`.
673const CGFunctionInfo &
674CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
675 const FunctionProtoType *proto,
676 RequiredArgs required,
677 unsigned numPrefixArgs) {
678 assert(numPrefixArgs + 1 <= args.size() &&
679 "Emitting a call with less args than the required prefix?");
680 // Add one to account for `this`. It's a bit awkward here, but we don't count
681 // `this` in similar places elsewhere.
682 auto paramInfos =
683 getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
684
685 // FIXME: Kill copy.
686 auto argTypes = getArgTypesForCall(Context, args);
687
688 FunctionType::ExtInfo info = proto->getExtInfo();
689 return arrangeLLVMFunctionInfo(
690 GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
691 /*chainCall=*/false, argTypes, info, paramInfos, required);
692}
693
694const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
695 return arrangeLLVMFunctionInfo(
696 getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
697 None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
698}
699
700const CGFunctionInfo &
701CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
702 const CallArgList &args) {
703 assert(signature.arg_size() <= args.size());
704 if (signature.arg_size() == args.size())
705 return signature;
706
707 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
708 auto sigParamInfos = signature.getExtParameterInfos();
709 if (!sigParamInfos.empty()) {
710 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
711 paramInfos.resize(args.size());
712 }
713
714 auto argTypes = getArgTypesForCall(Context, args);
715
716 assert(signature.getRequiredArgs().allowsOptionalArgs());
717 return arrangeLLVMFunctionInfo(signature.getReturnType(),
718 signature.isInstanceMethod(),
719 signature.isChainCall(),
720 argTypes,
721 signature.getExtInfo(),
722 paramInfos,
723 signature.getRequiredArgs());
724}
725
726namespace clang {
727namespace CodeGen {
728void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
729}
730}
731
732/// Arrange the argument and result information for an abstract value
733/// of a given function type. This is the method which all of the
734/// above functions ultimately defer to.
735const CGFunctionInfo &
736CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
737 bool instanceMethod,
738 bool chainCall,
739 ArrayRef<CanQualType> argTypes,
740 FunctionType::ExtInfo info,
741 ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
742 RequiredArgs required) {
743 assert(llvm::all_of(argTypes,
744 [](CanQualType T) { return T.isCanonicalAsParam(); }));
745
746 // Lookup or create unique function info.
747 llvm::FoldingSetNodeID ID;
748 CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
749 required, resultType, argTypes);
750
751 void *insertPos = nullptr;
752 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
753 if (FI)
754 return *FI;
755
756 unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
757
758 // Construct the function info. We co-allocate the ArgInfos.
759 FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
760 paramInfos, resultType, argTypes, required);
761 FunctionInfos.InsertNode(FI, insertPos);
762
763 bool inserted = FunctionsBeingProcessed.insert(FI).second;
764 (void)inserted;
765 assert(inserted && "Recursively being processed?");
766
767 // Compute ABI information.
768 if (CC == llvm::CallingConv::SPIR_KERNEL) {
769 // Force target independent argument handling for the host visible
770 // kernel functions.
771 computeSPIRKernelABIInfo(CGM, *FI);
772 } else if (info.getCC() == CC_Swift) {
773 swiftcall::computeABIInfo(CGM, *FI);
774 } else {
775 getABIInfo().computeInfo(*FI);
776 }
777
778 // Loop over all of the computed argument and return value info. If any of
779 // them are direct or extend without a specified coerce type, specify the
780 // default now.
781 ABIArgInfo &retInfo = FI->getReturnInfo();
782 if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
783 retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
784
785 for (auto &I : FI->arguments())
786 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
787 I.info.setCoerceToType(ConvertType(I.type));
788
789 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
790 assert(erased && "Not in set?");
791
792 return *FI;
793}
794
795CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
796 bool instanceMethod,
797 bool chainCall,
798 const FunctionType::ExtInfo &info,
799 ArrayRef<ExtParameterInfo> paramInfos,
800 CanQualType resultType,
801 ArrayRef<CanQualType> argTypes,
802 RequiredArgs required) {
803 assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
804 assert(!required.allowsOptionalArgs() ||
805 required.getNumRequiredArgs() <= argTypes.size());
806
807 void *buffer =
808 operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
809 argTypes.size() + 1, paramInfos.size()));
810
811 CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
812 FI->CallingConvention = llvmCC;
813 FI->EffectiveCallingConvention = llvmCC;
814 FI->ASTCallingConvention = info.getCC();
815 FI->InstanceMethod = instanceMethod;
816 FI->ChainCall = chainCall;
817 FI->NoReturn = info.getNoReturn();
818 FI->ReturnsRetained = info.getProducesResult();
819 FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
820 FI->NoCfCheck = info.getNoCfCheck();
821 FI->Required = required;
822 FI->HasRegParm = info.getHasRegParm();
823 FI->RegParm = info.getRegParm();
824 FI->ArgStruct = nullptr;
825 FI->ArgStructAlign = 0;
826 FI->NumArgs = argTypes.size();
827 FI->HasExtParameterInfos = !paramInfos.empty();
828 FI->getArgsBuffer()[0].type = resultType;
829 for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
830 FI->getArgsBuffer()[i + 1].type = argTypes[i];
831 for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
832 FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
833 return FI;
834}
835
836/***/
837
838namespace {
839// ABIArgInfo::Expand implementation.
840
841// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
842struct TypeExpansion {
843 enum TypeExpansionKind {
844 // Elements of constant arrays are expanded recursively.
845 TEK_ConstantArray,
846 // Record fields are expanded recursively (but if record is a union, only
847 // the field with the largest size is expanded).
848 TEK_Record,
849 // For complex types, real and imaginary parts are expanded recursively.
850 TEK_Complex,
851 // All other types are not expandable.
852 TEK_None
853 };
854
855 const TypeExpansionKind Kind;
856
857 TypeExpansion(TypeExpansionKind K) : Kind(K) {}
858 virtual ~TypeExpansion() {}
859};
860
861struct ConstantArrayExpansion : TypeExpansion {
862 QualType EltTy;
863 uint64_t NumElts;
864
865 ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
866 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
867 static bool classof(const TypeExpansion *TE) {
868 return TE->Kind == TEK_ConstantArray;
869 }
870};
871
872struct RecordExpansion : TypeExpansion {
873 SmallVector<const CXXBaseSpecifier *, 1> Bases;
874
875 SmallVector<const FieldDecl *, 1> Fields;
876
877 RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
878 SmallVector<const FieldDecl *, 1> &&Fields)
879 : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
880 Fields(std::move(Fields)) {}
881 static bool classof(const TypeExpansion *TE) {
882 return TE->Kind == TEK_Record;
883 }
884};
885
886struct ComplexExpansion : TypeExpansion {
887 QualType EltTy;
888
889 ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
890 static bool classof(const TypeExpansion *TE) {
891 return TE->Kind == TEK_Complex;
892 }
893};
894
895struct NoExpansion : TypeExpansion {
896 NoExpansion() : TypeExpansion(TEK_None) {}
897 static bool classof(const TypeExpansion *TE) {
898 return TE->Kind == TEK_None;
899 }
900};
901} // namespace
902
903static std::unique_ptr<TypeExpansion>
904getTypeExpansion(QualType Ty, const ASTContext &Context) {
905 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
906 return std::make_unique<ConstantArrayExpansion>(
907 AT->getElementType(), AT->getSize().getZExtValue());
908 }
909 if (const RecordType *RT = Ty->getAs<RecordType>()) {
910 SmallVector<const CXXBaseSpecifier *, 1> Bases;
911 SmallVector<const FieldDecl *, 1> Fields;
912 const RecordDecl *RD = RT->getDecl();
913 assert(!RD->hasFlexibleArrayMember() &&
914 "Cannot expand structure with flexible array.");
915 if (RD->isUnion()) {
916 // Unions can be here only in degenerative cases - all the fields are same
917 // after flattening. Thus we have to use the "largest" field.
918 const FieldDecl *LargestFD = nullptr;
919 CharUnits UnionSize = CharUnits::Zero();
920
921 for (const auto *FD : RD->fields()) {
922 if (FD->isZeroLengthBitField(Context))
923 continue;
924 assert(!FD->isBitField() &&
925 "Cannot expand structure with bit-field members.");
926 CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
927 if (UnionSize < FieldSize) {
928 UnionSize = FieldSize;
929 LargestFD = FD;
930 }
931 }
932 if (LargestFD)
933 Fields.push_back(LargestFD);
934 } else {
935 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
936 assert(!CXXRD->isDynamicClass() &&
937 "cannot expand vtable pointers in dynamic classes");
938 for (const CXXBaseSpecifier &BS : CXXRD->bases())
939 Bases.push_back(&BS);
940 }
941
942 for (const auto *FD : RD->fields()) {
943 if (FD->isZeroLengthBitField(Context))
944 continue;
945 assert(!FD->isBitField() &&
946 "Cannot expand structure with bit-field members.");
947 Fields.push_back(FD);
948 }
949 }
950 return std::make_unique<RecordExpansion>(std::move(Bases),
951 std::move(Fields));
952 }
953 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
954 return std::make_unique<ComplexExpansion>(CT->getElementType());
955 }
956 return std::make_unique<NoExpansion>();
957}
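// Illustrative sketch (editorial addition, not part of CGCall.cpp): under
// this scheme, a hypothetical
//   struct P { int x; float y; };  expands to { i32, float }     (size 2)
//   _Complex double                expands to { double, double } (size 2)
//   int [3]                        expands to { i32, i32, i32 }  (size 3)
// getExpansionSize() below computes those counts by recursing through the
// same TypeExpansion hierarchy.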
958
959static int getExpansionSize(QualType Ty, const ASTContext &Context) {
960 auto Exp = getTypeExpansion(Ty, Context);
961 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
962 return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
963 }
964 if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
965 int Res = 0;
966 for (auto BS : RExp->Bases)
967 Res += getExpansionSize(BS->getType(), Context);
968 for (auto FD : RExp->Fields)
969 Res += getExpansionSize(FD->getType(), Context);
970 return Res;
971 }
972 if (isa<ComplexExpansion>(Exp.get()))
973 return 2;
974 assert(isa<NoExpansion>(Exp.get()));
975 return 1;
976}
977
978void
979CodeGenTypes::getExpandedTypes(QualType Ty,
980 SmallVectorImpl<llvm::Type *>::iterator &TI) {
981 auto Exp = getTypeExpansion(Ty, Context);
982 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
983 for (int i = 0, n = CAExp->NumElts; i < n; i++) {
984 getExpandedTypes(CAExp->EltTy, TI);
985 }
986 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
987 for (auto BS : RExp->Bases)
988 getExpandedTypes(BS->getType(), TI);
989 for (auto FD : RExp->Fields)
990 getExpandedTypes(FD->getType(), TI);
991 } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
992 llvm::Type *EltTy = ConvertType(CExp->EltTy);
993 *TI++ = EltTy;
994 *TI++ = EltTy;
995 } else {
996 assert(isa<NoExpansion>(Exp.get()));
997 *TI++ = ConvertType(Ty);
998 }
999}
1000
1001static void forConstantArrayExpansion(CodeGenFunction &CGF,
1002 ConstantArrayExpansion *CAE,
1003 Address BaseAddr,
1004 llvm::function_ref<void(Address)> Fn) {
1005 CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
1006 CharUnits EltAlign =
1007 BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
1008
1009 for (int i = 0, n = CAE->NumElts; i < n; i++) {
1010 llvm::Value *EltAddr =
1011 CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
1012 Fn(Address(EltAddr, EltAlign));
1013 }
1014}
1015
1016void CodeGenFunction::ExpandTypeFromArgs(
1017 QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
1018 assert(LV.isSimple() &&
1019 "Unexpected non-simple lvalue during struct expansion.");
1020
1021 auto Exp = getTypeExpansion(Ty, getContext());
1022 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1023 forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
1024 [&](Address EltAddr) {
1025 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1026 ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1027 });
1028 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1029 Address This = LV.getAddress();
1030 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1031 // Perform a single step derived-to-base conversion.
1032 Address Base =
1033 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1034 /*NullCheckValue=*/false, SourceLocation());
1035 LValue SubLV = MakeAddrLValue(Base, BS->getType());
1036
1037 // Recurse onto bases.
1038 ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1039 }
1040 for (auto FD : RExp->Fields) {
1041 // FIXME: What are the right qualifiers here?
1042 LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1043 ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1044 }
1045 } else if (isa<ComplexExpansion>(Exp.get())) {
1046 auto realValue = *AI++;
1047 auto imagValue = *AI++;
1048 EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1049 } else {
1050 assert(isa<NoExpansion>(Exp.get()));
1051 EmitStoreThroughLValue(RValue::get(*AI++), LV);
1052 }
1053}
1054
1055void CodeGenFunction::ExpandTypeToArgs(
1056 QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1057 SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1058 auto Exp = getTypeExpansion(Ty, getContext());
1059 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1060 Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1061 : Arg.getKnownRValue().getAggregateAddress();
1062 forConstantArrayExpansion(
1063 *this, CAExp, Addr, [&](Address EltAddr) {
1064 CallArg EltArg = CallArg(
1065 convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1066 CAExp->EltTy);
1067 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1068 IRCallArgPos);
1069 });
1070 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1071 Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1072 : Arg.getKnownRValue().getAggregateAddress();
1073 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1074 // Perform a single step derived-to-base conversion.
1075 Address Base =
1076 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1077 /*NullCheckValue=*/false, SourceLocation());
1078 CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1079
1080 // Recurse onto bases.
1081 ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1082 IRCallArgPos);
1083 }
1084
1085 LValue LV = MakeAddrLValue(This, Ty);
1086 for (auto FD : RExp->Fields) {
1087 CallArg FldArg =
1088 CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1089 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1090 IRCallArgPos);
1091 }
1092 } else if (isa<ComplexExpansion>(Exp.get())) {
1093 ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
1094 IRCallArgs[IRCallArgPos++] = CV.first;
1095 IRCallArgs[IRCallArgPos++] = CV.second;
1096 } else {
1097 assert(isa<NoExpansion>(Exp.get()));
1098 auto RV = Arg.getKnownRValue();
1099 assert(RV.isScalar() &&
1100 "Unexpected non-scalar rvalue during struct expansion.");
1101
1102 // Insert a bitcast as needed.
1103 llvm::Value *V = RV.getScalarVal();
1104 if (IRCallArgPos < IRFuncTy->getNumParams() &&
1105 V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1106 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1107
1108 IRCallArgs[IRCallArgPos++] = V;
1109 }
1110}
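// [Editorial sketch, not part of CGCall.cpp] To make the ComplexExpansion
// branch above concrete: a hypothetical C declaration
//
//   void takes_complex(double _Complex z);
//
// is expanded so the real and imaginary halves occupy two consecutive IR
// call arguments, roughly
//
//   define void @takes_complex(double %z.real, double %z.imag)
//
// The exact lowering is target-ABI dependent; this is illustrative only.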
1111
1112/// Create a temporary allocation for the purposes of coercion.
1113static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1114 CharUnits MinAlign) {
1115 // Don't use an alignment that's worse than what LLVM would prefer.
1116 auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1117 CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1118
1119 return CGF.CreateTempAlloca(Ty, Align);
1120}
1121
1122/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1123/// accessing some number of bytes out of, try to gep into the struct to get
1124/// at its inner goodness. Dive as deep as possible without entering an element
1125/// with an in-memory size smaller than DstSize.
1126static Address
1127EnterStructPointerForCoercedAccess(Address SrcPtr,
1128 llvm::StructType *SrcSTy,
1129 uint64_t DstSize, CodeGenFunction &CGF) {
1130 // We can't dive into a zero-element struct.
1131 if (SrcSTy->getNumElements() == 0) return SrcPtr;
1132
1133 llvm::Type *FirstElt = SrcSTy->getElementType(0);
1134
1135 // If the first elt is at least as large as what we're looking for, or if the
1136 // first element is the same size as the whole struct, we can enter it. The
1137 // comparison must be made on the store size and not the alloca size. Using
1138 // the alloca size may overstate the size of the load.
1139 uint64_t FirstEltSize =
1140 CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1141 if (FirstEltSize < DstSize &&
1142 FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1143 return SrcPtr;
1144
1145 // GEP into the first element.
1146 SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");
1147
1148 // If the first element is a struct, recurse.
1149 llvm::Type *SrcTy = SrcPtr.getElementType();
1150 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1151 return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1152
1153 return SrcPtr;
1154}
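// [Editorial sketch, not part of CGCall.cpp] Worked example, assuming a
// typical 64-bit data layout: with SrcSTy = { { i32, i32 }, i8 } and
// DstSize = 8, the first element { i32, i32 } has store size 8 >= DstSize,
// so we "coerce.dive" into it; its own first element i32 (store size 4) is
// smaller than both DstSize and the inner struct, so recursion stops and
// the returned address points at the inner { i32, i32 }.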
1155
1156/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1157/// are either integers or pointers. This does a truncation of the value if it
1158/// is too large or a zero extension if it is too small.
1159///
1160/// This behaves as if the value were coerced through memory, so on big-endian
1161/// targets the high bits are preserved in a truncation, while little-endian
1162/// targets preserve the low bits.
1163static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1164 llvm::Type *Ty,
1165 CodeGenFunction &CGF) {
1166 if (Val->getType() == Ty)
1167 return Val;
1168
1169 if (isa<llvm::PointerType>(Val->getType())) {
1170 // If this is Pointer->Pointer avoid conversion to and from int.
1171 if (isa<llvm::PointerType>(Ty))
1172 return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1173
1174 // Convert the pointer to an integer so we can play with its width.
1175 Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1176 }
1177
1178 llvm::Type *DestIntTy = Ty;
1179 if (isa<llvm::PointerType>(DestIntTy))
1180 DestIntTy = CGF.IntPtrTy;
1181
1182 if (Val->getType() != DestIntTy) {
1183 const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1184 if (DL.isBigEndian()) {
1185 // Preserve the high bits on big-endian targets.
1186 // That is what memory coercion does.
1187 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1188 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1189
1190 if (SrcSize > DstSize) {
1191 Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1192 Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1193 } else {
1194 Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1195 Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1196 }
1197 } else {
1198 // Little-endian targets preserve the low bits. No shifts required.
1199 Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1200 }
1201 }
1202
1203 if (isa<llvm::PointerType>(Ty))
1204 Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1205 return Val;
1206}
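// [Editorial sketch, not part of CGCall.cpp] Endianness example: coercing
// an i64 holding 0xAABBCCDD11223344 down to i32 "as if through memory"
// yields 0xAABBCCDD on a big-endian target (lshr 32, then trunc preserves
// the high bits) but 0x11223344 on a little-endian target (a plain trunc
// preserves the low bits), matching what a store-i64/load-i32 pair at the
// same address would produce.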
1207
1208
1209
1210/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1211/// a pointer to an object of type \arg Ty, known to be aligned to
1212/// \arg SrcAlign bytes.
1213///
1214/// This safely handles the case when the src type is smaller than the
1215 /// destination type; in this situation the values of bits which are not
1216/// present in the src are undefined.
1217static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1218 CodeGenFunction &CGF) {
1219 llvm::Type *SrcTy = Src.getElementType();
1220
1221 // If SrcTy and Ty are the same, just do a load.
1222 if (SrcTy == Ty)
1223 return CGF.Builder.CreateLoad(Src);
1224
1225 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1226
1227 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1228 Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
1229 SrcTy = Src.getType()->getElementType();
1230 }
1231
1232 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1233
1234 // If the source and destination are integer or pointer types, just do an
1235 // extension or truncation to the desired type.
1236 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1237 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1238 llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1239 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1240 }
1241
1242 // If load is legal, just bitcast the src pointer.
1243 if (SrcSize >= DstSize) {
1244 // Generally SrcSize is never greater than DstSize, since this means we are
1245 // losing bits. However, this can happen in cases where the structure has
1246 // additional padding, for example due to a user specified alignment.
1247 //
1248 // FIXME: Assert that we aren't truncating non-padding bits when we have access
1249 // to that information.
1250 Src = CGF.Builder.CreateBitCast(Src,
1251 Ty->getPointerTo(Src.getAddressSpace()));
1252 return CGF.Builder.CreateLoad(Src);
1253 }
1254
1255 // Otherwise do coercion through memory. This is stupid, but simple.
1256 Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
1257  Address Casted = CGF.Builder.CreateElementBitCast(Tmp, CGF.Int8Ty);
1258  Address SrcCasted = CGF.Builder.CreateElementBitCast(Src, CGF.Int8Ty);
1259 CGF.Builder.CreateMemCpy(Casted, SrcCasted,
1260 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
1261 false);
1262 return CGF.Builder.CreateLoad(Tmp);
1263}
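// [Editorial sketch, not part of CGCall.cpp] The memory fallback above is
// morally this C sketch (names hypothetical):
//
//   DstTy tmp;                          // CreateTempAllocaForCoercion
//   memcpy(&tmp, src, sizeof(SrcTy));   // copy only SrcSize bytes
//   return tmp;                         // bytes beyond SrcSize undefined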
1264
1265// Function to store a first-class aggregate into memory. We prefer to
1266// store the elements rather than the aggregate to be more friendly to
1267// fast-isel.
1268// FIXME: Do we need to recurse here?
1269static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
1270 Address Dest, bool DestIsVolatile) {
1271 // Prefer scalar stores to first-class aggregate stores.
1272 if (llvm::StructType *STy =
1273 dyn_cast<llvm::StructType>(Val->getType())) {
1274 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1275 Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i);
1276 llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
1277 CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1278 }
1279 } else {
1280 CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
1281 }
1282}
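// [Editorial sketch, not part of CGCall.cpp] For a first-class aggregate of
// type { i32, float }, BuildAggStore emits two scalar stores rather than
// one aggregate store, roughly:
//
//   %e0 = extractvalue { i32, float } %val, 0
//   store i32 %e0, i32* %dest.elt0
//   %e1 = extractvalue { i32, float } %val, 1
//   store float %e1, float* %dest.elt1
//
// (register names are made up; fast-isel handles this form better).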
1283
1284/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1285/// where the source and destination may have different types. The
1286/// destination is known to be aligned to \arg DstAlign bytes.
1287///
1288/// This safely handles the case when the src type is larger than the
1289/// destination type; the upper bits of the src will be lost.
1290static void CreateCoercedStore(llvm::Value *Src,
1291 Address Dst,
1292 bool DstIsVolatile,
1293 CodeGenFunction &CGF) {
1294 llvm::Type *SrcTy = Src->getType();
1295 llvm::Type *DstTy = Dst.getType()->getElementType();
1296 if (SrcTy == DstTy) {
1297 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1298 return;
1299 }
1300
1301 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1302
1303 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1304 Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
1305 DstTy = Dst.getType()->getElementType();
1306 }
1307
1308 // If the source and destination are integer or pointer types, just do an
1309 // extension or truncation to the desired type.
1310 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1311 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1312 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1313 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1314 return;
1315 }
1316
1317 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1318
1319 // If store is legal, just bitcast the src pointer.
1320 if (SrcSize <= DstSize) {
1321 Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1322 BuildAggStore(CGF, Src, Dst, DstIsVolatile);
1323 } else {
1324 // Otherwise do coercion through memory. This is stupid, but
1325 // simple.
1326
1327 // Generally SrcSize is never greater than DstSize, since this means we are
1328 // losing bits. However, this can happen in cases where the structure has
1329 // additional padding, for example due to a user specified alignment.
1330 //
1331 // FIXME: Assert that we aren't truncating non-padding bits when we have access
1332 // to that information.
1333 Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1334 CGF.Builder.CreateStore(Src, Tmp);
1335    Address Casted = CGF.Builder.CreateElementBitCast(Tmp, CGF.Int8Ty);
1336    Address DstCasted = CGF.Builder.CreateElementBitCast(Dst, CGF.Int8Ty);
1337 CGF.Builder.CreateMemCpy(DstCasted, Casted,
1338 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
1339 false);
1340 }
1341}
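// [Editorial sketch, not part of CGCall.cpp] Mirror of the coerced load:
// when SrcSize > DstSize and the int/pointer fast path does not apply, Src
// is spilled to a temporary alloca of its own type and only DstSize bytes
// are memcpy'd into Dst, deliberately dropping the trailing (padding)
// bytes.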
1342
1343static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1344 const ABIArgInfo &info) {
1345 if (unsigned offset = info.getDirectOffset()) {
1346 addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1347 addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1348 CharUnits::fromQuantity(offset));
1349 addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1350 }
1351 return addr;
1352}
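// [Editorial sketch, not part of CGCall.cpp] For a direct offset of 8 the
// rewrite above is roughly (CoerceTy*)((char*)addr + 8): an i8 element
// bitcast, a constant in-bounds byte GEP, then a bitcast to the coerced
// type. A zero offset returns the address unchanged.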
1353
1354namespace {
1355
1356/// Encapsulates information about the way function arguments from
1357 /// CGFunctionInfo should be passed to the actual LLVM IR function.
1358class ClangToLLVMArgMapping {
1359 static const unsigned InvalidIndex = ~0U;
1360 unsigned InallocaArgNo;
1361 unsigned SRetArgNo;
1362 unsigned TotalIRArgs;
1363
1364 /// Arguments of the LLVM IR function corresponding to a single Clang argument.
1365 struct IRArgs {
1366 unsigned PaddingArgIndex;
1367 // Argument is expanded to IR arguments at positions
1368 // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1369 unsigned FirstArgIndex;
1370 unsigned NumberOfArgs;
1371
1372 IRArgs()
1373 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1374 NumberOfArgs(0) {}
1375 };
1376
1377 SmallVector<IRArgs, 8> ArgInfo;
1378
1379public:
1380 ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1381 bool OnlyRequiredArgs = false)
1382 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1383 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1384 construct(Context, FI, OnlyRequiredArgs);
1385 }
1386
1387 bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1388 unsigned getInallocaArgNo() const {
1389    assert(hasInallocaArg());
1390 return InallocaArgNo;
1391 }
1392
1393 bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1394 unsigned getSRetArgNo() const {
1395    assert(hasSRetArg());
1396 return SRetArgNo;
1397 }
1398
1399 unsigned totalIRArgs() const { return TotalIRArgs; }
1400
1401 bool hasPaddingArg(unsigned ArgNo) const {
1402    assert(ArgNo < ArgInfo.size());
1403 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1404 }
1405 unsigned getPaddingArgNo(unsigned ArgNo) const {
1406    assert(hasPaddingArg(ArgNo));
1407 return ArgInfo[ArgNo].PaddingArgIndex;
1408 }
1409
1410 /// Returns the index of the first IR argument corresponding to ArgNo, and
1411 /// the number of such IR arguments.
1412 std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1413    assert(ArgNo < ArgInfo.size());
1414 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1415 ArgInfo[ArgNo].NumberOfArgs);
1416 }
1417
1418private:
1419 void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1420 bool OnlyRequiredArgs);
1421};
1422
1423void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1424 const CGFunctionInfo &FI,
1425 bool OnlyRequiredArgs) {
1426 unsigned IRArgNo = 0;
1427 bool SwapThisWithSRet = false;
1428 const ABIArgInfo &RetAI = FI.getReturnInfo();
1429
1430 if (RetAI.getKind() == ABIArgInfo::Indirect) {
1431 SwapThisWithSRet = RetAI.isSRetAfterThis();
1432 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1433 }
1434
1435 unsigned ArgNo = 0;
1436 unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1437 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1438 ++I, ++ArgNo) {
1439    assert(I != FI.arg_end());
1440 QualType ArgType = I->type;
1441 const ABIArgInfo &AI = I->info;
1442 // Collect data about IR arguments corresponding to Clang argument ArgNo.
1443 auto &IRArgs = ArgInfo[ArgNo];
1444
1445 if (AI.getPaddingType())
1446 IRArgs.PaddingArgIndex = IRArgNo++;
1447
1448 switch (AI.getKind()) {
1449 case ABIArgInfo::Extend:
1450 case ABIArgInfo::Direct: {
1451 // FIXME: handle sseregparm someday...
1452 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1453 if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1454 IRArgs.NumberOfArgs = STy->getNumElements();
1455 } else {
1456 IRArgs.NumberOfArgs = 1;
1457 }
1458 break;
1459 }
1460 case ABIArgInfo::Indirect:
1461 IRArgs.NumberOfArgs = 1;
1462 break;
1463 case ABIArgInfo::Ignore:
1464 case ABIArgInfo::InAlloca:
1465      // ignore and inalloca don't have matching LLVM parameters.
1466 IRArgs.NumberOfArgs = 0;
1467 break;
1468 case ABIArgInfo::CoerceAndExpand:
1469 IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1470 break;
1471 case ABIArgInfo::Expand:
1472 IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1473 break;
1474 }
1475
1476 if (IRArgs.NumberOfArgs > 0) {
1477 IRArgs.FirstArgIndex = IRArgNo;
1478 IRArgNo += IRArgs.NumberOfArgs;
1479 }
1480
1481 // Skip over the sret parameter when it comes second. We already handled it
1482 // above.
1483 if (IRArgNo == 1 && SwapThisWithSRet)
1484 IRArgNo++;
1485 }
1486  assert(ArgNo == ArgInfo.size());
1487
1488 if (FI.usesInAlloca())
1489 InallocaArgNo = IRArgNo++;
1490
1491 TotalIRArgs = IRArgNo;
1492}
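// [Editorial sketch, not part of CGCall.cpp] Worked mapping, under assumed
// ABI decisions: a method whose indirect return uses sret-after-this, with
// a Direct 'this' and one Expand argument flattening to two scalars, maps
// as
//
//   IR arg 0: this           (Clang arg 0, FirstArgIndex = 0)
//   IR arg 1: sret pointer   (SRetArgNo = 1)
//   IR args 2-3: expansion   (Clang arg 1, FirstArgIndex = 2, NumberOfArgs = 2)
//
// giving totalIRArgs() == 4; the IRArgNo bump after the first argument is
// what skips the interleaved sret slot.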
1493} // namespace
1494
1495/***/
1496
1497bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1498 const auto &RI = FI.getReturnInfo();
1499 return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1500}
1501
1502bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1503 return ReturnTypeUsesSRet(FI) &&
1504 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1505}
1506
1507bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1508 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1509 switch (BT->getKind()) {
1510 default:
1511 return false;
1512 case BuiltinType::Float:
1513 return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1514 case BuiltinType::Double:
1515 return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1516 case BuiltinType::LongDouble:
1517 return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1518 }
1519 }
1520
1521 return false;
1522}
1523
1524bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1525 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1526 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1527 if (BT->getKind() == BuiltinType::LongDouble)
1528 return getTarget().useObjCFP2RetForComplexLongDouble();
1529 }
1530 }
1531
1532 return false;
1533}
1534
1535llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1536 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1537 return GetFunctionType(FI);
1538}
1539
1540llvm::FunctionType *
1541CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1542
1543 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1544 (void)Inserted;
1545  assert(Inserted && "Recursively being processed?");
1546
1547 llvm::Type *resultType = nullptr;
1548 const ABIArgInfo &retAI = FI.getReturnInfo();
1549 switch (retAI.getKind()) {
1550 case ABIArgInfo::Expand:
1551    llvm_unreachable("Invalid ABI kind for return argument");
1552
1553 case ABIArgInfo::Extend:
1554 case ABIArgInfo::Direct:
1555 resultType = retAI.getCoerceToType();
1556 break;
1557
1558 case ABIArgInfo::InAlloca:
1559 if (retAI.getInAllocaSRet()) {
1560      // sret things on win32 aren't void; they return the sret pointer.
1561 QualType ret = FI.getReturnType();
1562 llvm::Type *ty = ConvertType(ret);
1563 unsigned addressSpace = Context.getTargetAddressSpace(ret);
1564 resultType = llvm::PointerType::get(ty, addressSpace);
1565 } else {
1566 resultType = llvm::Type::getVoidTy(getLLVMContext());
1567 }
1568 break;
1569
1570 case ABIArgInfo::Indirect:
1571 case ABIArgInfo::Ignore:
1572 resultType = llvm::Type::getVoidTy(getLLVMContext());
1573 break;
1574
1575 case ABIArgInfo::CoerceAndExpand:
1576 resultType = retAI.getUnpaddedCoerceAndExpandType();
1577 break;
1578 }
1579
1580 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1581 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1582
1583 // Add type for sret argument.
1584 if (IRFunctionArgs.hasSRetArg()) {
1585 QualType Ret = FI.getReturnType();
1586 llvm::Type *Ty = ConvertType(Ret);
1587 unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1588 ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1589 llvm::PointerType::get(Ty, AddressSpace);
1590 }
1591
1592 // Add type for inalloca argument.
1593 if (IRFunctionArgs.hasInallocaArg()) {
1594 auto ArgStruct = FI.getArgStruct();
1595    assert(ArgStruct);
1596 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1597 }
1598
1599 // Add in all of the required arguments.
1600 unsigned ArgNo = 0;
1601 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1602 ie = it + FI.getNumRequiredArgs();
1603 for (; it != ie; ++it, ++ArgNo) {
1604 const ABIArgInfo &ArgInfo = it->info;
1605
1606 // Insert a padding type to ensure proper alignment.
1607 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1608 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1609 ArgInfo.getPaddingType();
1610
1611 unsigned FirstIRArg, NumIRArgs;
1612 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1613
1614 switch (ArgInfo.getKind()) {
1615 case ABIArgInfo::Ignore:
1616 case ABIArgInfo::InAlloca:
1617      assert(NumIRArgs == 0);
1618 break;
1619
1620 case ABIArgInfo::Indirect: {
1621      assert(NumIRArgs == 1);
1622      // Indirect arguments are always on the stack, which is in the alloca addr space.
1623 llvm::Type *LTy = ConvertTypeForMem(it->type);
1624 ArgTypes[FirstIRArg] = LTy->getPointerTo(
1625 CGM.getDataLayout().getAllocaAddrSpace());
1626 break;
1627 }
1628
1629 case ABIArgInfo::Extend:
1630 case ABIArgInfo::Direct: {
1631 // Fast-isel and the optimizer generally like scalar values better than
1632 // FCAs, so we flatten them if this is safe to do for this argument.
1633 llvm::Type *argType = ArgInfo.getCoerceToType();
1634 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1635 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1636        assert(NumIRArgs == st->getNumElements());
1637 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1638 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1639 } else {
1640        assert(NumIRArgs == 1);
1641 ArgTypes[FirstIRArg] = argType;
1642 }
1643 break;
1644 }
1645
1646 case ABIArgInfo::CoerceAndExpand: {
1647 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1648 for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1649 *ArgTypesIter++ = EltTy;
1650 }
1651      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1652 break;
1653 }
1654
1655 case ABIArgInfo::Expand:
1656 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1657 getExpandedTypes(it->type, ArgTypesIter);
1658      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1659 break;
1660 }
1661 }
1662
1663 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1664  assert(Erased && "Not in set?");
1665
1666 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1667}
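// [Editorial sketch, not part of CGCall.cpp] Example outcome for the
// Direct/flattened path, assuming a target whose ABI coerces a small
// struct to a two-element IR struct that can be flattened: a C signature
// like
//
//   double f(struct P p, double d);   // hypothetical struct P
//
// can produce the IR function type double (i32, i32, double), with p's
// elements occupying two parameter slots. Whether flattening happens is
// decided entirely by the target's ABIArgInfo, not by this routine.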
1668
1669llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1670 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1671 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1672
1673 if (!isFuncTypeConvertible(FPT))
1674 return llvm::StructType::get(getLLVMContext());
1675
1676 return GetFunctionType(GD);
1677}
1678
1679static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1680 llvm::AttrBuilder &FuncAttrs,
1681 const FunctionProtoType *FPT) {
1682 if (!FPT)
1683 return;
1684
1685 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1686 FPT->isNothrow())
1687 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1688}
1689
1690void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1691 bool AttrOnCallSite,
1692 llvm::AttrBuilder &FuncAttrs) {
1693 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1694 if (!HasOptnone) {
1695 if (CodeGenOpts.OptimizeSize)
1696 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1697 if (CodeGenOpts.OptimizeSize == 2)
1698 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1699 }
1700
1701 if (CodeGenOpts.DisableRedZone)
1702 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1703 if (CodeGenOpts.IndirectTlsSegRefs)
1704 FuncAttrs.addAttribute("indirect-tls-seg-refs");
1705 if (CodeGenOpts.NoImplicitFloat)
1706 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1707
1708 if (AttrOnCallSite) {
1709 // Attributes that should go on the call site only.
1710 if (!CodeGenOpts.SimplifyLibCalls ||
1711 CodeGenOpts.isNoBuiltinFunc(Name.data()))
1712 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1713 if (!CodeGenOpts.TrapFuncName.empty())
1714 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1715 } else {
1716 StringRef FpKind;
1717 switch (CodeGenOpts.getFramePointer()) {
1718 case CodeGenOptions::FramePointerKind::None:
1719 FpKind = "none";
1720 break;
1721 case CodeGenOptions::FramePointerKind::NonLeaf:
1722 FpKind = "non-leaf";
1723 break;
1724 case CodeGenOptions::FramePointerKind::All:
1725 FpKind = "all";
1726 break;
1727 }
1728 FuncAttrs.addAttribute("frame-pointer", FpKind);
1729
1730 FuncAttrs.addAttribute("less-precise-fpmad",
1731 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1732
1733 if (CodeGenOpts.NullPointerIsValid)
1734 FuncAttrs.addAttribute("null-pointer-is-valid", "true");
1735 if (!CodeGenOpts.FPDenormalMode.empty())
1736 FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
1737
1738 FuncAttrs.addAttribute("no-trapping-math",
1739 llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1740
1741 // Strict (compliant) code is the default, so only add this attribute to
1742 // indicate that we are trying to workaround a problem case.
1743 if (!CodeGenOpts.StrictFloatCastOverflow)
1744 FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1745
1746 // TODO: Are these all needed?
1747 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1748 FuncAttrs.addAttribute("no-infs-fp-math",
1749 llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1750 FuncAttrs.addAttribute("no-nans-fp-math",
1751 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1752 FuncAttrs.addAttribute("unsafe-fp-math",
1753 llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1754 FuncAttrs.addAttribute("use-soft-float",
1755 llvm::toStringRef(CodeGenOpts.SoftFloat));
1756 FuncAttrs.addAttribute("stack-protector-buffer-size",
1757 llvm::utostr(CodeGenOpts.SSPBufferSize));
1758 FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1759 llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1760 FuncAttrs.addAttribute(
1761 "correctly-rounded-divide-sqrt-fp-math",
1762 llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1763
1764 if (getLangOpts().OpenCL)
1765 FuncAttrs.addAttribute("denorms-are-zero",
1766 llvm::toStringRef(CodeGenOpts.FlushDenorm));
1767
1768 // TODO: Reciprocal estimate codegen options should apply to instructions?
1769 const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1770 if (!Recips.empty())
1771 FuncAttrs.addAttribute("reciprocal-estimates",
1772 llvm::join(Recips, ","));
1773
1774 if (!CodeGenOpts.PreferVectorWidth.empty() &&
1775 CodeGenOpts.PreferVectorWidth != "none")
1776 FuncAttrs.addAttribute("prefer-vector-width",
1777 CodeGenOpts.PreferVectorWidth);
1778
1779 if (CodeGenOpts.StackRealignment)
1780 FuncAttrs.addAttribute("stackrealign");
1781 if (CodeGenOpts.Backchain)
1782 FuncAttrs.addAttribute("backchain");
1783
1784 if (CodeGenOpts.SpeculativeLoadHardening)
1785 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1786 }
1787
1788 if (getLangOpts().assumeFunctionsAreConvergent()) {
1789 // Conservatively, mark all functions and calls in CUDA and OpenCL as
1790 // convergent (meaning, they may call an intrinsically convergent op, such
1791 // as __syncthreads() / barrier(), and so can't have certain optimizations
1792 // applied around them). LLVM will remove this attribute where it safely
1793 // can.
1794 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1795 }
1796
1797 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1798 // Exceptions aren't supported in CUDA device code.
1799 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1800
1801 // Respect -fcuda-flush-denormals-to-zero.
1802 if (CodeGenOpts.FlushDenorm)
1803 FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1804 }
1805
1806 for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1807 StringRef Var, Value;
1808 std::tie(Var, Value) = Attr.split('=');
1809 FuncAttrs.addAttribute(Var, Value);
1810 }
1811}
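// [Editorial sketch, not part of CGCall.cpp] Illustrative result: an
// ordinary -O2 build with -mframe-pointer=none typically stamps functions
// with string attributes along the lines of
//
//   "frame-pointer"="none" "less-precise-fpmad"="false"
//   "no-trapping-math"="false" "stack-protector-buffer-size"="8"
//
// while call-site-only attributes such as "trap-func-name" are added only
// when AttrOnCallSite is true. Exact values track the CodeGenOpts above.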
1812
1813void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1814 llvm::AttrBuilder FuncAttrs;
1815 ConstructDefaultFnAttrList(F.getName(), F.hasOptNone(),
1816 /* AttrOnCallSite = */ false, FuncAttrs);
1817 F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1818}
1819
1820void CodeGenModule::ConstructAttributeList(
1821 StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1822 llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1823 llvm::AttrBuilder FuncAttrs;
1824 llvm::AttrBuilder RetAttrs;
1825
1826 CallingConv = FI.getEffectiveCallingConvention();
1827 if (FI.isNoReturn())
1828 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1829
1830 // If we have information about the function prototype, we can learn
1831 // attributes from there.
1832 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1833 CalleeInfo.getCalleeFunctionProtoType());
1834
1835 const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
1836
1837 bool HasOptnone = false;
1838 // FIXME: handle sseregparm someday...
1839 if (TargetDecl) {
1840 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1841 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1842 if (TargetDecl->hasAttr<NoThrowAttr>())
1843 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1844 if (TargetDecl->hasAttr<NoReturnAttr>())
1845 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1846 if (TargetDecl->hasAttr<ColdAttr>())
1847 FuncAttrs.addAttribute(llvm::Attribute::Cold);
1848 if (TargetDecl->hasAttr<NoDuplicateAttr>())
1849 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1850 if (TargetDecl->hasAttr<ConvergentAttr>())
1851 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1852
1853 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1854 AddAttributesFromFunctionProtoType(
1855 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1856 // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1857 // These attributes are not inherited by overloads.
1858 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1859 if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1860 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1861 }
1862
1863 // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1864 if (TargetDecl->hasAttr<ConstAttr>()) {
1865 FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1866 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1867 } else if (TargetDecl->hasAttr<PureAttr>()) {
1868 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1869 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1870 } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1871 FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1872 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1873 }
1874 if (TargetDecl->hasAttr<RestrictAttr>())
1875 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1876 if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
1877 !CodeGenOpts.NullPointerIsValid)
1878 RetAttrs.addAttribute(llvm::Attribute::NonNull);
1879 if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1880 FuncAttrs.addAttribute("no_caller_saved_registers");
1881 if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1882 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1883
1884 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1885 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1886 Optional<unsigned> NumElemsParam;
1887 if (AllocSize->getNumElemsParam().isValid())
1888 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
1889 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
1890 NumElemsParam);
1891 }
1892 }
1893
1894 ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1895
1896 // This must run after constructing the default function attribute list
1897 // to ensure that the speculative load hardening attribute is removed
1898 // in the case where the -mspeculative-load-hardening flag was passed.
1899 if (TargetDecl) {
1900 if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
1901 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
1902 if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
1903 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1904 }
1905
1906 if (CodeGenOpts.EnableSegmentedStacks &&
1907 !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1908 FuncAttrs.addAttribute("split-stack");
1909
1910 // Add NonLazyBind attribute to function declarations when -fno-plt
1911 // is used.
1912 if (TargetDecl && CodeGenOpts.NoPLT) {
1913 if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1914 if (!Fn->isDefined() && !AttrOnCallSite) {
1915 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1916 }
1917 }
1918 }
1919
1920 if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
1921 if (getLangOpts().OpenCLVersion <= 120) {
1922      // OpenCL v1.2: work groups are always uniform.
1923 FuncAttrs.addAttribute("uniform-work-group-size", "true");
1924 } else {
1925      // OpenCL v2.0: work groups may or may not be uniform. The
1926      // '-cl-uniform-work-group-size' compile option gives the compiler
1927      // a hint that the global work-size is a multiple of the
1928      // work-group size specified to clEnqueueNDRangeKernel
1929      // (i.e. work groups are uniform).
1930 FuncAttrs.addAttribute("uniform-work-group-size",
1931 llvm::toStringRef(CodeGenOpts.UniformWGSize));
1932 }
1933 }
1934
1935 if (!AttrOnCallSite) {
1936 bool DisableTailCalls = false;
1937
1938 if (CodeGenOpts.DisableTailCalls)
1939 DisableTailCalls = true;
1940 else if (TargetDecl) {
1941 if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1942 TargetDecl->hasAttr<AnyX86InterruptAttr>())
1943 DisableTailCalls = true;
1944 else if (CodeGenOpts.NoEscapingBlockTailCalls) {
1945 if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
1946 if (!BD->doesNotEscape())
1947 DisableTailCalls = true;
1948 }
1949 }
1950
1951 FuncAttrs.addAttribute("disable-tail-calls",
1952 llvm::toStringRef(DisableTailCalls));
1953 GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
1954 }
1955
1956 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1957
1958 QualType RetTy = FI.getReturnType();
1959 const ABIArgInfo &RetAI = FI.getReturnInfo();
1960 switch (RetAI.getKind()) {
1961 case ABIArgInfo::Extend:
1962 if (RetAI.isSignExt())
1963 RetAttrs.addAttribute(llvm::Attribute::SExt);
1964 else
1965 RetAttrs.addAttribute(llvm::Attribute::ZExt);
1966    LLVM_FALLTHROUGH;
1967 case ABIArgInfo::Direct:
1968 if (RetAI.getInReg())
1969 RetAttrs.addAttribute(llvm::Attribute::InReg);
1970 break;
1971 case ABIArgInfo::Ignore:
1972 break;
1973
1974 case ABIArgInfo::InAlloca:
1975 case ABIArgInfo::Indirect: {
1976 // inalloca and sret disable readnone and readonly
1977 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1978 .removeAttribute(llvm::Attribute::ReadNone);
1979 break;
1980 }
1981
1982 case ABIArgInfo::CoerceAndExpand:
1983 break;
1984
1985 case ABIArgInfo::Expand:
1986    llvm_unreachable("Invalid ABI kind for return argument");
1987 }
1988
1989 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1990 QualType PTy = RefTy->getPointeeType();
1991 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1992 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1993 .getQuantity());
1994 else if (getContext().getTargetAddressSpace(PTy) == 0 &&
1995 !CodeGenOpts.NullPointerIsValid)
1996 RetAttrs.addAttribute(llvm::Attribute::NonNull);
1997 }
1998
1999 bool hasUsedSRet = false;
2000 SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2001
2002 // Attach attributes to sret.
2003 if (IRFunctionArgs.hasSRetArg()) {
2004 llvm::AttrBuilder SRETAttrs;
2005 SRETAttrs.addAttribute(llvm::Attribute::StructRet);
2006 hasUsedSRet = true;
2007 if (RetAI.getInReg())
2008 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2009 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2010 llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2011 }
2012
2013 // Attach attributes to inalloca argument.
2014 if (IRFunctionArgs.hasInallocaArg()) {
2015 llvm::AttrBuilder Attrs;
2016 Attrs.addAttribute(llvm::Attribute::InAlloca);
2017 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2018 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2019 }
2020
2021 unsigned ArgNo = 0;
2022 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2023 E = FI.arg_end();
2024 I != E; ++I, ++ArgNo) {
2025 QualType ParamType = I->type;
2026 const ABIArgInfo &AI = I->info;
2027 llvm::AttrBuilder Attrs;
2028
2029 // Add attribute for padding argument, if necessary.
2030 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2031 if (AI.getPaddingInReg()) {
2032 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2033 llvm::AttributeSet::get(
2034 getLLVMContext(),
2035 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2036 }
2037 }
2038
2039 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2040 // have the corresponding parameter variable. It doesn't make
2041 // sense to do it here because parameters are so messed up.
2042 switch (AI.getKind()) {
2043 case ABIArgInfo::Extend:
2044 if (AI.isSignExt())
2045 Attrs.addAttribute(llvm::Attribute::SExt);
2046 else
2047 Attrs.addAttribute(llvm::Attribute::ZExt);
2048      LLVM_FALLTHROUGH;
2049 case ABIArgInfo::Direct:
2050 if (ArgNo == 0 && FI.isChainCall())
2051 Attrs.addAttribute(llvm::Attribute::Nest);
2052 else if (AI.getInReg())
2053 Attrs.addAttribute(llvm::Attribute::InReg);
2054 break;
2055
2056 case ABIArgInfo::Indirect: {
2057 if (AI.getInReg())
2058 Attrs.addAttribute(llvm::Attribute::InReg);
2059
2060 if (AI.getIndirectByVal())
2061 Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
2062
2063 CharUnits Align = AI.getIndirectAlign();
2064
2065 // In a byval argument, it is important that the required
2066 // alignment of the type is honored, as LLVM might be creating a
2067 // *new* stack object, and needs to know what alignment to give
2068 // it. (Sometimes it can deduce a sensible alignment on its own,
2069 // but not if clang decides it must emit a packed struct, or the
2070 // user specifies increased alignment requirements.)
2071 //
2072 // This is different from indirect *not* byval, where the object
2073 // exists already, and the align attribute is purely
2074 // informative.
2075      assert(!Align.isZero());
2076
2077 // For now, only add this when we have a byval argument.
2078 // TODO: be less lazy about updating test cases.
2079 if (AI.getIndirectByVal())
2080 Attrs.addAlignmentAttr(Align.getQuantity());
2081
2082 // byval disables readnone and readonly.
2083 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2084 .removeAttribute(llvm::Attribute::ReadNone);
2085 break;
2086 }
2087 case ABIArgInfo::Ignore:
2088 case ABIArgInfo::Expand:
2089 case ABIArgInfo::CoerceAndExpand:
2090 break;
2091
2092 case ABIArgInfo::InAlloca:
2093 // inalloca disables readnone and readonly.
2094 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2095 .removeAttribute(llvm::Attribute::ReadNone);
2096 continue;
2097 }
2098
2099 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2100 QualType PTy = RefTy->getPointeeType();
2101 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2102 Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2103 .getQuantity());
2104 else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2105 !CodeGenOpts.NullPointerIsValid)
2106 Attrs.addAttribute(llvm::Attribute::NonNull);
2107 }
2108
2109 switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2110 case ParameterABI::Ordinary:
2111 break;
2112
2113 case ParameterABI::SwiftIndirectResult: {
2114 // Add 'sret' if we haven't already used it for something, but
2115 // only if the result is void.
2116 if (!hasUsedSRet && RetTy->isVoidType()) {
2117 Attrs.addAttribute(llvm::Attribute::StructRet);
2118 hasUsedSRet = true;
2119 }
2120
2121 // Add 'noalias' in either case.
2122 Attrs.addAttribute(llvm::Attribute::NoAlias);
2123
2124 // Add 'dereferenceable' and 'alignment'.
2125 auto PTy = ParamType->getPointeeType();
2126 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2127 auto info = getContext().getTypeInfoInChars(PTy);
2128 Attrs.addDereferenceableAttr(info.first.getQuantity());
2129 Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
2130 info.second.getQuantity()));
2131 }
2132 break;
2133 }
2134
2135 case ParameterABI::SwiftErrorResult:
2136 Attrs.addAttribute(llvm::Attribute::SwiftError);
2137 break;
2138
2139 case ParameterABI::SwiftContext:
2140 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2141 break;
2142 }
2143
2144 if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2145 Attrs.addAttribute(llvm::Attribute::NoCapture);
2146
2147 if (Attrs.hasAttributes()) {
2148 unsigned FirstIRArg, NumIRArgs;
2149 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2150 for (unsigned i = 0; i < NumIRArgs; i++)
2151 ArgAttrs[FirstIRArg + i] =
2152 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2153 }
2154 }
2155  assert(ArgNo == FI.arg_size());
2156
2157 AttrList = llvm::AttributeList::get(
2158 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2159 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2160}
2161
2162/// An argument came in as a promoted argument; demote it back to its
2163/// declared type.
2164static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2165 const VarDecl *var,
2166 llvm::Value *value) {
2167 llvm::Type *varType = CGF.ConvertType(var->getType());
2168
2169 // This can happen with promotions that actually don't change the
2170 // underlying type, like the enum promotions.
2171 if (value->getType() == varType) return value;
2172
2173  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2174         && "unexpected promotion type");
2175
2176 if (isa<llvm::IntegerType>(varType))
2177 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2178
2179 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2180}
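// [Editorial sketch, not part of CGCall.cpp] Demotion example: a K&R-style
// definition such as
//
//   void f(c) char c; { ... }
//
// receives its argument promoted to int under the default argument
// promotions, so the prolog truncates the incoming i32 back to i8
// ("arg.unpromote"); a float parameter promoted to double takes the FPCast
// branch instead.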
2181
2182/// Returns the attribute (either parameter attribute, or function
2183/// attribute), which declares argument ArgNo to be non-null.
2184static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2185 QualType ArgType, unsigned ArgNo) {
2186 // FIXME: __attribute__((nonnull)) can also be applied to:
2187 // - references to pointers, where the pointee is known to be
2188 // nonnull (apparently a Clang extension)
2189 // - transparent unions containing pointers
2190 // In the former case, LLVM IR cannot represent the constraint. In
2191 // the latter case, we have no guarantee that the transparent union
2192 // is in fact passed as a pointer.
2193 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2194 return nullptr;
2195 // First, check attribute on parameter itself.
2196 if (PVD) {
2197 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2198 return ParmNNAttr;
2199 }
2200 // Check function attributes.
2201 if (!FD)
2202 return nullptr;
2203 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2204 if (NNAttr->isNonNull(ArgNo))
2205 return NNAttr;
2206 }
2207 return nullptr;
2208}
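// [Editorial sketch, not part of CGCall.cpp] The two spellings this helper
// recognizes, in C terms:
//
//   void g(int *p __attribute__((nonnull)));            // on the parameter
//   void h(int *p, int *q) __attribute__((nonnull(2))); // on the function,
//                                                       // 1-based index
//
// Non-pointer arguments are rejected up front, since LLVM's nonnull only
// applies to pointers.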
2209
2210namespace {
2211 struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2212 Address Temp;
2213 Address Arg;
2214 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2215 void Emit(CodeGenFunction &CGF, Flags flags) override {
2216 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2217 CGF.Builder.CreateStore(errorValue, Arg);
2218 }
2219 };
2220}
2221
2222void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2223 llvm::Function *Fn,
2224 const FunctionArgList &Args) {
2225 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2226 // Naked functions don't have prologues.
2227 return;
2228
2229 // If this is an implicit-return-zero function, go ahead and
2230 // initialize the return value. TODO: it might be nice to have
2231 // a more general mechanism for this that didn't require synthesized
2232 // return statements.
2233 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2234 if (FD->hasImplicitReturnZero()) {
2235 QualType RetTy = FD->getReturnType().getUnqualifiedType();
2236 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2237 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2238 Builder.CreateStore(Zero, ReturnValue);
2239 }
2240 }
2241
2242 // FIXME: We no longer need the types from FunctionArgList; lift up and
2243 // simplify.
2244
2245 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2246 // Flattened function arguments.
2247 SmallVector<llvm::Value *, 16> FnArgs;
2248 FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2249 for (auto &Arg : Fn->args()) {
2250 FnArgs.push_back(&Arg);
2251 }
2252  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2253
2254 // If we're using inalloca, all the memory arguments are GEPs off of the last
2255 // parameter, which is a pointer to the complete memory area.
2256 Address ArgStruct = Address::invalid();
2257 if (IRFunctionArgs.hasInallocaArg()) {
2258 ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2259 FI.getArgStructAlignment());
2260
2261    assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2262 }
2263
2264 // Name the struct return parameter.
2265 if (IRFunctionArgs.hasSRetArg()) {
2266 auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2267 AI->setName("agg.result");
2268 AI->addAttr(llvm::Attribute::NoAlias);
2269 }
2270
2271 // Track if we received the parameter as a pointer (indirect, byval, or
2272  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
2273 // into a local alloca for us.
2274 SmallVector<ParamValue, 16> ArgVals;
2275 ArgVals.reserve(Args.size());
2276
2277 // Create a pointer value for every parameter declaration. This usually
2278 // entails copying one or more LLVM IR arguments into an alloca. Don't push
2279 // any cleanups or do anything that might unwind. We do that separately, so
2280 // we can push the cleanups in the correct order for the ABI.
2281  assert(FI.arg_size() == Args.size() &&
2282         "Mismatch between function signature & arguments.");
2283 unsigned ArgNo = 0;
2284 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2285 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2286 i != e; ++i, ++info_it, ++ArgNo) {
2287 const VarDecl *Arg = *i;
2288 const ABIArgInfo &ArgI = info_it->info;
2289
2290 bool isPromoted =
2291 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2292 // We are converting from ABIArgInfo type to VarDecl type directly, unless
2293 // the parameter is promoted. In this case we convert to
2294 // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2295 QualType Ty = isPromoted ? info_it->type : Arg->getType();
2296    assert(hasScalarEvaluationKind(Ty) ==
2297           hasScalarEvaluationKind(Arg->getType()));
2298
2299 unsigned FirstIRArg, NumIRArgs;
2300 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2301
2302 switch (ArgI.getKind()) {
2303 case ABIArgInfo::InAlloca: {
2304      assert(NumIRArgs == 0);
2305 auto FieldIndex = ArgI.getInAllocaFieldIndex();
2306 Address V =
2307 Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2308 ArgVals.push_back(ParamValue::forIndirect(V));
2309 break;
2310 }
2311
2312 case ABIArgInfo::Indirect: {
2313      assert(NumIRArgs == 1);
2314 Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2315
2316 if (!hasScalarEvaluationKind(Ty)) {
2317 // Aggregates and complex variables are accessed by reference. All we
2318 // need to do is realign the value, if requested.
2319 Address V = ParamAddr;
2320 if (ArgI.getIndirectRealign()) {
2321 Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2322
2323 // Copy from the incoming argument pointer to the temporary with the
2324 // appropriate alignment.
2325 //
2326 // FIXME: We should have a common utility for generating an aggregate
2327 // copy.
2328 CharUnits Size = getContext().getTypeSizeInChars(Ty);
2329 auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2330 Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2331 Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2332 Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2333 V = AlignedTemp;
2334 }
2335 ArgVals.push_back(ParamValue::forIndirect(V));
2336 } else {
2337 // Load scalar value from indirect argument.
2338 llvm::Value *V =
2339 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2340
2341 if (isPromoted)
2342 V = emitArgumentDemotion(*this, Arg, V);
2343 ArgVals.push_back(ParamValue::forDirect(V));
2344 }
2345 break;
2346 }
2347
2348 case ABIArgInfo::Extend:
2349 case ABIArgInfo::Direct: {
2350
2351 // If we have the trivial case, handle it with no muss and fuss.
2352 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2353 ArgI.getCoerceToType() == ConvertType(Ty) &&
2354 ArgI.getDirectOffset() == 0) {
2355        assert(NumIRArgs == 1);
2356 llvm::Value *V = FnArgs[FirstIRArg];
2357 auto AI = cast<llvm::Argument>(V);
2358
2359 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2360 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2361 PVD->getFunctionScopeIndex()) &&
2362 !CGM.getCodeGenOpts().NullPointerIsValid)
2363 AI->addAttr(llvm::Attribute::NonNull);
2364
2365 QualType OTy = PVD->getOriginalType();
2366 if (const auto *ArrTy =
2367 getContext().getAsConstantArrayType(OTy)) {
2368 // A C99 array parameter declaration with the static keyword also
2369 // indicates dereferenceability, and if the size is constant we can
2370 // use the dereferenceable attribute (which requires the size in
2371 // bytes).
2372 if (ArrTy->getSizeModifier() == ArrayType::Static) {
2373 QualType ETy = ArrTy->getElementType();
2374 uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2375 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2376 ArrSize) {
2377 llvm::AttrBuilder Attrs;
2378 Attrs.addDereferenceableAttr(
2379 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2380 AI->addAttrs(Attrs);
2381 } else if (getContext().getTargetAddressSpace(ETy) == 0 &&
2382 !CGM.getCodeGenOpts().NullPointerIsValid) {
2383 AI->addAttr(llvm::Attribute::NonNull);
2384 }
2385 }
2386 } else if (const auto *ArrTy =
2387 getContext().getAsVariableArrayType(OTy)) {
2388 // For C99 VLAs with the static keyword, we don't know the size so
2389 // we can't use the dereferenceable attribute, but in addrspace(0)
2390 // we know that it must be nonnull.
2391 if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2392 !getContext().getTargetAddressSpace(ArrTy->getElementType()) &&
2393 !CGM.getCodeGenOpts().NullPointerIsValid)
2394 AI->addAttr(llvm::Attribute::NonNull);
2395 }
2396
2397 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2398 if (!AVAttr)
2399 if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2400 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2401 if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
2402 // If the alignment-assumption sanitizer is enabled, we do *not* add the
2403 // alignment attribute here; instead we emit a normal alignment assumption
2404 // so that the UBSan check can function.
2405 llvm::Value *AlignmentValue =
2406 EmitScalarExpr(AVAttr->getAlignment());
2407 llvm::ConstantInt *AlignmentCI =
2408 cast<llvm::ConstantInt>(AlignmentValue);
2409 unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2410 +llvm::Value::MaximumAlignment);
2411 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2412 }
2413 }
2414
2415 if (Arg->getType().isRestrictQualified())
2416 AI->addAttr(llvm::Attribute::NoAlias);
2417
2418 // LLVM expects swifterror parameters to be used in very restricted
2419 // ways. Copy the value into a less-restricted temporary.
2420 if (FI.getExtParameterInfo(ArgNo).getABI()
2421 == ParameterABI::SwiftErrorResult) {
2422 QualType pointeeTy = Ty->getPointeeType();
2423 assert(pointeeTy->isPointerType());
2424 Address temp =
2425 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2426 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2427 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2428 Builder.CreateStore(incomingErrorValue, temp);
2429 V = temp.getPointer();
2430
2431 // Push a cleanup to copy the value back at the end of the function.
2432 // The convention does not guarantee that the value will be written
2433 // back if the function exits with an unwind exception.
2434 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2435 }
2436
2437 // Ensure the argument is the correct type.
2438 if (V->getType() != ArgI.getCoerceToType())
2439 V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2440
2441 if (isPromoted)
2442 V = emitArgumentDemotion(*this, Arg, V);
2443
2444 // Because of merging of function types from multiple decls it is
2445 // possible for the type of an argument to not match the corresponding
2446 // type in the function type. Since we are codegening the callee
2447 // in here, add a cast to the argument type.
2448 llvm::Type *LTy = ConvertType(Arg->getType());
2449 if (V->getType() != LTy)
2450 V = Builder.CreateBitCast(V, LTy);
2451
2452 ArgVals.push_back(ParamValue::forDirect(V));
2453 break;
2454 }
2455
2456 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2457 Arg->getName());
2458
2459 // Pointer to store into.
2460 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2461
2462 // Fast-isel and the optimizer generally like scalar values better than
2463 // FCAs, so we flatten them if this is safe to do for this argument.
2464 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2465 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2466 STy->getNumElements() > 1) {
2467 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2468 llvm::Type *DstTy = Ptr.getElementType();
2469 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2470
2471 Address AddrToStoreInto = Address::invalid();
2472 if (SrcSize <= DstSize) {
2473 AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2474 } else {
2475 AddrToStoreInto =
2476 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2477 }
2478
2479 assert(STy->getNumElements() == NumIRArgs);
2480 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2481 auto AI = FnArgs[FirstIRArg + i];
2482 AI->setName(Arg->getName() + ".coerce" + Twine(i));
2483 Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
2484 Builder.CreateStore(AI, EltPtr);
2485 }
2486
2487 if (SrcSize > DstSize) {
2488 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2489 }
2490
2491 } else {
2492 // Simple case, just do a coerced store of the argument into the alloca.
2493 assert(NumIRArgs == 1);
2494 auto AI = FnArgs[FirstIRArg];
2495 AI->setName(Arg->getName() + ".coerce");
2496 CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
2497 }
2498
2499 // Match to what EmitParmDecl is expecting for this type.
2500 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2501 llvm::Value *V =
2502 EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2503 if (isPromoted)
2504 V = emitArgumentDemotion(*this, Arg, V);
2505 ArgVals.push_back(ParamValue::forDirect(V));
2506 } else {
2507 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2508 }
2509 break;
2510 }
2511
2512 case ABIArgInfo::CoerceAndExpand: {
2513 // Reconstruct into a temporary.
2514 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2515 ArgVals.push_back(ParamValue::forIndirect(alloca));
2516
2517 auto coercionType = ArgI.getCoerceAndExpandType();
2518 alloca = Builder.CreateElementBitCast(alloca, coercionType);
2519
2520 unsigned argIndex = FirstIRArg;
2521 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2522 llvm::Type *eltType = coercionType->getElementType(i);
2523 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2524 continue;
2525
2526 auto eltAddr = Builder.CreateStructGEP(alloca, i);
2527 auto elt = FnArgs[argIndex++];
2528 Builder.CreateStore(elt, eltAddr);
2529 }
2530 assert(argIndex == FirstIRArg + NumIRArgs);
2531 break;
2532 }
2533
2534 case ABIArgInfo::Expand: {
2535 // If this structure was expanded into multiple arguments then
2536 // we need to create a temporary and reconstruct it from the
2537 // arguments.
2538 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2539 LValue LV = MakeAddrLValue(Alloca, Ty);
2540 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2541
2542 auto FnArgIter = FnArgs.begin() + FirstIRArg;
2543 ExpandTypeFromArgs(Ty, LV, FnArgIter);
2544 assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2545 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2546 auto AI = FnArgs[FirstIRArg + i];
2547 AI->setName(Arg->getName() + "." + Twine(i));
2548 }
2549 break;
2550 }
2551
2552 case ABIArgInfo::Ignore:
2553 assert(NumIRArgs == 0);
2554 // Initialize the local variable appropriately.
2555 if (!hasScalarEvaluationKind(Ty)) {
2556 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2557 } else {
2558 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2559 ArgVals.push_back(ParamValue::forDirect(U));
2560 }
2561 break;
2562 }
2563 }
2564
2565 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2566 for (int I = Args.size() - 1; I >= 0; --I)
2567 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2568 } else {
2569 for (unsigned I = 0, E = Args.size(); I != E; ++I)
2570 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2571 }
2572}
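
Editor's note: a concrete illustration of the "flatten FCAs" path in the Direct case above. The struct, function, and IR names below are assumed for illustration (x86-64 SysV lowering) and are not taken from this report:

// Given
//   struct P { double x; long y; };
//   void g(P p);
// the SysV x86-64 ABI coerces P to the LLVM type { double, i64 }, so
// the function arrives as
//   define void @g(double %p.coerce0, i64 %p.coerce1)
// and the STy->getNumElements() loop above stores each %p.coerce<i>
// into the local alloca for 'p', element by element.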
2573
2574static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2575 while (insn->use_empty()) {
2576 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2577 if (!bitcast) return;
2578
2579 // This is "safe" because we would have used a ConstantExpr otherwise.
2580 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2581 bitcast->eraseFromParent();
2582 }
2583}
2584
2585/// Try to emit a fused autorelease of a return result.
2586static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2587 llvm::Value *result) {
2588 // We must be immediately followed by the cast.
2589 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2590 if (BB->empty()) return nullptr;
2591 if (&BB->back() != result) return nullptr;
2592
2593 llvm::Type *resultType = result->getType();
2594
2595 // result is in a BasicBlock and is therefore an Instruction.
2596 llvm::Instruction *generator = cast<llvm::Instruction>(result);
2597
2598 SmallVector<llvm::Instruction *, 4> InstsToKill;
2599
2600 // Look for:
2601 // %generator = bitcast %type1* %generator2 to %type2*
2602 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2603 // We would have emitted this as a constant if the operand weren't
2604 // an Instruction.
2605 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2606
2607 // Require the generator to be immediately followed by the cast.
2608 if (generator->getNextNode() != bitcast)
2609 return nullptr;
2610
2611 InstsToKill.push_back(bitcast);
2612 }
2613
2614 // Look for:
2615 // %generator = call i8* @objc_retain(i8* %originalResult)
2616 // or
2617 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2618 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2619 if (!call) return nullptr;
2620
2621 bool doRetainAutorelease;
2622
2623 if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2624 doRetainAutorelease = true;
2625 } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2626 .objc_retainAutoreleasedReturnValue) {
2627 doRetainAutorelease = false;
2628
2629 // If we emitted an assembly marker for this call (and the
2630 // ARCEntrypoints field should have been set if so), go looking
2631 // for that call. If we can't find it, we can't do this
2632 // optimization. But it should always be the immediately previous
2633 // instruction, unless we needed bitcasts around the call.
2634 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2635 llvm::Instruction *prev = call->getPrevNode();
2636 assert(prev);
2637 if (isa<llvm::BitCastInst>(prev)) {
2638 prev = prev->getPrevNode();
2639 assert(prev);
2640 }
2641 assert(isa<llvm::CallInst>(prev));
2642 assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2643 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2644 InstsToKill.push_back(prev);
2645 }
2646 } else {
2647 return nullptr;
2648 }
2649
2650 result = call->getArgOperand(0);
2651 InstsToKill.push_back(call);
2652
2653 // Keep killing bitcasts, for sanity. Note that we no longer care
2654 // about precise ordering as long as there's exactly one use.
2655 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2656 if (!bitcast->hasOneUse()) break;
2657 InstsToKill.push_back(bitcast);
2658 result = bitcast->getOperand(0);
2659 }
2660
2661 // Delete all the unnecessary instructions, from latest to earliest.
2662 for (auto *I : InstsToKill)
2663 I->eraseFromParent();
2664
2665 // Do the fused retain/autorelease if we were asked to.
2666 if (doRetainAutorelease)
2667 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2668
2669 // Cast back to the result type.
2670 return CGF.Builder.CreateBitCast(result, resultType);
2671}
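
Editor's note: a sketch of the IR shape this routine matches, under assumed value names (illustration only; the marker call is target-configured):

// Before:
//   %v2 = bitcast i8* %v to i8*                                 ; optional
//   %r  = call i8* @objc_retainAutoreleasedReturnValue(i8* %v2)
//   ret i8* %r
// After: the casts, marker, and retain call are erased; for plain
// objc_retain the result is re-emitted as a fused retain/autorelease
// (EmitARCRetainAutoreleaseReturnValue), otherwise it is returned as-is.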
2672
2673/// If this is a +1 of the value of an immutable 'self', remove it.
2674static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2675 llvm::Value *result) {
2676 // This is only applicable to a method with an immutable 'self'.
2677 const ObjCMethodDecl *method =
2678 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2679 if (!method) return nullptr;
2680 const VarDecl *self = method->getSelfDecl();
2681 if (!self->getType().isConstQualified()) return nullptr;
2682
2683 // Look for a retain call.
2684 llvm::CallInst *retainCall =
2685 dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2686 if (!retainCall ||
2687 retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2688 return nullptr;
2689
2690 // Look for an ordinary load of 'self'.
2691 llvm::Value *retainedValue = retainCall->getArgOperand(0);
2692 llvm::LoadInst *load =
2693 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2694 if (!load || load->isAtomic() || load->isVolatile() ||
2695 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2696 return nullptr;
2697
2698 // Okay! Burn it all down. This relies for correctness on the
2699 // assumption that the retain is emitted as part of the return and
2700 // that thereafter everything is used "linearly".
2701 llvm::Type *resultType = result->getType();
2702 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2703 assert(retainCall->use_empty());
2704 retainCall->eraseFromParent();
2705 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2706
2707 return CGF.Builder.CreateBitCast(load, resultType);
2708}
2709
2710/// Emit an ARC autorelease of the result of a function.
2711///
2712/// \return the value to actually return from the function
2713static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2714 llvm::Value *result) {
2715 // If we're returning 'self', kill the initial retain. This is a
2716 // heuristic attempt to "encourage correctness" in the really unfortunate
2717 // case where we have a return of self during a dealloc and we desperately
2718 // need to avoid the possible autorelease.
2719 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2720 return self;
2721
2722 // At -O0, try to emit a fused retain/autorelease.
2723 if (CGF.shouldUseFusedARCCalls())
2724 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2725 return fused;
2726
2727 return CGF.EmitARCAutoreleaseReturnValue(result);
2728}
2729
2730/// Heuristically search for a dominating store to the return-value slot.
2731static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2732 // Check if a User is a store whose pointer operand is the ReturnValue.
2733 // We are looking for stores to the ReturnValue, not for stores of the
2734 // ReturnValue to some other location.
2735 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2736 auto *SI = dyn_cast<llvm::StoreInst>(U);
2737 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2738 return nullptr;
2739 // These aren't actually possible for non-coerced returns, and we
2740 // only care about non-coerced returns on this code path.
2741 assert(!SI->isAtomic() && !SI->isVolatile());
2742 return SI;
2743 };
2744 // If there are multiple uses of the return-value slot, just check
2745 // for something immediately preceding the IP. Sometimes this can
2746 // happen with how we generate implicit-returns; it can also happen
2747 // with noreturn cleanups.
2748 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2749 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2750 if (IP->empty()) return nullptr;
2751 llvm::Instruction *I = &IP->back();
2752
2753 // Skip lifetime markers
2754 for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2755 IE = IP->rend();
2756 II != IE; ++II) {
2757 if (llvm::IntrinsicInst *Intrinsic =
2758 dyn_cast<llvm::IntrinsicInst>(&*II)) {
2759 if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2760 const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2761 ++II;
2762 if (II == IE)
2763 break;
2764 if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2765 continue;
2766 }
2767 }
2768 I = &*II;
2769 break;
2770 }
2771
2772 return GetStoreIfValid(I);
2773 }
2774
2775 llvm::StoreInst *store =
2776 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2777 if (!store) return nullptr;
2778
2779 // Now do a quick-and-dirty dominance check: just walk up the
2780 // single-predecessors chain from the current insertion point.
2781 llvm::BasicBlock *StoreBB = store->getParent();
2782 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2783 while (IP != StoreBB) {
2784 if (!(IP = IP->getSinglePredecessor()))
2785 return nullptr;
2786 }
2787
2788 // Okay, the store's basic block dominates the insertion point; we
2789 // can do our thing.
2790 return store;
2791}
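
Editor's note: the final walk above is a conservative dominance test. A minimal standalone sketch of the same idea (hypothetical helper, not part of CGCall.cpp):

#include "llvm/IR/BasicBlock.h"

// Returns true iff StoreBB dominates IP along a unique chain of
// single predecessors; bails out conservatively at the first merge
// point or at the entry block, exactly like the loop above.
static bool dominatesViaSinglePredecessors(llvm::BasicBlock *StoreBB,
                                           llvm::BasicBlock *IP) {
  while (IP != StoreBB) {
    IP = IP->getSinglePredecessor();
    if (!IP)
      return false; // merge point or entry block: give up
  }
  return true;
}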
2792
2793void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2794 bool EmitRetDbgLoc,
2795 SourceLocation EndLoc) {
2796 if (FI.isNoReturn()) {
2797 // Noreturn functions don't return.
2798 EmitUnreachable(EndLoc);
2799 return;
2800 }
2801
2802 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2803 // Naked functions don't have epilogues.
2804 Builder.CreateUnreachable();
2805 return;
2806 }
2807
2808 // Functions with no result always return void.
2809 if (!ReturnValue.isValid()) {
2810 Builder.CreateRetVoid();
2811 return;
2812 }
2813
2814 llvm::DebugLoc RetDbgLoc;
2815 llvm::Value *RV = nullptr;
2816 QualType RetTy = FI.getReturnType();
2817 const ABIArgInfo &RetAI = FI.getReturnInfo();
2818
2819 switch (RetAI.getKind()) {
2820 case ABIArgInfo::InAlloca:
2821 // Aggregates get evaluated directly into the destination. Sometimes we
2822 // need to return the sret value in a register, though.
2823 assert(hasAggregateEvaluationKind(RetTy));
2824 if (RetAI.getInAllocaSRet()) {
2825 llvm::Function::arg_iterator EI = CurFn->arg_end();
2826 --EI;
2827 llvm::Value *ArgStruct = &*EI;
2828 llvm::Value *SRet = Builder.CreateStructGEP(
2829 nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2830 RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2831 }
2832 break;
2833
2834 case ABIArgInfo::Indirect: {
2835 auto AI = CurFn->arg_begin();
2836 if (RetAI.isSRetAfterThis())
2837 ++AI;
2838 switch (getEvaluationKind(RetTy)) {
2839 case TEK_Complex: {
2840 ComplexPairTy RT =
2841 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2842 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2843 /*isInit*/ true);
2844 break;
2845 }
2846 case TEK_Aggregate:
2847 // Do nothing; aggregates get evaluated directly into the destination.
2848 break;
2849 case TEK_Scalar:
2850 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2851 MakeNaturalAlignAddrLValue(&*AI, RetTy),
2852 /*isInit*/ true);
2853 break;
2854 }
2855 break;
2856 }
2857
2858 case ABIArgInfo::Extend:
2859 case ABIArgInfo::Direct:
2860 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2861 RetAI.getDirectOffset() == 0) {
2862 // The internal return value temp will always have pointer-to-return-type
2863 // type; just do a load.
2864
2865 // If there is a dominating store to ReturnValue, we can elide
2866 // the load, zap the store, and usually zap the alloca.
2867 if (llvm::StoreInst *SI =
2868 findDominatingStoreToReturnValue(*this)) {
2869 // Reuse the debug location from the store unless there is
2870 // cleanup code to be emitted between the store and return
2871 // instruction.
2872 if (EmitRetDbgLoc && !AutoreleaseResult)
2873 RetDbgLoc = SI->getDebugLoc();
2874 // Get the stored value and nuke the now-dead store.
2875 RV = SI->getValueOperand();
2876 SI->eraseFromParent();
2877
2878 // Otherwise, we have to do a simple load.
2879 } else {
2880 RV = Builder.CreateLoad(ReturnValue);
2881 }
2882 } else {
2883 // If the value is offset in memory, apply the offset now.
2884 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2885
2886 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2887 }
2888
2889 // In ARC, end functions that return a retainable type with a call
2890 // to objc_autoreleaseReturnValue.
2891 if (AutoreleaseResult) {
2892#ifndef NDEBUG
2893 // Type::isObjCRetainableType has to be called on a QualType that hasn't
2894 // been stripped of its typedefs, so we cannot use RetTy here. Get the
2895 // original return type of the FunctionDecl, ObjCMethodDecl, or BlockDecl
2896 // from CurCodeDecl or BlockInfo.
2897 QualType RT;
2898
2899 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2900 RT = FD->getReturnType();
2901 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2902 RT = MD->getReturnType();
2903 else if (isa<BlockDecl>(CurCodeDecl))
2904 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2905 else
2906 llvm_unreachable("Unexpected function/method type");
2907
2908 assert(getLangOpts().ObjCAutoRefCount &&
2909 !FI.isReturnsRetained() &&
2910 RT->isObjCRetainableType());
2911#endif
2912 RV = emitAutoreleaseOfResult(*this, RV);
2913 }
2914
2915 break;
2916
2917 case ABIArgInfo::Ignore:
2918 break;
2919
2920 case ABIArgInfo::CoerceAndExpand: {
2921 auto coercionType = RetAI.getCoerceAndExpandType();
2922
2923 // Load all of the coerced elements out into results.
2924 llvm::SmallVector<llvm::Value*, 4> results;
2925 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2926 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2927 auto coercedEltType = coercionType->getElementType(i);
2928 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2929 continue;
2930
2931 auto eltAddr = Builder.CreateStructGEP(addr, i);
2932 auto elt = Builder.CreateLoad(eltAddr);
2933 results.push_back(elt);
2934 }
2935
2936 // If we have one result, it's the single direct result type.
2937 if (results.size() == 1) {
2938 RV = results[0];
2939
2940 // Otherwise, we need to make a first-class aggregate.
2941 } else {
2942 // Construct a return type that lacks padding elements.
2943 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2944
2945 RV = llvm::UndefValue::get(returnType);
2946 for (unsigned i = 0, e = results.size(); i != e; ++i) {
2947 RV = Builder.CreateInsertValue(RV, results[i], i);
2948 }
2949 }
2950 break;
2951 }
2952
2953 case ABIArgInfo::Expand:
2954 llvm_unreachable("Invalid ABI kind for return argument");
2955 }
2956
2957 llvm::Instruction *Ret;
2958 if (RV) {
2959 EmitReturnValueCheck(RV);
2960 Ret = Builder.CreateRet(RV);
2961 } else {
2962 Ret = Builder.CreateRetVoid();
2963 }
2964
2965 if (RetDbgLoc)
2966 Ret->setDebugLoc(std::move(RetDbgLoc));
2967}
2968
2969void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
2970 // A current decl may not be available when emitting vtable thunks.
2971 if (!CurCodeDecl)
2972 return;
2973
2974 ReturnsNonNullAttr *RetNNAttr = nullptr;
2975 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
2976 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
2977
2978 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
2979 return;
2980
2981 // Prefer the returns_nonnull attribute if it's present.
2982 SourceLocation AttrLoc;
2983 SanitizerMask CheckKind;
2984 SanitizerHandler Handler;
2985 if (RetNNAttr) {
2986 assert(!requiresReturnValueNullabilityCheck() &&
2987 "Cannot check nullability and the nonnull attribute");
2988 AttrLoc = RetNNAttr->getLocation();
2989 CheckKind = SanitizerKind::ReturnsNonnullAttribute;
2990 Handler = SanitizerHandler::NonnullReturn;
2991 } else {
2992 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
2993 if (auto *TSI = DD->getTypeSourceInfo())
2994 if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
2995 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
2996 CheckKind = SanitizerKind::NullabilityReturn;
2997 Handler = SanitizerHandler::NullabilityReturn;
2998 }
2999
3000 SanitizerScope SanScope(this);
3001
3002 // Make sure the "return" source location is valid. If we're checking a
3003 // nullability annotation, make sure the preconditions for the check are met.
3004 llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3005 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3006 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3007 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3008 if (requiresReturnValueNullabilityCheck())
3009 CanNullCheck =
3010 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3011 Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3012 EmitBlock(Check);
3013
3014 // Now do the null check.
3015 llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3016 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3017 llvm::Value *DynamicData[] = {SLocPtr};
3018 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3019
3020 EmitBlock(NoCheck);
3021
3022#ifndef NDEBUG
3023 // The return location should not be used after the check has been emitted.
3024 ReturnLocation = Address::invalid();
3025#endif
3026}
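
Editor's note: an assumed source-level trigger for this check (illustration only; compile with -fsanitize=nullability-return):

// The nullptr return below reaches the "nullcheck" block emitted above
// and invokes the NullabilityReturn handler with the saved return
// source location.
int *_Nonnull pick(int *p, bool c) {
  return c ? p : nullptr;
}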
3027
3028static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3029 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3030 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3031}
3032
3033static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3034 QualType Ty) {
3035 // FIXME: Generate IR in one pass, rather than going back and fixing up these
3036 // placeholders.
3037 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3038 llvm::Type *IRPtrTy = IRTy->getPointerTo();
3039 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3040
3041 // FIXME: When we generate this IR in one pass, we shouldn't need
3042 // this win32-specific alignment hack.
3043 CharUnits Align = CharUnits::fromQuantity(4);
3044 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3045
3046 return AggValueSlot::forAddr(Address(Placeholder, Align),
3047 Ty.getQualifiers(),
3048 AggValueSlot::IsNotDestructed,
3049 AggValueSlot::DoesNotNeedGCBarriers,
3050 AggValueSlot::IsNotAliased,
3051 AggValueSlot::DoesNotOverlap);
3052}
3053
3054void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3055 const VarDecl *param,
3056 SourceLocation loc) {
3057 // StartFunction converted the ABI-lowered parameter(s) into a
3058 // local alloca. We need to turn that into an r-value suitable
3059 // for EmitCall.
3060 Address local = GetAddrOfLocalVar(param);
3061
3062 QualType type = param->getType();
3063
3064 if (isInAllocaArgument(CGM.getCXXABI(), type)) {
1. Taking false branch
3065 CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3066 }
3067
3068 // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3069 // but the argument needs to be the original pointer.
3070 if (type->isReferenceType()) {
2. Taking false branch
3071 args.add(RValue::get(Builder.CreateLoad(local)), type);
3072
3073 // In ARC, move out of consumed arguments so that the release cleanup
3074 // entered by StartFunction doesn't cause an over-release. This isn't
3075 // optimal -O0 code generation, but it should get cleaned up when
3076 // optimization is enabled. This also assumes that delegate calls are
3077 // performed exactly once for a set of arguments, but that should be safe.
3078 } else if (getLangOpts().ObjCAutoRefCount &&
3. Assuming field 'ObjCAutoRefCount' is 0
3079 param->hasAttr<NSConsumedAttr>() &&
3080 type->isObjCRetainableType()) {
3081 llvm::Value *ptr = Builder.CreateLoad(local);
3082 auto null =
3083 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3084 Builder.CreateStore(null, local);
3085 args.add(RValue::get(ptr), type);
3086
3087 // For the most part, we just need to load the alloca, except that
3088 // aggregate r-values are actually pointers to temporaries.
3089 } else {
3090 args.add(convertTempToRValue(local, type, loc), type);
3091 }
3092
3093 // Deactivate the cleanup for the callee-destructed param that was pushed.
3094 if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
4. Calling 'CodeGenFunction::hasAggregateEvaluationKind'
7. Returning from 'CodeGenFunction::hasAggregateEvaluationKind'
8. Assuming field 'CurFuncIsThunk' is false
3095 type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
9. Assuming the object is not a 'RecordType'
10. Called C++ object pointer is null
3096 param->needsDestruction(getContext())) {
3097 EHScopeStack::stable_iterator cleanup =
3098 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3099 assert(cleanup.isValid() &&
3100 "cleanup for callee-destructed param not recorded");
3101 // This unreachable is a temporary marker which will be removed later.
3102 llvm::Instruction *isActive = Builder.CreateUnreachable();
3103 args.addArgCleanupDeactivation(cleanup, isActive);
3104 }
3105}
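
Editor's note: steps 9-10 above are the reported defect. When 'type' has aggregate evaluation kind but type->getAs<RecordType>() returns null (the analyzer's assumption at step 9), the ->getDecl() call at line 3095 dereferences a null pointer. A hedged sketch of one possible guard (not necessarily the fix adopted upstream):

// Test the RecordType before dereferencing it; a null result simply
// means there is no callee-destructed-param cleanup to deactivate.
if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk) {
  if (const auto *RT = type->getAs<RecordType>()) {
    if (RT->getDecl()->isParamDestroyedInCallee() &&
        param->needsDestruction(getContext())) {
      // ... look up and deactivate the cleanup as in the original ...
    }
  }
}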
3106
3107static bool isProvablyNull(llvm::Value *addr) {
3108 return isa<llvm::ConstantPointerNull>(addr);
3109}
3110
3111/// Emit the actual writing-back of a writeback.
3112static void emitWriteback(CodeGenFunction &CGF,
3113 const CallArgList::Writeback &writeback) {
3114 const LValue &srcLV = writeback.Source;
3115 Address srcAddr = srcLV.getAddress();
3116 assert(!isProvablyNull(srcAddr.getPointer()) &&
3117 "shouldn't have writeback for provably null argument");
3118
3119 llvm::BasicBlock *contBB = nullptr;
3120
3121 // If the argument wasn't provably non-null, we need to null check
3122 // before doing the store.
3123 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3124 CGF.CGM.getDataLayout());
3125 if (!provablyNonNull) {
3126 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3127 contBB = CGF.createBasicBlock("icr.done");
3128
3129 llvm::Value *isNull =
3130 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3131 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3132 CGF.EmitBlock(writebackBB);
3133 }
3134
3135 // Load the value to writeback.
3136 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3137
3138 // Cast it back, in case we're writing an id to a Foo* or something.
3139 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3140 "icr.writeback-cast");
3141
3142 // Perform the writeback.
3143
3144 // If we have a "to use" value, it's something we need to emit a use
3145 // of. This has to be carefully threaded in: if it's done after the
3146 // release it's potentially undefined behavior (and the optimizer
3147 // will ignore it), and if it happens before the retain then the
3148 // optimizer could move the release there.
3149 if (writeback.ToUse) {
3150 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3151
3152 // Retain the new value. No need to block-copy here: the block's
3153 // being passed up the stack.
3154 value = CGF.EmitARCRetainNonBlock(value);
3155
3156 // Emit the intrinsic use here.
3157 CGF.EmitARCIntrinsicUse(writeback.ToUse);
3158
3159 // Load the old value (primitively).
3160 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3161
3162 // Put the new value in place (primitively).
3163 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3164
3165 // Release the old value.
3166 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3167
3168 // Otherwise, we can just do a normal lvalue store.
3169 } else {
3170 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3171 }
3172
3173 // Jump to the continuation block.
3174 if (!provablyNonNull)
3175 CGF.EmitBlock(contBB);
3176}
3177
3178static void emitWritebacks(CodeGenFunction &CGF,
3179 const CallArgList &args) {
3180 for (const auto &I : args.writebacks())
3181 emitWriteback(CGF, I);
3182}
3183
3184static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3185 const CallArgList &CallArgs) {
3186 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3187 CallArgs.getCleanupsToDeactivate();
3188 // Iterate in reverse to increase the likelihood of popping the cleanup.
3189 for (const auto &I : llvm::reverse(Cleanups)) {
3190 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3191 I.IsActiveIP->eraseFromParent();
3192 }
3193}
3194
3195static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3196 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3197 if (uop->getOpcode() == UO_AddrOf)
3198 return uop->getSubExpr();
3199 return nullptr;
3200}
3201
3202/// Emit an argument that's being passed call-by-writeback. That is,
3203/// we are passing the address of an __autoreleased temporary; it
3204/// might be copy-initialized with the current value of the given
3205/// address, but it will definitely be copied out of after the call.
3206static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3207 const ObjCIndirectCopyRestoreExpr *CRE) {
3208 LValue srcLV;
3209
3210 // Make an optimistic effort to emit the address as an l-value.
3211 // This can fail if the argument expression is more complicated.
3212 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3213 srcLV = CGF.EmitLValue(lvExpr);
3214
3215 // Otherwise, just emit it as a scalar.
3216 } else {
3217 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3218
3219 QualType srcAddrType =
3220 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3221 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3222 }
3223 Address srcAddr = srcLV.getAddress();
3224
3225 // The dest and src types don't necessarily match in LLVM terms
3226 // because of the crazy ObjC compatibility rules.
3227
3228 llvm::PointerType *destType =
3229 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3230
3231 // If the address is a constant null, just pass the appropriate null.
3232 if (isProvablyNull(srcAddr.getPointer())) {
3233 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3234 CRE->getType());
3235 return;
3236 }
3237
3238 // Create the temporary.
3239 Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3240 CGF.getPointerAlign(),
3241 "icr.temp");
3242 // Loading an l-value can introduce a cleanup if the l-value is __weak,
3243 // and that cleanup will be conditional if we can't prove that the l-value
3244 // isn't null, so we need to register a dominating point so that the cleanups
3245 // system will make valid IR.
3246 CodeGenFunction::ConditionalEvaluation condEval(CGF);
3247
3248 // Zero-initialize it if we're not doing a copy-initialization.
3249 bool shouldCopy = CRE->shouldCopy();
3250 if (!shouldCopy) {
3251 llvm::Value *null =
3252 llvm::ConstantPointerNull::get(
3253 cast<llvm::PointerType>(destType->getElementType()));
3254 CGF.Builder.CreateStore(null, temp);
3255 }
3256
3257 llvm::BasicBlock *contBB = nullptr;
3258 llvm::BasicBlock *originBB = nullptr;
3259
3260 // If the address is *not* known to be non-null, we need to switch.
3261 llvm::Value *finalArgument;
3262
3263 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3264 CGF.CGM.getDataLayout());
3265 if (provablyNonNull) {
3266 finalArgument = temp.getPointer();
3267 } else {
3268 llvm::Value *isNull =
3269 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3270
3271 finalArgument = CGF.Builder.CreateSelect(isNull,
3272 llvm::ConstantPointerNull::get(destType),
3273 temp.getPointer(), "icr.argument");
3274
3275 // If we need to copy, then the load has to be conditional, which
3276 // means we need control flow.
3277 if (shouldCopy) {
3278 originBB = CGF.Builder.GetInsertBlock();
3279 contBB = CGF.createBasicBlock("icr.cont");
3280 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3281 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3282 CGF.EmitBlock(copyBB);
3283 condEval.begin(CGF);
3284 }
3285 }
3286
3287 llvm::Value *valueToUse = nullptr;
3288
3289 // Perform a copy if necessary.
3290 if (shouldCopy) {
3291 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3292 assert(srcRV.isScalar());
3293
3294 llvm::Value *src = srcRV.getScalarVal();
3295 src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3296 "icr.cast");
3297
3298 // Use an ordinary store, not a store-to-lvalue.
3299 CGF.Builder.CreateStore(src, temp);
3300
3301 // If optimization is enabled, and the value was held in a
3302 // __strong variable, we need to tell the optimizer that this
3303 // value has to stay alive until we're doing the store back.
3304 // This is because the temporary is effectively unretained,
3305 // and so otherwise we can violate the high-level semantics.
3306 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3307 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3308 valueToUse = src;
3309 }
3310 }
3311
3312 // Finish the control flow if we needed it.
3313 if (shouldCopy && !provablyNonNull) {
3314 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3315 CGF.EmitBlock(contBB);
3316
3317 // Make a phi for the value to intrinsically use.
3318 if (valueToUse) {
3319 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3320 "icr.to-use");
3321 phiToUse->addIncoming(valueToUse, copyBB);
3322 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3323 originBB);
3324 valueToUse = phiToUse;
3325 }
3326
3327 condEval.end(CGF);
3328 }
3329
3330 args.addWriteback(srcLV, temp, valueToUse);
3331 args.add(RValue::get(finalArgument), CRE->getType());
3332}
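
Editor's note: an assumed Objective-C example of the call-by-writeback path handled above (illustration only):

// NSError *err = nil;                       // __strong local
// [mgr removeItemAtPath:path error:&err];   // parameter type is
//                                           // NSError * __autoreleasing *
// The frontend passes the address of the fresh 'icr.temp' instead of
// &err (selecting null when &err is provably null), and emitWriteback
// copies the temporary's value back into 'err' after the call.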
3333
3334void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3335 assert(!StackBase);
3336
3337 // Save the stack.
3338 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3339 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3340}
3341
3342void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3343 if (StackBase) {
3344 // Restore the stack after the call.
3345 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3346 CGF.Builder.CreateCall(F, StackBase);
3347 }
3348}
3349
3350void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3351 SourceLocation ArgLoc,
3352 AbstractCallee AC,
3353 unsigned ParmNum) {
3354 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3355 SanOpts.has(SanitizerKind::NullabilityArg)))
3356 return;
3357
3358 // The param decl may be missing in a variadic function.
3359 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3360 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3361
3362 // Prefer the nonnull attribute if it's present.
3363 const NonNullAttr *NNAttr = nullptr;
3364 if (SanOpts.has(SanitizerKind::NonnullAttribute))
3365 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3366
3367 bool CanCheckNullability = false;
3368 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3369 auto Nullability = PVD->getType()->getNullability(getContext());
3370 CanCheckNullability = Nullability &&
3371 *Nullability == NullabilityKind::NonNull &&
3372 PVD->getTypeSourceInfo();
3373 }
3374
3375 if (!NNAttr && !CanCheckNullability)
3376 return;
3377
3378 SourceLocation AttrLoc;
3379 SanitizerMask CheckKind;
3380 SanitizerHandler Handler;
3381 if (NNAttr) {
3382 AttrLoc = NNAttr->getLocation();
3383 CheckKind = SanitizerKind::NonnullAttribute;
3384 Handler = SanitizerHandler::NonnullArg;
3385 } else {
3386 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3387 CheckKind = SanitizerKind::NullabilityArg;
3388 Handler = SanitizerHandler::NullabilityArg;
3389 }
3390
3391 SanitizerScope SanScope(this);
3392 assert(RV.isScalar());
3393 llvm::Value *V = RV.getScalarVal();
3394 llvm::Value *Cond =
3395 Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3396 llvm::Constant *StaticData[] = {
3397 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3398 llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3399 };
3400 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3401}
3402
3403void CodeGenFunction::EmitCallArgs(
3404 CallArgList &Args, ArrayRef<QualType> ArgTypes,
3405 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3406 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3407 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3408
3409 // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3410 // because arguments are destroyed left to right in the callee. As a special
3411 // case, there are certain language constructs that require left-to-right
3412 // evaluation, and in those cases we consider the evaluation order requirement
3413 // to trump the "destruction order is reverse construction order" guarantee.
3414 bool LeftToRight =
3415 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3416 ? Order == EvaluationOrder::ForceLeftToRight
3417 : Order != EvaluationOrder::ForceRightToLeft;
3418
3419 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3420 RValue EmittedArg) {
3421 if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3422 return;
3423 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3424 if (PS == nullptr)
3425 return;
3426
3427 const auto &Context = getContext();
3428 auto SizeTy = Context.getSizeType();
3429 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3430 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3431 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3432 EmittedArg.getScalarVal(),
3433 PS->isDynamic());
3434 Args.add(RValue::get(V), SizeTy);
3435 // If we're emitting args in reverse, be sure to do so with
3436 // pass_object_size, as well.
3437 if (!LeftToRight)
3438 std::swap(Args.back(), *(&Args.back() - 1));
3439 };
3440
3441 // Insert a stack save if we're going to need any inalloca args.
3442 bool HasInAllocaArgs = false;
3443 if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3444 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3445 I != E && !HasInAllocaArgs; ++I)
3446 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3447 if (HasInAllocaArgs) {
3448 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3449 Args.allocateArgumentMemory(*this);
3450 }
3451 }
3452
3453 // Evaluate each argument in the appropriate order.
3454 size_t CallArgsStart = Args.size();
3455 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3456 unsigned Idx = LeftToRight ? I : E - I - 1;
3457 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3458 unsigned InitialArgSize = Args.size();
3459 // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3460 // the argument and parameter match or the objc method is parameterized.
3461 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3462 getContext().hasSameUnqualifiedType((*Arg)->getType(),
3463 ArgTypes[Idx]) ||
3464 (isa<ObjCMethodDecl>(AC.getDecl()) &&
3465 isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3466 "Argument and parameter types don't match");
3467 EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3468 // In particular, we depend on it being the last arg in Args, and the
3469 // objectsize bits depend on there only being one arg if !LeftToRight.
3470 assert(InitialArgSize + 1 == Args.size() &&
3471 "The code below depends on only adding one arg per EmitCallArg");
3472 (void)InitialArgSize;
3473 // Since pointer arguments are never emitted as LValues, it is safe to
3474 // emit the non-null argument check for r-values only.
3475 if (!Args.back().hasLValue()) {
3476 RValue RVArg = Args.back().getKnownRValue();
3477 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3478 ParamsToSkip + Idx);
3479 // @llvm.objectsize should never have side-effects and shouldn't need
3480 // destruction/cleanups, so we can safely "emit" it after its arg,
3481 // regardless of right-to-leftness.
3482 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3483 }
3484 }
3485
3486 if (!LeftToRight) {
3487 // Un-reverse the arguments we just evaluated so they match up with the LLVM
3488 // IR function.
3489 std::reverse(Args.begin() + CallArgsStart, Args.end());
3490 }
3491}
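
Editor's note: an assumed trigger for MaybeEmitImplicitObjectSize above (illustration only):

// Given
//   void fill(char *buf __attribute__((pass_object_size(0))), char c);
//   char b[16];
//   fill(b, 'x');
// the caller materializes a hidden size argument (16, computed as
// __builtin_object_size(b, 0)) immediately after 'buf'; the std::swap
// above keeps that pair adjacent when arguments were evaluated right
// to left.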
3492
3493namespace {
3494
3495struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3496 DestroyUnpassedArg(Address Addr, QualType Ty)
3497 : Addr(Addr), Ty(Ty) {}
3498
3499 Address Addr;
3500 QualType Ty;
3501
3502 void Emit(CodeGenFunction &CGF, Flags flags) override {
3503 QualType::DestructionKind DtorKind = Ty.isDestructedType();
3504 if (DtorKind == QualType::DK_cxx_destructor) {
3505 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3506 assert(!Dtor->isTrivial());
3507 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3508 /*Delegating=*/false, Addr, Ty);
3509 } else {
3510 CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3511 }
3512 }
3513};
3514
3515struct DisableDebugLocationUpdates {
3516 CodeGenFunction &CGF;
3517 bool disabledDebugInfo;
3518 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3519 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3520 CGF.disableDebugInfo();
3521 }
3522 ~DisableDebugLocationUpdates() {
3523 if (disabledDebugInfo)
3524 CGF.enableDebugInfo();
3525 }
3526};
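// Note: this RAII helper disables debug locations only while emitting a
// CXXDefaultArgExpr, presumably so that instructions emitted for a defaulted
// argument are not attributed to the default argument's source location.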
3527
3528} // end anonymous namespace
3529
3530RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3531 if (!HasLV)
3532 return RV;
3533 LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3534 CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3535 LV.isVolatile());
3536 IsUsed = true;
3537 return RValue::getAggregate(Copy.getAddress());
3538}
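// Note: when the argument is held as an LValue (HasLV), getRValue
// materializes it by copying into a fresh temporary, so callers always
// receive an independent aggregate RValue; IsUsed records the consumption.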
3539
3540void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3541 LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3542 if (!HasLV && RV.isScalar())
3543 CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
3544 else if (!HasLV && RV.isComplex())
3545 CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3546 else {
3547 auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
3548 LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3549 // We assume that call args are never copied into subobjects.
3550 CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3551 HasLV ? LV.isVolatileQualified()
3552 : RV.isVolatileQualified());
3553 }
3554 IsUsed = true;
3555}
3556
3557void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3558 QualType type) {
3559 DisableDebugLocationUpdates Dis(*this, E);
3560 if (const ObjCIndirectCopyRestoreExpr *CRE
3561 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3562 assert(getLangOpts().ObjCAutoRefCount);
3563 return emitWritebackArg(*this, args, CRE);
3564 }
3565
3566 assert(type->isReferenceType() == E->isGLValue() &&
3567        "reference binding to unmaterialized r-value!");
3568
3569 if (E->isGLValue()) {
3570 assert(E->getObjectKind() == OK_Ordinary);
3571 return args.add(EmitReferenceBindingToExpr(E), type);
3572 }
3573
3574 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3575
3576 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3577 // However, we still have to push an EH-only cleanup in case we unwind before
3578 // we make it to the call.
3579 if (HasAggregateEvalKind &&
3580 type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
3581 // If we're using inalloca, use the argument memory. Otherwise, use a
3582 // temporary.
3583 AggValueSlot Slot;
3584 if (args.isUsingInAlloca())
3585 Slot = createPlaceholderSlot(*this, type);
3586 else
3587 Slot = CreateAggTemp(type, "agg.tmp");
3588
3589 bool DestroyedInCallee = true, NeedsEHCleanup = true;
3590 if (const auto *RD = type->getAsCXXRecordDecl())
3591 DestroyedInCallee = RD->hasNonTrivialDestructor();
3592 else
3593 NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3594
3595 if (DestroyedInCallee)
3596 Slot.setExternallyDestructed();
3597
3598 EmitAggExpr(E, Slot);
3599 RValue RV = Slot.asRValue();
3600 args.add(RV, type);
3601
3602 if (DestroyedInCallee && NeedsEHCleanup) {
3603 // Create a no-op GEP between the placeholder and the cleanup so we can
3604 // RAUW it successfully. It also serves as a marker of the first
3605 // instruction where the cleanup is active.
3606 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3607 type);
3608 // This unreachable is a temporary marker which will be removed later.
3609 llvm::Instruction *IsActive = Builder.CreateUnreachable();
3610 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3611 }
3612 return;
3613 }
3614
3615 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3616 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3617 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3618 assert(L.isSimple());
3619 args.addUncopiedAggregate(L, type);
3620 return;
3621 }
3622
3623 args.add(EmitAnyExprToTemp(E), type);
3624}
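// Note: EmitCallArg dispatches, in order: ObjC writeback arguments under ARC,
// glvalue reference binding, aggregates destroyed by the callee (guarded by
// an EH-only cleanup until the call is actually made), aggregate
// lvalue-to-rvalue loads passed without a copy, and finally the generic
// EmitAnyExprToTemp path.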
3625
3626QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3627 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3628 // implicitly widens null pointer constants that are arguments to varargs
3629 // functions to pointer-sized ints.
3630 if (!getTarget().getTriple().isOSWindows())
3631 return Arg->getType();
3632
3633 if (Arg->getType()->isIntegerType() &&
3634 getContext().getTypeSize(Arg->getType()) <
3635 getContext().getTargetInfo().getPointerWidth(0) &&
3636 Arg->isNullPointerConstant(getContext(),
3637 Expr::NPC_ValueDependentIsNotNull)) {
3638 return getContext().getIntPtrType();
3639 }
3640
3641 return Arg->getType();
3642}
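// Illustrative example (hypothetical code, not from this file): on Win64,
//   void f(const char *, ...);  f("x", NULL);
// passes NULL as a 32-bit 0, so the branch above widens the vararg's type to
// getContext().getIntPtrType() to match MSVC's implicit widening.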
3643
3644// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3645// optimizer it can aggressively ignore unwind edges.
3646void
3647CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3648 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3649 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3650 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3651 CGM.getNoObjCARCExceptionsMetadata());
3652}
3653
3654/// Emits a call to the given no-arguments nounwind runtime function.
3655llvm::CallInst *
3656CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3657 const llvm::Twine &name) {
3658 return EmitNounwindRuntimeCall(callee, None, name);
3659}
3660
3661/// Emits a call to the given nounwind runtime function.
3662llvm::CallInst *
3663CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3664 ArrayRef<llvm::Value *> args,
3665 const llvm::Twine &name) {
3666 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3667 call->setDoesNotThrow();
3668 return call;
3669}
3670
3671/// Emits a simple call (never an invoke) to the given no-arguments
3672/// runtime function.
3673llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
3674 const llvm::Twine &name) {
3675 return EmitRuntimeCall(callee, None, name);
3676}
3677
3678// Calls which may throw must have operand bundles indicating which funclet
3679// they are nested within.
3680SmallVector<llvm::OperandBundleDef, 1>
3681CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
3682 SmallVector<llvm::OperandBundleDef, 1> BundleList;
3683 // There is no need for a funclet operand bundle if we aren't inside a
3684 // funclet.
3685 if (!CurrentFuncletPad)
3686 return BundleList;
3687
3688 // Skip intrinsics which cannot throw.
3689 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3690 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3691 return BundleList;
3692
3693 BundleList.emplace_back("funclet", CurrentFuncletPad);
3694 return BundleList;
3695}
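// Note: under Windows EH, a call made while CurrentFuncletPad is set must
// carry a "funclet" operand bundle naming that pad; nothrow intrinsics are
// exempt since they can never unwind out of the funclet.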
3696
3697/// Emits a simple call (never an invoke) to the given runtime function.
3698llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
3699 ArrayRef<llvm::Value *> args,
3700 const llvm::Twine &name) {
3701 llvm::CallInst *call = Builder.CreateCall(
3702 callee, args, getBundlesForFunclet(callee.getCallee()), name);
3703 call->setCallingConv(getRuntimeCC());
3704 return call;
3705}
3706
3707/// Emits a call or invoke to the given noreturn runtime function.
3708void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
3709 llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
3710 SmallVector<llvm::OperandBundleDef, 1> BundleList =
3711 getBundlesForFunclet(callee.getCallee());
3712
3713 if (getInvokeDest()) {
3714 llvm::InvokeInst *invoke =
3715 Builder.CreateInvoke(callee,
3716 getUnreachableBlock(),
3717 getInvokeDest(),
3718 args,
3719 BundleList);
3720 invoke->setDoesNotReturn();
3721 invoke->setCallingConv(getRuntimeCC());
3722 } else {
3723 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3724 call->setDoesNotReturn();
3725 call->setCallingConv(getRuntimeCC());
3726 Builder.CreateUnreachable();
3727 }
3728}
3729
3730/// Emits a call or invoke instruction to the given nullary runtime function.
3731llvm::CallBase *
3732CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3733 const Twine &name) {
3734 return EmitRuntimeCallOrInvoke(callee, None, name);
3735}
3736
3737/// Emits a call or invoke instruction to the given runtime function.
3738llvm::CallBase *
3739CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3740 ArrayRef<llvm::Value *> args,
3741 const Twine &name) {
3742 llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
3743 call->setCallingConv(getRuntimeCC());
3744 return call;
3745}
3746
3747/// Emits a call or invoke instruction to the given function, depending
3748/// on the current state of the EH stack.
3749llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
3750 ArrayRef<llvm::Value *> Args,
3751 const Twine &Name) {
3752 llvm::BasicBlock *InvokeDest = getInvokeDest();
3753 SmallVector<llvm::OperandBundleDef, 1> BundleList =
3754 getBundlesForFunclet(Callee.getCallee());
3755
3756 llvm::CallBase *Inst;
3757 if (!InvokeDest)
3758 Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3759 else {
3760 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3761 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3762 Name);
3763 EmitBlock(ContBB);
3764 }
3765
3766 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3767 // optimizer it can aggressively ignore unwind edges.
3768 if (CGM.getLangOpts().ObjCAutoRefCount)
3769 AddObjCARCExceptionMetadata(Inst);
3770
3771 return Inst;
3772}
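// Note: the call/invoke choice hinges entirely on getInvokeDest(): with an
// active EH destination, the call becomes an invoke whose normal edge lands
// in a fresh "invoke.cont" block; otherwise a plain call suffices.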
3773
3774void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3775 llvm::Value *New) {
3776 DeferredReplacements.push_back(std::make_pair(Old, New));
3777}
3778
3779RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3780 const CGCallee &Callee,
3781 ReturnValueSlot ReturnValue,
3782 const CallArgList &CallArgs,
3783 llvm::CallBase **callOrInvoke,
3784 SourceLocation Loc) {
3785 // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3786
3787 assert(Callee.isOrdinary() || Callee.isVirtual());
3788
3789 // Handle struct-return functions by passing a pointer to the
3790 // location that we would like to return into.
3791 QualType RetTy = CallInfo.getReturnType();
3792 const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3793
3794 llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
3795
3796 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
3797 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
3798 // We can only guarantee that a function is called from the correct
3799 // context/function based on the appropriate target attributes,
3800 // so only check in the case where we have both always_inline and target
3801 // since otherwise we could be making a conditional call after a check for
3802 // the proper cpu features (and it won't cause code generation issues due to
3803 // function based code generation).
3804 if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
3805 TargetDecl->hasAttr<TargetAttr>())
3806 checkTargetFeatures(Loc, FD);
3807
3808#ifndef NDEBUG
3809 if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
3810 // For an inalloca varargs function, we don't expect CallInfo to match the
3811 // function pointer's type, because the inalloca struct will have extra
3812 // fields in it for the varargs parameters. Code later in this function
3813 // bitcasts the function pointer to the type derived from CallInfo.
3814 //
3815 // In other cases, we assert that the types match up (until pointers stop
3816 // having pointee types).
3817 llvm::Type *TypeFromVal;
3818 if (Callee.isVirtual())
3819 TypeFromVal = Callee.getVirtualFunctionType();
3820 else
3821 TypeFromVal =
3822 Callee.getFunctionPointer()->getType()->getPointerElementType();
3823 assert(IRFuncTy == TypeFromVal);
3824 }
3825#endif
3826
3827 // 1. Set up the arguments.
3828
3829 // If we're using inalloca, insert the allocation after the stack save.
3830 // FIXME: Do this earlier rather than hacking it in here!
3831 Address ArgMemory = Address::invalid();
3832 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3833 const llvm::DataLayout &DL = CGM.getDataLayout();
3834 llvm::Instruction *IP = CallArgs.getStackBase();
3835 llvm::AllocaInst *AI;
3836 if (IP) {
3837 IP = IP->getNextNode();
3838 AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3839 "argmem", IP);
3840 } else {
3841 AI = CreateTempAlloca(ArgStruct, "argmem");
3842 }
3843 auto Align = CallInfo.getArgStructAlignment();
3844 AI->setAlignment(llvm::MaybeAlign(Align.getQuantity()));
3845 AI->setUsedWithInAlloca(true);
3846 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3847 ArgMemory = Address(AI, Align);
3848 }
3849
3850 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3851 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3852
3853 // If the call returns a temporary with struct return, create a temporary
3854 // alloca to hold the result, unless one is given to us.
3855 Address SRetPtr = Address::invalid();
3856 Address SRetAlloca = Address::invalid();
3857 llvm::Value *UnusedReturnSizePtr = nullptr;
3858 if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3859 if (!ReturnValue.isNull()) {
3860 SRetPtr = ReturnValue.getValue();
3861 } else {
3862 SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
3863 if (HaveInsertPoint() && ReturnValue.isUnused()) {
3864 uint64_t size =
3865 CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3866 UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
3867 }
3868 }
3869 if (IRFunctionArgs.hasSRetArg()) {
3870 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3871 } else if (RetAI.isInAlloca()) {
3872 Address Addr =
3873 Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
3874 Builder.CreateStore(SRetPtr.getPointer(), Addr);
3875 }
3876 }
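// Note: for indirect, inalloca, and coerce-and-expand returns, the result
// travels through memory: a caller-supplied slot is reused when available,
// otherwise a temporary is created (with a lifetime marker if the result is
// unused), and its pointer is passed either as the sret IR argument or via a
// slot in the inalloca struct.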
3877
3878 Address swiftErrorTemp = Address::invalid();
3879 Address swiftErrorArg = Address::invalid();
3880
3881 // Translate all of the arguments as necessary to match the IR lowering.
3882 assert(CallInfo.arg_size() == CallArgs.size() &&
3883        "Mismatch between function signature & arguments.");
3884 unsigned ArgNo = 0;
3885 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3886 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3887 I != E; ++I, ++info_it, ++ArgNo) {
3888 const ABIArgInfo &ArgInfo = info_it->info;
3889
3890 // Insert a padding argument to ensure proper alignment.
3891 if (IRFunctionArgs.hasPaddingArg(ArgNo))
3892 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3893 llvm::UndefValue::get(ArgInfo.getPaddingType());
3894
3895 unsigned FirstIRArg, NumIRArgs;
3896 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3897
3898 switch (ArgInfo.getKind()) {
3899 case ABIArgInfo::InAlloca: {
3900 assert(NumIRArgs == 0);
3901 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3902 if (I->isAggregate()) {
3903 // Replace the placeholder with the appropriate argument slot GEP.
3904 Address Addr = I->hasLValue()
3905 ? I->getKnownLValue().getAddress()
3906 : I->getKnownRValue().getAggregateAddress();
3907 llvm::Instruction *Placeholder =
3908 cast<llvm::Instruction>(Addr.getPointer());
3909 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3910 Builder.SetInsertPoint(Placeholder);
3911 Addr =
3912 Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
3913 Builder.restoreIP(IP);
3914 deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3915 } else {
3916 // Store the RValue into the argument struct.
3917 Address Addr =
3918 Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
3919 unsigned AS = Addr.getType()->getPointerAddressSpace();
3920 llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3921 // There are some cases where a trivial bitcast is not avoidable. The
3922 // definition of a type later in a translation unit may change its type
3923 // from {}* to (%struct.foo*)*.
3924 if (Addr.getType() != MemType)
3925 Addr = Builder.CreateBitCast(Addr, MemType);
3926 I->copyInto(*this, Addr);
3927 }
3928 break;
3929 }
3930
3931 case ABIArgInfo::Indirect: {
3932 assert(NumIRArgs == 1);
3933 if (!I->isAggregate()) {
3934 // Make a temporary alloca to pass the argument.
3935 Address Addr = CreateMemTempWithoutCast(
3936 I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
3937 IRCallArgs[FirstIRArg] = Addr.getPointer();
3938
3939 I->copyInto(*this, Addr);
3940 } else {
3941 // We want to avoid creating an unnecessary temporary+copy here;
3942 // however, we need one in three cases:
3943 // 1. If the argument is not byval, and we are required to copy the
3944 // source. (This case doesn't occur on any common architecture.)
3945 // 2. If the argument is byval, RV is not sufficiently aligned, and
3946 // we cannot force it to be sufficiently aligned.
3947 // 3. If the argument is byval, but RV is not located in default
3948 // or alloca address space.
3949 Address Addr = I->hasLValue()
3950 ? I->getKnownLValue().getAddress()
3951 : I->getKnownRValue().getAggregateAddress();
3952 llvm::Value *V = Addr.getPointer();
3953 CharUnits Align = ArgInfo.getIndirectAlign();
3954 const llvm::DataLayout *TD = &CGM.getDataLayout();
3955
3956 assert((FirstIRArg >= IRFuncTy->getNumParams() ||
3957         IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
3958             TD->getAllocaAddrSpace()) &&
3959        "indirect argument must be in alloca address space");
3960
3961 bool NeedCopy = false;
3962
3963 if (Addr.getAlignment() < Align &&
3964 llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
3965 Align.getQuantity()) {
3966 NeedCopy = true;
3967 } else if (I->hasLValue()) {
3968 auto LV = I->getKnownLValue();
3969 auto AS = LV.getAddressSpace();
3970
3971 if ((!ArgInfo.getIndirectByVal() &&
3972 (LV.getAlignment() >=
3973 getContext().getTypeAlignInChars(I->Ty)))) {
3974 NeedCopy = true;
3975 }
3976 if (!getLangOpts().OpenCL) {
3977 if ((ArgInfo.getIndirectByVal() &&
3978 (AS != LangAS::Default &&
3979 AS != CGM.getASTAllocaAddressSpace()))) {
3980 NeedCopy = true;
3981 }
3982 }
3983 // For OpenCL even if RV is located in default or alloca address space
3984 // we don't want to perform address space cast for it.
3985 else if ((ArgInfo.getIndirectByVal() &&
3986 Addr.getType()->getAddressSpace() != IRFuncTy->
3987 getParamType(FirstIRArg)->getPointerAddressSpace())) {
3988 NeedCopy = true;
3989 }
3990 }
3991
3992 if (NeedCopy) {
3993 // Create an aligned temporary, and copy to it.
3994 Address AI = CreateMemTempWithoutCast(
3995 I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
3996 IRCallArgs[FirstIRArg] = AI.getPointer();
3997 I->copyInto(*this, AI);
3998 } else {
3999 // Skip the extra memcpy call.
4000 auto *T = V->getType()->getPointerElementType()->getPointerTo(
4001 CGM.getDataLayout().getAllocaAddrSpace());
4002 IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
4003 *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
4004 true);
4005 }
4006 }
4007 break;
4008 }
4009
4010 case ABIArgInfo::Ignore:
4011 assert(NumIRArgs == 0);
4012 break;
4013
4014 case ABIArgInfo::Extend:
4015 case ABIArgInfo::Direct: {
4016 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
4017 ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
4018 ArgInfo.getDirectOffset() == 0) {
4019 assert(NumIRArgs == 1);
4020 llvm::Value *V;
4021 if (!I->isAggregate())
4022 V = I->getKnownRValue().getScalarVal();
4023 else
4024 V = Builder.CreateLoad(
4025 I->hasLValue() ? I->getKnownLValue().getAddress()
4026 : I->getKnownRValue().getAggregateAddress());
4027
4028 // Implement swifterror by copying into a new swifterror argument.
4029 // We'll write back in the normal path out of the call.
4030 if (CallInfo.getExtParameterInfo(ArgNo).getABI()
4031 == ParameterABI::SwiftErrorResult) {
4032 assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4033
4034 QualType pointeeTy = I->Ty->getPointeeType();
4035 swiftErrorArg =
4036 Address(V, getContext().getTypeAlignInChars(pointeeTy));
4037
4038 swiftErrorTemp =
4039 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4040 V = swiftErrorTemp.getPointer();
4041 cast<llvm::AllocaInst>(V)->setSwiftError(true);
4042
4043 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4044 Builder.CreateStore(errorValue, swiftErrorTemp);
4045 }
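// Note: swifterror is modeled with a dedicated alloca marked
// setSwiftError(true): the caller's error value is copied in here before the
// call, and the possibly-updated value is copied back out after the call
// returns (see the writeback further below).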
4046
4047 // We might have to widen integers, but we should never truncate.
4048 if (ArgInfo.getCoerceToType() != V->getType() &&
4049 V->getType()->isIntegerTy())
4050 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4051
4052 // If the argument doesn't match, perform a bitcast to coerce it. This
4053 // can happen due to trivial type mismatches.
4054 if (FirstIRArg < IRFuncTy->getNumParams() &&
4055 V->getType() != IRFuncTy->getParamType(FirstIRArg))
4056 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4057
4058 IRCallArgs[FirstIRArg] = V;
4059 break;
4060 }
4061
4062 // FIXME: Avoid the conversion through memory if possible.
4063 Address Src = Address::invalid();
4064 if (!I->isAggregate()) {
4065 Src = CreateMemTemp(I->Ty, "coerce");
4066 I->copyInto(*this, Src);
4067 } else {
4068 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
4069 : I->getKnownRValue().getAggregateAddress();
4070 }
4071
4072 // If the value is offset in memory, apply the offset now.
4073 Src = emitAddressAtOffset(*this, Src, ArgInfo);
4074
4075 // Fast-isel and the optimizer generally like scalar values better than
4076 // FCAs, so we flatten them if this is safe to do for this argument.
4077 llvm::StructType *STy =
4078 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4079 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4080 llvm::Type *SrcTy = Src.getType()->getElementType();
4081 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4082 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4083
4084 // If the source type is smaller than the destination type of the
4085 // coerce-to logic, copy the source value into a temp alloca the size
4086 // of the destination type to allow loading all of it. The bits past
4087 // the source value are left undef.
4088 if (SrcSize < DstSize) {
4089 Address TempAlloca
4090 = CreateTempAlloca(STy, Src.getAlignment(),
4091 Src.getName() + ".coerce");
4092 Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4093 Src = TempAlloca;
4094 } else {
4095 Src = Builder.CreateBitCast(Src,
4096 STy->getPointerTo(Src.getAddressSpace()));
4097 }
4098
4099 assert(NumIRArgs == STy->getNumElements());
4100 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4101 Address EltPtr = Builder.CreateStructGEP(Src, i);
4102 llvm::Value *LI = Builder.CreateLoad(EltPtr);
4103 IRCallArgs[FirstIRArg + i] = LI;
4104 }
4105 } else {
4106 // In the simple case, just pass the coerced loaded value.
4107 assert(NumIRArgs == 1);
4108 IRCallArgs[FirstIRArg] =
4109 CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4110 }
4111
4112 break;
4113 }
4114
4115 case ABIArgInfo::CoerceAndExpand: {
4116 auto coercionType = ArgInfo.getCoerceAndExpandType();
4117 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4118
4119 llvm::Value *tempSize = nullptr;
4120 Address addr = Address::invalid();
4121 Address AllocaAddr = Address::invalid();
4122 if (I->isAggregate()) {
4123 addr = I->hasLValue() ? I->getKnownLValue().getAddress()
4124 : I->getKnownRValue().getAggregateAddress();
4125
4126 } else {
4127 RValue RV = I->getKnownRValue();
4128 assert(RV.isScalar()); // complex should always just be direct
4129
4130 llvm::Type *scalarType = RV.getScalarVal()->getType();
4131 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4132 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4133
4134 // Materialize to a temporary.
4135 addr = CreateTempAlloca(
4136 RV.getScalarVal()->getType(),
4137 CharUnits::fromQuantity(std::max(
4138 (unsigned)layout->getAlignment().value(), scalarAlign)),
4139 "tmp",
4140 /*ArraySize=*/nullptr, &AllocaAddr);
4141 tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4142
4143 Builder.CreateStore(RV.getScalarVal(), addr);
4144 }
4145
4146 addr = Builder.CreateElementBitCast(addr, coercionType);
4147
4148 unsigned IRArgPos = FirstIRArg;
4149 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4150 llvm::Type *eltType = coercionType->getElementType(i);
4151 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4152 Address eltAddr = Builder.CreateStructGEP(addr, i);
4153 llvm::Value *elt = Builder.CreateLoad(eltAddr);
4154 IRCallArgs[IRArgPos++] = elt;
4155 }
4156 assert(IRArgPos == FirstIRArg + NumIRArgs);
4157
4158 if (tempSize) {
4159 EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4160 }
4161
4162 break;
4163 }
4164
4165 case ABIArgInfo::Expand:
4166 unsigned IRArgPos = FirstIRArg;
4167 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4168 assert(IRArgPos == FirstIRArg + NumIRArgs);
4169 break;
4170 }
4171 }
4172
4173 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4174 llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4175
4176 // If we're using inalloca, set up that argument.
4177 if (ArgMemory.isValid()) {
4178 llvm::Value *Arg = ArgMemory.getPointer();
4179 if (CallInfo.isVariadic()) {
4180 // When passing non-POD arguments by value to variadic functions, we will
4181 // end up with a variadic prototype and an inalloca call site. In such
4182 // cases, we can't do any parameter mismatch checks. Give up and bitcast
4183 // the callee.
4184 unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4185 CalleePtr =
4186 Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
4187 } else {
4188 llvm::Type *LastParamTy =
4189 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4190 if (Arg->getType() != LastParamTy) {
4191#ifndef NDEBUG
4192 // Assert that these structs have equivalent element types.
4193 llvm::StructType *FullTy = CallInfo.getArgStruct();
4194 llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4195 cast<llvm::PointerType>(LastParamTy)->getElementType());
4196 assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4197 for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4198 DE = DeclaredTy->element_end(),
4199 FI = FullTy->element_begin();
4200 DI != DE; ++DI, ++FI)
4201 assert(*DI == *FI);
4202#endif
4203 Arg = Builder.CreateBitCast(Arg, LastParamTy);
4204 }
4205 }
4206 assert(IRFunctionArgs.hasInallocaArg());
4207 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4208 }
4209
4210 // 2. Prepare the function pointer.
4211
4212 // If the callee is a bitcast of a non-variadic function to have a
4213 // variadic function pointer type, check to see if we can remove the
4214 // bitcast. This comes up with unprototyped functions.
4215 //
4216 // This makes the IR nicer, but more importantly it ensures that we
4217 // can inline the function at -O0 if it is marked always_inline.
4218 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
4219 llvm::Value *Ptr) -> llvm::Function * {
4220 if (!CalleeFT->isVarArg())
4221 return nullptr;
4222
4223 // Get underlying value if it's a bitcast
4224 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
4225 if (CE->getOpcode() == llvm::Instruction::BitCast)
4226 Ptr = CE->getOperand(0);
4227 }
4228
4229 llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
4230 if (!OrigFn)
4231 return nullptr;
4232
4233 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4234
4235 // If the original type is variadic, or if any of the component types
4236 // disagree, we cannot remove the cast.
4237 if (OrigFT->isVarArg() ||
4238 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4239 OrigFT->getReturnType() != CalleeFT->getReturnType())
4240 return nullptr;
4241
4242 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4243 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4244 return nullptr;
4245
4246 return OrigFn;
4247 };
4248
4249 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
4250 CalleePtr = OrigFn;
4251 IRFuncTy = OrigFn->getFunctionType();
4252 }
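// Note: the simplification above strips a bitcast-to-variadic wrapper when
// the underlying function's prototype agrees exactly; a hypothetical C
// example would be calling through an unprototyped declaration such as
// `void f();` whose definition is non-variadic.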
4253
4254 // 3. Perform the actual call.
4255
4256 // Deactivate any cleanups that we're supposed to do immediately before
4257 // the call.
4258 if (!CallArgs.getCleanupsToDeactivate().empty())
4259 deactivateArgCleanupsBeforeCall(*this, CallArgs);
4260
4261 // Assert that the arguments we computed match up. The IR verifier
4262 // will catch this, but this is a common enough source of problems
4263 // during IRGen changes that it's way better for debugging to catch
4264 // it ourselves here.
4265#ifndef NDEBUG
4266 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4267 for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4268 // Inalloca argument can have different type.
4269 if (IRFunctionArgs.hasInallocaArg() &&
4270 i == IRFunctionArgs.getInallocaArgNo())
4271 continue;
4272 if (i < IRFuncTy->getNumParams())
4273 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4274 }
4275#endif
4276
4277 // Update the largest vector width if any arguments have vector types.
4278 for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4279 if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
4280 LargestVectorWidth = std::max(LargestVectorWidth,
4281 VT->getPrimitiveSizeInBits());
4282 }
4283
4284 // Compute the calling convention and attributes.
4285 unsigned CallingConv;
4286 llvm::AttributeList Attrs;
4287 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4288 Callee.getAbstractInfo(), Attrs, CallingConv,
4289 /*AttrOnCallSite=*/true);
4290
4291 // Apply some call-site-specific attributes.
4292 // TODO: work this into building the attribute set.
4293
4294 // Apply always_inline to all calls within flatten functions.
4295 // FIXME: should this really take priority over __try, below?
4296 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4297 !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
4298 Attrs =
4299 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4300 llvm::Attribute::AlwaysInline);
4301 }
4302
4303 // Disable inlining inside SEH __try blocks.
4304 if (isSEHTryScope()) {
4305 Attrs =
4306 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4307 llvm::Attribute::NoInline);
4308 }
4309
4310 // Decide whether to use a call or an invoke.
4311 bool CannotThrow;
4312 if (currentFunctionUsesSEHTry()) {
4313 // SEH cares about asynchronous exceptions, so everything can "throw."
4314 CannotThrow = false;
4315 } else if (isCleanupPadScope() &&
4316 EHPersonality::get(*this).isMSVCXXPersonality()) {
4317 // The MSVC++ personality will implicitly terminate the program if an
4318 // exception is thrown during a cleanup outside of a try/catch.
4319 // We don't need to model anything in IR to get this behavior.
4320 CannotThrow = true;
4321 } else {
4322 // Otherwise, nounwind call sites will never throw.
4323 CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4324 llvm::Attribute::NoUnwind);
4325 }
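// Note: CannotThrow selects a plain call with no unwind edge. SEH forces the
// pessimistic answer because asynchronous exceptions can occur anywhere;
// MSVC++ cleanup pads terminate the program on throw, so no edge is needed;
// otherwise the nounwind attribute computed for the call site decides.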
4326
4327 // If we made a temporary, be sure to clean up after ourselves. Note that we
4328 // can't depend on being inside of an ExprWithCleanups, so we need to manually
4329 // pop this cleanup later on. Being eager about this is OK, since this
4330 // temporary is 'invisible' outside of the callee.
4331 if (UnusedReturnSizePtr)
4332 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
4333 UnusedReturnSizePtr);
4334
4335 llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4336
4337 SmallVector<llvm::OperandBundleDef, 1> BundleList =
4338 getBundlesForFunclet(CalleePtr);
4339
4340 // Emit the actual call/invoke instruction.
4341 llvm::CallBase *CI;
4342 if (!InvokeDest) {
4343 CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
4344 } else {
4345 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4346 CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
4347 BundleList);
4348 EmitBlock(Cont);
4349 }
4350 if (callOrInvoke)
4351 *callOrInvoke = CI;
4352
4353 // Apply the attributes and calling convention.
4354 CI->setAttributes(Attrs);
4355 CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4356
4357 // Apply various metadata.
4358
4359 if (!CI->getType()->isVoidTy())
4360 CI->setName("call");
4361
4362 // Update largest vector width from the return type.
4363 if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
4364 LargestVectorWidth = std::max(LargestVectorWidth,
4365 VT->getPrimitiveSizeInBits());
4366
4367 // Insert instrumentation or attach profile metadata at indirect call sites.
4368 // For more details, see the comment before the definition of
4369 // IPVK_IndirectCallTarget in InstrProfData.inc.
4370 if (!CI->getCalledFunction())
4371 PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4372 CI, CalleePtr);
4373
4374 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4375 // optimizer it can aggressively ignore unwind edges.
4376 if (CGM.getLangOpts().ObjCAutoRefCount)
4377 AddObjCARCExceptionMetadata(CI);
4378
4379 // Suppress tail calls if requested.
4380 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4381 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4382 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4383 }
4384
4385 // Add metadata for calls to MSAllocator functions
4386 if (getDebugInfo() && TargetDecl &&
4387 TargetDecl->hasAttr<MSAllocatorAttr>())
4388 getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy, Loc);
4389
4390 // 4. Finish the call.
4391
4392 // If the call doesn't return, finish the basic block and clear the
4393 // insertion point; this allows the rest of IRGen to discard
4394 // unreachable code.
4395 if (CI->doesNotReturn()) {
4396 if (UnusedReturnSizePtr)
4397 PopCleanupBlock();
4398
4399 // Strip away the noreturn attribute to better diagnose unreachable UB.
4400 if (SanOpts.has(SanitizerKind::Unreachable)) {
4401 // Also remove from function since CallBase::hasFnAttr additionally checks
4402 // attributes of the called function.
4403 if (auto *F = CI->getCalledFunction())
4404 F->removeFnAttr(llvm::Attribute::NoReturn);
4405 CI->removeAttribute(llvm::AttributeList::FunctionIndex,
4406 llvm::Attribute::NoReturn);
4407
4408 // Avoid incompatibility with ASan which relies on the `noreturn`
4409 // attribute to insert handler calls.
4410 if (SanOpts.hasOneOf(SanitizerKind::Address |
4411 SanitizerKind::KernelAddress)) {
4412 SanitizerScope SanScope(this);
4413 llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
4414 Builder.SetInsertPoint(CI);
4415 auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4416 llvm::FunctionCallee Fn =
4417 CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
4418 EmitNounwindRuntimeCall(Fn);
4419 }
4420 }
4421
4422 EmitUnreachable(Loc);
4423 Builder.ClearInsertionPoint();
4424
4425 // FIXME: For now, emit a dummy basic block because expr emitters in
4426 // general are not ready to handle emitting expressions at unreachable
4427 // points.
4428 EnsureInsertPoint();
4429
4430 // Return a reasonable RValue.
4431 return GetUndefRValue(RetTy);
4432 }
4433
4434 // Perform the swifterror writeback.
4435 if (swiftErrorTemp.isValid()) {
4436 llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4437 Builder.CreateStore(errorResult, swiftErrorArg);
4438 }
4439
4440 // Emit any call-associated writebacks immediately. Arguably this
4441 // should happen after any return-value munging.
4442 if (CallArgs.hasWritebacks())
4443 emitWritebacks(*this, CallArgs);
4444
4445 // The stack cleanup for inalloca arguments has to run out of the normal
4446 // lexical order, so deactivate it and run it manually here.
4447 CallArgs.freeArgumentMemory(*this);
4448
4449 // Extract the return value.
4450 RValue Ret = [&] {
4451 switch (RetAI.getKind()) {
4452 case ABIArgInfo::CoerceAndExpand: {
4453 auto coercionType = RetAI.getCoerceAndExpandType();
4454
4455 Address addr = SRetPtr;
4456 addr = Builder.CreateElementBitCast(addr, coercionType);
4457
4458 assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4459 bool requiresExtract = isa<llvm::StructType>(CI->getType());
4460
4461 unsigned unpaddedIndex = 0;
4462 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4463 llvm::Type *eltType = coercionType->getElementType(i);
4464 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4465 Address eltAddr = Builder.CreateStructGEP(addr, i);
4466 llvm::Value *elt = CI;
4467 if (requiresExtract)
4468 elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4469 else
4470 assert(unpaddedIndex == 0);
4471 Builder.CreateStore(elt, eltAddr);
4472 }
4473 // FALLTHROUGH
4474 LLVM_FALLTHROUGH;
4475 }
4476
4477 case ABIArgInfo::InAlloca:
4478 case ABIArgInfo::Indirect: {
4479 RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4480 if (UnusedReturnSizePtr)
4481 PopCleanupBlock();
4482 return ret;
4483 }
4484
4485 case ABIArgInfo::Ignore:
4486 // If we are ignoring an argument that had a result, make sure to
4487 // construct the appropriate return value for our caller.
4488 return GetUndefRValue(RetTy);
4489
4490 case ABIArgInfo::Extend:
4491 case ABIArgInfo::Direct: {
4492 llvm::Type *RetIRTy = ConvertType(RetTy);
4493 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4494 switch (getEvaluationKind(RetTy)) {
4495 case TEK_Complex: {
4496 llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4497 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4498 return RValue::getComplex(std::make_pair(Real, Imag));
4499 }
4500 case TEK_Aggregate: {
4501 Address DestPtr = ReturnValue.getValue();
4502 bool DestIsVolatile = ReturnValue.isVolatile();
4503
4504 if (!DestPtr.isValid()) {
4505 DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4506 DestIsVolatile = false;
4507 }
4508 BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4509 return RValue::getAggregate(DestPtr);
4510 }
4511 case TEK_Scalar: {
4512 // If the argument doesn't match, perform a bitcast to coerce it. This
4513 // can happen due to trivial type mismatches.
4514 llvm::Value *V = CI;
4515 if (V->getType() != RetIRTy)
4516 V = Builder.CreateBitCast(V, RetIRTy);
4517 return RValue::get(V);
4518 }
4519 }
4520 llvm_unreachable("bad evaluation kind");
4521 }
4522
4523 Address DestPtr = ReturnValue.getValue();
4524 bool DestIsVolatile = ReturnValue.isVolatile();
4525
4526 if (!DestPtr.isValid()) {
4527 DestPtr = CreateMemTemp(RetTy, "coerce");
4528 DestIsVolatile = false;
4529 }
4530
4531 // If the value is offset in memory, apply the offset now.
4532 Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4533 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4534
4535 return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4536 }
4537
4538 case ABIArgInfo::Expand:
4539 llvm_unreachable("Invalid ABI kind for return argument");
4540 }
4541
4542 llvm_unreachable("Unhandled ABIArgInfo::Kind");
4543 } ();
4544
4545 // Emit the assume_aligned check on the return value.
4546 if (Ret.isScalar() && TargetDecl) {
4547 if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4548 llvm::Value *OffsetValue = nullptr;
4549 if (const auto *Offset = AA->getOffset())
4550 OffsetValue = EmitScalarExpr(Offset);
4551
4552 llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4553 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4554 EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
4555 AlignmentCI->getZExtValue(), OffsetValue);
4556 } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4557 llvm::Value *AlignmentVal = CallArgs[AA->getParamIndex().getLLVMIndex()]
4558 .getRValue(*this)
4559 .getScalarVal();
4560 EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
4561 AlignmentVal);
4562 }
4563 }
4564
4565 return Ret;
4566}
4567
4568CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
4569 if (isVirtual()) {
4570 const CallExpr *CE = getVirtualCallExpr();
4571 return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
4572 CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
4573 CE ? CE->getBeginLoc() : SourceLocation());
4574 }
4575
4576 return *this;
4577}
4578
4579/* VarArg handling */
4580
4581Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4582 VAListAddr = VE->isMicrosoftABI()
4583 ? EmitMSVAListRef(VE->getSubExpr())
4584 : EmitVAListRef(VE->getSubExpr());
4585 QualType Ty = VE->getType();
4586 if (VE->isMicrosoftABI())
4587 return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4588 return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4589}

/build/llvm-toolchain-snapshot-10~svn373517/tools/clang/lib/CodeGen/CodeGenFunction.h

1//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the internal per-function state used for llvm translation.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
14#define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
15
16#include "CGBuilder.h"
17#include "CGDebugInfo.h"
18#include "CGLoopInfo.h"
19#include "CGValue.h"
20#include "CodeGenModule.h"
21#include "CodeGenPGO.h"
22#include "EHScopeStack.h"
23#include "VarBypassDetector.h"
24#include "clang/AST/CharUnits.h"
25#include "clang/AST/CurrentSourceLocExprScope.h"
26#include "clang/AST/ExprCXX.h"
27#include "clang/AST/ExprObjC.h"
28#include "clang/AST/ExprOpenMP.h"
29#include "clang/AST/Type.h"
30#include "clang/Basic/ABI.h"
31#include "clang/Basic/CapturedStmt.h"
32#include "clang/Basic/CodeGenOptions.h"
33#include "clang/Basic/OpenMPKinds.h"
34#include "clang/Basic/TargetInfo.h"
35#include "llvm/ADT/ArrayRef.h"
36#include "llvm/ADT/DenseMap.h"
37#include "llvm/ADT/MapVector.h"
38#include "llvm/ADT/SmallVector.h"
39#include "llvm/IR/ValueHandle.h"
40#include "llvm/Support/Debug.h"
41#include "llvm/Transforms/Utils/SanitizerStats.h"
42
43namespace llvm {
44class BasicBlock;
45class LLVMContext;
46class MDNode;
47class Module;
48class SwitchInst;
49class Twine;
50class Value;
51}
52
53namespace clang {
54class ASTContext;
55class BlockDecl;
56class CXXDestructorDecl;
57class CXXForRangeStmt;
58class CXXTryStmt;
59class Decl;
60class LabelDecl;
61class EnumConstantDecl;
62class FunctionDecl;
63class FunctionProtoType;
64class LabelStmt;
65class ObjCContainerDecl;
66class ObjCInterfaceDecl;
67class ObjCIvarDecl;
68class ObjCMethodDecl;
69class ObjCImplementationDecl;
70class ObjCPropertyImplDecl;
71class TargetInfo;
72class VarDecl;
73class ObjCForCollectionStmt;
74class ObjCAtTryStmt;
75class ObjCAtThrowStmt;
76class ObjCAtSynchronizedStmt;
77class ObjCAutoreleasePoolStmt;
78
79namespace analyze_os_log {
80class OSLogBufferLayout;
81}
82
83namespace CodeGen {
84class CodeGenTypes;
85class CGCallee;
86class CGFunctionInfo;
87class CGRecordLayout;
88class CGBlockInfo;
89class CGCXXABI;
90class BlockByrefHelpers;
91class BlockByrefInfo;
92class BlockFlags;
93class BlockFieldFlags;
94class RegionCodeGenTy;
95class TargetCodeGenInfo;
96struct OMPTaskDataTy;
97struct CGCoroData;
98
99/// The kind of evaluation to perform on values of a particular
100/// type. Basically, is the code in CGExprScalar, CGExprComplex, or
101/// CGExprAgg?
102///
103/// TODO: should vectors maybe be split out into their own thing?
104enum TypeEvaluationKind {
105 TEK_Scalar,
106 TEK_Complex,
107 TEK_Aggregate
108};
109
110 #define LIST_SANITIZER_CHECKS \
111 SANITIZER_CHECK(AddOverflow, add_overflow, 0) \
112 SANITIZER_CHECK(BuiltinUnreachable, builtin_unreachable, 0) \
113 SANITIZER_CHECK(CFICheckFail, cfi_check_fail, 0) \
114 SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0) \
115 SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0) \
116 SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0) \
117 SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 1) \
118 SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0) \
119 SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \
120 SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0) \
121 SANITIZER_CHECK(MissingReturn, missing_return, 0) \
122 SANITIZER_CHECK(MulOverflow, mul_overflow, 0) \
123 SANITIZER_CHECK(NegateOverflow, negate_overflow, 0) \
124 SANITIZER_CHECK(NullabilityArg, nullability_arg, 0) \
125 SANITIZER_CHECK(NullabilityReturn, nullability_return, 1) \
126 SANITIZER_CHECK(NonnullArg, nonnull_arg, 0) \
127 SANITIZER_CHECK(NonnullReturn, nonnull_return, 1) \
128 SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0) \
129 SANITIZER_CHECK(PointerOverflow, pointer_overflow, 0) \
130 SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0) \
131 SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
132 SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \
133 SANITIZER_CHECK(AlignmentAssumption, alignment_assumption, 0) \
134 SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0)
135
136enum SanitizerHandler {
137#define SANITIZER_CHECK(Enum, Name, Version) Enum,
138 LIST_SANITIZER_CHECKS
140};
141
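The LIST_SANITIZER_CHECKS/SANITIZER_CHECK pair above is the classic X-macro pattern: the list expands a caller-supplied SANITIZER_CHECK once per entry, and each consumer redefines SANITIZER_CHECK to extract the fields it needs. A minimal, self-contained sketch of the same pattern (LIST_CHECKS, CHECK, Handler, and HandlerNames are illustrative names, not part of clang):

#include <cstdio>

#define LIST_CHECKS \
  CHECK(AddOverflow, add_overflow, 0) \
  CHECK(SubOverflow, sub_overflow, 0)

// First expansion: build an enum of handler kinds.
enum Handler {
#define CHECK(Enum, Name, Version) Enum,
  LIST_CHECKS
#undef CHECK
};

// Second expansion: build a parallel table of runtime entry-point names.
static const char *HandlerNames[] = {
#define CHECK(Enum, Name, Version) "__ubsan_handle_" #Name,
  LIST_CHECKS
#undef CHECK
};

int main() {
  std::printf("%s\n", HandlerNames[SubOverflow]); // __ubsan_handle_sub_overflow
}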
142/// Helper class with most of the code for saving a value for a
143/// conditional expression cleanup.
144struct DominatingLLVMValue {
145 typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
146
147 /// Answer whether the given value needs extra work to be saved.
148 static bool needsSaving(llvm::Value *value) {
149 // If it's not an instruction, we don't need to save.
150 if (!isa<llvm::Instruction>(value)) return false;
151
152 // If it's an instruction in the entry block, we don't need to save.
153 llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
154 return (block != &block->getParent()->getEntryBlock());
155 }
156
157 static saved_type save(CodeGenFunction &CGF, llvm::Value *value);
158 static llvm::Value *restore(CodeGenFunction &CGF, saved_type value);
159};
160
161/// A partial specialization of DominatingValue for llvm::Values that
162/// might be llvm::Instructions.
163template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
164 typedef T *type;
165 static type restore(CodeGenFunction &CGF, saved_type value) {
166 return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
167 }
168};
169
170/// A specialization of DominatingValue for Address.
171template <> struct DominatingValue<Address> {
172 typedef Address type;
173
174 struct saved_type {
175 DominatingLLVMValue::saved_type SavedValue;
176 CharUnits Alignment;
177 };
178
179 static bool needsSaving(type value) {
180 return DominatingLLVMValue::needsSaving(value.getPointer());
181 }
182 static saved_type save(CodeGenFunction &CGF, type value) {
183 return { DominatingLLVMValue::save(CGF, value.getPointer()),
184 value.getAlignment() };
185 }
186 static type restore(CodeGenFunction &CGF, saved_type value) {
187 return Address(DominatingLLVMValue::restore(CGF, value.SavedValue),
188 value.Alignment);
189 }
190};
191
192/// A specialization of DominatingValue for RValue.
193template <> struct DominatingValue<RValue> {
194 typedef RValue type;
195 class saved_type {
196 enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
197 AggregateAddress, ComplexAddress };
198
199 llvm::Value *Value;
200 unsigned K : 3;
201 unsigned Align : 29;
202 saved_type(llvm::Value *v, Kind k, unsigned a = 0)
203 : Value(v), K(k), Align(a) {}
204
205 public:
206 static bool needsSaving(RValue value);
207 static saved_type save(CodeGenFunction &CGF, RValue value);
208 RValue restore(CodeGenFunction &CGF);
209
210 // implementations in CGCleanup.cpp
211 };
212
213 static bool needsSaving(type value) {
214 return saved_type::needsSaving(value);
215 }
216 static saved_type save(CodeGenFunction &CGF, type value) {
217 return saved_type::save(CGF, value);
218 }
219 static type restore(CodeGenFunction &CGF, saved_type value) {
220 return value.restore(CGF);
221 }
222};
223
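A hedged sketch of how these traits are consumed (pushFullExprCleanup, further below, does exactly this via saveValueInCond); CGF and Addr are assumed to be a CodeGenFunction and an Address already in scope:

// Illustrative only: save a value on a conditionally-evaluated path, then
// restore it at the point where the cleanup actually runs.
DominatingValue<Address>::saved_type Saved =
    DominatingValue<Address>::save(CGF, Addr);
// ... code on the conditional path ...
Address Restored = DominatingValue<Address>::restore(CGF, Saved);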
224/// CodeGenFunction - This class organizes the per-function state that is used
225/// while generating LLVM code.
226class CodeGenFunction : public CodeGenTypeCache {
227 CodeGenFunction(const CodeGenFunction &) = delete;
228 void operator=(const CodeGenFunction &) = delete;
229
230 friend class CGCXXABI;
231public:
232 /// A jump destination is an abstract label, branching to which may
233 /// require a jump out through normal cleanups.
234 struct JumpDest {
235 JumpDest() : Block(nullptr), ScopeDepth(), Index(0) {}
236 JumpDest(llvm::BasicBlock *Block,
237 EHScopeStack::stable_iterator Depth,
238 unsigned Index)
239 : Block(Block), ScopeDepth(Depth), Index(Index) {}
240
241 bool isValid() const { return Block != nullptr; }
242 llvm::BasicBlock *getBlock() const { return Block; }
243 EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
244 unsigned getDestIndex() const { return Index; }
245
246 // This should be used cautiously.
247 void setScopeDepth(EHScopeStack::stable_iterator depth) {
248 ScopeDepth = depth;
249 }
250
251 private:
252 llvm::BasicBlock *Block;
253 EHScopeStack::stable_iterator ScopeDepth;
254 unsigned Index;
255 };
256
257 CodeGenModule &CGM; // Per-module state.
258 const TargetInfo &Target;
259
260 typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
261 LoopInfoStack LoopStack;
262 CGBuilderTy Builder;
263
264 // Stores variables for which we can't generate correct lifetime markers
265 // because of jumps.
266 VarBypassDetector Bypasses;
267
268 // CodeGen lambda for loops and support for ordered clause
269 typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
270 JumpDest)>
271 CodeGenLoopTy;
272 typedef llvm::function_ref<void(CodeGenFunction &, SourceLocation,
273 const unsigned, const bool)>
274 CodeGenOrderedTy;
275
276 // Codegen lambda for loop bounds in worksharing loop constructs
277 typedef llvm::function_ref<std::pair<LValue, LValue>(
278 CodeGenFunction &, const OMPExecutableDirective &S)>
279 CodeGenLoopBoundsTy;
280
281 // Codegen lambda for loop bounds in dispatch-based loop implementation
282 typedef llvm::function_ref<std::pair<llvm::Value *, llvm::Value *>(
283 CodeGenFunction &, const OMPExecutableDirective &S, Address LB,
284 Address UB)>
285 CodeGenDispatchBoundsTy;
286
287 /// CGBuilder insert helper. This function is called after an
288 /// instruction is created using Builder.
289 void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
290 llvm::BasicBlock *BB,
291 llvm::BasicBlock::iterator InsertPt) const;
292
293 /// CurFuncDecl - Holds the Decl for the current outermost
294 /// non-closure context.
295 const Decl *CurFuncDecl;
296 /// CurCodeDecl - This is the inner-most code context, which includes blocks.
297 const Decl *CurCodeDecl;
298 const CGFunctionInfo *CurFnInfo;
299 QualType FnRetTy;
300 llvm::Function *CurFn = nullptr;
301
302 // Holds coroutine data if the current function is a coroutine. We use a
303 // wrapper to manage its lifetime, so that we don't have to define CGCoroData
304 // in this header.
305 struct CGCoroInfo {
306 std::unique_ptr<CGCoroData> Data;
307 CGCoroInfo();
308 ~CGCoroInfo();
309 };
310 CGCoroInfo CurCoro;
311
312 bool isCoroutine() const {
313 return CurCoro.Data != nullptr;
314 }
315
316 /// CurGD - The GlobalDecl for the current function being compiled.
317 GlobalDecl CurGD;
318
319 /// PrologueCleanupDepth - The cleanup depth enclosing all the
320 /// cleanups associated with the parameters.
321 EHScopeStack::stable_iterator PrologueCleanupDepth;
322
323 /// ReturnBlock - Unified return block.
324 JumpDest ReturnBlock;
325
326 /// ReturnValue - The temporary alloca to hold the return
327 /// value. This is invalid iff the function has no return value.
328 Address ReturnValue = Address::invalid();
329
330 /// ReturnValuePointer - The temporary alloca to hold a pointer to sret.
331 /// This is invalid if sret is not in use.
332 Address ReturnValuePointer = Address::invalid();
333
334 /// Return true if a label was seen in the current scope.
335 bool hasLabelBeenSeenInCurrentScope() const {
336 if (CurLexicalScope)
337 return CurLexicalScope->hasLabels();
338 return !LabelMap.empty();
339 }
340
341 /// AllocaInsertPoint - This is an instruction in the entry block before which
342 /// we prefer to insert allocas.
343 llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
344
345 /// API for captured statement code generation.
346 class CGCapturedStmtInfo {
347 public:
348 explicit CGCapturedStmtInfo(CapturedRegionKind K = CR_Default)
349 : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
350 explicit CGCapturedStmtInfo(const CapturedStmt &S,
351 CapturedRegionKind K = CR_Default)
352 : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {
353
354 RecordDecl::field_iterator Field =
355 S.getCapturedRecordDecl()->field_begin();
356 for (CapturedStmt::const_capture_iterator I = S.capture_begin(),
357 E = S.capture_end();
358 I != E; ++I, ++Field) {
359 if (I->capturesThis())
360 CXXThisFieldDecl = *Field;
361 else if (I->capturesVariable())
362 CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
363 else if (I->capturesVariableByCopy())
364 CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
365 }
366 }
367
368 virtual ~CGCapturedStmtInfo();
369
370 CapturedRegionKind getKind() const { return Kind; }
371
372 virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
373 // Retrieve the value of the context parameter.
374 virtual llvm::Value *getContextValue() const { return ThisValue; }
375
376 /// Lookup the captured field decl for a variable.
377 virtual const FieldDecl *lookup(const VarDecl *VD) const {
378 return CaptureFields.lookup(VD->getCanonicalDecl());
379 }
380
381 bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; }
382 virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }
383
384 static bool classof(const CGCapturedStmtInfo *) {
385 return true;
386 }
387
388 /// Emit the captured statement body.
389 virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
390 CGF.incrementProfileCounter(S);
391 CGF.EmitStmt(S);
392 }
393
394 /// Get the name of the capture helper.
395 virtual StringRef getHelperName() const { return "__captured_stmt"; }
396
397 private:
398 /// The kind of captured statement being generated.
399 CapturedRegionKind Kind;
400
401 /// Keeps the mapping between VarDecl and FieldDecl.
402 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;
403
404 /// The base address of the captured record, passed in as the first
405 /// argument of the parallel region function.
406 llvm::Value *ThisValue;
407
408 /// Captured 'this' type.
409 FieldDecl *CXXThisFieldDecl;
410 };
411 CGCapturedStmtInfo *CapturedStmtInfo = nullptr;
412
413 /// RAII for correct setting/restoring of CapturedStmtInfo.
414 class CGCapturedStmtRAII {
415 private:
416 CodeGenFunction &CGF;
417 CGCapturedStmtInfo *PrevCapturedStmtInfo;
418 public:
419 CGCapturedStmtRAII(CodeGenFunction &CGF,
420 CGCapturedStmtInfo *NewCapturedStmtInfo)
421 : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo) {
422 CGF.CapturedStmtInfo = NewCapturedStmtInfo;
423 }
424 ~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
425 };
426
427 /// An abstract representation of regular/ObjC call/message targets.
428 class AbstractCallee {
429 /// The function declaration of the callee.
430 const Decl *CalleeDecl;
431
432 public:
433 AbstractCallee() : CalleeDecl(nullptr) {}
434 AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {}
435 AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
436 bool hasFunctionDecl() const {
437 return dyn_cast_or_null<FunctionDecl>(CalleeDecl);
438 }
439 const Decl *getDecl() const { return CalleeDecl; }
440 unsigned getNumParams() const {
441 if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
442 return FD->getNumParams();
443 return cast<ObjCMethodDecl>(CalleeDecl)->param_size();
444 }
445 const ParmVarDecl *getParamDecl(unsigned I) const {
446 if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
447 return FD->getParamDecl(I);
448 return *(cast<ObjCMethodDecl>(CalleeDecl)->param_begin() + I);
449 }
450 };
451
452 /// Sanitizers enabled for this function.
453 SanitizerSet SanOpts;
454
455 /// True if CodeGen currently emits code implementing sanitizer checks.
456 bool IsSanitizerScope = false;
457
458 /// RAII object to set/unset CodeGenFunction::IsSanitizerScope.
459 class SanitizerScope {
460 CodeGenFunction *CGF;
461 public:
462 SanitizerScope(CodeGenFunction *CGF);
463 ~SanitizerScope();
464 };
465
466 /// In C++, whether we are code generating a thunk. This controls whether we
467 /// should emit cleanups.
468 bool CurFuncIsThunk = false;
469
470 /// In ARC, whether we should autorelease the return value.
471 bool AutoreleaseResult = false;
472
473 /// Whether we processed a Microsoft-style asm block during CodeGen. These can
474 /// potentially set the return value.
475 bool SawAsmBlock = false;
476
477 const NamedDecl *CurSEHParent = nullptr;
478
479 /// True if the current function is an outlined SEH helper. This can be a
480 /// finally block or filter expression.
481 bool IsOutlinedSEHHelper = false;
482
483 /// True if CodeGen currently emits code inside preserved access index
484 /// region.
485 bool IsInPreservedAIRegion = false;
486
487 const CodeGen::CGBlockInfo *BlockInfo = nullptr;
488 llvm::Value *BlockPointer = nullptr;
489
490 llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
491 FieldDecl *LambdaThisCaptureField = nullptr;
492
493 /// A mapping from NRVO variables to the flags used to indicate
494 /// when the NRVO has been applied to this variable.
495 llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
496
497 EHScopeStack EHStack;
498 llvm::SmallVector<char, 256> LifetimeExtendedCleanupStack;
499 llvm::SmallVector<const JumpDest *, 2> SEHTryEpilogueStack;
500
501 llvm::Instruction *CurrentFuncletPad = nullptr;
502
503 class CallLifetimeEnd final : public EHScopeStack::Cleanup {
504 llvm::Value *Addr;
505 llvm::Value *Size;
506
507 public:
508 CallLifetimeEnd(Address addr, llvm::Value *size)
509 : Addr(addr.getPointer()), Size(size) {}
510
511 void Emit(CodeGenFunction &CGF, Flags flags) override {
512 CGF.EmitLifetimeEnd(Size, Addr);
513 }
514 };
515
516 /// Header for data within LifetimeExtendedCleanupStack.
517 struct LifetimeExtendedCleanupHeader {
518 /// The size of the following cleanup object.
519 unsigned Size;
520 /// The kind of cleanup to push: a value from the CleanupKind enumeration.
521 unsigned Kind : 31;
522 /// Whether this is a conditional cleanup.
523 unsigned IsConditional : 1;
524
525 size_t getSize() const { return Size; }
526 CleanupKind getKind() const { return (CleanupKind)Kind; }
527 bool isConditional() const { return IsConditional; }
528 };
529
530 /// i32s containing the indexes of the cleanup destinations.
531 Address NormalCleanupDest = Address::invalid();
532
533 unsigned NextCleanupDestIndex = 1;
534
535 /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
536 CGBlockInfo *FirstBlockInfo = nullptr;
537
538 /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
539 llvm::BasicBlock *EHResumeBlock = nullptr;
540
541 /// The exception slot. All landing pads write the current exception pointer
542 /// into this alloca.
543 llvm::Value *ExceptionSlot = nullptr;
544
545 /// The selector slot. Under the MandatoryCleanup model, all landing pads
546 /// write the current selector value into this alloca.
547 llvm::AllocaInst *EHSelectorSlot = nullptr;
548
549 /// A stack of exception code slots. Entering an __except block pushes a slot
550 /// on the stack and leaving pops one. The __exception_code() intrinsic loads
551 /// a value from the top of the stack.
552 SmallVector<Address, 1> SEHCodeSlotStack;
553
554 /// Value returned by __exception_info intrinsic.
555 llvm::Value *SEHInfo = nullptr;
556
557 /// Emits a landing pad for the current EH stack.
558 llvm::BasicBlock *EmitLandingPad();
559
560 llvm::BasicBlock *getInvokeDestImpl();
561
562 template <class T>
563 typename DominatingValue<T>::saved_type saveValueInCond(T value) {
564 return DominatingValue<T>::save(*this, value);
565 }
566
567public:
568 /// ObjCEHValueStack - Stack of Objective-C exception values, used for
569 /// rethrows.
570 SmallVector<llvm::Value*, 8> ObjCEHValueStack;
571
572 /// A class controlling the emission of a finally block.
573 class FinallyInfo {
574 /// Where the catchall's edge through the cleanup should go.
575 JumpDest RethrowDest;
576
577 /// A function to call to enter the catch.
578 llvm::FunctionCallee BeginCatchFn;
579
580 /// An i1 variable indicating whether or not the @finally is
581 /// running for an exception.
582 llvm::AllocaInst *ForEHVar;
583
584 /// An i8* variable into which the exception pointer to rethrow
585 /// has been saved.
586 llvm::AllocaInst *SavedExnVar;
587
588 public:
589 void enter(CodeGenFunction &CGF, const Stmt *Finally,
590 llvm::FunctionCallee beginCatchFn,
591 llvm::FunctionCallee endCatchFn, llvm::FunctionCallee rethrowFn);
592 void exit(CodeGenFunction &CGF);
593 };
594
595 /// Returns true inside SEH __try blocks.
596 bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); }
597
598 /// Returns true while emitting a cleanuppad.
599 bool isCleanupPadScope() const {
600 return CurrentFuncletPad && isa<llvm::CleanupPadInst>(CurrentFuncletPad);
601 }
602
603 /// pushFullExprCleanup - Push a cleanup to be run at the end of the
604 /// current full-expression. Safe against the possibility that
605 /// we're currently inside a conditionally-evaluated expression.
606 template <class T, class... As>
607 void pushFullExprCleanup(CleanupKind kind, As... A) {
608 // If we're not in a conditional branch, or if none of the
609 // arguments requires saving, then use the unconditional cleanup.
610 if (!isInConditionalBranch())
611 return EHStack.pushCleanup<T>(kind, A...);
612
613 // Stash values in a tuple so we can guarantee the order of saves.
614 typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
615 SavedTuple Saved{saveValueInCond(A)...};
616
617 typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
618 EHStack.pushCleanupTuple<CleanupType>(kind, Saved);
619 initFullExprCleanup();
620 }
621
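A hedged usage sketch reusing the CallLifetimeEnd cleanup declared earlier in this class; addr (an Address) and size (an llvm::Value*) are assumed to be in scope, and NormalEHLifetimeMarker is the CleanupKind from EHScopeStack.h:

// Push a lifetime.end cleanup for the current full-expression. Inside a
// conditional branch, the arguments are first saved through DominatingValue
// and a conditional cleanup is pushed instead.
CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
    NormalEHLifetimeMarker, addr, size);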
622 /// Queue a cleanup to be pushed after finishing the current
623 /// full-expression.
624 template <class T, class... As>
625 void pushCleanupAfterFullExpr(CleanupKind Kind, As... A) {
626 if (!isInConditionalBranch())
627 return pushCleanupAfterFullExprImpl<T>(Kind, Address::invalid(), A...);
628
629 Address ActiveFlag = createCleanupActiveFlag();
630 assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
631        "cleanup active flag should never need saving");
632
633 typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
634 SavedTuple Saved{saveValueInCond(A)...};
635
636 typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
637 pushCleanupAfterFullExprImpl<CleanupType>(Kind, ActiveFlag, Saved);
638 }
639
640 template <class T, class... As>
641 void pushCleanupAfterFullExprImpl(CleanupKind Kind, Address ActiveFlag,
642 As... A) {
643 LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
644 ActiveFlag.isValid()};
645
646 size_t OldSize = LifetimeExtendedCleanupStack.size();
647 LifetimeExtendedCleanupStack.resize(
648 LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size +
649 (Header.IsConditional ? sizeof(ActiveFlag) : 0));
650
651 static_assert(sizeof(Header) % alignof(T) == 0,
652 "Cleanup will be allocated on misaligned address");
653 char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
654 new (Buffer) LifetimeExtendedCleanupHeader(Header);
655 new (Buffer + sizeof(Header)) T(A...);
656 if (Header.IsConditional)
657 new (Buffer + sizeof(Header) + sizeof(T)) Address(ActiveFlag);
658 }
659
660 /// Set up the last cleanup that was pushed as a conditional
661 /// full-expression cleanup.
662 void initFullExprCleanup() {
663 initFullExprCleanupWithFlag(createCleanupActiveFlag());
664 }
665
666 void initFullExprCleanupWithFlag(Address ActiveFlag);
667 Address createCleanupActiveFlag();
668
669 /// PushDestructorCleanup - Push a cleanup to call the
670 /// complete-object destructor of an object of the given type at the
671 /// given address. Does nothing if T is not a C++ class type with a
672 /// non-trivial destructor.
673 void PushDestructorCleanup(QualType T, Address Addr);
674
675 /// PushDestructorCleanup - Push a cleanup to call the
676 /// complete-object variant of the given destructor on the object at
677 /// the given address.
678 void PushDestructorCleanup(const CXXDestructorDecl *Dtor, QualType T,
679 Address Addr);
680
681 /// PopCleanupBlock - Will pop the cleanup entry on the stack and
682 /// process all branch fixups.
683 void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
684
685 /// DeactivateCleanupBlock - Deactivates the given cleanup block.
686 /// The block cannot be reactivated. Pops it if it's the top of the
687 /// stack.
688 ///
689 /// \param DominatingIP - An instruction which is known to
690 /// dominate the current IP (if set) and which lies along
691 /// all paths of execution between the current IP and
692 /// the point at which the cleanup comes into scope.
693 void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
694 llvm::Instruction *DominatingIP);
695
696 /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
697 /// Cannot be used to resurrect a deactivated cleanup.
698 ///
699 /// \param DominatingIP - An instruction which is known to
700 /// dominate the current IP (if set) and which lies along
701 /// all paths of execution between the current IP and
702 /// the point at which the cleanup comes into scope.
703 void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
704 llvm::Instruction *DominatingIP);
705
706 /// Enters a new scope for capturing cleanups, all of which
707 /// will be executed once the scope is exited.
708 class RunCleanupsScope {
709 EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
710 size_t LifetimeExtendedCleanupStackSize;
711 bool OldDidCallStackSave;
712 protected:
713 bool PerformCleanup;
714 private:
715
716 RunCleanupsScope(const RunCleanupsScope &) = delete;
717 void operator=(const RunCleanupsScope &) = delete;
718
719 protected:
720 CodeGenFunction& CGF;
721
722 public:
723 /// Enter a new cleanup scope.
724 explicit RunCleanupsScope(CodeGenFunction &CGF)
725 : PerformCleanup(true), CGF(CGF)
726 {
727 CleanupStackDepth = CGF.EHStack.stable_begin();
728 LifetimeExtendedCleanupStackSize =
729 CGF.LifetimeExtendedCleanupStack.size();
730 OldDidCallStackSave = CGF.DidCallStackSave;
731 CGF.DidCallStackSave = false;
732 OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth;
733 CGF.CurrentCleanupScopeDepth = CleanupStackDepth;
734 }
735
736 /// Exit this cleanup scope, emitting any accumulated cleanups.
737 ~RunCleanupsScope() {
738 if (PerformCleanup)
739 ForceCleanup();
740 }
741
742 /// Determine whether this scope requires any cleanups.
743 bool requiresCleanups() const {
744 return CGF.EHStack.stable_begin() != CleanupStackDepth;
745 }
746
747 /// Force the emission of cleanups now, instead of waiting
748 /// until this object is destroyed.
749 /// \param ValuesToReload - A list of values that need to be available at
750 /// the insertion point after cleanup emission. If cleanup emission created
751 /// a shared cleanup block, these value pointers will be rewritten.
752 /// Otherwise, they will not be modified.
753 void ForceCleanup(std::initializer_list<llvm::Value**> ValuesToReload = {}) {
754 assert(PerformCleanup && "Already forced cleanup");
755 CGF.DidCallStackSave = OldDidCallStackSave;
756 CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
757 ValuesToReload);
758 PerformCleanup = false;
759 CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
760 }
761 };
762
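A hedged sketch of the intended RAII usage: any cleanups pushed onto EHStack while the scope object is alive are emitted when it is destroyed (or when ForceCleanup() is called explicitly).

{
  CodeGenFunction::RunCleanupsScope Scope(CGF);
  // ... emit statements; any cleanups they push belong to this scope ...
}   // ~RunCleanupsScope() runs ForceCleanup() and emits the cleanups here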
763 // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
764 EHScopeStack::stable_iterator CurrentCleanupScopeDepth =
765 EHScopeStack::stable_end();
766
767 class LexicalScope : public RunCleanupsScope {
768 SourceRange Range;
769 SmallVector<const LabelDecl*, 4> Labels;
770 LexicalScope *ParentScope;
771
772 LexicalScope(const LexicalScope &) = delete;
773 void operator=(const LexicalScope &) = delete;
774
775 public:
776 /// Enter a new cleanup scope.
777 explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
778 : RunCleanupsScope(CGF), Range(Range), ParentScope(CGF.CurLexicalScope) {
779 CGF.CurLexicalScope = this;
780 if (CGDebugInfo *DI = CGF.getDebugInfo())
781 DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
782 }
783
784 void addLabel(const LabelDecl *label) {
785 assert(PerformCleanup && "adding label to dead scope?");
786 Labels.push_back(label);
787 }
788
789 /// Exit this cleanup scope, emitting any accumulated
790 /// cleanups.
791 ~LexicalScope() {
792 if (CGDebugInfo *DI = CGF.getDebugInfo())
793 DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
794
795 // If we should perform cleanups, force them now. Note that
796 // this ends the cleanup scope before rescoping any labels.
797 if (PerformCleanup) {
798 ApplyDebugLocation DL(CGF, Range.getEnd());
799 ForceCleanup();
800 }
801 }
802
803 /// Force the emission of cleanups now, instead of waiting
804 /// until this object is destroyed.
805 void ForceCleanup() {
806 CGF.CurLexicalScope = ParentScope;
807 RunCleanupsScope::ForceCleanup();
808
809 if (!Labels.empty())
810 rescopeLabels();
811 }
812
813 bool hasLabels() const {
814 return !Labels.empty();
815 }
816
817 void rescopeLabels();
818 };
819
820 typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;
821
822 /// The class used to temporarily assign new addresses to some variables.
823 class OMPMapVars {
824 DeclMapTy SavedLocals;
825 DeclMapTy SavedTempAddresses;
826 OMPMapVars(const OMPMapVars &) = delete;
827 void operator=(const OMPMapVars &) = delete;
828
829 public:
830 explicit OMPMapVars() = default;
831 ~OMPMapVars() {
832 assert(SavedLocals.empty() && "Did not restored original addresses.");
833 };
834
835 /// Sets the address of the variable \p LocalVD to be \p TempAddr in
836 /// function \p CGF.
837 /// \return true if the variable's address was newly set, false if it was
838 /// saved already.
838 bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD,
839 Address TempAddr) {
840 LocalVD = LocalVD->getCanonicalDecl();
841 // Only save it once.
842 if (SavedLocals.count(LocalVD)) return false;
843
844 // Copy the existing local entry to SavedLocals.
845 auto it = CGF.LocalDeclMap.find(LocalVD);
846 if (it != CGF.LocalDeclMap.end())
847 SavedLocals.try_emplace(LocalVD, it->second);
848 else
849 SavedLocals.try_emplace(LocalVD, Address::invalid());
850
851 // Generate the private entry.
852 QualType VarTy = LocalVD->getType();
853 if (VarTy->isReferenceType()) {
854 Address Temp = CGF.CreateMemTemp(VarTy);
855 CGF.Builder.CreateStore(TempAddr.getPointer(), Temp);
856 TempAddr = Temp;
857 }
858 SavedTempAddresses.try_emplace(LocalVD, TempAddr);
859
860 return true;
861 }
862
863 /// Applies new addresses to the list of the variables.
864 /// \return true if at least one variable is using a new address, false
865 /// otherwise.
866 bool apply(CodeGenFunction &CGF) {
867 copyInto(SavedTempAddresses, CGF.LocalDeclMap);
868 SavedTempAddresses.clear();
869 return !SavedLocals.empty();
870 }
871
872 /// Restores original addresses of the variables.
873 void restore(CodeGenFunction &CGF) {
874 if (!SavedLocals.empty()) {
875 copyInto(SavedLocals, CGF.LocalDeclMap);
876 SavedLocals.clear();
877 }
878 }
879
880 private:
881 /// Copy all the entries in the source map over the corresponding
882 /// entries in the destination, which must exist.
883 static void copyInto(const DeclMapTy &Src, DeclMapTy &Dest) {
884 for (auto &Pair : Src) {
885 if (!Pair.second.isValid()) {
886 Dest.erase(Pair.first);
887 continue;
888 }
889
890 auto I = Dest.find(Pair.first);
891 if (I != Dest.end())
892 I->second = Pair.second;
893 else
894 Dest.insert(Pair);
895 }
896 }
897 };
898
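A hedged sketch of the save/apply/restore protocol; CGF, VD, and PrivateAddr are assumed to be a CodeGenFunction, a VarDecl*, and an Address:

CodeGenFunction::OMPMapVars Remap;
Remap.setVarAddr(CGF, VD, PrivateAddr); // record the temporary address
Remap.apply(CGF);                       // install it into CGF.LocalDeclMap
// ... emit the region body; VD now resolves to PrivateAddr ...
Remap.restore(CGF);                     // put the original address back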
899 /// The scope used to remap some variables as private in the OpenMP loop body
900 /// (or other captured region emitted without outlining), and to restore old
901 /// vars back on exit.
902 class OMPPrivateScope : public RunCleanupsScope {
903 OMPMapVars MappedVars;
904 OMPPrivateScope(const OMPPrivateScope &) = delete;
905 void operator=(const OMPPrivateScope &) = delete;
906
907 public:
908 /// Enter a new OpenMP private scope.
909 explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}
910
911 /// Registers \p LocalVD as a private variable and applies the \p PrivateGen
912 /// function to generate its corresponding private copy. \p PrivateGen
913 /// returns the address of the generated private variable.
914 /// \return true if the variable is registered as private, false if it has
915 /// been privatized already.
916 bool addPrivate(const VarDecl *LocalVD,
917 const llvm::function_ref<Address()> PrivateGen) {
918 assert(PerformCleanup && "adding private to dead scope");
919 return MappedVars.setVarAddr(CGF, LocalVD, PrivateGen());
920 }
921
922 /// Privatizes local variables previously registered as private.
923 /// Registration is separate from the actual privatization so that
924 /// initializers can use the values of the original variables rather than
925 /// the private ones. This is important, for example, if the private
926 /// variable is a class variable initialized by a constructor that
927 /// references other private variables; at initialization the original
928 /// variables must be used, not the private copies.
929 /// \return true if at least one variable was privatized, false otherwise.
930 bool Privatize() { return MappedVars.apply(CGF); }
931
932 void ForceCleanup() {
933 RunCleanupsScope::ForceCleanup();
934 MappedVars.restore(CGF);
935 }
936
937 /// Exit scope - all the mapped variables are restored.
938 ~OMPPrivateScope() {
939 if (PerformCleanup)
940 ForceCleanup();
941 }
942
943 /// Checks if the global variable is captured in the current function.
944 bool isGlobalVarCaptured(const VarDecl *VD) const {
945 VD = VD->getCanonicalDecl();
946 return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
947 }
948 };
949
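A hedged sketch of the register-then-privatize protocol described above; CGF and VD are assumed, and CreateMemTemp is CodeGenFunction's helper for stack temporaries:

CodeGenFunction::OMPPrivateScope PrivScope(CGF);
PrivScope.addPrivate(VD, [&CGF, VD]() -> Address {
  return CGF.CreateMemTemp(VD->getType()); // build the private copy
});
(void)PrivScope.Privatize(); // only now does VD resolve to the private copy
// ... emit the construct body with VD privatized; the scope's cleanup
// restores the original address on exit ...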
950 /// Takes the old cleanup stack size and emits the cleanup blocks
951 /// that have been added.
952 void
953 PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
954 std::initializer_list<llvm::Value **> ValuesToReload = {});
955
956 /// Takes the old cleanup stack size and emits the cleanup blocks
957 /// that have been added, then adds all lifetime-extended cleanups from
958 /// the given position to the stack.
959 void
960 PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
961 size_t OldLifetimeExtendedStackSize,
962 std::initializer_list<llvm::Value **> ValuesToReload = {});
963
964 void ResolveBranchFixups(llvm::BasicBlock *Target);
965
966 /// The given basic block lies in the current EH scope, but may be a
967 /// target of a potentially scope-crossing jump; get a stable handle
968 /// to which we can perform this jump later.
969 JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
970 return JumpDest(Target,
971 EHStack.getInnermostNormalCleanup(),
972 NextCleanupDestIndex++);
973 }
974
975 /// The given basic block lies in the current EH scope, but may be a
976 /// target of a potentially scope-crossing jump; get a stable handle
977 /// to which we can perform this jump later.
978 JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
979 return getJumpDestInCurrentScope(createBasicBlock(Name));
980 }
981
982 /// EmitBranchThroughCleanup - Emit a branch from the current insert
983 /// block through the normal cleanup handling code (if any) and then
984 /// on to \arg Dest.
985 void EmitBranchThroughCleanup(JumpDest Dest);
986
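A hedged sketch tying getJumpDestInCurrentScope and EmitBranchThroughCleanup together; the block name is illustrative:

CodeGenFunction::JumpDest Dest =
    CGF.getJumpDestInCurrentScope("jump.dest"); // hypothetical block name
CGF.EmitBranchThroughCleanup(Dest); // branch, running intervening cleanups
CGF.EmitBlock(Dest.getBlock());     // continue emission at the target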
987 /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
988 /// specified destination obviously has no cleanups to run. 'false' is always
989 /// a conservatively correct answer for this method.
990 bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
991
992 /// popCatchScope - Pops the catch scope at the top of the EHScope
993 /// stack, emitting any required code (other than the catch handlers
994 /// themselves).
995 void popCatchScope();
996
997 llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
998 llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
999 llvm::BasicBlock *
1000 getFuncletEHDispatchBlock(EHScopeStack::stable_iterator scope);
1001
1002 /// An object to manage conditionally-evaluated expressions.
1003 class ConditionalEvaluation {
1004 llvm::BasicBlock *StartBB;
1005
1006 public:
1007 ConditionalEvaluation(CodeGenFunction &CGF)
1008 : StartBB(CGF.Builder.GetInsertBlock()) {}
1009
1010 void begin(CodeGenFunction &CGF) {
1011 assert(CGF.OutermostConditional != this);
1012 if (!CGF.OutermostConditional)
1013 CGF.OutermostConditional = this;
1014 }
1015
1016 void end(CodeGenFunction &CGF) {
1017 assert(CGF.OutermostConditional != nullptr);
1018 if (CGF.OutermostConditional == this)
1019 CGF.OutermostConditional = nullptr;
1020 }
1021
1022 /// Returns a block which will be executed prior to each
1023 /// evaluation of the conditional code.
1024 llvm::BasicBlock *getStartingBlock() const {
1025 return StartBB;
1026 }
1027 };
1028
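A hedged sketch, loosely mirroring how the scalar emitter brackets one arm of a '?:' expression:

CodeGenFunction::ConditionalEvaluation eval(CGF);
eval.begin(CGF); // cleanups pushed from here on are treated as conditional
// ... emit one arm of the conditional expression ...
eval.end(CGF);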
1029 /// isInConditionalBranch - Return true if we're currently emitting
1030 /// one branch or the other of a conditional expression.
1031 bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
1032
1033 void setBeforeOutermostConditional(llvm::Value *value, Address addr) {
1034 assert(isInConditionalBranch());
1035 llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
1036 auto store = new llvm::StoreInst(value, addr.getPointer(), &block->back());
1037 store->setAlignment(addr.getAlignment().getQuantity());
1038 }
1039
1040 /// An RAII object to record that we're evaluating a statement
1041 /// expression.
1042 class StmtExprEvaluation {
1043 CodeGenFunction &CGF;
1044
1045 /// We have to save the outermost conditional: cleanups in a
1046 /// statement expression aren't conditional just because the
1047 /// StmtExpr is.
1048 ConditionalEvaluation *SavedOutermostConditional;
1049
1050 public:
1051 StmtExprEvaluation(CodeGenFunction &CGF)
1052 : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
1053 CGF.OutermostConditional = nullptr;
1054 }
1055
1056 ~StmtExprEvaluation() {
1057 CGF.OutermostConditional = SavedOutermostConditional;
1058 CGF.EnsureInsertPoint();
1059 }
1060 };
1061
1062 /// An object which temporarily prevents a value from being
1063 /// destroyed by aggressive peephole optimizations that assume that
1064 /// all uses of a value have been realized in the IR.
1065 class PeepholeProtection {
1066 llvm::Instruction *Inst;
1067 friend class CodeGenFunction;
1068
1069 public:
1070 PeepholeProtection() : Inst(nullptr) {}
1071 };
1072
1073 /// A non-RAII class containing all the information about a bound
1074 /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
1075 /// this which makes individual mappings very simple; using this
1076 /// class directly is useful when you have a variable number of
1077 /// opaque values or don't want the RAII functionality for some
1078 /// reason.
1079 class OpaqueValueMappingData {
1080 const OpaqueValueExpr *OpaqueValue;
1081 bool BoundLValue;
1082 CodeGenFunction::PeepholeProtection Protection;
1083
1084 OpaqueValueMappingData(const OpaqueValueExpr *ov,
1085 bool boundLValue)
1086 : OpaqueValue(ov), BoundLValue(boundLValue) {}
1087 public:
1088 OpaqueValueMappingData() : OpaqueValue(nullptr) {}
1089
1090 static bool shouldBindAsLValue(const Expr *expr) {
1091 // gl-values should be bound as l-values for obvious reasons.
1092 // Records should be bound as l-values because IR generation
1093 // always keeps them in memory. Expressions of function type
1094 // act exactly like l-values but are formally required to be
1095 // r-values in C.
1096 return expr->isGLValue() ||
1097 expr->getType()->isFunctionType() ||
1098 hasAggregateEvaluationKind(expr->getType());
1099 }
1100
1101 static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1102 const OpaqueValueExpr *ov,
1103 const Expr *e) {
1104 if (shouldBindAsLValue(ov))
1105 return bind(CGF, ov, CGF.EmitLValue(e));
1106 return bind(CGF, ov, CGF.EmitAnyExpr(e));
1107 }
1108
1109 static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1110 const OpaqueValueExpr *ov,
1111 const LValue &lv) {
1112 assert(shouldBindAsLValue(ov));
1113 CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
1114 return OpaqueValueMappingData(ov, true);
1115 }
1116
1117 static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1118 const OpaqueValueExpr *ov,
1119 const RValue &rv) {
1120 assert(!shouldBindAsLValue(ov));
1121 CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
1122
1123 OpaqueValueMappingData data(ov, false);
1124
1125 // Work around an extremely aggressive peephole optimization in
1126 // EmitScalarConversion which assumes that all other uses of a
1127 // value are extant.
1128 data.Protection = CGF.protectFromPeepholes(rv);
1129
1130 return data;
1131 }
1132
1133 bool isValid() const { return OpaqueValue != nullptr; }
1134 void clear() { OpaqueValue = nullptr; }
1135
1136 void unbind(CodeGenFunction &CGF) {
1137 assert(OpaqueValue && "no data to unbind!");
1138
1139 if (BoundLValue) {
1140 CGF.OpaqueLValues.erase(OpaqueValue);
1141 } else {
1142 CGF.OpaqueRValues.erase(OpaqueValue);
1143 CGF.unprotectFromPeepholes(Protection);
1144 }
1145 }
1146 };
1147
1148 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1149 class OpaqueValueMapping {
1150 CodeGenFunction &CGF;
1151 OpaqueValueMappingData Data;
1152
1153 public:
1154 static bool shouldBindAsLValue(const Expr *expr) {
1155 return OpaqueValueMappingData::shouldBindAsLValue(expr);
1156 }
1157
1158 /// Build the opaque value mapping for the given conditional
1159 /// operator if it's the GNU ?: extension. This is a common
1160 /// enough pattern that the convenience operator is really
1161 /// helpful.
1162 ///
1163 OpaqueValueMapping(CodeGenFunction &CGF,
1164 const AbstractConditionalOperator *op) : CGF(CGF) {
1165 if (isa<ConditionalOperator>(op))
1166 // Leave Data empty.
1167 return;
1168
1169 const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
1170 Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
1171 e->getCommon());
1172 }
1173
1174 /// Build the opaque value mapping for an OpaqueValueExpr whose source
1175 /// expression is set to the expression the OVE represents.
1176 OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *OV)
1177 : CGF(CGF) {
1178 if (OV) {
1179 assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used "
1180                               "for OVE with no source expression");
1181 Data = OpaqueValueMappingData::bind(CGF, OV, OV->getSourceExpr());
1182 }
1183 }
1184
1185 OpaqueValueMapping(CodeGenFunction &CGF,
1186 const OpaqueValueExpr *opaqueValue,
1187 LValue lvalue)
1188 : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
1189 }
1190
1191 OpaqueValueMapping(CodeGenFunction &CGF,
1192 const OpaqueValueExpr *opaqueValue,
1193 RValue rvalue)
1194 : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
1195 }
1196
1197 void pop() {
1198 Data.unbind(CGF);
1199 Data.clear();
1200 }
1201
1202 ~OpaqueValueMapping() {
1203 if (Data.isValid()) Data.unbind(CGF);
1204 }
1205 };
1206
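A hedged usage sketch for the GNU '?:' convenience constructor above; E is assumed to be an AbstractConditionalOperator* being emitted:

// Bind the shared operand of 'a ?: b' for the duration of emission; the
// mapping is removed automatically when Binding is destroyed.
CodeGenFunction::OpaqueValueMapping Binding(CGF, E);
// ... emit the conditional; OpaqueValueExpr uses resolve via the mapping ...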
1207private:
1208 CGDebugInfo *DebugInfo;
1209 /// Used to create unique names for artificial VLA size debug info variables.
1210 unsigned VLAExprCounter = 0;
1211 bool DisableDebugInfo = false;
1212
1213 /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1214 /// calling llvm.stacksave for multiple VLAs in the same scope.
1215 bool DidCallStackSave = false;
1216
1217 /// IndirectBranch - The first time an indirect goto is seen we create a block
1218 /// with an indirect branch. Every time we see the address of a label taken,
1219 /// we add the label to the indirect goto. Every subsequent indirect goto is
1220 /// codegen'd as a jump to the IndirectBranch's basic block.
1221 llvm::IndirectBrInst *IndirectBranch = nullptr;
1222
1223 /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1224 /// decls.
1225 DeclMapTy LocalDeclMap;
1226
1227 // Keep track of the cleanups for callee-destructed parameters pushed to the
1228 // cleanup stack so that they can be deactivated later.
1229 llvm::DenseMap<const ParmVarDecl *, EHScopeStack::stable_iterator>
1230 CalleeDestructedParamCleanups;
1231
1232 /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
1233 /// will contain a mapping from said ParmVarDecl to its implicit "object_size"
1234 /// parameter.
1235 llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *, 2>
1236 SizeArguments;
1237
1238 /// Track escaped local variables with auto storage. Used during SEH
1239 /// outlining to produce a call to llvm.localescape.
1240 llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;
1241
1242 /// LabelMap - This keeps track of the LLVM basic block for each C label.
1243 llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
1244
1245 // BreakContinueStack - This keeps track of where break and continue
1246 // statements should jump to.
1247 struct BreakContinue {
1248 BreakContinue(JumpDest Break, JumpDest Continue)
1249 : BreakBlock(Break), ContinueBlock(Continue) {}
1250
1251 JumpDest BreakBlock;
1252 JumpDest ContinueBlock;
1253 };
1254 SmallVector<BreakContinue, 8> BreakContinueStack;
1255
1256 /// Handles cancellation exit points in OpenMP-related constructs.
1257 class OpenMPCancelExitStack {
1258 /// Tracks cancellation exit point and join point for cancel-related exit
1259 /// and normal exit.
1260 struct CancelExit {
1261 CancelExit() = default;
1262 CancelExit(OpenMPDirectiveKind Kind, JumpDest ExitBlock,
1263 JumpDest ContBlock)
1264 : Kind(Kind), ExitBlock(ExitBlock), ContBlock(ContBlock) {}
1265 OpenMPDirectiveKind Kind = OMPD_unknown;
1266 /// true if the exit block has been emitted already by the special
1267 /// emitExit() call, false if the default codegen is used.
1268 bool HasBeenEmitted = false;
1269 JumpDest ExitBlock;
1270 JumpDest ContBlock;
1271 };
1272
1273 SmallVector<CancelExit, 8> Stack;
1274
1275 public:
1276 OpenMPCancelExitStack() : Stack(1) {}
1277 ~OpenMPCancelExitStack() = default;
1278 /// Fetches the exit block for the current OpenMP construct.
1279 JumpDest getExitBlock() const { return Stack.back().ExitBlock; }
1280 /// Emits the exit block with a codegen procedure specific to the related
1281 /// OpenMP construct, and emits code for the normal construct cleanup.
1282 void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
1283 const llvm::function_ref<void(CodeGenFunction &)> CodeGen) {
1284 if (Stack.back().Kind == Kind && getExitBlock().isValid()) {
1285 assert(CGF.getOMPCancelDestination(Kind).isValid());
1286 assert(CGF.HaveInsertPoint());
1287 assert(!Stack.back().HasBeenEmitted);
1288 auto IP = CGF.Builder.saveAndClearIP();
1289 CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1290 CodeGen(CGF);
1291 CGF.EmitBranch(Stack.back().ContBlock.getBlock());
1292 CGF.Builder.restoreIP(IP);
1293 Stack.back().HasBeenEmitted = true;
1294 }
1295 CodeGen(CGF);
1296 }
1297 /// Enter the cancel supporting \a Kind construct.
1298 /// \param Kind OpenMP directive that supports cancel constructs.
1299 /// \param HasCancel true, if the construct has inner cancel directive,