Bug Summary

File: tools/clang/lib/CodeGen/CGCall.cpp
Warning: line 3843, column 31
Called C++ object pointer is null
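
This class of report means the analyzer found an execution path on which a member function is invoked through a pointer it has already proven to be null. A minimal sketch of the general pattern (hypothetical names, not the code at line 3843):

    struct Widget { void draw(); };

    void render(Widget *W) {
      if (W)          // state split: on the false branch, W is null
        W->draw();
      W->draw();      // flagged: called C++ object pointer is null
    }

On the path where the 'if' is not taken, the second call dereferences a null 'this'.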

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGCall.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -relaxed-aliasing -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D CLANG_VENDOR="Debian " -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-8~svn350071/tools/clang/include -I /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn350071/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/tools/clang/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-8~svn350071=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fobjc-runtime=gcc -fno-common -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-12-27-042839-1215-1 -x c++ /build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGCall.cpp -faddrsig
1//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// These classes wrap the information about a call or function
11// definition used to handle ABI compliancy.
12//
13//===----------------------------------------------------------------------===//
14
15#include "CGCall.h"
16#include "ABIInfo.h"
17#include "CGBlocks.h"
18#include "CGCXXABI.h"
19#include "CGCleanup.h"
20#include "CodeGenFunction.h"
21#include "CodeGenModule.h"
22#include "TargetInfo.h"
23#include "clang/AST/Decl.h"
24#include "clang/AST/DeclCXX.h"
25#include "clang/AST/DeclObjC.h"
26#include "clang/Basic/CodeGenOptions.h"
27#include "clang/Basic/TargetBuiltins.h"
28#include "clang/Basic/TargetInfo.h"
29#include "clang/CodeGen/CGFunctionInfo.h"
30#include "clang/CodeGen/SwiftCallingConv.h"
31#include "llvm/ADT/StringExtras.h"
32#include "llvm/Transforms/Utils/Local.h"
33#include "llvm/Analysis/ValueTracking.h"
34#include "llvm/IR/Attributes.h"
35#include "llvm/IR/CallSite.h"
36#include "llvm/IR/CallingConv.h"
37#include "llvm/IR/DataLayout.h"
38#include "llvm/IR/InlineAsm.h"
39#include "llvm/IR/IntrinsicInst.h"
40#include "llvm/IR/Intrinsics.h"
41using namespace clang;
42using namespace CodeGen;
43
44/***/
45
46unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
47 switch (CC) {
48 default: return llvm::CallingConv::C;
49 case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
50 case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
51 case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
52 case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
53 case CC_Win64: return llvm::CallingConv::Win64;
54 case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
55 case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
56 case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
57 case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
58 // TODO: Add support for __pascal to LLVM.
59 case CC_X86Pascal: return llvm::CallingConv::C;
60 // TODO: Add support for __vectorcall to LLVM.
61 case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
62 case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
63 case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
64 case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
65 case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
66 case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
67 case CC_Swift: return llvm::CallingConv::Swift;
68 }
69}
70
71/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
72/// qualification.
73static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD,
74 const CXXMethodDecl *MD) {
75 QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
76 if (MD)
77 RecTy = Context.getAddrSpaceQualType(RecTy, MD->getType().getAddressSpace());
78 return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
79}
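
Note: a quick illustration of the rule above (hypothetical type, not from this file):

    struct S { void f() const; };
    // Inside f, the AST type of 'this' is 'const S *', but GetThisType yields
    // plain 'S *': the CVR qualifiers are dropped, and only an address-space
    // qualifier from the method, if any, survives.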
80
81/// Returns the canonical formal type of the given C++ method.
82static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
83 return MD->getType()->getCanonicalTypeUnqualified()
84 .getAs<FunctionProtoType>();
85}
86
87/// Returns the "extra-canonicalized" return type, which discards
88/// qualifiers on the return type. Codegen doesn't care about them,
89/// and it makes ABI code a little easier to be able to assume that
90/// all parameter and return types are top-level unqualified.
91static CanQualType GetReturnType(QualType RetTy) {
92 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
93}
94
95/// Arrange the argument and result information for a value of the given
96/// unprototyped freestanding function type.
97const CGFunctionInfo &
98CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
99 // When translating an unprototyped function type, always use a
100 // variadic type.
101 return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
102 /*instanceMethod=*/false,
103 /*chainCall=*/false, None,
104 FTNP->getExtInfo(), {}, RequiredArgs(0));
105}
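
Note: this matters for K&R-style C, where a call through an unprototyped declaration cannot be checked against parameter types. A sketch (C, hypothetical):

    int f();            /* no prototype: parameter types unknown */
    int g(void) {
      return f(1, 2.0); /* lowered through a variadic-style signature,
                           RequiredArgs(0), after default promotions */
    }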
106
107static void addExtParameterInfosForCall(
108 llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
109 const FunctionProtoType *proto,
110 unsigned prefixArgs,
111 unsigned totalArgs) {
112 assert(proto->hasExtParameterInfos());
113 assert(paramInfos.size() <= prefixArgs);
114 assert(proto->getNumParams() + prefixArgs <= totalArgs);
115
116 paramInfos.reserve(totalArgs);
117
118 // Add default infos for any prefix args that don't already have infos.
119 paramInfos.resize(prefixArgs);
120
121 // Add infos for the prototype.
122 for (const auto &ParamInfo : proto->getExtParameterInfos()) {
123 paramInfos.push_back(ParamInfo);
124 // pass_object_size params have no parameter info.
125 if (ParamInfo.hasPassObjectSize())
126 paramInfos.emplace_back();
127 }
128
129 assert(paramInfos.size() <= totalArgs &&
130 "Did we forget to insert pass_object_size args?");
131 // Add default infos for the variadic and/or suffix arguments.
132 paramInfos.resize(totalArgs);
133}
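
Note: the pass_object_size handling is what lets the totals exceed the prototype's own parameter count. Sketch (Clang extension, hypothetical function):

    void fill(char *buf __attribute__((pass_object_size(0))), char v);
    // ABI-wise this behaves like: void fill(char *buf, size_t buf_size, char v);
    // so a default-constructed ExtParameterInfo is appended for the hidden size.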
134
135/// Adds the formal parameters in FPT to the given prefix. If any parameter in
136/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
137static void appendParameterTypes(const CodeGenTypes &CGT,
138 SmallVectorImpl<CanQualType> &prefix,
139 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
140 CanQual<FunctionProtoType> FPT) {
141 // Fast path: don't touch param info if we don't need to.
142 if (!FPT->hasExtParameterInfos()) {
143 assert(paramInfos.empty() &&
144 "We have paramInfos, but the prototype doesn't?");
145 prefix.append(FPT->param_type_begin(), FPT->param_type_end());
146 return;
147 }
148
149 unsigned PrefixSize = prefix.size();
150 // In the vast majority of cases, we'll have precisely FPT->getNumParams()
151 // parameters; the only thing that can change this is the presence of
152 // pass_object_size. So, we preallocate for the common case.
153 prefix.reserve(prefix.size() + FPT->getNumParams());
154
155 auto ExtInfos = FPT->getExtParameterInfos();
156 assert(ExtInfos.size() == FPT->getNumParams());
157 for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
158 prefix.push_back(FPT->getParamType(I));
159 if (ExtInfos[I].hasPassObjectSize())
160 prefix.push_back(CGT.getContext().getSizeType());
161 }
162
163 addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
164 prefix.size());
165}
166
167/// Arrange the LLVM function layout for a value of the given function
168/// type, on top of any implicit parameters already stored.
169static const CGFunctionInfo &
170arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
171 SmallVectorImpl<CanQualType> &prefix,
172 CanQual<FunctionProtoType> FTP,
173 const FunctionDecl *FD) {
174 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
175 RequiredArgs Required =
176 RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
177 // FIXME: Kill copy.
178 appendParameterTypes(CGT, prefix, paramInfos, FTP);
179 CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
180
181 return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
182 /*chainCall=*/false, prefix,
183 FTP->getExtInfo(), paramInfos,
184 Required);
185}
186
187/// Arrange the argument and result information for a value of the
188/// given freestanding function type.
189const CGFunctionInfo &
190CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
191 const FunctionDecl *FD) {
192 SmallVector<CanQualType, 16> argTypes;
193 return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
194 FTP, FD);
195}
196
197static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
198 // Set the appropriate calling convention for the Function.
199 if (D->hasAttr<StdCallAttr>())
200 return CC_X86StdCall;
201
202 if (D->hasAttr<FastCallAttr>())
203 return CC_X86FastCall;
204
205 if (D->hasAttr<RegCallAttr>())
206 return CC_X86RegCall;
207
208 if (D->hasAttr<ThisCallAttr>())
209 return CC_X86ThisCall;
210
211 if (D->hasAttr<VectorCallAttr>())
212 return CC_X86VectorCall;
213
214 if (D->hasAttr<PascalAttr>())
215 return CC_X86Pascal;
216
217 if (PcsAttr *PCS = D->getAttr<PcsAttr>())
218 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
219
220 if (D->hasAttr<AArch64VectorPcsAttr>())
221 return CC_AArch64VectorCall;
222
223 if (D->hasAttr<IntelOclBiccAttr>())
224 return CC_IntelOclBicc;
225
226 if (D->hasAttr<MSABIAttr>())
227 return IsWindows ? CC_C : CC_Win64;
228
229 if (D->hasAttr<SysVABIAttr>())
230 return IsWindows ? CC_X86_64SysV : CC_C;
231
232 if (D->hasAttr<PreserveMostAttr>())
233 return CC_PreserveMost;
234
235 if (D->hasAttr<PreserveAllAttr>())
236 return CC_PreserveAll;
237
238 return CC_C;
239}
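
Note: for example (hypothetical declarations), the chain of attribute checks above maps:

    void f1() __attribute__((stdcall));   // -> CC_X86StdCall
    void f2() __attribute__((regcall));   // -> CC_X86RegCall
    void f3() __attribute__((ms_abi));    // -> CC_Win64 (CC_C if already on Windows)
    void f4();                            // no attribute -> CC_C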
240
241/// Arrange the argument and result information for a call to an
242/// unknown C++ non-static member function of the given abstract type.
243/// (Zero value of RD means we don't have any meaningful "this" argument type,
244/// so fall back to a generic pointer type).
245/// The member function must be an ordinary function, i.e. not a
246/// constructor or destructor.
247const CGFunctionInfo &
248CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
249 const FunctionProtoType *FTP,
250 const CXXMethodDecl *MD) {
251 SmallVector<CanQualType, 16> argTypes;
252
253 // Add the 'this' pointer.
254 if (RD)
255 argTypes.push_back(GetThisType(Context, RD, MD));
256 else
257 argTypes.push_back(Context.VoidPtrTy);
258
259 return ::arrangeLLVMFunctionInfo(
260 *this, true, argTypes,
261 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
262}
263
264/// Set calling convention for CUDA/HIP kernel.
265static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
266 const FunctionDecl *FD) {
267 if (FD->hasAttr<CUDAGlobalAttr>()) {
268 const FunctionType *FT = FTy->getAs<FunctionType>();
269 CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
270 FTy = FT->getCanonicalTypeUnqualified();
271 }
272}
273
274/// Arrange the argument and result information for a declaration or
275/// definition of the given C++ non-static member function. The
276/// member function must be an ordinary function, i.e. not a
277/// constructor or destructor.
278const CGFunctionInfo &
279CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
280 assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
281 assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
282
283 CanQualType FT = GetFormalType(MD).getAs<Type>();
284 setCUDAKernelCallingConvention(FT, CGM, MD);
285 auto prototype = FT.getAs<FunctionProtoType>();
286
287 if (MD->isInstance()) {
288 // The abstract case is perfectly fine.
289 const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
290 return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
291 }
292
293 return arrangeFreeFunctionType(prototype, MD);
294}
295
296bool CodeGenTypes::inheritingCtorHasParams(
297 const InheritedConstructor &Inherited, CXXCtorType Type) {
298 // Parameters are unnecessary if we're constructing a base class subobject
299 // and the inherited constructor lives in a virtual base.
300 return Type == Ctor_Complete ||
301 !Inherited.getShadowDecl()->constructsVirtualBase() ||
302 !Target.getCXXABI().hasConstructorVariants();
303 }
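
Note: a sketch of the virtual-base case this handles (hypothetical classes):

    struct A { A(int); };
    struct B : virtual A { using A::A; };   // inherited constructor B(int)
    // The complete-object variant of B(int) constructs the virtual base A and
    // needs the forwarded 'int'; the base-object variant does not, so on ABIs
    // with constructor variants it takes no parameters.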
304
305const CGFunctionInfo &
306CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
307 StructorType Type) {
308
309 SmallVector<CanQualType, 16> argTypes;
310 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
311 argTypes.push_back(GetThisType(Context, MD->getParent(), MD));
312
313 bool PassParams = true;
314
315 GlobalDecl GD;
316 if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
317 GD = GlobalDecl(CD, toCXXCtorType(Type));
318
319 // A base class inheriting constructor doesn't get forwarded arguments
320 // needed to construct a virtual base (or base class thereof).
321 if (auto Inherited = CD->getInheritedConstructor())
322 PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
323 } else {
324 auto *DD = dyn_cast<CXXDestructorDecl>(MD);
325 GD = GlobalDecl(DD, toCXXDtorType(Type));
326 }
327
328 CanQual<FunctionProtoType> FTP = GetFormalType(MD);
329
330 // Add the formal parameters.
331 if (PassParams)
332 appendParameterTypes(*this, argTypes, paramInfos, FTP);
333
334 CGCXXABI::AddedStructorArgs AddedArgs =
335 TheCXXABI.buildStructorSignature(MD, Type, argTypes);
336 if (!paramInfos.empty()) {
337 // Note: prefix implies after the first param.
338 if (AddedArgs.Prefix)
339 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
340 FunctionProtoType::ExtParameterInfo{});
341 if (AddedArgs.Suffix)
342 paramInfos.append(AddedArgs.Suffix,
343 FunctionProtoType::ExtParameterInfo{});
344 }
345
346 RequiredArgs required =
347 (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
348 : RequiredArgs::All);
349
350 FunctionType::ExtInfo extInfo = FTP->getExtInfo();
351 CanQualType resultType = TheCXXABI.HasThisReturn(GD)
352 ? argTypes.front()
353 : TheCXXABI.hasMostDerivedReturn(GD)
354 ? CGM.getContext().VoidPtrTy
355 : Context.VoidTy;
356 return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
357 /*chainCall=*/false, argTypes, extInfo,
358 paramInfos, required);
359}
360
361static SmallVector<CanQualType, 16>
362getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
363 SmallVector<CanQualType, 16> argTypes;
364 for (auto &arg : args)
365 argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
366 return argTypes;
367}
368
369static SmallVector<CanQualType, 16>
370getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
371 SmallVector<CanQualType, 16> argTypes;
372 for (auto &arg : args)
373 argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
374 return argTypes;
375}
376
377static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
378getExtParameterInfosForCall(const FunctionProtoType *proto,
379 unsigned prefixArgs, unsigned totalArgs) {
380 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
381 if (proto->hasExtParameterInfos()) {
382 addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
383 }
384 return result;
385}
386
387/// Arrange a call to a C++ method, passing the given arguments.
388///
389/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
390/// parameter.
391/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
392/// args.
393/// PassProtoArgs indicates whether `args` has args for the parameters in the
394/// given CXXConstructorDecl.
395const CGFunctionInfo &
396CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
397 const CXXConstructorDecl *D,
398 CXXCtorType CtorKind,
399 unsigned ExtraPrefixArgs,
400 unsigned ExtraSuffixArgs,
401 bool PassProtoArgs) {
402 // FIXME: Kill copy.
403 SmallVector<CanQualType, 16> ArgTypes;
404 for (const auto &Arg : args)
405 ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
406
407 // +1 for implicit this, which should always be args[0].
408 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
409
410 CanQual<FunctionProtoType> FPT = GetFormalType(D);
411 RequiredArgs Required =
412 RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
413 GlobalDecl GD(D, CtorKind);
414 CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
415 ? ArgTypes.front()
416 : TheCXXABI.hasMostDerivedReturn(GD)
417 ? CGM.getContext().VoidPtrTy
418 : Context.VoidTy;
419
420 FunctionType::ExtInfo Info = FPT->getExtInfo();
421 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
422 // If the prototype args are elided, we should only have ABI-specific args,
423 // which never have param info.
424 if (PassProtoArgs && FPT->hasExtParameterInfos()) {
425 // ABI-specific suffix arguments are treated the same as variadic arguments.
426 addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
427 ArgTypes.size());
428 }
429 return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
430 /*chainCall=*/false, ArgTypes, Info,
431 ParamInfos, Required);
432}
433
434/// Arrange the argument and result information for the declaration or
435/// definition of the given function.
436const CGFunctionInfo &
437CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
438 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
439 if (MD->isInstance())
440 return arrangeCXXMethodDeclaration(MD);
441
442 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
443
444 assert(isa<FunctionType>(FTy));
445 setCUDAKernelCallingConvention(FTy, CGM, FD);
446
447 // When declaring a function without a prototype, always use a
448 // non-variadic type.
449 if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
450 return arrangeLLVMFunctionInfo(
451 noProto->getReturnType(), /*instanceMethod=*/false,
452 /*chainCall=*/false, None, noProto->getExtInfo(), {}, RequiredArgs::All);
453 }
454
455 return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>(), FD);
456}
457
458/// Arrange the argument and result information for the declaration or
459/// definition of an Objective-C method.
460const CGFunctionInfo &
461CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
462 // It happens that this is the same as a call with no optional
463 // arguments, except also using the formal 'self' type.
464 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
465}
466
467/// Arrange the argument and result information for the function type
468/// through which to perform a send to the given Objective-C method,
469/// using the given receiver type. The receiver type is not always
470/// the 'self' type of the method or even an Objective-C pointer type.
471/// This is *not* the right method for actually performing such a
472/// message send, due to the possibility of optional arguments.
473const CGFunctionInfo &
474CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
475 QualType receiverType) {
476 SmallVector<CanQualType, 16> argTys;
477 SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
478 argTys.push_back(Context.getCanonicalParamType(receiverType));
479 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
480 // FIXME: Kill copy?
481 for (const auto *I : MD->parameters()) {
482 argTys.push_back(Context.getCanonicalParamType(I->getType()));
483 auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
484 I->hasAttr<NoEscapeAttr>());
485 extParamInfos.push_back(extParamInfo);
486 }
487
488 FunctionType::ExtInfo einfo;
489 bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
490 einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
491
492 if (getContext().getLangOpts().ObjCAutoRefCount &&
493 MD->hasAttr<NSReturnsRetainedAttr>())
494 einfo = einfo.withProducesResult(true);
495
496 RequiredArgs required =
497 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
498
499 return arrangeLLVMFunctionInfo(
500 GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
501 /*chainCall=*/false, argTys, einfo, extParamInfos, required);
502}
503
504const CGFunctionInfo &
505CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
506 const CallArgList &args) {
507 auto argTypes = getArgTypesForCall(Context, args);
508 FunctionType::ExtInfo einfo;
509
510 return arrangeLLVMFunctionInfo(
511 GetReturnType(returnType), /*instanceMethod=*/false,
512 /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
513}
514
515const CGFunctionInfo &
516CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
517 // FIXME: Do we need to handle ObjCMethodDecl?
518 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
519
520 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
521 return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));
522
523 if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
524 return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));
525
526 return arrangeFunctionDeclaration(FD);
527}
528
529/// Arrange a thunk that takes 'this' as the first parameter followed by
530/// varargs. Return a void pointer, regardless of the actual return type.
531/// The body of the thunk will end in a musttail call to a function of the
532/// correct type, and the caller will bitcast the function to the correct
533/// prototype.
534const CGFunctionInfo &
535CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
536 assert(MD->isVirtual() && "only methods have thunks");
537 CanQual<FunctionProtoType> FTP = GetFormalType(MD);
538 CanQualType ArgTys[] = { GetThisType(Context, MD->getParent(), MD) };
539 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
540 /*chainCall=*/false, ArgTys,
541 FTP->getExtInfo(), {}, RequiredArgs(1));
542}
543
544const CGFunctionInfo &
545CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
546 CXXCtorType CT) {
547 assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
548
549 CanQual<FunctionProtoType> FTP = GetFormalType(CD);
550 SmallVector<CanQualType, 2> ArgTys;
551 const CXXRecordDecl *RD = CD->getParent();
552 ArgTys.push_back(GetThisType(Context, RD, CD));
553 if (CT == Ctor_CopyingClosure)
554 ArgTys.push_back(*FTP->param_type_begin());
555 if (RD->getNumVBases() > 0)
556 ArgTys.push_back(Context.IntTy);
557 CallingConv CC = Context.getDefaultCallingConvention(
558 /*IsVariadic=*/false, /*IsCXXMethod=*/true);
559 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
560 /*chainCall=*/false, ArgTys,
561 FunctionType::ExtInfo(CC), {},
562 RequiredArgs::All);
563}
564
565/// Arrange a call as unto a free function, except possibly with an
566/// additional number of formal parameters considered required.
567static const CGFunctionInfo &
568arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
569 CodeGenModule &CGM,
570 const CallArgList &args,
571 const FunctionType *fnType,
572 unsigned numExtraRequiredArgs,
573 bool chainCall) {
574 assert(args.size() >= numExtraRequiredArgs);
575
576 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
577
578 // In most cases, there are no optional arguments.
579 RequiredArgs required = RequiredArgs::All;
580
581 // If we have a variadic prototype, the required arguments are the
582 // extra prefix plus the arguments in the prototype.
583 if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
584 if (proto->isVariadic())
585 required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
586
587 if (proto->hasExtParameterInfos())
588 addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
589 args.size());
590
591 // If we don't have a prototype at all, but we're supposed to
592 // explicitly use the variadic convention for unprototyped calls,
593 // treat all of the arguments as required but preserve the nominal
594 // possibility of variadics.
595 } else if (CGM.getTargetCodeGenInfo()
596 .isNoProtoCallVariadic(args,
597 cast<FunctionNoProtoType>(fnType))) {
598 required = RequiredArgs(args.size());
599 }
600
601 // FIXME: Kill copy.
602 SmallVector<CanQualType, 16> argTypes;
603 for (const auto &arg : args)
604 argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
605 return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
606 /*instanceMethod=*/false, chainCall,
607 argTypes, fnType->getExtInfo(), paramInfos,
608 required);
609}
610
611/// Figure out the rules for calling a function with the given formal
612/// type using the given arguments. The arguments are necessary
613/// because the function might be unprototyped, in which case it's
614/// target-dependent in crazy ways.
615const CGFunctionInfo &
616CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
617 const FunctionType *fnType,
618 bool chainCall) {
619 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
620 chainCall ? 1 : 0, chainCall);
621}
622
623/// A block function is essentially a free function with an
624/// extra implicit argument.
625const CGFunctionInfo &
626CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
627 const FunctionType *fnType) {
628 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
629 /*chainCall=*/false);
630}
631
632const CGFunctionInfo &
633CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
634 const FunctionArgList &params) {
635 auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
636 auto argTypes = getArgTypesForDeclaration(Context, params);
637
638 return arrangeLLVMFunctionInfo(
639 GetReturnType(proto->getReturnType()),
640 /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
641 proto->getExtInfo(), paramInfos,
642 RequiredArgs::forPrototypePlus(proto, 1, nullptr));
643}
644
645const CGFunctionInfo &
646CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
647 const CallArgList &args) {
648 // FIXME: Kill copy.
649 SmallVector<CanQualType, 16> argTypes;
650 for (const auto &Arg : args)
651 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
652 return arrangeLLVMFunctionInfo(
653 GetReturnType(resultType), /*instanceMethod=*/false,
654 /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
655 /*paramInfos=*/ {}, RequiredArgs::All);
656}
657
658const CGFunctionInfo &
659CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
660 const FunctionArgList &args) {
661 auto argTypes = getArgTypesForDeclaration(Context, args);
662
663 return arrangeLLVMFunctionInfo(
664 GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
665 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
666}
667
668const CGFunctionInfo &
669CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
670 ArrayRef<CanQualType> argTypes) {
671 return arrangeLLVMFunctionInfo(
672 resultType, /*instanceMethod=*/false, /*chainCall=*/false,
673 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
674}
675
676/// Arrange a call to a C++ method, passing the given arguments.
677///
678/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
679/// does not count `this`.
680const CGFunctionInfo &
681CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
682 const FunctionProtoType *proto,
683 RequiredArgs required,
684 unsigned numPrefixArgs) {
685 assert(numPrefixArgs + 1 <= args.size() &&
686 "Emitting a call with less args than the required prefix?");
687 // Add one to account for `this`. It's a bit awkward here, but we don't count
688 // `this` in similar places elsewhere.
689 auto paramInfos =
690 getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
691
692 // FIXME: Kill copy.
693 auto argTypes = getArgTypesForCall(Context, args);
694
695 FunctionType::ExtInfo info = proto->getExtInfo();
696 return arrangeLLVMFunctionInfo(
697 GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
698 /*chainCall=*/false, argTypes, info, paramInfos, required);
699}
700
701const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
702 return arrangeLLVMFunctionInfo(
703 getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
704 None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
705}
706
707const CGFunctionInfo &
708CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
709 const CallArgList &args) {
710 assert(signature.arg_size() <= args.size());
711 if (signature.arg_size() == args.size())
712 return signature;
713
714 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
715 auto sigParamInfos = signature.getExtParameterInfos();
716 if (!sigParamInfos.empty()) {
717 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
718 paramInfos.resize(args.size());
719 }
720
721 auto argTypes = getArgTypesForCall(Context, args);
722
723 assert(signature.getRequiredArgs().allowsOptionalArgs());
724 return arrangeLLVMFunctionInfo(signature.getReturnType(),
725 signature.isInstanceMethod(),
726 signature.isChainCall(),
727 argTypes,
728 signature.getExtInfo(),
729 paramInfos,
730 signature.getRequiredArgs());
731}
732
733namespace clang {
734namespace CodeGen {
735void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
736}
737}
738
739/// Arrange the argument and result information for an abstract value
740/// of a given function type. This is the method which all of the
741/// above functions ultimately defer to.
742const CGFunctionInfo &
743CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
744 bool instanceMethod,
745 bool chainCall,
746 ArrayRef<CanQualType> argTypes,
747 FunctionType::ExtInfo info,
748 ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
749 RequiredArgs required) {
750 assert(llvm::all_of(argTypes,
751 [](CanQualType T) { return T.isCanonicalAsParam(); }));
752
753 // Lookup or create unique function info.
754 llvm::FoldingSetNodeID ID;
755 CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
756 required, resultType, argTypes);
757
758 void *insertPos = nullptr;
759 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
760 if (FI)
761 return *FI;
762
763 unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
764
765 // Construct the function info. We co-allocate the ArgInfos.
766 FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
767 paramInfos, resultType, argTypes, required);
768 FunctionInfos.InsertNode(FI, insertPos);
769
770 bool inserted = FunctionsBeingProcessed.insert(FI).second;
771 (void)inserted;
772 assert(inserted && "Recursively being processed?");
773
774 // Compute ABI information.
775 if (CC == llvm::CallingConv::SPIR_KERNEL) {
776 // Force target independent argument handling for the host visible
777 // kernel functions.
778 computeSPIRKernelABIInfo(CGM, *FI);
779 } else if (info.getCC() == CC_Swift) {
780 swiftcall::computeABIInfo(CGM, *FI);
781 } else {
782 getABIInfo().computeInfo(*FI);
783 }
784
785 // Loop over all of the computed argument and return value info. If any of
786 // them are direct or extend without a specified coerce type, specify the
787 // default now.
788 ABIArgInfo &retInfo = FI->getReturnInfo();
789 if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
790 retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
791
792 for (auto &I : FI->arguments())
793 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
794 I.info.setCoerceToType(ConvertType(I.type));
795
796 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
797 assert(erased && "Not in set?");
798
799 return *FI;
800}
801
802CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
803 bool instanceMethod,
804 bool chainCall,
805 const FunctionType::ExtInfo &info,
806 ArrayRef<ExtParameterInfo> paramInfos,
807 CanQualType resultType,
808 ArrayRef<CanQualType> argTypes,
809 RequiredArgs required) {
810 assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
811
812 void *buffer =
813 operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
814 argTypes.size() + 1, paramInfos.size()));
815
816 CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
817 FI->CallingConvention = llvmCC;
818 FI->EffectiveCallingConvention = llvmCC;
819 FI->ASTCallingConvention = info.getCC();
820 FI->InstanceMethod = instanceMethod;
821 FI->ChainCall = chainCall;
822 FI->NoReturn = info.getNoReturn();
823 FI->ReturnsRetained = info.getProducesResult();
824 FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
825 FI->NoCfCheck = info.getNoCfCheck();
826 FI->Required = required;
827 FI->HasRegParm = info.getHasRegParm();
828 FI->RegParm = info.getRegParm();
829 FI->ArgStruct = nullptr;
830 FI->ArgStructAlign = 0;
831 FI->NumArgs = argTypes.size();
832 FI->HasExtParameterInfos = !paramInfos.empty();
833 FI->getArgsBuffer()[0].type = resultType;
834 for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
835 FI->getArgsBuffer()[i + 1].type = argTypes[i];
836 for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
837 FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
838 return FI;
839}
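
Note: the "co-allocate" comment refers to the llvm::TrailingObjects layout: the ArgInfo and ExtParameterInfo arrays live in the same allocation as the CGFunctionInfo itself. A minimal sketch of the idea in plain C++ (simplified, not the real class):

    #include <new>
    struct Packed {
      unsigned NumArgs;
      // The array lives immediately after the header in the same allocation.
      int *args() { return reinterpret_cast<int *>(this + 1); }
      static Packed *create(unsigned N) {
        void *Mem = operator new(sizeof(Packed) + N * sizeof(int));
        Packed *P = new (Mem) Packed();
        P->NumArgs = N;
        return P;   // one allocation holds the header and the trailing array
      }
    };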
840
841/***/
842
843namespace {
844// ABIArgInfo::Expand implementation.
845
846// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
847struct TypeExpansion {
848 enum TypeExpansionKind {
849 // Elements of constant arrays are expanded recursively.
850 TEK_ConstantArray,
851 // Record fields are expanded recursively (but if record is a union, only
852 // the field with the largest size is expanded).
853 TEK_Record,
854 // For complex types, real and imaginary parts are expanded recursively.
855 TEK_Complex,
856 // All other types are not expandable.
857 TEK_None
858 };
859
860 const TypeExpansionKind Kind;
861
862 TypeExpansion(TypeExpansionKind K) : Kind(K) {}
863 virtual ~TypeExpansion() {}
864};
865
866struct ConstantArrayExpansion : TypeExpansion {
867 QualType EltTy;
868 uint64_t NumElts;
869
870 ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
871 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
872 static bool classof(const TypeExpansion *TE) {
873 return TE->Kind == TEK_ConstantArray;
874 }
875};
876
877struct RecordExpansion : TypeExpansion {
878 SmallVector<const CXXBaseSpecifier *, 1> Bases;
879
880 SmallVector<const FieldDecl *, 1> Fields;
881
882 RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
883 SmallVector<const FieldDecl *, 1> &&Fields)
884 : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
885 Fields(std::move(Fields)) {}
886 static bool classof(const TypeExpansion *TE) {
887 return TE->Kind == TEK_Record;
888 }
889};
890
891struct ComplexExpansion : TypeExpansion {
892 QualType EltTy;
893
894 ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
895 static bool classof(const TypeExpansion *TE) {
896 return TE->Kind == TEK_Complex;
897 }
898};
899
900struct NoExpansion : TypeExpansion {
901 NoExpansion() : TypeExpansion(TEK_None) {}
902 static bool classof(const TypeExpansion *TE) {
903 return TE->Kind == TEK_None;
904 }
905};
906} // namespace
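
Note: the classof() hooks above implement LLVM-style RTTI: isa<> and dyn_cast<> call classof() to test the stored Kind tag instead of using C++ RTTI. For example (hypothetical 'visit'):

    if (auto *RE = dyn_cast<RecordExpansion>(Exp.get()))  // true iff Kind == TEK_Record
      visit(RE->Bases, RE->Fields);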
907
908static std::unique_ptr<TypeExpansion>
909getTypeExpansion(QualType Ty, const ASTContext &Context) {
910 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
911 return llvm::make_unique<ConstantArrayExpansion>(
912 AT->getElementType(), AT->getSize().getZExtValue());
913 }
914 if (const RecordType *RT = Ty->getAs<RecordType>()) {
915 SmallVector<const CXXBaseSpecifier *, 1> Bases;
916 SmallVector<const FieldDecl *, 1> Fields;
917 const RecordDecl *RD = RT->getDecl();
918 assert(!RD->hasFlexibleArrayMember() &&
919 "Cannot expand structure with flexible array.");
920 if (RD->isUnion()) {
921 // Unions can be here only in degenerative cases - all the fields are same
922 // after flattening. Thus we have to use the "largest" field.
923 const FieldDecl *LargestFD = nullptr;
924 CharUnits UnionSize = CharUnits::Zero();
925
926 for (const auto *FD : RD->fields()) {
927 if (FD->isZeroLengthBitField(Context))
928 continue;
929 assert(!FD->isBitField() &&
930 "Cannot expand structure with bit-field members.");
931 CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
932 if (UnionSize < FieldSize) {
933 UnionSize = FieldSize;
934 LargestFD = FD;
935 }
936 }
937 if (LargestFD)
938 Fields.push_back(LargestFD);
939 } else {
940 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
941 assert(!CXXRD->isDynamicClass() &&
942 "cannot expand vtable pointers in dynamic classes");
943 for (const CXXBaseSpecifier &BS : CXXRD->bases())
944 Bases.push_back(&BS);
945 }
946
947 for (const auto *FD : RD->fields()) {
948 if (FD->isZeroLengthBitField(Context))
949 continue;
950 assert(!FD->isBitField() &&
951 "Cannot expand structure with bit-field members.");
952 Fields.push_back(FD);
953 }
954 }
955 return llvm::make_unique<RecordExpansion>(std::move(Bases),
956 std::move(Fields));
957 }
958 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
959 return llvm::make_unique<ComplexExpansion>(CT->getElementType());
960 }
961 return llvm::make_unique<NoExpansion>();
962}
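
Note: a worked example of the flattening rules (hypothetical struct, using Clang's _Complex extension):

    struct P {
      int a[2];          // TEK_ConstantArray: 2 ints
      _Complex float c;  // TEK_Complex: real + imaginary parts
    };
    // getExpansionSize(P) == 4; getExpandedTypes yields i32, i32, float, float.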
963
964static int getExpansionSize(QualType Ty, const ASTContext &Context) {
965 auto Exp = getTypeExpansion(Ty, Context);
966 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
967 return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
968 }
969 if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
970 int Res = 0;
971 for (auto BS : RExp->Bases)
972 Res += getExpansionSize(BS->getType(), Context);
973 for (auto FD : RExp->Fields)
974 Res += getExpansionSize(FD->getType(), Context);
975 return Res;
976 }
977 if (isa<ComplexExpansion>(Exp.get()))
978 return 2;
979 assert(isa<NoExpansion>(Exp.get()));
980 return 1;
981}
982
983void
984CodeGenTypes::getExpandedTypes(QualType Ty,
985 SmallVectorImpl<llvm::Type *>::iterator &TI) {
986 auto Exp = getTypeExpansion(Ty, Context);
987 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
988 for (int i = 0, n = CAExp->NumElts; i < n; i++) {
989 getExpandedTypes(CAExp->EltTy, TI);
990 }
991 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
992 for (auto BS : RExp->Bases)
993 getExpandedTypes(BS->getType(), TI);
994 for (auto FD : RExp->Fields)
995 getExpandedTypes(FD->getType(), TI);
996 } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
997 llvm::Type *EltTy = ConvertType(CExp->EltTy);
998 *TI++ = EltTy;
999 *TI++ = EltTy;
1000 } else {
1001 assert(isa<NoExpansion>(Exp.get()));
1002 *TI++ = ConvertType(Ty);
1003 }
1004}
1005
1006static void forConstantArrayExpansion(CodeGenFunction &CGF,
1007 ConstantArrayExpansion *CAE,
1008 Address BaseAddr,
1009 llvm::function_ref<void(Address)> Fn) {
1010 CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
1011 CharUnits EltAlign =
1012 BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
1013
1014 for (int i = 0, n = CAE->NumElts; i < n; i++) {
1015 llvm::Value *EltAddr =
1016 CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
1017 Fn(Address(EltAddr, EltAlign));
1018 }
1019}
1020
1021void CodeGenFunction::ExpandTypeFromArgs(
1022 QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
1023 assert(LV.isSimple() &&
1024 "Unexpected non-simple lvalue during struct expansion.");
1025
1026 auto Exp = getTypeExpansion(Ty, getContext());
1027 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1028 forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
1029 [&](Address EltAddr) {
1030 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1031 ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1032 });
1033 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1034 Address This = LV.getAddress();
1035 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1036 // Perform a single step derived-to-base conversion.
1037 Address Base =
1038 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1039 /*NullCheckValue=*/false, SourceLocation());
1040 LValue SubLV = MakeAddrLValue(Base, BS->getType());
1041
1042 // Recurse onto bases.
1043 ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1044 }
1045 for (auto FD : RExp->Fields) {
1046 // FIXME: What are the right qualifiers here?
1047 LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1048 ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1049 }
1050 } else if (isa<ComplexExpansion>(Exp.get())) {
1051 auto realValue = *AI++;
1052 auto imagValue = *AI++;
1053 EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1054 } else {
1055 assert(isa<NoExpansion>(Exp.get()));
1056 EmitStoreThroughLValue(RValue::get(*AI++), LV);
1057 }
1058}
1059
1060void CodeGenFunction::ExpandTypeToArgs(
1061 QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1062 SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1063 auto Exp = getTypeExpansion(Ty, getContext());
1064 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1065 Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1066 : Arg.getKnownRValue().getAggregateAddress();
1067 forConstantArrayExpansion(
1068 *this, CAExp, Addr, [&](Address EltAddr) {
1069 CallArg EltArg = CallArg(
1070 convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1071 CAExp->EltTy);
1072 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1073 IRCallArgPos);
1074 });
1075 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1076 Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1077 : Arg.getKnownRValue().getAggregateAddress();
1078 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1079 // Perform a single step derived-to-base conversion.
1080 Address Base =
1081 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1082 /*NullCheckValue=*/false, SourceLocation());
1083 CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1084
1085 // Recurse onto bases.
1086 ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1087 IRCallArgPos);
1088 }
1089
1090 LValue LV = MakeAddrLValue(This, Ty);
1091 for (auto FD : RExp->Fields) {
1092 CallArg FldArg =
1093 CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1094 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1095 IRCallArgPos);
1096 }
1097 } else if (isa<ComplexExpansion>(Exp.get())) {
1098 ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
1099 IRCallArgs[IRCallArgPos++] = CV.first;
1100 IRCallArgs[IRCallArgPos++] = CV.second;
1101 } else {
1102 assert(isa<NoExpansion>(Exp.get()));
1103 auto RV = Arg.getKnownRValue();
1104 assert(RV.isScalar() &&
1105 "Unexpected non-scalar rvalue during struct expansion.");
1106
1107 // Insert a bitcast as needed.
1108 llvm::Value *V = RV.getScalarVal();
1109 if (IRCallArgPos < IRFuncTy->getNumParams() &&
1110 V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1111 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1112
1113 IRCallArgs[IRCallArgPos++] = V;
1114 }
1115}
1116
1117/// Create a temporary allocation for the purposes of coercion.
1118static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1119 CharUnits MinAlign) {
1120 // Don't use an alignment that's worse than what LLVM would prefer.
1121 auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1122 CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1123
1124 return CGF.CreateTempAlloca(Ty, Align);
1125}
1126
1127/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1128/// accessing some number of bytes out of it, try to gep into the struct to get
1129/// at its inner goodness. Dive as deep as possible without entering an element
1130/// with an in-memory size smaller than DstSize.
1131static Address
1132EnterStructPointerForCoercedAccess(Address SrcPtr,
1133 llvm::StructType *SrcSTy,
1134 uint64_t DstSize, CodeGenFunction &CGF) {
1135 // We can't dive into a zero-element struct.
1136 if (SrcSTy->getNumElements() == 0) return SrcPtr;
1137
1138 llvm::Type *FirstElt = SrcSTy->getElementType(0);
1139
1140 // If the first elt is at least as large as what we're looking for, or if the
1141 // first element is the same size as the whole struct, we can enter it. The
1142 // comparison must be made on the store size and not the alloca size. Using
1143 // the alloca size may overstate the size of the load.
1144 uint64_t FirstEltSize =
1145 CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1146 if (FirstEltSize < DstSize &&
1147 FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1148 return SrcPtr;
1149
1150 // GEP into the first element.
1151 SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");
1152
1153 // If the first element is a struct, recurse.
1154 llvm::Type *SrcTy = SrcPtr.getElementType();
1155 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1156 return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1157
1158 return SrcPtr;
1159}
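A small illustration of the dive, with assumed types and sizes:

struct Inner { long v; };   // store size 8
struct Outer { Inner in; }; // first element spans the whole struct
// For an 8-byte access through an Outer*, the first element's store size
// (8) is not smaller than DstSize, so the function GEPs into 'in' and then
// recurses into Inner. Had the first element been a 4-byte int under an
// 8-byte request, both guard conditions would hold and SrcPtr would be
// returned unchanged.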
1160
1161/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1162/// are either integers or pointers. This does a truncation of the value if it
1163/// is too large or a zero extension if it is too small.
1164///
1165/// This behaves as if the value were coerced through memory, so on big-endian
1166/// targets the high bits are preserved in a truncation, while little-endian
1167/// targets preserve the low bits.
1168static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1169 llvm::Type *Ty,
1170 CodeGenFunction &CGF) {
1171 if (Val->getType() == Ty)
1172 return Val;
1173
1174 if (isa<llvm::PointerType>(Val->getType())) {
1175 // If this is Pointer->Pointer avoid conversion to and from int.
1176 if (isa<llvm::PointerType>(Ty))
1177 return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1178
1179 // Convert the pointer to an integer so we can play with its width.
1180 Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1181 }
1182
1183 llvm::Type *DestIntTy = Ty;
1184 if (isa<llvm::PointerType>(DestIntTy))
1185 DestIntTy = CGF.IntPtrTy;
1186
1187 if (Val->getType() != DestIntTy) {
1188 const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1189 if (DL.isBigEndian()) {
1190 // Preserve the high bits on big-endian targets.
1191 // That is what memory coercion does.
1192 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1193 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1194
1195 if (SrcSize > DstSize) {
1196 Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1197 Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1198 } else {
1199 Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1200 Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1201 }
1202 } else {
1203 // Little-endian targets preserve the low bits. No shifts required.
1204 Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1205 }
1206 }
1207
1208 if (isa<llvm::PointerType>(Ty))
1209 Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1210 return Val;
1211}
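The endianness rule above mirrors what a round trip through memory would produce. A minimal standalone sketch of the reference semantics (host-side model only, not the emitted IR):

#include <cstdint>
#include <cstring>

// Truncating 0x1122334455667788 to 32 bits "as if through memory":
// a little-endian host keeps the low bytes (0x55667788); a big-endian
// host would keep the high bytes (0x11223344), which the LShr in the
// function above reproduces without a store/load pair.
uint32_t truncThroughMemory(uint64_t v) {
  uint32_t out;
  std::memcpy(&out, &v, sizeof(out)); // first 4 bytes in memory order
  return out;
}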
1212
1213
1214
1215/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1216/// a pointer to an object of type \arg Ty, known to be aligned to
1217/// \arg SrcAlign bytes.
1218///
1219/// This safely handles the case when the src type is smaller than the
1220 /// destination type; in this situation the values of bits which are not
1221 /// present in the src are undefined.
1222static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1223 CodeGenFunction &CGF) {
1224 llvm::Type *SrcTy = Src.getElementType();
1225
1226 // If SrcTy and Ty are the same, just do a load.
1227 if (SrcTy == Ty)
1228 return CGF.Builder.CreateLoad(Src);
1229
1230 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1231
1232 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1233 Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
1234 SrcTy = Src.getType()->getElementType();
1235 }
1236
1237 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1238
1239 // If the source and destination are integer or pointer types, just do an
1240 // extension or truncation to the desired type.
1241 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1242 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1243 llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1244 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1245 }
1246
1247 // If load is legal, just bitcast the src pointer.
1248 if (SrcSize >= DstSize) {
1249 // Generally SrcSize is never greater than DstSize, since this means we are
1250 // losing bits. However, this can happen in cases where the structure has
1251 // additional padding, for example due to a user specified alignment.
1252 //
1253 // FIXME: Assert that we aren't truncating non-padding bits when we have
1254 // access to that information.
1255 Src = CGF.Builder.CreateBitCast(Src,
1256 Ty->getPointerTo(Src.getAddressSpace()));
1257 return CGF.Builder.CreateLoad(Src);
1258 }
1259
1260 // Otherwise do coercion through memory. This is stupid, but simple.
1261 Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
1262 Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty);
1263 Address SrcCasted = CGF.Builder.CreateElementBitCast(Src,CGF.Int8Ty);
1264 CGF.Builder.CreateMemCpy(Casted, SrcCasted,
1265 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
1266 false);
1267 return CGF.Builder.CreateLoad(Tmp);
1268}
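For the memcpy fallback, the undefined-upper-bits contract can be sketched in plain C++ (a model of the semantics, not the generated code):

#include <cstdint>
#include <cstring>

// SrcSize (4) < DstSize (8): only the copied bytes of the result are
// defined. The temporary is zeroed here to keep the sketch well defined;
// the real codegen leaves the remaining bytes undefined.
uint64_t coercedLoad(const uint32_t *src) {
  uint64_t tmp = 0;                     // stands in for the temp alloca
  std::memcpy(&tmp, src, sizeof(*src)); // memcpy of SrcSize bytes
  return tmp;                           // load of the full destination type
}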
1269
1270// Function to store a first-class aggregate into memory. We prefer to
1271// store the elements rather than the aggregate to be more friendly to
1272// fast-isel.
1273// FIXME: Do we need to recurse here?
1274static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
1275 Address Dest, bool DestIsVolatile) {
1276 // Prefer scalar stores to first-class aggregate stores.
1277 if (llvm::StructType *STy =
1278 dyn_cast<llvm::StructType>(Val->getType())) {
1279 const llvm::StructLayout *Layout =
1280 CGF.CGM.getDataLayout().getStructLayout(STy);
1281
1282 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1283 auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
1284 Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
1285 llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
1286 CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1287 }
1288 } else {
1289 CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
1290 }
1291}
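For illustration, a hedged sketch of what this flattening means for a concrete type (the struct and its lowering are assumed, typical of x86-64):

// A C++ type lowered to the LLVM first-class aggregate { i32, i64 }:
struct Pair { int a; long b; };
// For a Pair-typed Val, BuildAggStore emits one extractvalue plus one
// scalar store per element, at each element's StructLayout offset,
// instead of a single store of the whole { i32, i64 } value.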
1292
1293/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1294/// where the source and destination may have different types. The
1295/// destination is known to be aligned to \arg DstAlign bytes.
1296///
1297/// This safely handles the case when the src type is larger than the
1298/// destination type; the upper bits of the src will be lost.
1299static void CreateCoercedStore(llvm::Value *Src,
1300 Address Dst,
1301 bool DstIsVolatile,
1302 CodeGenFunction &CGF) {
1303 llvm::Type *SrcTy = Src->getType();
1304 llvm::Type *DstTy = Dst.getType()->getElementType();
1305 if (SrcTy == DstTy) {
1306 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1307 return;
1308 }
1309
1310 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1311
1312 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1313 Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
1314 DstTy = Dst.getType()->getElementType();
1315 }
1316
1317 // If the source and destination are integer or pointer types, just do an
1318 // extension or truncation to the desired type.
1319 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1320 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1321 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1322 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1323 return;
1324 }
1325
1326 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1327
1328 // If store is legal, just bitcast the src pointer.
1329 if (SrcSize <= DstSize) {
1330 Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1331 BuildAggStore(CGF, Src, Dst, DstIsVolatile);
1332 } else {
1333 // Otherwise do coercion through memory. This is stupid, but
1334 // simple.
1335
1336 // Generally SrcSize is never greater than DstSize, since this means we are
1337 // losing bits. However, this can happen in cases where the structure has
1338 // additional padding, for example due to a user specified alignment.
1339 //
1340 // FIXME: Assert that we aren't truncating non-padding bits when we have
1341 // access to that information.
1342 Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1343 CGF.Builder.CreateStore(Src, Tmp);
1344 Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty);
1345 Address DstCasted = CGF.Builder.CreateElementBitCast(Dst,CGF.Int8Ty);
1346 CGF.Builder.CreateMemCpy(DstCasted, Casted,
1347 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
1348 false);
1349 }
1350}
1351
1352static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1353 const ABIArgInfo &info) {
1354 if (unsigned offset = info.getDirectOffset()) {
1355 addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1356 addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1357 CharUnits::fromQuantity(offset));
1358 addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1359 }
1360 return addr;
1361}
1362
1363namespace {
1364
1365/// Encapsulates information about the way function arguments from
1366 /// CGFunctionInfo should be passed to the actual LLVM IR function.
1367class ClangToLLVMArgMapping {
1368 static const unsigned InvalidIndex = ~0U;
1369 unsigned InallocaArgNo;
1370 unsigned SRetArgNo;
1371 unsigned TotalIRArgs;
1372
1373 /// Arguments of LLVM IR function corresponding to single Clang argument.
1374 struct IRArgs {
1375 unsigned PaddingArgIndex;
1376 // Argument is expanded to IR arguments at positions
1377 // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1378 unsigned FirstArgIndex;
1379 unsigned NumberOfArgs;
1380
1381 IRArgs()
1382 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1383 NumberOfArgs(0) {}
1384 };
1385
1386 SmallVector<IRArgs, 8> ArgInfo;
1387
1388public:
1389 ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1390 bool OnlyRequiredArgs = false)
1391 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1392 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1393 construct(Context, FI, OnlyRequiredArgs);
1394 }
1395
1396 bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1397 unsigned getInallocaArgNo() const {
1398 assert(hasInallocaArg());
1399 return InallocaArgNo;
1400 }
1401
1402 bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1403 unsigned getSRetArgNo() const {
1404 assert(hasSRetArg());
1405 return SRetArgNo;
1406 }
1407
1408 unsigned totalIRArgs() const { return TotalIRArgs; }
1409
1410 bool hasPaddingArg(unsigned ArgNo) const {
1411 assert(ArgNo < ArgInfo.size());
1412 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1413 }
1414 unsigned getPaddingArgNo(unsigned ArgNo) const {
1415 assert(hasPaddingArg(ArgNo));
1416 return ArgInfo[ArgNo].PaddingArgIndex;
1417 }
1418
1419 /// Returns the index of the first IR argument corresponding to ArgNo,
1420 /// and the number of IR arguments it expands to.
1421 std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1422 assert(ArgNo < ArgInfo.size());
1423 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1424 ArgInfo[ArgNo].NumberOfArgs);
1425 }
1426
1427private:
1428 void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1429 bool OnlyRequiredArgs);
1430};
1431
1432void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1433 const CGFunctionInfo &FI,
1434 bool OnlyRequiredArgs) {
1435 unsigned IRArgNo = 0;
1436 bool SwapThisWithSRet = false;
1437 const ABIArgInfo &RetAI = FI.getReturnInfo();
1438
1439 if (RetAI.getKind() == ABIArgInfo::Indirect) {
1440 SwapThisWithSRet = RetAI.isSRetAfterThis();
1441 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1442 }
1443
1444 unsigned ArgNo = 0;
1445 unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1446 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1447 ++I, ++ArgNo) {
1448 assert(I != FI.arg_end());
1449 QualType ArgType = I->type;
1450 const ABIArgInfo &AI = I->info;
1451 // Collect data about IR arguments corresponding to Clang argument ArgNo.
1452 auto &IRArgs = ArgInfo[ArgNo];
1453
1454 if (AI.getPaddingType())
1455 IRArgs.PaddingArgIndex = IRArgNo++;
1456
1457 switch (AI.getKind()) {
1458 case ABIArgInfo::Extend:
1459 case ABIArgInfo::Direct: {
1460 // FIXME: handle sseregparm someday...
1461 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1462 if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1463 IRArgs.NumberOfArgs = STy->getNumElements();
1464 } else {
1465 IRArgs.NumberOfArgs = 1;
1466 }
1467 break;
1468 }
1469 case ABIArgInfo::Indirect:
1470 IRArgs.NumberOfArgs = 1;
1471 break;
1472 case ABIArgInfo::Ignore:
1473 case ABIArgInfo::InAlloca:
1474 // Ignore and InAlloca don't have matching LLVM parameters.
1475 IRArgs.NumberOfArgs = 0;
1476 break;
1477 case ABIArgInfo::CoerceAndExpand:
1478 IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1479 break;
1480 case ABIArgInfo::Expand:
1481 IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1482 break;
1483 }
1484
1485 if (IRArgs.NumberOfArgs > 0) {
1486 IRArgs.FirstArgIndex = IRArgNo;
1487 IRArgNo += IRArgs.NumberOfArgs;
1488 }
1489
1490 // Skip over the sret parameter when it comes second. We already handled it
1491 // above.
1492 if (IRArgNo == 1 && SwapThisWithSRet)
1493 IRArgNo++;
1494 }
1495 assert(ArgNo == ArgInfo.size());
1496
1497 if (FI.usesInAlloca())
1498 InallocaArgNo = IRArgNo++;
1499
1500 TotalIRArgs = IRArgNo;
1501}
1502} // namespace
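A worked example may help; the signature and its classification are hypothetical (real classification depends on the target ABI), chosen so the mapping exercises both the sret slot and argument expansion:

struct Widget { int x, y; };
Widget make(int a, Widget b); // assume: indirect (sret) return, Expand for 'b'
// Plausible ClangToLLVMArgMapping for the resulting CGFunctionInfo:
//   SRetArgNo     = 0      // IR arg 0 is the sret pointer
//   getIRArgs(0)  = {1, 1} // 'a' -> IR arg 1
//   getIRArgs(1)  = {2, 2} // 'b' -> IR args 2 and 3, one per field
//   totalIRArgs() = 4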
1503
1504/***/
1505
1506bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1507 const auto &RI = FI.getReturnInfo();
1508 return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1509}
1510
1511bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1512 return ReturnTypeUsesSRet(FI) &&
1513 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1514}
1515
1516bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1517 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1518 switch (BT->getKind()) {
1519 default:
1520 return false;
1521 case BuiltinType::Float:
1522 return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1523 case BuiltinType::Double:
1524 return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1525 case BuiltinType::LongDouble:
1526 return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1527 }
1528 }
1529
1530 return false;
1531}
1532
1533bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1534 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1535 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1536 if (BT->getKind() == BuiltinType::LongDouble)
1537 return getTarget().useObjCFP2RetForComplexLongDouble();
1538 }
1539 }
1540
1541 return false;
1542}
1543
1544llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1545 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1546 return GetFunctionType(FI);
1547}
1548
1549llvm::FunctionType *
1550CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1551
1552 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1553 (void)Inserted;
1554 assert(Inserted && "Recursively being processed?");
1555
1556 llvm::Type *resultType = nullptr;
1557 const ABIArgInfo &retAI = FI.getReturnInfo();
1558 switch (retAI.getKind()) {
1559 case ABIArgInfo::Expand:
1560 llvm_unreachable("Invalid ABI kind for return argument");
1561
1562 case ABIArgInfo::Extend:
1563 case ABIArgInfo::Direct:
1564 resultType = retAI.getCoerceToType();
1565 break;
1566
1567 case ABIArgInfo::InAlloca:
1568 if (retAI.getInAllocaSRet()) {
1569 // sret things on win32 aren't void; they return the sret pointer.
1570 QualType ret = FI.getReturnType();
1571 llvm::Type *ty = ConvertType(ret);
1572 unsigned addressSpace = Context.getTargetAddressSpace(ret);
1573 resultType = llvm::PointerType::get(ty, addressSpace);
1574 } else {
1575 resultType = llvm::Type::getVoidTy(getLLVMContext());
1576 }
1577 break;
1578
1579 case ABIArgInfo::Indirect:
1580 case ABIArgInfo::Ignore:
1581 resultType = llvm::Type::getVoidTy(getLLVMContext());
1582 break;
1583
1584 case ABIArgInfo::CoerceAndExpand:
1585 resultType = retAI.getUnpaddedCoerceAndExpandType();
1586 break;
1587 }
1588
1589 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1590 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1591
1592 // Add type for sret argument.
1593 if (IRFunctionArgs.hasSRetArg()) {
1594 QualType Ret = FI.getReturnType();
1595 llvm::Type *Ty = ConvertType(Ret);
1596 unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1597 ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1598 llvm::PointerType::get(Ty, AddressSpace);
1599 }
1600
1601 // Add type for inalloca argument.
1602 if (IRFunctionArgs.hasInallocaArg()) {
1603 auto ArgStruct = FI.getArgStruct();
1604 assert(ArgStruct);
1605 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1606 }
1607
1608 // Add in all of the required arguments.
1609 unsigned ArgNo = 0;
1610 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1611 ie = it + FI.getNumRequiredArgs();
1612 for (; it != ie; ++it, ++ArgNo) {
1613 const ABIArgInfo &ArgInfo = it->info;
1614
1615 // Insert a padding type to ensure proper alignment.
1616 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1617 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1618 ArgInfo.getPaddingType();
1619
1620 unsigned FirstIRArg, NumIRArgs;
1621 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1622
1623 switch (ArgInfo.getKind()) {
1624 case ABIArgInfo::Ignore:
1625 case ABIArgInfo::InAlloca:
1626 assert(NumIRArgs == 0);
1627 break;
1628
1629 case ABIArgInfo::Indirect: {
1630 assert(NumIRArgs == 1);
1631 // Indirect arguments are always on the stack, which is the alloca addr space.
1632 llvm::Type *LTy = ConvertTypeForMem(it->type);
1633 ArgTypes[FirstIRArg] = LTy->getPointerTo(
1634 CGM.getDataLayout().getAllocaAddrSpace());
1635 break;
1636 }
1637
1638 case ABIArgInfo::Extend:
1639 case ABIArgInfo::Direct: {
1640 // Fast-isel and the optimizer generally like scalar values better than
1641 // FCAs, so we flatten them if this is safe to do for this argument.
1642 llvm::Type *argType = ArgInfo.getCoerceToType();
1643 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1644 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1645 assert(NumIRArgs == st->getNumElements());
1646 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1647 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1648 } else {
1649 assert(NumIRArgs == 1);
1650 ArgTypes[FirstIRArg] = argType;
1651 }
1652 break;
1653 }
1654
1655 case ABIArgInfo::CoerceAndExpand: {
1656 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1657 for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1658 *ArgTypesIter++ = EltTy;
1659 }
1660 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1661 break;
1662 }
1663
1664 case ABIArgInfo::Expand:
1665 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1666 getExpandedTypes(it->type, ArgTypesIter);
1667 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1668 break;
1669 }
1670 }
1671
1672 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1673 assert(Erased && "Not in set?");
1674
1675 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1676}
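Tying the cases together, a hedged example of the end result (signature hypothetical; assumes x86-64 SysV classification, where both the return and the parameters are Direct with identity coercions):

long scale(long v, double f);
// GetFunctionType would build the equivalent of:
//   llvm::FunctionType::get(/*Result=*/Int64Ty,
//                           /*Params=*/{Int64Ty, DoubleTy},
//                           /*isVarArg=*/false)
// i.e. the textual IR type: i64 (i64, double)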
1677
1678llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1679 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1680 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1681
1682 if (!isFuncTypeConvertible(FPT))
1683 return llvm::StructType::get(getLLVMContext());
1684
1685 const CGFunctionInfo *Info;
1686 if (isa<CXXDestructorDecl>(MD))
1687 Info =
1688 &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
1689 else
1690 Info = &arrangeCXXMethodDeclaration(MD);
1691 return GetFunctionType(*Info);
1692}
1693
1694static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1695 llvm::AttrBuilder &FuncAttrs,
1696 const FunctionProtoType *FPT) {
1697 if (!FPT)
1698 return;
1699
1700 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1701 FPT->isNothrow())
1702 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1703}
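A minimal sketch of the effect (declarations hypothetical):

void quiet() noexcept; // resolved, non-throwing -> adds llvm::Attribute::NoUnwind
void loud();           // may throw -> nothing is added here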
1704
1705void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1706 bool AttrOnCallSite,
1707 llvm::AttrBuilder &FuncAttrs) {
1708 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1709 if (!HasOptnone) {
1710 if (CodeGenOpts.OptimizeSize)
1711 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1712 if (CodeGenOpts.OptimizeSize == 2)
1713 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1714 }
1715
1716 if (CodeGenOpts.DisableRedZone)
1717 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1718 if (CodeGenOpts.IndirectTlsSegRefs)
1719 FuncAttrs.addAttribute("indirect-tls-seg-refs");
1720 if (CodeGenOpts.NoImplicitFloat)
1721 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1722
1723 if (AttrOnCallSite) {
1724 // Attributes that should go on the call site only.
1725 if (!CodeGenOpts.SimplifyLibCalls ||
1726 CodeGenOpts.isNoBuiltinFunc(Name.data()))
1727 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1728 if (!CodeGenOpts.TrapFuncName.empty())
1729 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1730 } else {
1731 // Attributes that should go on the function, but not the call site.
1732 if (!CodeGenOpts.DisableFPElim) {
1733 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1734 } else if (CodeGenOpts.OmitLeafFramePointer) {
1735 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1736 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1737 } else {
1738 FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1739 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1740 }
1741
1742 FuncAttrs.addAttribute("less-precise-fpmad",
1743 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1744
1745 if (CodeGenOpts.NullPointerIsValid)
1746 FuncAttrs.addAttribute("null-pointer-is-valid", "true");
1747 if (!CodeGenOpts.FPDenormalMode.empty())
1748 FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
1749
1750 FuncAttrs.addAttribute("no-trapping-math",
1751 llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1752
1753 // Strict (compliant) code is the default, so only add this attribute to
1754 // indicate that we are trying to workaround a problem case.
1755 if (!CodeGenOpts.StrictFloatCastOverflow)
1756 FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1757
1758 // TODO: Are these all needed?
1759 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1760 FuncAttrs.addAttribute("no-infs-fp-math",
1761 llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1762 FuncAttrs.addAttribute("no-nans-fp-math",
1763 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1764 FuncAttrs.addAttribute("unsafe-fp-math",
1765 llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1766 FuncAttrs.addAttribute("use-soft-float",
1767 llvm::toStringRef(CodeGenOpts.SoftFloat));
1768 FuncAttrs.addAttribute("stack-protector-buffer-size",
1769 llvm::utostr(CodeGenOpts.SSPBufferSize));
1770 FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1771 llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1772 FuncAttrs.addAttribute(
1773 "correctly-rounded-divide-sqrt-fp-math",
1774 llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1775
1776 if (getLangOpts().OpenCL)
1777 FuncAttrs.addAttribute("denorms-are-zero",
1778 llvm::toStringRef(CodeGenOpts.FlushDenorm));
1779
1780 // TODO: Reciprocal estimate codegen options should apply to instructions?
1781 const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1782 if (!Recips.empty())
1783 FuncAttrs.addAttribute("reciprocal-estimates",
1784 llvm::join(Recips, ","));
1785
1786 if (!CodeGenOpts.PreferVectorWidth.empty() &&
1787 CodeGenOpts.PreferVectorWidth != "none")
1788 FuncAttrs.addAttribute("prefer-vector-width",
1789 CodeGenOpts.PreferVectorWidth);
1790
1791 if (CodeGenOpts.StackRealignment)
1792 FuncAttrs.addAttribute("stackrealign");
1793 if (CodeGenOpts.Backchain)
1794 FuncAttrs.addAttribute("backchain");
1795
1796 // FIXME: The interaction of this attribute with the SLH command line flag
1797 // has not been determined.
1798 if (CodeGenOpts.SpeculativeLoadHardening)
1799 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1800 }
1801
1802 if (getLangOpts().assumeFunctionsAreConvergent()) {
1803 // Conservatively, mark all functions and calls in CUDA and OpenCL as
1804 // convergent (meaning, they may call an intrinsically convergent op, such
1805 // as __syncthreads() / barrier(), and so can't have certain optimizations
1806 // applied around them). LLVM will remove this attribute where it safely
1807 // can.
1808 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1809 }
1810
1811 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1812 // Exceptions aren't supported in CUDA device code.
1813 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1814
1815 // Respect -fcuda-flush-denormals-to-zero.
1816 if (CodeGenOpts.FlushDenorm)
1817 FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1818 }
1819}
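As a rough sketch of the output (flag values assumed: an -O2 build with frame-pointer elimination enabled and default floating-point options), a function definition would carry string attributes along these lines:

// "no-frame-pointer-elim"="false"
// "less-precise-fpmad"="false"
// "no-trapping-math"="false"
// "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false"
// "stack-protector-buffer-size"="8"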
1820
1821void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1822 llvm::AttrBuilder FuncAttrs;
1823 ConstructDefaultFnAttrList(F.getName(),
1824 F.hasFnAttribute(llvm::Attribute::OptimizeNone),
1825 /* AttrOnCallsite = */ false, FuncAttrs);
1826 F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1827}
1828
1829void CodeGenModule::ConstructAttributeList(
1830 StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1831 llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1832 llvm::AttrBuilder FuncAttrs;
1833 llvm::AttrBuilder RetAttrs;
1834
1835 CallingConv = FI.getEffectiveCallingConvention();
1836 if (FI.isNoReturn())
1837 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1838
1839 // If we have information about the function prototype, we can learn
1840 // attributes from there.
1841 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1842 CalleeInfo.getCalleeFunctionProtoType());
1843
1844 const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
1845
1846 bool HasOptnone = false;
1847 // FIXME: handle sseregparm someday...
1848 if (TargetDecl) {
1849 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1850 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1851 if (TargetDecl->hasAttr<NoThrowAttr>())
1852 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1853 if (TargetDecl->hasAttr<NoReturnAttr>())
1854 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1855 if (TargetDecl->hasAttr<ColdAttr>())
1856 FuncAttrs.addAttribute(llvm::Attribute::Cold);
1857 if (TargetDecl->hasAttr<NoDuplicateAttr>())
1858 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1859 if (TargetDecl->hasAttr<ConvergentAttr>())
1860 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1861 if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
1862 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1863
1864 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1865 AddAttributesFromFunctionProtoType(
1866 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1867 // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1868 // These attributes are not inherited by overloads.
1869 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1870 if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1871 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1872 }
1873
1874 // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1875 if (TargetDecl->hasAttr<ConstAttr>()) {
1876 FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1877 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1878 } else if (TargetDecl->hasAttr<PureAttr>()) {
1879 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1880 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1881 } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1882 FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1883 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1884 }
1885 if (TargetDecl->hasAttr<RestrictAttr>())
1886 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1887 if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
1888 !CodeGenOpts.NullPointerIsValid)
1889 RetAttrs.addAttribute(llvm::Attribute::NonNull);
1890 if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1891 FuncAttrs.addAttribute("no_caller_saved_registers");
1892 if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1893 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1894
1895 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1896 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1897 Optional<unsigned> NumElemsParam;
1898 if (AllocSize->getNumElemsParam().isValid())
1899 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
1900 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
1901 NumElemsParam);
1902 }
1903 }
1904
1905 ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1906
1907 if (CodeGenOpts.EnableSegmentedStacks &&
1908 !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1909 FuncAttrs.addAttribute("split-stack");
1910
1911 // Add NonLazyBind attribute to function declarations when -fno-plt
1912 // is used.
1913 if (TargetDecl && CodeGenOpts.NoPLT) {
1914 if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1915 if (!Fn->isDefined() && !AttrOnCallSite) {
1916 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1917 }
1918 }
1919 }
1920
1921 if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
1922 if (getLangOpts().OpenCLVersion <= 120) {
1923 // In OpenCL v1.2, work groups are always uniform.
1924 FuncAttrs.addAttribute("uniform-work-group-size", "true");
1925 } else {
1926 // In OpenCL v2.0, work groups may or may not be uniform. The
1927 // '-cl-uniform-work-group-size' compile option gives the compiler
1928 // a hint that the global work-size is a multiple of the
1929 // work-group size specified to clEnqueueNDRangeKernel
1930 // (i.e. work groups are uniform).
1931 FuncAttrs.addAttribute("uniform-work-group-size",
1932 llvm::toStringRef(CodeGenOpts.UniformWGSize));
1933 }
1934 }
1935
1936 if (!AttrOnCallSite) {
1937 bool DisableTailCalls = false;
1938
1939 if (CodeGenOpts.DisableTailCalls)
1940 DisableTailCalls = true;
1941 else if (TargetDecl) {
1942 if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1943 TargetDecl->hasAttr<AnyX86InterruptAttr>())
1944 DisableTailCalls = true;
1945 else if (CodeGenOpts.NoEscapingBlockTailCalls) {
1946 if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
1947 if (!BD->doesNotEscape())
1948 DisableTailCalls = true;
1949 }
1950 }
1951
1952 FuncAttrs.addAttribute("disable-tail-calls",
1953 llvm::toStringRef(DisableTailCalls));
1954 GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
1955 }
1956
1957 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1958
1959 QualType RetTy = FI.getReturnType();
1960 const ABIArgInfo &RetAI = FI.getReturnInfo();
1961 switch (RetAI.getKind()) {
1962 case ABIArgInfo::Extend:
1963 if (RetAI.isSignExt())
1964 RetAttrs.addAttribute(llvm::Attribute::SExt);
1965 else
1966 RetAttrs.addAttribute(llvm::Attribute::ZExt);
1967 LLVM_FALLTHROUGH;
1968 case ABIArgInfo::Direct:
1969 if (RetAI.getInReg())
1970 RetAttrs.addAttribute(llvm::Attribute::InReg);
1971 break;
1972 case ABIArgInfo::Ignore:
1973 break;
1974
1975 case ABIArgInfo::InAlloca:
1976 case ABIArgInfo::Indirect: {
1977 // inalloca and sret disable readnone and readonly
1978 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1979 .removeAttribute(llvm::Attribute::ReadNone);
1980 break;
1981 }
1982
1983 case ABIArgInfo::CoerceAndExpand:
1984 break;
1985
1986 case ABIArgInfo::Expand:
1987 llvm_unreachable("Invalid ABI kind for return argument");
1988 }
1989
1990 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1991 QualType PTy = RefTy->getPointeeType();
1992 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1993 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1994 .getQuantity());
1995 else if (getContext().getTargetAddressSpace(PTy) == 0 &&
1996 !CodeGenOpts.NullPointerIsValid)
1997 RetAttrs.addAttribute(llvm::Attribute::NonNull);
1998 }
1999
2000 bool hasUsedSRet = false;
2001 SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2002
2003 // Attach attributes to sret.
2004 if (IRFunctionArgs.hasSRetArg()) {
2005 llvm::AttrBuilder SRETAttrs;
2006 if (!RetAI.getSuppressSRet())
2007 SRETAttrs.addAttribute(llvm::Attribute::StructRet);
2008 hasUsedSRet = true;
2009 if (RetAI.getInReg())
2010 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2011 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2012 llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2013 }
2014
2015 // Attach attributes to inalloca argument.
2016 if (IRFunctionArgs.hasInallocaArg()) {
2017 llvm::AttrBuilder Attrs;
2018 Attrs.addAttribute(llvm::Attribute::InAlloca);
2019 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2020 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2021 }
2022
2023 unsigned ArgNo = 0;
2024 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2025 E = FI.arg_end();
2026 I != E; ++I, ++ArgNo) {
2027 QualType ParamType = I->type;
2028 const ABIArgInfo &AI = I->info;
2029 llvm::AttrBuilder Attrs;
2030
2031 // Add attribute for padding argument, if necessary.
2032 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2033 if (AI.getPaddingInReg()) {
2034 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2035 llvm::AttributeSet::get(
2036 getLLVMContext(),
2037 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2038 }
2039 }
2040
2041 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2042 // have the corresponding parameter variable. It doesn't make
2043 // sense to do it here because parameters are so messed up.
2044 switch (AI.getKind()) {
2045 case ABIArgInfo::Extend:
2046 if (AI.isSignExt())
2047 Attrs.addAttribute(llvm::Attribute::SExt);
2048 else
2049 Attrs.addAttribute(llvm::Attribute::ZExt);
2050 LLVM_FALLTHROUGH;
2051 case ABIArgInfo::Direct:
2052 if (ArgNo == 0 && FI.isChainCall())
2053 Attrs.addAttribute(llvm::Attribute::Nest);
2054 else if (AI.getInReg())
2055 Attrs.addAttribute(llvm::Attribute::InReg);
2056 break;
2057
2058 case ABIArgInfo::Indirect: {
2059 if (AI.getInReg())
2060 Attrs.addAttribute(llvm::Attribute::InReg);
2061
2062 if (AI.getIndirectByVal())
2063 Attrs.addAttribute(llvm::Attribute::ByVal);
2064
2065 CharUnits Align = AI.getIndirectAlign();
2066
2067 // In a byval argument, it is important that the required
2068 // alignment of the type is honored, as LLVM might be creating a
2069 // *new* stack object, and needs to know what alignment to give
2070 // it. (Sometimes it can deduce a sensible alignment on its own,
2071 // but not if clang decides it must emit a packed struct, or the
2072 // user specifies increased alignment requirements.)
2073 //
2074 // This is different from indirect *not* byval, where the object
2075 // exists already, and the align attribute is purely
2076 // informative.
2077 assert(!Align.isZero());
2078
2079 // For now, only add this when we have a byval argument.
2080 // TODO: be less lazy about updating test cases.
2081 if (AI.getIndirectByVal())
2082 Attrs.addAlignmentAttr(Align.getQuantity());
2083
2084 // byval disables readnone and readonly.
2085 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2086 .removeAttribute(llvm::Attribute::ReadNone);
2087 break;
2088 }
2089 case ABIArgInfo::Ignore:
2090 case ABIArgInfo::Expand:
2091 case ABIArgInfo::CoerceAndExpand:
2092 break;
2093
2094 case ABIArgInfo::InAlloca:
2095 // inalloca disables readnone and readonly.
2096 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2097 .removeAttribute(llvm::Attribute::ReadNone);
2098 continue;
2099 }
2100
2101 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2102 QualType PTy = RefTy->getPointeeType();
2103 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2104 Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2105 .getQuantity());
2106 else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2107 !CodeGenOpts.NullPointerIsValid)
2108 Attrs.addAttribute(llvm::Attribute::NonNull);
2109 }
2110
2111 switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2112 case ParameterABI::Ordinary:
2113 break;
2114
2115 case ParameterABI::SwiftIndirectResult: {
2116 // Add 'sret' if we haven't already used it for something, but
2117 // only if the result is void.
2118 if (!hasUsedSRet && RetTy->isVoidType()) {
2119 Attrs.addAttribute(llvm::Attribute::StructRet);
2120 hasUsedSRet = true;
2121 }
2122
2123 // Add 'noalias' in either case.
2124 Attrs.addAttribute(llvm::Attribute::NoAlias);
2125
2126 // Add 'dereferenceable' and 'alignment'.
2127 auto PTy = ParamType->getPointeeType();
2128 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2129 auto info = getContext().getTypeInfoInChars(PTy);
2130 Attrs.addDereferenceableAttr(info.first.getQuantity());
2131 Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
2132 info.second.getQuantity()));
2133 }
2134 break;
2135 }
2136
2137 case ParameterABI::SwiftErrorResult:
2138 Attrs.addAttribute(llvm::Attribute::SwiftError);
2139 break;
2140
2141 case ParameterABI::SwiftContext:
2142 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2143 break;
2144 }
2145
2146 if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2147 Attrs.addAttribute(llvm::Attribute::NoCapture);
2148
2149 if (Attrs.hasAttributes()) {
2150 unsigned FirstIRArg, NumIRArgs;
2151 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2152 for (unsigned i = 0; i < NumIRArgs; i++)
2153 ArgAttrs[FirstIRArg + i] =
2154 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2155 }
2156 }
2157 assert(ArgNo == FI.arg_size());
2158
2159 AttrList = llvm::AttributeList::get(
2160 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2161 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2162}
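The declaration attributes handled above map to IR attributes roughly as follows (a sketch using the GNU spellings; declarations are hypothetical):

__attribute__((const))           int  sq(int);              // readnone + nounwind
__attribute__((pure))            int  length(const char *); // readonly + nounwind
__attribute__((returns_nonnull)) void *getBuffer();         // nonnull on the return
__attribute__((noreturn))        void fail();               // noreturn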
2163
2164/// An argument came in as a promoted argument; demote it back to its
2165/// declared type.
2166static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2167 const VarDecl *var,
2168 llvm::Value *value) {
2169 llvm::Type *varType = CGF.ConvertType(var->getType());
2170
2171 // This can happen with promotions that actually don't change the
2172 // underlying type, like the enum promotions.
2173 if (value->getType() == varType) return value;
2174
2175 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2176 && "unexpected promotion type");
2177
2178 if (isa<llvm::IntegerType>(varType))
2179 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2180
2181 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2182}
2183
2184/// Returns the attribute (either parameter attribute, or function
2185/// attribute), which declares argument ArgNo to be non-null.
2186static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2187 QualType ArgType, unsigned ArgNo) {
2188 // FIXME: __attribute__((nonnull)) can also be applied to:
2189 // - references to pointers, where the pointee is known to be
2190 // nonnull (apparently a Clang extension)
2191 // - transparent unions containing pointers
2192 // In the former case, LLVM IR cannot represent the constraint. In
2193 // the latter case, we have no guarantee that the transparent union
2194 // is in fact passed as a pointer.
2195 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2196 return nullptr;
2197 // First, check attribute on parameter itself.
2198 if (PVD) {
2199 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2200 return ParmNNAttr;
2201 }
2202 // Check function attributes.
2203 if (!FD)
2204 return nullptr;
2205 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2206 if (NNAttr->isNonNull(ArgNo))
2207 return NNAttr;
2208 }
2209 return nullptr;
2210}
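Both placements it checks look like this in source (hypothetical declarations; the function-level form names 1-based parameter indices):

// Function-level form: the first argument is declared non-null.
__attribute__((nonnull(1))) void useBuffer(char *buf, int len);
// Parameter-level form, checked first by getNonNullAttr:
void copyFrom(const char *src __attribute__((nonnull)));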
2211
2212namespace {
2213 struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2214 Address Temp;
2215 Address Arg;
2216 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2217 void Emit(CodeGenFunction &CGF, Flags flags) override {
2218 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2219 CGF.Builder.CreateStore(errorValue, Arg);
2220 }
2221 };
2222}
2223
2224void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2225 llvm::Function *Fn,
2226 const FunctionArgList &Args) {
2227 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2228 // Naked functions don't have prologues.
2229 return;
2230
2231 // If this is an implicit-return-zero function, go ahead and
2232 // initialize the return value. TODO: it might be nice to have
2233 // a more general mechanism for this that didn't require synthesized
2234 // return statements.
2235 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2236 if (FD->hasImplicitReturnZero()) {
2237 QualType RetTy = FD->getReturnType().getUnqualifiedType();
2238 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2239 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2240 Builder.CreateStore(Zero, ReturnValue);
2241 }
2242 }
2243
2244 // FIXME: We no longer need the types from FunctionArgList; lift up and
2245 // simplify.
2246
2247 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2248 // Flattened function arguments.
2249 SmallVector<llvm::Value *, 16> FnArgs;
2250 FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2251 for (auto &Arg : Fn->args()) {
2252 FnArgs.push_back(&Arg);
2253 }
2254 assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2255
2256 // If we're using inalloca, all the memory arguments are GEPs off of the last
2257 // parameter, which is a pointer to the complete memory area.
2258 Address ArgStruct = Address::invalid();
2259 const llvm::StructLayout *ArgStructLayout = nullptr;
2260 if (IRFunctionArgs.hasInallocaArg()) {
2261 ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2262 ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2263 FI.getArgStructAlignment());
2264
2265 assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2266 }
2267
2268 // Name the struct return parameter.
2269 if (IRFunctionArgs.hasSRetArg()) {
2270 auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2271 AI->setName("agg.result");
2272 AI->addAttr(llvm::Attribute::NoAlias);
2273 }
2274
2275 // Track if we received the parameter as a pointer (indirect, byval, or
2276 // inalloca). If we already have a pointer, EmitParmDecl doesn't need to
2277 // copy it into a local alloca for us.
2278 SmallVector<ParamValue, 16> ArgVals;
2279 ArgVals.reserve(Args.size());
2280
2281 // Create a pointer value for every parameter declaration. This usually
2282 // entails copying one or more LLVM IR arguments into an alloca. Don't push
2283 // any cleanups or do anything that might unwind. We do that separately, so
2284 // we can push the cleanups in the correct order for the ABI.
2285 assert(FI.arg_size() == Args.size() &&
2286 "Mismatch between function signature & arguments.");
2287 unsigned ArgNo = 0;
2288 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2289 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2290 i != e; ++i, ++info_it, ++ArgNo) {
2291 const VarDecl *Arg = *i;
2292 const ABIArgInfo &ArgI = info_it->info;
2293
2294 bool isPromoted =
2295 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2296 // We are converting from ABIArgInfo type to VarDecl type directly, unless
2297 // the parameter is promoted. In this case we convert to
2298 // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2299 QualType Ty = isPromoted ? info_it->type : Arg->getType();
2300 assert(hasScalarEvaluationKind(Ty) ==
2301 hasScalarEvaluationKind(Arg->getType()));
2302
2303 unsigned FirstIRArg, NumIRArgs;
2304 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2305
2306 switch (ArgI.getKind()) {
2307 case ABIArgInfo::InAlloca: {
2308 assert(NumIRArgs == 0);
2309 auto FieldIndex = ArgI.getInAllocaFieldIndex();
2310 CharUnits FieldOffset =
2311 CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
2312 Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
2313 Arg->getName());
2314 ArgVals.push_back(ParamValue::forIndirect(V));
2315 break;
2316 }
2317
2318 case ABIArgInfo::Indirect: {
2319 assert(NumIRArgs == 1);
2320 Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2321
2322 if (!hasScalarEvaluationKind(Ty)) {
2323 // Aggregates and complex variables are accessed by reference. All we
2324 // need to do is realign the value, if requested.
2325 Address V = ParamAddr;
2326 if (ArgI.getIndirectRealign()) {
2327 Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2328
2329 // Copy from the incoming argument pointer to the temporary with the
2330 // appropriate alignment.
2331 //
2332 // FIXME: We should have a common utility for generating an aggregate
2333 // copy.
2334 CharUnits Size = getContext().getTypeSizeInChars(Ty);
2335 auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2336 Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2337 Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2338 Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2339 V = AlignedTemp;
2340 }
2341 ArgVals.push_back(ParamValue::forIndirect(V));
2342 } else {
2343 // Load scalar value from indirect argument.
2344 llvm::Value *V =
2345 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2346
2347 if (isPromoted)
2348 V = emitArgumentDemotion(*this, Arg, V);
2349 ArgVals.push_back(ParamValue::forDirect(V));
2350 }
2351 break;
2352 }
2353
2354 case ABIArgInfo::Extend:
2355 case ABIArgInfo::Direct: {
2356
2357 // If we have the trivial case, handle it with no muss and fuss.
2358 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2359 ArgI.getCoerceToType() == ConvertType(Ty) &&
2360 ArgI.getDirectOffset() == 0) {
2361 assert(NumIRArgs == 1);
2362 llvm::Value *V = FnArgs[FirstIRArg];
2363 auto AI = cast<llvm::Argument>(V);
2364
2365 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2366 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2367 PVD->getFunctionScopeIndex()) &&
2368 !CGM.getCodeGenOpts().NullPointerIsValid)
2369 AI->addAttr(llvm::Attribute::NonNull);
2370
2371 QualType OTy = PVD->getOriginalType();
2372 if (const auto *ArrTy =
2373 getContext().getAsConstantArrayType(OTy)) {
2374 // A C99 array parameter declaration with the static keyword also
2375 // indicates dereferenceability, and if the size is constant we can
2376 // use the dereferenceable attribute (which requires the size in
2377 // bytes).
2378 if (ArrTy->getSizeModifier() == ArrayType::Static) {
2379 QualType ETy = ArrTy->getElementType();
2380 uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2381 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2382 ArrSize) {
2383 llvm::AttrBuilder Attrs;
2384 Attrs.addDereferenceableAttr(
2385 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2386 AI->addAttrs(Attrs);
2387 } else if (getContext().getTargetAddressSpace(ETy) == 0 &&
2388 !CGM.getCodeGenOpts().NullPointerIsValid) {
2389 AI->addAttr(llvm::Attribute::NonNull);
2390 }
2391 }
2392 } else if (const auto *ArrTy =
2393 getContext().getAsVariableArrayType(OTy)) {
2394 // For C99 VLAs with the static keyword, we don't know the size so
2395 // we can't use the dereferenceable attribute, but in addrspace(0)
2396 // we know that it must be nonnull.
2397 if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2398 !getContext().getTargetAddressSpace(ArrTy->getElementType()) &&
2399 !CGM.getCodeGenOpts().NullPointerIsValid)
2400 AI->addAttr(llvm::Attribute::NonNull);
2401 }
2402
2403 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2404 if (!AVAttr)
2405 if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2406 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2407 if (AVAttr) {
2408 llvm::Value *AlignmentValue =
2409 EmitScalarExpr(AVAttr->getAlignment());
2410 llvm::ConstantInt *AlignmentCI =
2411 cast<llvm::ConstantInt>(AlignmentValue);
2412 unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2413 +llvm::Value::MaximumAlignment);
2414 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2415 }
2416 }
2417
2418 if (Arg->getType().isRestrictQualified())
2419 AI->addAttr(llvm::Attribute::NoAlias);
2420
2421 // LLVM expects swifterror parameters to be used in very restricted
2422 // ways. Copy the value into a less-restricted temporary.
2423 if (FI.getExtParameterInfo(ArgNo).getABI()
2424 == ParameterABI::SwiftErrorResult) {
2425 QualType pointeeTy = Ty->getPointeeType();
2426 assert(pointeeTy->isPointerType());
2427 Address temp =
2428 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2429 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2430 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2431 Builder.CreateStore(incomingErrorValue, temp);
2432 V = temp.getPointer();
2433
2434 // Push a cleanup to copy the value back at the end of the function.
2435 // The convention does not guarantee that the value will be written
2436 // back if the function exits with an unwind exception.
2437 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2438 }
2439
2440 // Ensure the argument is the correct type.
2441 if (V->getType() != ArgI.getCoerceToType())
2442 V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2443
2444 if (isPromoted)
2445 V = emitArgumentDemotion(*this, Arg, V);
2446
2447 // Because of merging of function types from multiple decls it is
2448 // possible for the type of an argument to not match the corresponding
2449 // type in the function type. Since we are codegening the callee
2450 // in here, add a cast to the argument type.
2451 llvm::Type *LTy = ConvertType(Arg->getType());
2452 if (V->getType() != LTy)
2453 V = Builder.CreateBitCast(V, LTy);
2454
2455 ArgVals.push_back(ParamValue::forDirect(V));
2456 break;
2457 }
2458
2459 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2460 Arg->getName());
2461
2462 // Pointer to store into.
2463 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2464
2465 // Fast-isel and the optimizer generally like scalar values better than
2466 // FCAs, so we flatten them if this is safe to do for this argument.
2467 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2468 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2469 STy->getNumElements() > 1) {
2470 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2471 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2472 llvm::Type *DstTy = Ptr.getElementType();
2473 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2474
2475 Address AddrToStoreInto = Address::invalid();
2476 if (SrcSize <= DstSize) {
2477 AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2478 } else {
2479 AddrToStoreInto =
2480 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2481 }
2482
2483 assert(STy->getNumElements() == NumIRArgs);
2484 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2485 auto AI = FnArgs[FirstIRArg + i];
2486 AI->setName(Arg->getName() + ".coerce" + Twine(i));
2487 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2488 Address EltPtr =
2489 Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2490 Builder.CreateStore(AI, EltPtr);
2491 }
2492
2493 if (SrcSize > DstSize) {
2494 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2495 }
2496
2497 } else {
2498 // Simple case, just do a coerced store of the argument into the alloca.
2499 assert(NumIRArgs == 1);
2500 auto AI = FnArgs[FirstIRArg];
2501 AI->setName(Arg->getName() + ".coerce");
2502 CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2503 }
2504
2505 // Match to what EmitParmDecl is expecting for this type.
2506 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2507 llvm::Value *V =
2508 EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2509 if (isPromoted)
2510 V = emitArgumentDemotion(*this, Arg, V);
2511 ArgVals.push_back(ParamValue::forDirect(V));
2512 } else {
2513 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2514 }
2515 break;
2516 }
2517
2518 case ABIArgInfo::CoerceAndExpand: {
2519 // Reconstruct into a temporary.
2520 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2521 ArgVals.push_back(ParamValue::forIndirect(alloca));
2522
2523 auto coercionType = ArgI.getCoerceAndExpandType();
2524 alloca = Builder.CreateElementBitCast(alloca, coercionType);
2525 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2526
2527 unsigned argIndex = FirstIRArg;
2528 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2529 llvm::Type *eltType = coercionType->getElementType(i);
2530 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2531 continue;
2532
2533 auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2534 auto elt = FnArgs[argIndex++];
2535 Builder.CreateStore(elt, eltAddr);
2536 }
2537 assert(argIndex == FirstIRArg + NumIRArgs);
2538 break;
2539 }
2540
2541 case ABIArgInfo::Expand: {
2542 // If this structure was expanded into multiple arguments then
2543 // we need to create a temporary and reconstruct it from the
2544 // arguments.
2545 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2546 LValue LV = MakeAddrLValue(Alloca, Ty);
2547 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2548
2549 auto FnArgIter = FnArgs.begin() + FirstIRArg;
2550 ExpandTypeFromArgs(Ty, LV, FnArgIter);
2551 assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2552 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2553 auto AI = FnArgs[FirstIRArg + i];
2554 AI->setName(Arg->getName() + "." + Twine(i));
2555 }
2556 break;
2557 }
2558
2559 case ABIArgInfo::Ignore:
2560 assert(NumIRArgs == 0);
2561 // Initialize the local variable appropriately.
2562 if (!hasScalarEvaluationKind(Ty)) {
2563 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2564 } else {
2565 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2566 ArgVals.push_back(ParamValue::forDirect(U));
2567 }
2568 break;
2569 }
2570 }
2571
2572 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2573 for (int I = Args.size() - 1; I >= 0; --I)
2574 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2575 } else {
2576 for (unsigned I = 0, E = Args.size(); I != E; ++I)
2577 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2578 }
2579}
2580
2581static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2582 while (insn->use_empty()) {
2583 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2584 if (!bitcast) return;
2585
2586 // This is "safe" because we would have used a ConstantExpr otherwise.
2587 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2588 bitcast->eraseFromParent();
2589 }
2590}
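// Illustrative sketch (not part of this file) of the chain the loop above
// unwinds, assuming instruction (not ConstantExpr) bitcasts:
//   %0 = call i8* @objc_retain(i8* %x)
//   %1 = bitcast i8* %0 to %struct.Foo*
//   %2 = bitcast %struct.Foo* %1 to i8*      ; use_empty
// Erasing %2 makes %1 unused, so the loop erases %1 as well, then stops at
// the call, which is not a BitCastInst.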
2591
2592/// Try to emit a fused autorelease of a return result.
2593static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2594 llvm::Value *result) {
2595 // We must be immediately followed by the cast.
2596 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2597 if (BB->empty()) return nullptr;
2598 if (&BB->back() != result) return nullptr;
2599
2600 llvm::Type *resultType = result->getType();
2601
2602 // result is in a BasicBlock and is therefore an Instruction.
2603 llvm::Instruction *generator = cast<llvm::Instruction>(result);
2604
2605 SmallVector<llvm::Instruction *, 4> InstsToKill;
2606
2607 // Look for:
2608 // %generator = bitcast %type1* %generator2 to %type2*
2609 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2610 // We would have emitted this as a constant if the operand weren't
2611 // an Instruction.
2612 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2613
2614 // Require the generator to be immediately followed by the cast.
2615 if (generator->getNextNode() != bitcast)
2616 return nullptr;
2617
2618 InstsToKill.push_back(bitcast);
2619 }
2620
2621 // Look for:
2622 // %generator = call i8* @objc_retain(i8* %originalResult)
2623 // or
2624 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2625 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2626 if (!call) return nullptr;
2627
2628 bool doRetainAutorelease;
2629
2630 if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2631 doRetainAutorelease = true;
2632 } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2633 .objc_retainAutoreleasedReturnValue) {
2634 doRetainAutorelease = false;
2635
2636 // If we emitted an assembly marker for this call (and the
2637 // ARCEntrypoints field should have been set if so), go looking
2638 // for that call. If we can't find it, we can't do this
2639 // optimization. But it should always be the immediately previous
2640 // instruction, unless we needed bitcasts around the call.
2641 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2642 llvm::Instruction *prev = call->getPrevNode();
2643 assert(prev);
2644 if (isa<llvm::BitCastInst>(prev)) {
2645 prev = prev->getPrevNode();
2646 assert(prev);
2647 }
2648 assert(isa<llvm::CallInst>(prev));
2649 assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2650 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2651 InstsToKill.push_back(prev);
2652 }
2653 } else {
2654 return nullptr;
2655 }
2656
2657 result = call->getArgOperand(0);
2658 InstsToKill.push_back(call);
2659
2660 // Keep killing bitcasts, for sanity. Note that we no longer care
2661 // about precise ordering as long as there's exactly one use.
2662 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2663 if (!bitcast->hasOneUse()) break;
2664 InstsToKill.push_back(bitcast);
2665 result = bitcast->getOperand(0);
2666 }
2667
2668 // Delete all the unnecessary instructions, from latest to earliest.
2669 for (auto *I : InstsToKill)
2670 I->eraseFromParent();
2671
2672 // Do the fused retain/autorelease if we were asked to.
2673 if (doRetainAutorelease)
2674 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2675
2676 // Cast back to the result type.
2677 return CGF.Builder.CreateBitCast(result, resultType);
2678}
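// A minimal before/after sketch (illustrative) of the fusion above. Given a
// return sequence ending in:
//   %retained = call i8* @objc_retain(i8* %val)
// the retain call (plus any interleaving bitcasts) is erased and the result
// is re-emitted through EmitARCRetainAutoreleaseReturnValue, i.e. roughly:
//   %fused = call i8* @objc_retainAutoreleaseReturnValue(i8* %val)
// For objc_retainAutoreleasedReturnValue, only the redundant calls (and the
// assembly marker, if one was emitted) are removed; no fused call is added.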
2679
2680/// If this is a +1 of the value of an immutable 'self', remove it.
2681static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2682 llvm::Value *result) {
2683 // This is only applicable to a method with an immutable 'self'.
2684 const ObjCMethodDecl *method =
2685 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2686 if (!method) return nullptr;
2687 const VarDecl *self = method->getSelfDecl();
2688 if (!self->getType().isConstQualified()) return nullptr;
2689
2690 // Look for a retain call.
2691 llvm::CallInst *retainCall =
2692 dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2693 if (!retainCall ||
2694 retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2695 return nullptr;
2696
2697 // Look for an ordinary load of 'self'.
2698 llvm::Value *retainedValue = retainCall->getArgOperand(0);
2699 llvm::LoadInst *load =
2700 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2701 if (!load || load->isAtomic() || load->isVolatile() ||
2702 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2703 return nullptr;
2704
2705 // Okay! Burn it all down. This relies for correctness on the
2706 // assumption that the retain is emitted as part of the return and
2707 // that thereafter everything is used "linearly".
2708 llvm::Type *resultType = result->getType();
2709 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2710 assert(retainCall->use_empty());
2711 retainCall->eraseFromParent();
2712 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2713
2714 return CGF.Builder.CreateBitCast(load, resultType);
2715}
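// Sketch (illustrative) of the pattern this removes, assuming a method with
// an immutable 'self':
//   %self = load i8*, i8** %self.addr         ; ordinary load of 'self'
//   %ret  = call i8* @objc_retain(i8* %self)  ; the +1 being returned
// The retain and any dead bitcasts are erased, and the bare load of 'self'
// (bitcast to the result type) is returned instead.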
2716
2717/// Emit an ARC autorelease of the result of a function.
2718///
2719/// \return the value to actually return from the function
2720static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2721 llvm::Value *result) {
2722 // If we're returning 'self', kill the initial retain. This is a
2723 // heuristic attempt to "encourage correctness" in the really unfortunate
2724 // case where we have a return of self during a dealloc and we desperately
2725 // need to avoid the possible autorelease.
2726 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2727 return self;
2728
2729 // At -O0, try to emit a fused retain/autorelease.
2730 if (CGF.shouldUseFusedARCCalls())
2731 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2732 return fused;
2733
2734 return CGF.EmitARCAutoreleaseReturnValue(result);
2735}
2736
2737/// Heuristically search for a dominating store to the return-value slot.
2738static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2739 // Check whether a User is a store whose pointer operand is the ReturnValue.
2740 // We are looking for stores to the ReturnValue, not for stores of the
2741 // ReturnValue to some other location.
2742 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2743 auto *SI = dyn_cast<llvm::StoreInst>(U);
2744 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2745 return nullptr;
2746 // These aren't actually possible for non-coerced returns, and we
2747 // only care about non-coerced returns on this code path.
2748 assert(!SI->isAtomic() && !SI->isVolatile());
2749 return SI;
2750 };
2751 // If there are multiple uses of the return-value slot, just check
2752 // for something immediately preceding the IP. Sometimes this can
2753 // happen with how we generate implicit-returns; it can also happen
2754 // with noreturn cleanups.
2755 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2756 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2757 if (IP->empty()) return nullptr;
2758 llvm::Instruction *I = &IP->back();
2759
2760 // Skip lifetime markers
2761 for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2762 IE = IP->rend();
2763 II != IE; ++II) {
2764 if (llvm::IntrinsicInst *Intrinsic =
2765 dyn_cast<llvm::IntrinsicInst>(&*II)) {
2766 if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2767 const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2768 ++II;
2769 if (II == IE)
2770 break;
2771 if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2772 continue;
2773 }
2774 }
2775 I = &*II;
2776 break;
2777 }
2778
2779 return GetStoreIfValid(I);
2780 }
2781
2782 llvm::StoreInst *store =
2783 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2784 if (!store) return nullptr;
2785
2786 // Now do a quick-and-dirty dominance check: just walk up the
2787 // single-predecessors chain from the current insertion point.
2788 llvm::BasicBlock *StoreBB = store->getParent();
2789 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2790 while (IP != StoreBB) {
2791 if (!(IP = IP->getSinglePredecessor()))
2792 return nullptr;
2793 }
2794
2795 // Okay, the store's basic block dominates the insertion point; we
2796 // can do our thing.
2797 return store;
2798}
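// Dominance-walk sketch (illustrative): with blocks chained as
//   store.bb -> cleanup.bb -> ret.bb          (each edge a sole predecessor)
// and the insertion point in ret.bb, the loop walks
//   ret.bb -> cleanup.bb -> store.bb
// via getSinglePredecessor() and succeeds; any block with two or more
// predecessors on the way up makes the heuristic give up and return null.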
2799
2800void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2801 bool EmitRetDbgLoc,
2802 SourceLocation EndLoc) {
2803 if (FI.isNoReturn()) {
2804 // Noreturn functions don't return.
2805 EmitUnreachable(EndLoc);
2806 return;
2807 }
2808
2809 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2810 // Naked functions don't have epilogues.
2811 Builder.CreateUnreachable();
2812 return;
2813 }
2814
2815 // Functions with no result always return void.
2816 if (!ReturnValue.isValid()) {
2817 Builder.CreateRetVoid();
2818 return;
2819 }
2820
2821 llvm::DebugLoc RetDbgLoc;
2822 llvm::Value *RV = nullptr;
2823 QualType RetTy = FI.getReturnType();
2824 const ABIArgInfo &RetAI = FI.getReturnInfo();
2825
2826 switch (RetAI.getKind()) {
2827 case ABIArgInfo::InAlloca:
2828 // Aggregates get evaluated directly into the destination. Sometimes we
2829 // need to return the sret value in a register, though.
2830 assert(hasAggregateEvaluationKind(RetTy));
2831 if (RetAI.getInAllocaSRet()) {
2832 llvm::Function::arg_iterator EI = CurFn->arg_end();
2833 --EI;
2834 llvm::Value *ArgStruct = &*EI;
2835 llvm::Value *SRet = Builder.CreateStructGEP(
2836 nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2837 RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2838 }
2839 break;
2840
2841 case ABIArgInfo::Indirect: {
2842 auto AI = CurFn->arg_begin();
2843 if (RetAI.isSRetAfterThis())
2844 ++AI;
2845 switch (getEvaluationKind(RetTy)) {
2846 case TEK_Complex: {
2847 ComplexPairTy RT =
2848 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2849 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2850 /*isInit*/ true);
2851 break;
2852 }
2853 case TEK_Aggregate:
2854 // Do nothing; aggregates get evaluated directly into the destination.
2855 break;
2856 case TEK_Scalar:
2857 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2858 MakeNaturalAlignAddrLValue(&*AI, RetTy),
2859 /*isInit*/ true);
2860 break;
2861 }
2862 break;
2863 }
2864
2865 case ABIArgInfo::Extend:
2866 case ABIArgInfo::Direct:
2867 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2868 RetAI.getDirectOffset() == 0) {
2869 // The internal return value temp will always have pointer-to-return-type
2870 // type; just do a load.
2871
2872 // If there is a dominating store to ReturnValue, we can elide
2873 // the load, zap the store, and usually zap the alloca.
2874 if (llvm::StoreInst *SI =
2875 findDominatingStoreToReturnValue(*this)) {
2876 // Reuse the debug location from the store unless there is
2877 // cleanup code to be emitted between the store and return
2878 // instruction.
2879 if (EmitRetDbgLoc && !AutoreleaseResult)
2880 RetDbgLoc = SI->getDebugLoc();
2881 // Get the stored value and nuke the now-dead store.
2882 RV = SI->getValueOperand();
2883 SI->eraseFromParent();
2884
2885 // If that was the only use of the return value, nuke it as well now.
2886 auto returnValueInst = ReturnValue.getPointer();
2887 if (returnValueInst->use_empty()) {
2888 if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2889 alloca->eraseFromParent();
2890 ReturnValue = Address::invalid();
2891 }
2892 }
2893
2894 // Otherwise, we have to do a simple load.
2895 } else {
2896 RV = Builder.CreateLoad(ReturnValue);
2897 }
2898 } else {
2899 // If the value is offset in memory, apply the offset now.
2900 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2901
2902 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2903 }
2904
2905 // In ARC, end functions that return a retainable type with a call
2906 // to objc_autoreleaseReturnValue.
2907 if (AutoreleaseResult) {
2908#ifndef NDEBUG
2909 // Type::isObjCRetainableType has to be called on a QualType that hasn't
2910 // been stripped of the typedefs, so we cannot use RetTy here. Get the
2911 // original return type of the FunctionDecl, ObjCMethodDecl, or BlockDecl
2912 // from CurCodeDecl or BlockInfo.
2913 QualType RT;
2914
2915 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2916 RT = FD->getReturnType();
2917 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2918 RT = MD->getReturnType();
2919 else if (isa<BlockDecl>(CurCodeDecl))
2920 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2921 else
2922 llvm_unreachable("Unexpected function/method type")::llvm::llvm_unreachable_internal("Unexpected function/method type"
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGCall.cpp"
, 2922)
;
2923
2924 assert(getLangOpts().ObjCAutoRefCount &&
2925 !FI.isReturnsRetained() &&
2926 RT->isObjCRetainableType());
2927#endif
2928 RV = emitAutoreleaseOfResult(*this, RV);
2929 }
2930
2931 break;
2932
2933 case ABIArgInfo::Ignore:
2934 break;
2935
2936 case ABIArgInfo::CoerceAndExpand: {
2937 auto coercionType = RetAI.getCoerceAndExpandType();
2938 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2939
2940 // Load all of the coerced elements out into results.
2941 llvm::SmallVector<llvm::Value*, 4> results;
2942 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2943 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2944 auto coercedEltType = coercionType->getElementType(i);
2945 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2946 continue;
2947
2948 auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
2949 auto elt = Builder.CreateLoad(eltAddr);
2950 results.push_back(elt);
2951 }
2952
2953 // If we have one result, it's the single direct result type.
2954 if (results.size() == 1) {
2955 RV = results[0];
2956
2957 // Otherwise, we need to make a first-class aggregate.
2958 } else {
2959 // Construct a return type that lacks padding elements.
2960 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2961
2962 RV = llvm::UndefValue::get(returnType);
2963 for (unsigned i = 0, e = results.size(); i != e; ++i) {
2964 RV = Builder.CreateInsertValue(RV, results[i], i);
2965 }
2966 }
2967 break;
2968 }
2969
2970 case ABIArgInfo::Expand:
2971 llvm_unreachable("Invalid ABI kind for return argument")::llvm::llvm_unreachable_internal("Invalid ABI kind for return argument"
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGCall.cpp"
, 2971)
;
2972 }
2973
2974 llvm::Instruction *Ret;
2975 if (RV) {
2976 EmitReturnValueCheck(RV);
2977 Ret = Builder.CreateRet(RV);
2978 } else {
2979 Ret = Builder.CreateRetVoid();
2980 }
2981
2982 if (RetDbgLoc)
2983 Ret->setDebugLoc(std::move(RetDbgLoc));
2984}
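// Worked example (a sketch, not from this file) of the Indirect path above:
//   struct Big { int a[16]; };
//   Big f();        // ABI-lowered as: void f(%struct.Big* sret %agg.result)
// For TEK_Aggregate nothing is copied here (the aggregate was evaluated
// directly into the sret slot), RV stays null, and the epilog emits
// 'ret void'.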
2985
2986void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
2987 // A current decl may not be available when emitting vtable thunks.
2988 if (!CurCodeDecl)
2989 return;
2990
2991 ReturnsNonNullAttr *RetNNAttr = nullptr;
2992 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
2993 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
2994
2995 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
2996 return;
2997
2998 // Prefer the returns_nonnull attribute if it's present.
2999 SourceLocation AttrLoc;
3000 SanitizerMask CheckKind;
3001 SanitizerHandler Handler;
3002 if (RetNNAttr) {
3003 assert(!requiresReturnValueNullabilityCheck() &&
3004 "Cannot check nullability and the nonnull attribute");
3005 AttrLoc = RetNNAttr->getLocation();
3006 CheckKind = SanitizerKind::ReturnsNonnullAttribute;
3007 Handler = SanitizerHandler::NonnullReturn;
3008 } else {
3009 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3010 if (auto *TSI = DD->getTypeSourceInfo())
3011 if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
3012 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3013 CheckKind = SanitizerKind::NullabilityReturn;
3014 Handler = SanitizerHandler::NullabilityReturn;
3015 }
3016
3017 SanitizerScope SanScope(this);
3018
3019 // Make sure the "return" source location is valid. If we're checking a
3020 // nullability annotation, make sure the preconditions for the check are met.
3021 llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3022 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3023 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3024 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3025 if (requiresReturnValueNullabilityCheck())
3026 CanNullCheck =
3027 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3028 Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3029 EmitBlock(Check);
3030
3031 // Now do the null check.
3032 llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3033 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3034 llvm::Value *DynamicData[] = {SLocPtr};
3035 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3036
3037 EmitBlock(NoCheck);
3038
3039#ifndef NDEBUG
3040 // The return location should not be used after the check has been emitted.
3041 ReturnLocation = Address::invalid();
3042#endif
3043}
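// Illustrative triggers for this check (the sanitizer flags are assumed to
// be enabled):
//   // -fsanitize=returns-nonnull-attribute
//   __attribute__((returns_nonnull)) void *get(void) { return 0; }
//   // -fsanitize=nullability-return (Objective-C)
//   id _Nonnull selfOrNil(void) { return nil; }
// In both cases the returned value is compared against null and a sanitizer
// handler (NonnullReturn / NullabilityReturn) is invoked on failure.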
3044
3045static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3046 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3047 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3048}
3049
3050static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3051 QualType Ty) {
3052 // FIXME: Generate IR in one pass, rather than going back and fixing up these
3053 // placeholders.
3054 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3055 llvm::Type *IRPtrTy = IRTy->getPointerTo();
3056 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3057
3058 // FIXME: When we generate this IR in one pass, we shouldn't need
3059 // this win32-specific alignment hack.
3060 CharUnits Align = CharUnits::fromQuantity(4);
3061 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3062
3063 return AggValueSlot::forAddr(Address(Placeholder, Align),
3064 Ty.getQualifiers(),
3065 AggValueSlot::IsNotDestructed,
3066 AggValueSlot::DoesNotNeedGCBarriers,
3067 AggValueSlot::IsNotAliased,
3068 AggValueSlot::DoesNotOverlap);
3069}
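// Sketch of the placeholder IR produced above (illustrative):
//   %phld = load %T*, %T** undef, align 4   ; address placeholder
// The load-of-undef is distinctive enough to find and replace later, once
// the inalloca argument block has actually been allocated and laid out.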
3070
3071void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3072 const VarDecl *param,
3073 SourceLocation loc) {
3074 // StartFunction converted the ABI-lowered parameter(s) into a
3075 // local alloca. We need to turn that into an r-value suitable
3076 // for EmitCall.
3077 Address local = GetAddrOfLocalVar(param);
3078
3079 QualType type = param->getType();
3080
3081 if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3082 CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3083 }
3084
3085 // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3086 // but the argument needs to be the original pointer.
3087 if (type->isReferenceType()) {
3088 args.add(RValue::get(Builder.CreateLoad(local)), type);
3089
3090 // In ARC, move out of consumed arguments so that the release cleanup
3091 // entered by StartFunction doesn't cause an over-release. This isn't
3092 // optimal -O0 code generation, but it should get cleaned up when
3093 // optimization is enabled. This also assumes that delegate calls are
3094 // performed exactly once for a set of arguments, but that should be safe.
3095 } else if (getLangOpts().ObjCAutoRefCount &&
3096 param->hasAttr<NSConsumedAttr>() &&
3097 type->isObjCRetainableType()) {
3098 llvm::Value *ptr = Builder.CreateLoad(local);
3099 auto null =
3100 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3101 Builder.CreateStore(null, local);
3102 args.add(RValue::get(ptr), type);
3103
3104 // For the most part, we just need to load the alloca, except that
3105 // aggregate r-values are actually pointers to temporaries.
3106 } else {
3107 args.add(convertTempToRValue(local, type, loc), type);
3108 }
3109
3110 // Deactivate the cleanup for the callee-destructed param that was pushed.
3111 if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3112 type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3113 type.isDestructedType()) {
3114 EHScopeStack::stable_iterator cleanup =
3115 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3116 assert(cleanup.isValid() &&
3117 "cleanup for callee-destructed param not recorded");
3118 // This unreachable is a temporary marker which will be removed later.
3119 llvm::Instruction *isActive = Builder.CreateUnreachable();
3120 args.addArgCleanupDeactivation(cleanup, isActive);
3121 }
3122}
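// Sketch (illustrative) of the reference-parameter path above, e.g. when one
// constructor variant forwards its parameters to another:
//   void callee(int &x);            // IR parameter: i32* %x
//   ; prologue spill:   %x.addr = alloca i32*
//   ;                   store i32* %x, i32** %x.addr
//   ; delegate arg:     %0 = load i32*, i32** %x.addr  ; pass %0, not %x.addr
// The load recovers the original pointer so the callee sees the referenced
// object, not the local alloca holding the pointer.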
3123
3124static bool isProvablyNull(llvm::Value *addr) {
3125 return isa<llvm::ConstantPointerNull>(addr);
3126}
3127
3128/// Emit the actual writing-back of a writeback.
3129static void emitWriteback(CodeGenFunction &CGF,
3130 const CallArgList::Writeback &writeback) {
3131 const LValue &srcLV = writeback.Source;
3132 Address srcAddr = srcLV.getAddress();
3133 assert(!isProvablyNull(srcAddr.getPointer()) &&
3134 "shouldn't have writeback for provably null argument");
3135
3136 llvm::BasicBlock *contBB = nullptr;
3137
3138 // If the argument wasn't provably non-null, we need to null check
3139 // before doing the store.
3140 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3141 CGF.CGM.getDataLayout());
3142 if (!provablyNonNull) {
3143 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3144 contBB = CGF.createBasicBlock("icr.done");
3145
3146 llvm::Value *isNull =
3147 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3148 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3149 CGF.EmitBlock(writebackBB);
3150 }
3151
3152 // Load the value to writeback.
3153 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3154
3155 // Cast it back, in case we're writing an id to a Foo* or something.
3156 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3157 "icr.writeback-cast");
3158
3159 // Perform the writeback.
3160
3161 // If we have a "to use" value, it's something we need to emit a use
3162 // of. This has to be carefully threaded in: if it's done after the
3163 // release it's potentially undefined behavior (and the optimizer
3164 // will ignore it), and if it happens before the retain then the
3165 // optimizer could move the release there.
3166 if (writeback.ToUse) {
3167 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3168
3169 // Retain the new value. No need to block-copy here: the block's
3170 // being passed up the stack.
3171 value = CGF.EmitARCRetainNonBlock(value);
3172
3173 // Emit the intrinsic use here.
3174 CGF.EmitARCIntrinsicUse(writeback.ToUse);
3175
3176 // Load the old value (primitively).
3177 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3178
3179 // Put the new value in place (primitively).
3180 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3181
3182 // Release the old value.
3183 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3184
3185 // Otherwise, we can just do a normal lvalue store.
3186 } else {
3187 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3188 }
3189
3190 // Jump to the continuation block.
3191 if (!provablyNonNull)
3192 CGF.EmitBlock(contBB);
3193}
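// Source-level sketch (Objective-C ARC) of what gets written back:
//   void fill(NSError **err);      // parameter is __autoreleasing
//   NSError *e = nil;              // 'e' is __strong
//   fill(&e);                      // a temporary is passed instead of &e;
//                                  // afterwards its value is stored back
// The null check above guards the store for the case where the original
// address could not be proven non-null.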
3194
3195static void emitWritebacks(CodeGenFunction &CGF,
3196 const CallArgList &args) {
3197 for (const auto &I : args.writebacks())
3198 emitWriteback(CGF, I);
3199}
3200
3201static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3202 const CallArgList &CallArgs) {
3203 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3204 CallArgs.getCleanupsToDeactivate();
3205 // Iterate in reverse to increase the likelihood of popping the cleanup.
3206 for (const auto &I : llvm::reverse(Cleanups)) {
3207 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3208 I.IsActiveIP->eraseFromParent();
3209 }
3210}
3211
3212static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3213 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3214 if (uop->getOpcode() == UO_AddrOf)
3215 return uop->getSubExpr();
3216 return nullptr;
3217}
3218
3219/// Emit an argument that's being passed call-by-writeback. That is,
3220/// we are passing the address of an __autoreleased temporary; it
3221/// might be copy-initialized with the current value of the given
3222/// address, but it will definitely be copied out of after the call.
3223static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3224 const ObjCIndirectCopyRestoreExpr *CRE) {
3225 LValue srcLV;
3226
3227 // Make an optimistic effort to emit the address as an l-value.
3228 // This can fail if the argument expression is more complicated.
3229 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3230 srcLV = CGF.EmitLValue(lvExpr);
3231
3232 // Otherwise, just emit it as a scalar.
3233 } else {
3234 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3235
3236 QualType srcAddrType =
3237 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3238 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3239 }
3240 Address srcAddr = srcLV.getAddress();
3241
3242 // The dest and src types don't necessarily match in LLVM terms
3243 // because of the crazy ObjC compatibility rules.
3244
3245 llvm::PointerType *destType =
3246 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3247
3248 // If the address is a constant null, just pass the appropriate null.
3249 if (isProvablyNull(srcAddr.getPointer())) {
3250 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3251 CRE->getType());
3252 return;
3253 }
3254
3255 // Create the temporary.
3256 Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3257 CGF.getPointerAlign(),
3258 "icr.temp");
3259 // Loading an l-value can introduce a cleanup if the l-value is __weak,
3260 // and that cleanup will be conditional if we can't prove that the l-value
3261 // isn't null, so we need to register a dominating point so that the cleanups
3262 // system will make valid IR.
3263 CodeGenFunction::ConditionalEvaluation condEval(CGF);
3264
3265 // Zero-initialize it if we're not doing a copy-initialization.
3266 bool shouldCopy = CRE->shouldCopy();
3267 if (!shouldCopy) {
3268 llvm::Value *null =
3269 llvm::ConstantPointerNull::get(
3270 cast<llvm::PointerType>(destType->getElementType()));
3271 CGF.Builder.CreateStore(null, temp);
3272 }
3273
3274 llvm::BasicBlock *contBB = nullptr;
3275 llvm::BasicBlock *originBB = nullptr;
3276
3277 // If the address is *not* known to be non-null, we need to switch.
3278 llvm::Value *finalArgument;
3279
3280 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3281 CGF.CGM.getDataLayout());
3282 if (provablyNonNull) {
3283 finalArgument = temp.getPointer();
3284 } else {
3285 llvm::Value *isNull =
3286 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3287
3288 finalArgument = CGF.Builder.CreateSelect(isNull,
3289 llvm::ConstantPointerNull::get(destType),
3290 temp.getPointer(), "icr.argument");
3291
3292 // If we need to copy, then the load has to be conditional, which
3293 // means we need control flow.
3294 if (shouldCopy) {
3295 originBB = CGF.Builder.GetInsertBlock();
3296 contBB = CGF.createBasicBlock("icr.cont");
3297 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3298 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3299 CGF.EmitBlock(copyBB);
3300 condEval.begin(CGF);
3301 }
3302 }
3303
3304 llvm::Value *valueToUse = nullptr;
3305
3306 // Perform a copy if necessary.
3307 if (shouldCopy) {
3308 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3309 assert(srcRV.isScalar());
3310
3311 llvm::Value *src = srcRV.getScalarVal();
3312 src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3313 "icr.cast");
3314
3315 // Use an ordinary store, not a store-to-lvalue.
3316 CGF.Builder.CreateStore(src, temp);
3317
3318 // If optimization is enabled, and the value was held in a
3319 // __strong variable, we need to tell the optimizer that this
3320 // value has to stay alive until we're doing the store back.
3321 // This is because the temporary is effectively unretained,
3322 // and so otherwise we can violate the high-level semantics.
3323 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3324 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3325 valueToUse = src;
3326 }
3327 }
3328
3329 // Finish the control flow if we needed it.
3330 if (shouldCopy && !provablyNonNull) {
3331 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3332 CGF.EmitBlock(contBB);
3333
3334 // Make a phi for the value to intrinsically use.
3335 if (valueToUse) {
3336 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3337 "icr.to-use");
3338 phiToUse->addIncoming(valueToUse, copyBB);
3339 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3340 originBB);
3341 valueToUse = phiToUse;
3342 }
3343
3344 condEval.end(CGF);
3345 }
3346
3347 args.addWriteback(srcLV, temp, valueToUse);
3348 args.add(RValue::get(finalArgument), CRE->getType());
3349}
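// Control-flow sketch (illustrative) for the copy + not-provably-non-null
// case above:
//   %icr.isnull   = icmp eq i8** %src, null
//   %icr.argument = select i1 %icr.isnull, i8** null, i8** %icr.temp
//   br i1 %icr.isnull, label %icr.cont, label %icr.copy
// icr.copy:                        ; copy-initialize the temporary
//   br label %icr.cont
// icr.cont:                        ; the call receives %icr.argument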
3350
3351void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3352 assert(!StackBase);
3353
3354 // Save the stack.
3355 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3356 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3357}
3358
3359void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3360 if (StackBase) {
3361 // Restore the stack after the call.
3362 llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3363 CGF.Builder.CreateCall(F, StackBase);
3364 }
3365}
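// Resulting IR bracket around an inalloca call (sketch):
//   %inalloca.save = call i8* @llvm.stacksave()
//   %argmem = alloca inalloca <{ ... }>       ; argument memory
//   call void @f(<{ ... }>* inalloca %argmem)
//   call void @llvm.stackrestore(i8* %inalloca.save)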
3366
3367void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3368 SourceLocation ArgLoc,
3369 AbstractCallee AC,
3370 unsigned ParmNum) {
3371 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3372 SanOpts.has(SanitizerKind::NullabilityArg)))
3373 return;
3374
3375 // The param decl may be missing in a variadic function.
3376 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3377 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3378
3379 // Prefer the nonnull attribute if it's present.
3380 const NonNullAttr *NNAttr = nullptr;
3381 if (SanOpts.has(SanitizerKind::NonnullAttribute))
3382 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3383
3384 bool CanCheckNullability = false;
3385 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3386 auto Nullability = PVD->getType()->getNullability(getContext());
3387 CanCheckNullability = Nullability &&
3388 *Nullability == NullabilityKind::NonNull &&
3389 PVD->getTypeSourceInfo();
3390 }
3391
3392 if (!NNAttr && !CanCheckNullability)
3393 return;
3394
3395 SourceLocation AttrLoc;
3396 SanitizerMask CheckKind;
3397 SanitizerHandler Handler;
3398 if (NNAttr) {
3399 AttrLoc = NNAttr->getLocation();
3400 CheckKind = SanitizerKind::NonnullAttribute;
3401 Handler = SanitizerHandler::NonnullArg;
3402 } else {
3403 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3404 CheckKind = SanitizerKind::NullabilityArg;
3405 Handler = SanitizerHandler::NullabilityArg;
3406 }
3407
3408 SanitizerScope SanScope(this);
3409 assert(RV.isScalar());
3410 llvm::Value *V = RV.getScalarVal();
3411 llvm::Value *Cond =
3412 Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3413 llvm::Constant *StaticData[] = {
3414 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3415 llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3416 };
3417 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3418}
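// Illustrative call site for this check (assuming
// -fsanitize=nonnull-attribute is enabled):
//   void use(void *p) __attribute__((nonnull(1)));
//   use(q);   // emits: icmp ne i8* %q, null, then a branch into the
//             // NonnullArg sanitizer handler on the null path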
3419
3420void CodeGenFunction::EmitCallArgs(
3421 CallArgList &Args, ArrayRef<QualType> ArgTypes,
3422 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3423 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3424 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3425
3426 // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3427 // because arguments are destroyed left to right in the callee. As a special
3428 // case, there are certain language constructs that require left-to-right
3429 // evaluation, and in those cases we consider the evaluation order requirement
3430 // to trump the "destruction order is reverse construction order" guarantee.
3431 bool LeftToRight =
3432 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3433 ? Order == EvaluationOrder::ForceLeftToRight
3434 : Order != EvaluationOrder::ForceRightToLeft;
3435
3436 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3437 RValue EmittedArg) {
3438 if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3439 return;
3440 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3441 if (PS == nullptr)
3442 return;
3443
3444 const auto &Context = getContext();
3445 auto SizeTy = Context.getSizeType();
3446 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3447 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3448 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3449 EmittedArg.getScalarVal());
3450 Args.add(RValue::get(V), SizeTy);
3451 // If we're emitting args in reverse, be sure to do so with
3452 // pass_object_size, as well.
3453 if (!LeftToRight)
3454 std::swap(Args.back(), *(&Args.back() - 1));
3455 };
3456
3457 // Insert a stack save if we're going to need any inalloca args.
3458 bool HasInAllocaArgs = false;
3459 if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3460 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3461 I != E && !HasInAllocaArgs; ++I)
3462 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3463 if (HasInAllocaArgs) {
3464 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3465 Args.allocateArgumentMemory(*this);
3466 }
3467 }
3468
3469 // Evaluate each argument in the appropriate order.
3470 size_t CallArgsStart = Args.size();
3471 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3472 unsigned Idx = LeftToRight ? I : E - I - 1;
3473 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3474 unsigned InitialArgSize = Args.size();
3475 // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3476 // the argument and parameter match or the objc method is parameterized.
3477 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3478 getContext().hasSameUnqualifiedType((*Arg)->getType(),
3479 ArgTypes[Idx]) ||
3480 (isa<ObjCMethodDecl>(AC.getDecl()) &&
3481 isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3482 "Argument and parameter types don't match");
3483 EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3484 // In particular, we depend on it being the last arg in Args, and the
3485 // objectsize bits depend on there only being one arg if !LeftToRight.
3486 assert(InitialArgSize + 1 == Args.size() &&
3487 "The code below depends on only adding one arg per EmitCallArg");
3488 (void)InitialArgSize;
3489 // Since pointer arguments are never emitted as l-values, it is safe to
3490 // emit the non-null argument check for r-values only.
3491 if (!Args.back().hasLValue()) {
3492 RValue RVArg = Args.back().getKnownRValue();
3493 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3494 ParamsToSkip + Idx);
3495 // @llvm.objectsize should never have side-effects and shouldn't need
3496 // destruction/cleanups, so we can safely "emit" it after its arg,
3497 // regardless of right-to-leftness.
3498 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3499 }
3500 }
3501
3502 if (!LeftToRight) {
3503 // Un-reverse the arguments we just evaluated so they match up with the LLVM
3504 // IR function.
3505 std::reverse(Args.begin() + CallArgsStart, Args.end());
3506 }
3507}
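// Evaluation-order sketch (illustrative) for the MS C++ ABI rule above:
//   g(makeA(), makeB());
// With LeftToRight == false, makeB() is emitted first and makeA() second;
// the std::reverse above then restores source order in Args so the entries
// line up with the LLVM IR argument list.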
3508
3509namespace {
3510
3511struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3512 DestroyUnpassedArg(Address Addr, QualType Ty)
3513 : Addr(Addr), Ty(Ty) {}
3514
3515 Address Addr;
3516 QualType Ty;
3517
3518 void Emit(CodeGenFunction &CGF, Flags flags) override {
3519 QualType::DestructionKind DtorKind = Ty.isDestructedType();
3520 if (DtorKind == QualType::DK_cxx_destructor) {
3521 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3522 assert(!Dtor->isTrivial());
3523 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3524 /*Delegating=*/false, Addr);
3525 } else {
3526 CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3527 }
3528 }
3529};
3530
3531struct DisableDebugLocationUpdates {
3532 CodeGenFunction &CGF;
3533 bool disabledDebugInfo;
3534 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3535 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3536 CGF.disableDebugInfo();
3537 }
3538 ~DisableDebugLocationUpdates() {
3539 if (disabledDebugInfo)
3540 CGF.enableDebugInfo();
3541 }
3542};
3543
3544} // end anonymous namespace
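For context on DisableDebugLocationUpdates above: when a call site omits a
defaulted argument, the CXXDefaultArgExpr is evaluated at the call site, and
debug locations are suppressed while it is emitted, presumably so the emitted
instructions are not attributed to the header line that spelled the default.
A minimal illustration (Widget and f are hypothetical names, not from this
file):

  struct Widget { Widget(); };  // nontrivial default constructor
  void f(Widget w = Widget());  // the default argument is a CXXDefaultArgExpr
  void caller() {
    f(); // emitting Widget() here runs under DisableDebugLocationUpdates
  }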
3545
3546RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3547 if (!HasLV)
3548 return RV;
3549 LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3550 CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3551 LV.isVolatile());
3552 IsUsed = true;
3553 return RValue::getAggregate(Copy.getAddress());
3554}
3555
3556void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3557 LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3558 if (!HasLV && RV.isScalar())
3559 CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*init=*/true);
3560 else if (!HasLV && RV.isComplex())
3561 CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3562 else {
3563 auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
3564 LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3565 // We assume that call args are never copied into subobjects.
3566 CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3567 HasLV ? LV.isVolatileQualified()
3568 : RV.isVolatileQualified());
3569 }
3570 IsUsed = true;
3571}
3572
3573void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3574 QualType type) {
3575 DisableDebugLocationUpdates Dis(*this, E);
3576 if (const ObjCIndirectCopyRestoreExpr *CRE
3577 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3578 assert(getLangOpts().ObjCAutoRefCount);
3579 return emitWritebackArg(*this, args, CRE);
3580 }
3581
3582 assert(type->isReferenceType() == E->isGLValue() &&
3583 "reference binding to unmaterialized r-value!");
3584
3585 if (E->isGLValue()) {
3586 assert(E->getObjectKind() == OK_Ordinary);
3587 return args.add(EmitReferenceBindingToExpr(E), type);
3588 }
3589
3590 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3591
3592 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3593 // However, we still have to push an EH-only cleanup in case we unwind before
3594 // we make it to the call.
3595 if (HasAggregateEvalKind &&
3596 type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
3597 // If we're using inalloca, use the argument memory. Otherwise, use a
3598 // temporary.
3599 AggValueSlot Slot;
3600 if (args.isUsingInAlloca())
3601 Slot = createPlaceholderSlot(*this, type);
3602 else
3603 Slot = CreateAggTemp(type, "agg.tmp");
3604
3605 bool DestroyedInCallee = true, NeedsEHCleanup = true;
3606 if (const auto *RD = type->getAsCXXRecordDecl())
3607 DestroyedInCallee = RD->hasNonTrivialDestructor();
3608 else
3609 NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3610
3611 if (DestroyedInCallee)
3612 Slot.setExternallyDestructed();
3613
3614 EmitAggExpr(E, Slot);
3615 RValue RV = Slot.asRValue();
3616 args.add(RV, type);
3617
3618 if (DestroyedInCallee && NeedsEHCleanup) {
3619 // Create a no-op GEP between the placeholder and the cleanup so we can
3620 // RAUW it successfully. It also serves as a marker of the first
3621 // instruction where the cleanup is active.
3622 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3623 type);
3624 // This unreachable is a temporary marker which will be removed later.
3625 llvm::Instruction *IsActive = Builder.CreateUnreachable();
3626 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3627 }
3628 return;
3629 }
3630
3631 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3632 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3633 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3634 assert(L.isSimple());
3635 args.addUncopiedAggregate(L, type);
3636 return;
3637 }
3638
3639 args.add(EmitAnyExprToTemp(E), type);
3640}
3641
3642QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3643 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3644 // implicitly widens null pointer constants that are arguments to varargs
3645 // functions to pointer-sized ints.
3646 if (!getTarget().getTriple().isOSWindows())
3647 return Arg->getType();
3648
3649 if (Arg->getType()->isIntegerType() &&
3650 getContext().getTypeSize(Arg->getType()) <
3651 getContext().getTargetInfo().getPointerWidth(0) &&
3652 Arg->isNullPointerConstant(getContext(),
3653 Expr::NPC_ValueDependentIsNotNull)) {
3654 return getContext().getIntPtrType();
3655 }
3656
3657 return Arg->getType();
3658}
3659
3660// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3661// optimizer it can aggressively ignore unwind edges.
3662void
3663CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3664 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3665 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3666 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3667 CGM.getNoObjCARCExceptionsMetadata());
3668}
3669
3670/// Emits a call to the given no-arguments nounwind runtime function.
3671llvm::CallInst *
3672CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3673 const llvm::Twine &name) {
3674 return EmitNounwindRuntimeCall(callee, None, name);
3675}
3676
3677/// Emits a call to the given nounwind runtime function.
3678llvm::CallInst *
3679CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3680 ArrayRef<llvm::Value*> args,
3681 const llvm::Twine &name) {
3682 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3683 call->setDoesNotThrow();
3684 return call;
3685}
3686
3687/// Emits a simple call (never an invoke) to the given no-arguments
3688/// runtime function.
3689llvm::CallInst *
3690CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3691 const llvm::Twine &name) {
3692 return EmitRuntimeCall(callee, None, name);
3693}
3694
3695// Calls which may throw must have operand bundles indicating which funclet
3696// they are nested within.
3697SmallVector<llvm::OperandBundleDef, 1>
3698CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
3699 SmallVector<llvm::OperandBundleDef, 1> BundleList;
3700 // There is no need for a funclet operand bundle if we aren't inside a
3701 // funclet.
3702 if (!CurrentFuncletPad)
3703 return BundleList;
3704
3705 // Skip intrinsics which cannot throw.
3706 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3707 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3708 return BundleList;
3709
3710 BundleList.emplace_back("funclet", CurrentFuncletPad);
3711 return BundleList;
3712}
3713
3714/// Emits a simple call (never an invoke) to the given runtime function.
3715llvm::CallInst *
3716CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3717 ArrayRef<llvm::Value*> args,
3718 const llvm::Twine &name) {
3719 llvm::CallInst *call =
3720 Builder.CreateCall(callee, args, getBundlesForFunclet(callee), name);
3721 call->setCallingConv(getRuntimeCC());
3722 return call;
3723}
3724
3725/// Emits a call or invoke to the given noreturn runtime function.
3726void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3727 ArrayRef<llvm::Value*> args) {
3728 SmallVector<llvm::OperandBundleDef, 1> BundleList =
3729 getBundlesForFunclet(callee);
3730
3731 if (getInvokeDest()) {
3732 llvm::InvokeInst *invoke =
3733 Builder.CreateInvoke(callee,
3734 getUnreachableBlock(),
3735 getInvokeDest(),
3736 args,
3737 BundleList);
3738 invoke->setDoesNotReturn();
3739 invoke->setCallingConv(getRuntimeCC());
3740 } else {
3741 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3742 call->setDoesNotReturn();
3743 call->setCallingConv(getRuntimeCC());
3744 Builder.CreateUnreachable();
3745 }
3746}
3747
3748/// Emits a call or invoke instruction to the given nullary runtime function.
3749llvm::CallSite
3750CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3751 const Twine &name) {
3752 return EmitRuntimeCallOrInvoke(callee, None, name);
3753}
3754
3755/// Emits a call or invoke instruction to the given runtime function.
3756llvm::CallSite
3757CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3758 ArrayRef<llvm::Value*> args,
3759 const Twine &name) {
3760 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3761 callSite.setCallingConv(getRuntimeCC());
3762 return callSite;
3763}
3764
3765/// Emits a call or invoke instruction to the given function, depending
3766/// on the current state of the EH stack.
3767llvm::CallSite
3768CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3769 ArrayRef<llvm::Value *> Args,
3770 const Twine &Name) {
3771 llvm::BasicBlock *InvokeDest = getInvokeDest();
3772 SmallVector<llvm::OperandBundleDef, 1> BundleList =
3773 getBundlesForFunclet(Callee);
3774
3775 llvm::Instruction *Inst;
3776 if (!InvokeDest)
3777 Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3778 else {
3779 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3780 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3781 Name);
3782 EmitBlock(ContBB);
3783 }
3784
3785 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3786 // optimizer it can aggressively ignore unwind edges.
3787 if (CGM.getLangOpts().ObjCAutoRefCount)
3788 AddObjCARCExceptionMetadata(Inst);
3789
3790 return llvm::CallSite(Inst);
3791}
3792
3793void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3794 llvm::Value *New) {
3795 DeferredReplacements.push_back(std::make_pair(Old, New));
3796}
3797
3798RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3799 const CGCallee &Callee,
3800 ReturnValueSlot ReturnValue,
3801 const CallArgList &CallArgs,
3802 llvm::Instruction **callOrInvoke,
3803 SourceLocation Loc) {
3804 // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3805
3806 assert(Callee.isOrdinary() || Callee.isVirtual());
1: '?' condition is true
3807
3808 // Handle struct-return functions by passing a pointer to the
3809 // location that we would like to return into.
3810 QualType RetTy = CallInfo.getReturnType();
3811 const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3812
3813 llvm::FunctionType *IRFuncTy = Callee.getFunctionType();
3814
3815 // 1. Set up the arguments.
3816
3817 // If we're using inalloca, insert the allocation after the stack save.
3818 // FIXME: Do this earlier rather than hacking it in here!
3819 Address ArgMemory = Address::invalid();
3820 const llvm::StructLayout *ArgMemoryLayout = nullptr;
2: 'ArgMemoryLayout' initialized to a null pointer value
3821 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3: Assuming 'ArgStruct' is null
4: Taking false branch
3822 const llvm::DataLayout &DL = CGM.getDataLayout();
3823 ArgMemoryLayout = DL.getStructLayout(ArgStruct);
3824 llvm::Instruction *IP = CallArgs.getStackBase();
3825 llvm::AllocaInst *AI;
3826 if (IP) {
3827 IP = IP->getNextNode();
3828 AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3829 "argmem", IP);
3830 } else {
3831 AI = CreateTempAlloca(ArgStruct, "argmem");
3832 }
3833 auto Align = CallInfo.getArgStructAlignment();
3834 AI->setAlignment(Align.getQuantity());
3835 AI->setUsedWithInAlloca(true);
3836 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3837 ArgMemory = Address(AI, Align);
3838 }
3839
3840 // Helper function to drill into the inalloca allocation.
3841 auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3842 auto FieldOffset =
3843 CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
18: Called C++ object pointer is null
3844 return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3845 };
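This lambda is the sink of the reported path: steps 2-4 assume
CallInfo.getArgStruct() is null, which leaves ArgMemoryLayout null, yet the
ABIArgInfo::InAlloca argument case (steps 11-18) still invokes the lambda,
which dereferences ArgMemoryLayout at line 3843. In a well-formed module an
inalloca argument should imply a non-null argument struct, so the path is
likely infeasible in practice, but that invariant is invisible to the
analyzer. A minimal defensive sketch that makes the invariant local and
explicit (one possible rewrite, not necessarily the upstream fix):

  auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
    llvm::StructType *ArgStruct = CallInfo.getArgStruct();
    assert(ArgStruct && "inalloca field access requires an argument struct");
    const llvm::StructLayout *Layout =
        CGM.getDataLayout().getStructLayout(ArgStruct);
    auto FieldOffset =
        CharUnits::fromQuantity(Layout->getElementOffset(FieldIndex));
    return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
  };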
3846
3847 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3848 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3849
3850 // If the call returns a temporary with struct return, create a temporary
3851 // alloca to hold the result, unless one is given to us.
3852 Address SRetPtr = Address::invalid();
3853 Address SRetAlloca = Address::invalid();
3854 llvm::Value *UnusedReturnSizePtr = nullptr;
3855 if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
5: Taking false branch
3856 if (!ReturnValue.isNull()) {
3857 SRetPtr = ReturnValue.getValue();
3858 } else {
3859 SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
3860 if (HaveInsertPoint() && ReturnValue.isUnused()) {
3861 uint64_t size =
3862 CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3863 UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
3864 }
3865 }
3866 if (IRFunctionArgs.hasSRetArg()) {
3867 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3868 } else if (RetAI.isInAlloca()) {
3869 Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3870 Builder.CreateStore(SRetPtr.getPointer(), Addr);
3871 }
3872 }
3873
3874 Address swiftErrorTemp = Address::invalid();
3875 Address swiftErrorArg = Address::invalid();
3876
3877 // Translate all of the arguments as necessary to match the IR lowering.
3878 assert(CallInfo.arg_size() == CallArgs.size() &&
3879 "Mismatch between function signature & arguments.");
6: Assuming the condition is true
7: '?' condition is true
3880 unsigned ArgNo = 0;
3881 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3882 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
9: Loop condition is true. Entering loop body
3883 I != E; ++I, ++info_it, ++ArgNo) {
8: Assuming 'I' is not equal to 'E'
3884 const ABIArgInfo &ArgInfo = info_it->info;
3885
3886 // Insert a padding argument to ensure proper alignment.
3887 if (IRFunctionArgs.hasPaddingArg(ArgNo))
10: Taking false branch
3888 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3889 llvm::UndefValue::get(ArgInfo.getPaddingType());
3890
3891 unsigned FirstIRArg, NumIRArgs;
3892 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3893
3894 switch (ArgInfo.getKind()) {
11: Control jumps to 'case InAlloca:' at line 3895
3895 case ABIArgInfo::InAlloca: {
3896 assert(NumIRArgs == 0);
12: Assuming 'NumIRArgs' is equal to 0
13: '?' condition is true
3897 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
14: Assuming the condition is true
15: '?' condition is true
3898 if (I->isAggregate()) {
16: Taking false branch
3899 // Replace the placeholder with the appropriate argument slot GEP.
3900 Address Addr = I->hasLValue()
3901 ? I->getKnownLValue().getAddress()
3902 : I->getKnownRValue().getAggregateAddress();
3903 llvm::Instruction *Placeholder =
3904 cast<llvm::Instruction>(Addr.getPointer());
3905 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3906 Builder.SetInsertPoint(Placeholder);
3907 Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3908 Builder.restoreIP(IP);
3909 deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3910 } else {
3911 // Store the RValue into the argument struct.
3912 Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
17: Calling 'operator()'
3913 unsigned AS = Addr.getType()->getPointerAddressSpace();
3914 llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3915 // There are some cases where a trivial bitcast is not avoidable. The
3916 // definition of a type later in a translation unit may change its type
3917 // from {}* to (%struct.foo*)*.
3918 if (Addr.getType() != MemType)
3919 Addr = Builder.CreateBitCast(Addr, MemType);
3920 I->copyInto(*this, Addr);
3921 }
3922 break;
3923 }
3924
3925 case ABIArgInfo::Indirect: {
3926 assert(NumIRArgs == 1);
3927 if (!I->isAggregate()) {
3928 // Make a temporary alloca to pass the argument.
3929 Address Addr = CreateMemTempWithoutCast(
3930 I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
3931 IRCallArgs[FirstIRArg] = Addr.getPointer();
3932
3933 I->copyInto(*this, Addr);
3934 } else {
3935 // We want to avoid creating an unnecessary temporary+copy here;
3936 // however, we need one in three cases:
3937 // 1. If the argument is not byval, and we are required to copy the
3938 // source. (This case doesn't occur on any common architecture.)
3939 // 2. If the argument is byval, RV is not sufficiently aligned, and
3940 // we cannot force it to be sufficiently aligned.
3941 // 3. If the argument is byval, but RV is not located in default
3942 // or alloca address space.
3943 Address Addr = I->hasLValue()
3944 ? I->getKnownLValue().getAddress()
3945 : I->getKnownRValue().getAggregateAddress();
3946 llvm::Value *V = Addr.getPointer();
3947 CharUnits Align = ArgInfo.getIndirectAlign();
3948 const llvm::DataLayout *TD = &CGM.getDataLayout();
3949
3950 assert((FirstIRArg >= IRFuncTy->getNumParams() ||
3951 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
3952 TD->getAllocaAddrSpace()) &&
3953 "indirect argument must be in alloca address space");
3954
3955 bool NeedCopy = false;
3956
3957 if (Addr.getAlignment() < Align &&
3958 llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
3959 Align.getQuantity()) {
3960 NeedCopy = true;
3961 } else if (I->hasLValue()) {
3962 auto LV = I->getKnownLValue();
3963 auto AS = LV.getAddressSpace();
3964
3965 if ((!ArgInfo.getIndirectByVal() &&
3966 (LV.getAlignment() >=
3967 getContext().getTypeAlignInChars(I->Ty)))) {
3968 NeedCopy = true;
3969 }
3970 if (!getLangOpts().OpenCL) {
3971 if ((ArgInfo.getIndirectByVal() &&
3972 (AS != LangAS::Default &&
3973 AS != CGM.getASTAllocaAddressSpace()))) {
3974 NeedCopy = true;
3975 }
3976 }
3977 // For OpenCL, even if RV is located in the default or alloca address
3978 // space, we don't want to perform an address space cast for it.
3979 else if ((ArgInfo.getIndirectByVal() &&
3980 Addr.getType()->getAddressSpace() != IRFuncTy->
3981 getParamType(FirstIRArg)->getPointerAddressSpace())) {
3982 NeedCopy = true;
3983 }
3984 }
3985
3986 if (NeedCopy) {
3987 // Create an aligned temporary, and copy to it.
3988 Address AI = CreateMemTempWithoutCast(
3989 I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
3990 IRCallArgs[FirstIRArg] = AI.getPointer();
3991 I->copyInto(*this, AI);
3992 } else {
3993 // Skip the extra memcpy call.
3994 auto *T = V->getType()->getPointerElementType()->getPointerTo(
3995 CGM.getDataLayout().getAllocaAddrSpace());
3996 IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
3997 *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
3998 true);
3999 }
4000 }
4001 break;
4002 }
4003
4004 case ABIArgInfo::Ignore:
4005 assert(NumIRArgs == 0);
4006 break;
4007
4008 case ABIArgInfo::Extend:
4009 case ABIArgInfo::Direct: {
4010 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
4011 ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
4012 ArgInfo.getDirectOffset() == 0) {
4013 assert(NumIRArgs == 1);
4014 llvm::Value *V;
4015 if (!I->isAggregate())
4016 V = I->getKnownRValue().getScalarVal();
4017 else
4018 V = Builder.CreateLoad(
4019 I->hasLValue() ? I->getKnownLValue().getAddress()
4020 : I->getKnownRValue().getAggregateAddress());
4021
4022 // Implement swifterror by copying into a new swifterror argument.
4023 // We'll write back in the normal path out of the call.
4024 if (CallInfo.getExtParameterInfo(ArgNo).getABI()
4025 == ParameterABI::SwiftErrorResult) {
4026 assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4027
4028 QualType pointeeTy = I->Ty->getPointeeType();
4029 swiftErrorArg =
4030 Address(V, getContext().getTypeAlignInChars(pointeeTy));
4031
4032 swiftErrorTemp =
4033 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4034 V = swiftErrorTemp.getPointer();
4035 cast<llvm::AllocaInst>(V)->setSwiftError(true);
4036
4037 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4038 Builder.CreateStore(errorValue, swiftErrorTemp);
4039 }
4040
4041 // We might have to widen integers, but we should never truncate.
4042 if (ArgInfo.getCoerceToType() != V->getType() &&
4043 V->getType()->isIntegerTy())
4044 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4045
4046 // If the argument doesn't match, perform a bitcast to coerce it. This
4047 // can happen due to trivial type mismatches.
4048 if (FirstIRArg < IRFuncTy->getNumParams() &&
4049 V->getType() != IRFuncTy->getParamType(FirstIRArg))
4050 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4051
4052 IRCallArgs[FirstIRArg] = V;
4053 break;
4054 }
4055
4056 // FIXME: Avoid the conversion through memory if possible.
4057 Address Src = Address::invalid();
4058 if (!I->isAggregate()) {
4059 Src = CreateMemTemp(I->Ty, "coerce");
4060 I->copyInto(*this, Src);
4061 } else {
4062 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
4063 : I->getKnownRValue().getAggregateAddress();
4064 }
4065
4066 // If the value is offset in memory, apply the offset now.
4067 Src = emitAddressAtOffset(*this, Src, ArgInfo);
4068
4069 // Fast-isel and the optimizer generally like scalar values better than
4070 // FCAs, so we flatten them if this is safe to do for this argument.
4071 llvm::StructType *STy =
4072 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4073 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4074 llvm::Type *SrcTy = Src.getType()->getElementType();
4075 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4076 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4077
4078 // If the source type is smaller than the destination type of the
4079 // coerce-to logic, copy the source value into a temp alloca the size
4080 // of the destination type to allow loading all of it. The bits past
4081 // the source value are left undef.
4082 if (SrcSize < DstSize) {
4083 Address TempAlloca
4084 = CreateTempAlloca(STy, Src.getAlignment(),
4085 Src.getName() + ".coerce");
4086 Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4087 Src = TempAlloca;
4088 } else {
4089 Src = Builder.CreateBitCast(Src,
4090 STy->getPointerTo(Src.getAddressSpace()));
4091 }
4092
4093 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
4094 assert(NumIRArgs == STy->getNumElements());
4095 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4096 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
4097 Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
4098 llvm::Value *LI = Builder.CreateLoad(EltPtr);
4099 IRCallArgs[FirstIRArg + i] = LI;
4100 }
4101 } else {
4102 // In the simple case, just pass the coerced loaded value.
4103 assert(NumIRArgs == 1);
4104 IRCallArgs[FirstIRArg] =
4105 CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4106 }
4107
4108 break;
4109 }
4110
4111 case ABIArgInfo::CoerceAndExpand: {
4112 auto coercionType = ArgInfo.getCoerceAndExpandType();
4113 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4114
4115 llvm::Value *tempSize = nullptr;
4116 Address addr = Address::invalid();
4117 Address AllocaAddr = Address::invalid();
4118 if (I->isAggregate()) {
4119 addr = I->hasLValue() ? I->getKnownLValue().getAddress()
4120 : I->getKnownRValue().getAggregateAddress();
4121
4122 } else {
4123 RValue RV = I->getKnownRValue();
4124 assert(RV.isScalar()); // complex should always just be direct
4125
4126 llvm::Type *scalarType = RV.getScalarVal()->getType();
4127 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4128 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4129
4130 // Materialize to a temporary.
4131 addr = CreateTempAlloca(RV.getScalarVal()->getType(),
4132 CharUnits::fromQuantity(std::max(
4133 layout->getAlignment(), scalarAlign)),
4134 "tmp",
4135 /*ArraySize=*/nullptr, &AllocaAddr);
4136 tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4137
4138 Builder.CreateStore(RV.getScalarVal(), addr);
4139 }
4140
4141 addr = Builder.CreateElementBitCast(addr, coercionType);
4142
4143 unsigned IRArgPos = FirstIRArg;
4144 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4145 llvm::Type *eltType = coercionType->getElementType(i);
4146 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4147 Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4148 llvm::Value *elt = Builder.CreateLoad(eltAddr);
4149 IRCallArgs[IRArgPos++] = elt;
4150 }
4151 assert(IRArgPos == FirstIRArg + NumIRArgs);
4152
4153 if (tempSize) {
4154 EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4155 }
4156
4157 break;
4158 }
4159
4160 case ABIArgInfo::Expand:
4161 unsigned IRArgPos = FirstIRArg;
4162 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4163 assert(IRArgPos == FirstIRArg + NumIRArgs);
4164 break;
4165 }
4166 }
4167
4168 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4169 llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4170
4171 // If we're using inalloca, set up that argument.
4172 if (ArgMemory.isValid()) {
4173 llvm::Value *Arg = ArgMemory.getPointer();
4174 if (CallInfo.isVariadic()) {
4175 // When passing non-POD arguments by value to variadic functions, we will
4176 // end up with a variadic prototype and an inalloca call site. In such
4177 // cases, we can't do any parameter mismatch checks. Give up and bitcast
4178 // the callee.
4179 unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4180 auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
4181 CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
4182 } else {
4183 llvm::Type *LastParamTy =
4184 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4185 if (Arg->getType() != LastParamTy) {
4186#ifndef NDEBUG
4187 // Assert that these structs have equivalent element types.
4188 llvm::StructType *FullTy = CallInfo.getArgStruct();
4189 llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4190 cast<llvm::PointerType>(LastParamTy)->getElementType());
4191 assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4192 for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4193 DE = DeclaredTy->element_end(),
4194 FI = FullTy->element_begin();
4195 DI != DE; ++DI, ++FI)
4196 assert(*DI == *FI);
4197#endif
4198 Arg = Builder.CreateBitCast(Arg, LastParamTy);
4199 }
4200 }
4201 assert(IRFunctionArgs.hasInallocaArg());
4202 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4203 }
4204
4205 // 2. Prepare the function pointer.
4206
4207 // If the callee is a bitcast of a non-variadic function to have a
4208 // variadic function pointer type, check to see if we can remove the
4209 // bitcast. This comes up with unprototyped functions.
4210 //
4211 // This makes the IR nicer, but more importantly it ensures that we
4212 // can inline the function at -O0 if it is marked always_inline.
4213 auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
4214 llvm::FunctionType *CalleeFT =
4215 cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
4216 if (!CalleeFT->isVarArg())
4217 return Ptr;
4218
4219 llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
4220 if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
4221 return Ptr;
4222
4223 llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
4224 if (!OrigFn)
4225 return Ptr;
4226
4227 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4228
4229 // If the original type is variadic, or if any of the component types
4230 // disagree, we cannot remove the cast.
4231 if (OrigFT->isVarArg() ||
4232 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4233 OrigFT->getReturnType() != CalleeFT->getReturnType())
4234 return Ptr;
4235
4236 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4237 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4238 return Ptr;
4239
4240 return OrigFn;
4241 };
4242 CalleePtr = simplifyVariadicCallee(CalleePtr);
4243
4244 // 3. Perform the actual call.
4245
4246 // Deactivate any cleanups that we're supposed to do immediately before
4247 // the call.
4248 if (!CallArgs.getCleanupsToDeactivate().empty())
4249 deactivateArgCleanupsBeforeCall(*this, CallArgs);
4250
4251 // Assert that the arguments we computed match up. The IR verifier
4252 // will catch this, but this is a common enough source of problems
4253 // during IRGen changes that it's way better for debugging to catch
4254 // it ourselves here.
4255#ifndef NDEBUG
4256 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4257 for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4258 // The inalloca argument can have a different type.
4259 if (IRFunctionArgs.hasInallocaArg() &&
4260 i == IRFunctionArgs.getInallocaArgNo())
4261 continue;
4262 if (i < IRFuncTy->getNumParams())
4263 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4264 }
4265#endif
4266
4267 // Update the largest vector width if any arguments have vector types.
4268 for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4269 if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
4270 LargestVectorWidth = std::max(LargestVectorWidth,
4271 VT->getPrimitiveSizeInBits());
4272 }
4273
4274 // Compute the calling convention and attributes.
4275 unsigned CallingConv;
4276 llvm::AttributeList Attrs;
4277 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4278 Callee.getAbstractInfo(), Attrs, CallingConv,
4279 /*AttrOnCallSite=*/true);
4280
4281 // Apply some call-site-specific attributes.
4282 // TODO: work this into building the attribute set.
4283
4284 // Apply always_inline to all calls within flatten functions.
4285 // FIXME: should this really take priority over __try, below?
4286 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4287 !(Callee.getAbstractInfo().getCalleeDecl().getDecl() &&
4288 Callee.getAbstractInfo()
4289 .getCalleeDecl()
4290 .getDecl()
4291 ->hasAttr<NoInlineAttr>())) {
4292 Attrs =
4293 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4294 llvm::Attribute::AlwaysInline);
4295 }
4296
4297 // Disable inlining inside SEH __try blocks.
4298 if (isSEHTryScope()) {
4299 Attrs =
4300 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4301 llvm::Attribute::NoInline);
4302 }
4303
4304 // Decide whether to use a call or an invoke.
4305 bool CannotThrow;
4306 if (currentFunctionUsesSEHTry()) {
4307 // SEH cares about asynchronous exceptions, so everything can "throw."
4308 CannotThrow = false;
4309 } else if (isCleanupPadScope() &&
4310 EHPersonality::get(*this).isMSVCXXPersonality()) {
4311 // The MSVC++ personality will implicitly terminate the program if an
4312 // exception is thrown during a cleanup outside of a try/catch.
4313 // We don't need to model anything in IR to get this behavior.
4314 CannotThrow = true;
4315 } else {
4316 // Otherwise, nounwind call sites will never throw.
4317 CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4318 llvm::Attribute::NoUnwind);
4319 }
4320
4321 // If we made a temporary, be sure to clean up after ourselves. Note that we
4322 // can't depend on being inside of an ExprWithCleanups, so we need to manually
4323 // pop this cleanup later on. Being eager about this is OK, since this
4324 // temporary is 'invisible' outside of the callee.
4325 if (UnusedReturnSizePtr)
4326 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
4327 UnusedReturnSizePtr);
4328
4329 llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4330
4331 SmallVector<llvm::OperandBundleDef, 1> BundleList =
4332 getBundlesForFunclet(CalleePtr);
4333
4334 // Emit the actual call/invoke instruction.
4335 llvm::CallSite CS;
4336 if (!InvokeDest) {
4337 CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
4338 } else {
4339 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4340 CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
4341 BundleList);
4342 EmitBlock(Cont);
4343 }
4344 llvm::Instruction *CI = CS.getInstruction();
4345 if (callOrInvoke)
4346 *callOrInvoke = CI;
4347
4348 // Apply the attributes and calling convention.
4349 CS.setAttributes(Attrs);
4350 CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4351
4352 // Apply various metadata.
4353
4354 if (!CI->getType()->isVoidTy())
4355 CI->setName("call");
4356
4357 // Update largest vector width from the return type.
4358 if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
4359 LargestVectorWidth = std::max(LargestVectorWidth,
4360 VT->getPrimitiveSizeInBits());
4361
4362 // Insert instrumentation or attach profile metadata at indirect call sites.
4363 // For more details, see the comment before the definition of
4364 // IPVK_IndirectCallTarget in InstrProfData.inc.
4365 if (!CS.getCalledFunction())
4366 PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4367 CI, CalleePtr);
4368
4369 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4370 // optimizer it can aggressively ignore unwind edges.
4371 if (CGM.getLangOpts().ObjCAutoRefCount)
4372 AddObjCARCExceptionMetadata(CI);
4373
4374 // Suppress tail calls if requested.
4375 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4376 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4377 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4378 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4379 }
4380
4381 // 4. Finish the call.
4382
4383 // If the call doesn't return, finish the basic block and clear the
4384 // insertion point; this allows the rest of IRGen to discard
4385 // unreachable code.
4386 if (CS.doesNotReturn()) {
4387 if (UnusedReturnSizePtr)
4388 PopCleanupBlock();
4389
4390 // Strip away the noreturn attribute to better diagnose unreachable UB.
4391 if (SanOpts.has(SanitizerKind::Unreachable)) {
4392 if (auto *F = CS.getCalledFunction())
4393 F->removeFnAttr(llvm::Attribute::NoReturn);
4394 CS.removeAttribute(llvm::AttributeList::FunctionIndex,
4395 llvm::Attribute::NoReturn);
4396 }
4397
4398 EmitUnreachable(Loc);
4399 Builder.ClearInsertionPoint();
4400
4401 // FIXME: For now, emit a dummy basic block because expr emitters in
4402 // general are not ready to handle emitting expressions at unreachable
4403 // points.
4404 EnsureInsertPoint();
4405
4406 // Return a reasonable RValue.
4407 return GetUndefRValue(RetTy);
4408 }
4409
4410 // Perform the swifterror writeback.
4411 if (swiftErrorTemp.isValid()) {
4412 llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4413 Builder.CreateStore(errorResult, swiftErrorArg);
4414 }
4415
4416 // Emit any call-associated writebacks immediately. Arguably this
4417 // should happen after any return-value munging.
4418 if (CallArgs.hasWritebacks())
4419 emitWritebacks(*this, CallArgs);
4420
4421 // The stack cleanup for inalloca arguments has to run out of the normal
4422 // lexical order, so deactivate it and run it manually here.
4423 CallArgs.freeArgumentMemory(*this);
4424
4425 // Extract the return value.
4426 RValue Ret = [&] {
4427 switch (RetAI.getKind()) {
4428 case ABIArgInfo::CoerceAndExpand: {
4429 auto coercionType = RetAI.getCoerceAndExpandType();
4430 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4431
4432 Address addr = SRetPtr;
4433 addr = Builder.CreateElementBitCast(addr, coercionType);
4434
4435 assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4436 bool requiresExtract = isa<llvm::StructType>(CI->getType());
4437
4438 unsigned unpaddedIndex = 0;
4439 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4440 llvm::Type *eltType = coercionType->getElementType(i);
4441 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4442 Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4443 llvm::Value *elt = CI;
4444 if (requiresExtract)
4445 elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4446 else
4447 assert(unpaddedIndex == 0);
4448 Builder.CreateStore(elt, eltAddr);
4449 }
4450 // FALLTHROUGH
4451 LLVM_FALLTHROUGH;
4452 }
4453
4454 case ABIArgInfo::InAlloca:
4455 case ABIArgInfo::Indirect: {
4456 RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4457 if (UnusedReturnSizePtr)
4458 PopCleanupBlock();
4459 return ret;
4460 }
4461
4462 case ABIArgInfo::Ignore:
4463 // If we are ignoring an argument that had a result, make sure to
4464 // construct the appropriate return value for our caller.
4465 return GetUndefRValue(RetTy);
4466
4467 case ABIArgInfo::Extend:
4468 case ABIArgInfo::Direct: {
4469 llvm::Type *RetIRTy = ConvertType(RetTy);
4470 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4471 switch (getEvaluationKind(RetTy)) {
4472 case TEK_Complex: {
4473 llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4474 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4475 return RValue::getComplex(std::make_pair(Real, Imag));
4476 }
4477 case TEK_Aggregate: {
4478 Address DestPtr = ReturnValue.getValue();
4479 bool DestIsVolatile = ReturnValue.isVolatile();
4480
4481 if (!DestPtr.isValid()) {
4482 DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4483 DestIsVolatile = false;
4484 }
4485 BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4486 return RValue::getAggregate(DestPtr);
4487 }
4488 case TEK_Scalar: {
4489 // If the argument doesn't match, perform a bitcast to coerce it. This
4490 // can happen due to trivial type mismatches.
4491 llvm::Value *V = CI;
4492 if (V->getType() != RetIRTy)
4493 V = Builder.CreateBitCast(V, RetIRTy);
4494 return RValue::get(V);
4495 }
4496 }
4497 llvm_unreachable("bad evaluation kind")::llvm::llvm_unreachable_internal("bad evaluation kind", "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGCall.cpp"
, 4497)
;
4498 }
4499
4500 Address DestPtr = ReturnValue.getValue();
4501 bool DestIsVolatile = ReturnValue.isVolatile();
4502
4503 if (!DestPtr.isValid()) {
4504 DestPtr = CreateMemTemp(RetTy, "coerce");
4505 DestIsVolatile = false;
4506 }
4507
4508 // If the value is offset in memory, apply the offset now.
4509 Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4510 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4511
4512 return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4513 }
4514
4515 case ABIArgInfo::Expand:
4516 llvm_unreachable("Invalid ABI kind for return argument")::llvm::llvm_unreachable_internal("Invalid ABI kind for return argument"
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGCall.cpp"
, 4516)
;
4517 }
4518
4519 llvm_unreachable("Unhandled ABIArgInfo::Kind")::llvm::llvm_unreachable_internal("Unhandled ABIArgInfo::Kind"
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGCall.cpp"
, 4519)
;
4520 } ();
4521
4522 // Emit the assume_aligned check on the return value.
4523 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4524 if (Ret.isScalar() && TargetDecl) {
4525 if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4526 llvm::Value *OffsetValue = nullptr;
4527 if (const auto *Offset = AA->getOffset())
4528 OffsetValue = EmitScalarExpr(Offset);
4529
4530 llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4531 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4532 EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
4533 OffsetValue);
4534 } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4535 llvm::Value *ParamVal =
4536 CallArgs[AA->getParamIndex().getLLVMIndex()].getRValue(
4537 *this).getScalarVal();
4538 EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal);
4539 }
4540 }
4541
4542 return Ret;
4543}
4544
4545CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
4546 if (isVirtual()) {
4547 const CallExpr *CE = getVirtualCallExpr();
4548 return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
4549 CGF, getVirtualMethodDecl(), getThisAddress(), getFunctionType(),
4550 CE ? CE->getBeginLoc() : SourceLocation());
4551 }
4552
4553 return *this;
4554}
4555
4556/* VarArg handling */
4557
4558Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4559 VAListAddr = VE->isMicrosoftABI()
4560 ? EmitMSVAListRef(VE->getSubExpr())
4561 : EmitVAListRef(VE->getSubExpr());
4562 QualType Ty = VE->getType();
4563 if (VE->isMicrosoftABI())
4564 return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4565 return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4566}