Bug Summary

File: tools/clang/lib/CodeGen/CGCall.cpp
Warning: line 3818, column 31
Called C++ object pointer is null
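
This warning is produced when the analyzer finds a feasible execution path on which a non-static member function is called through a null object pointer. A minimal, hypothetical sketch of the pattern being reported (illustrative code, not taken from CGCall.cpp):

    struct S { void f(); };

    void test(S *p) {
      if (!p) {
        // the null case is handled here, but control falls through
      }
      p->f(); // on the path where p was null: "Called C++ object pointer is null"
    }

The analyzer command line and the annotated source of the file containing the flagged call (line 3818) follow.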

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGCall.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -relaxed-aliasing -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn337204/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-7~svn337204/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-7~svn337204/tools/clang/include -I /build/llvm-toolchain-snapshot-7~svn337204/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-7~svn337204/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn337204/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn337204/build-llvm/tools/clang/lib/CodeGen -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fno-common -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-07-17-043059-5239-1 -x c++ /build/llvm-toolchain-snapshot-7~svn337204/tools/clang/lib/CodeGen/CGCall.cpp
1//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// These classes wrap the information about a call or function
11// definition used to handle ABI compliancy.
12//
13//===----------------------------------------------------------------------===//
14
15#include "CGCall.h"
16#include "ABIInfo.h"
17#include "CGBlocks.h"
18#include "CGCXXABI.h"
19#include "CGCleanup.h"
20#include "CodeGenFunction.h"
21#include "CodeGenModule.h"
22#include "TargetInfo.h"
23#include "clang/AST/Decl.h"
24#include "clang/AST/DeclCXX.h"
25#include "clang/AST/DeclObjC.h"
26#include "clang/Basic/TargetBuiltins.h"
27#include "clang/Basic/TargetInfo.h"
28#include "clang/CodeGen/CGFunctionInfo.h"
29#include "clang/CodeGen/SwiftCallingConv.h"
30#include "clang/Frontend/CodeGenOptions.h"
31#include "llvm/ADT/StringExtras.h"
32#include "llvm/Transforms/Utils/Local.h"
33#include "llvm/Analysis/ValueTracking.h"
34#include "llvm/IR/Attributes.h"
35#include "llvm/IR/CallSite.h"
36#include "llvm/IR/CallingConv.h"
37#include "llvm/IR/DataLayout.h"
38#include "llvm/IR/InlineAsm.h"
39#include "llvm/IR/IntrinsicInst.h"
40#include "llvm/IR/Intrinsics.h"
41using namespace clang;
42using namespace CodeGen;
43
44/***/
45
46unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
47 switch (CC) {
48 default: return llvm::CallingConv::C;
49 case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
50 case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
51 case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
52 case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
53 case CC_Win64: return llvm::CallingConv::Win64;
54 case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
55 case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
56 case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
57 case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
58 // TODO: Add support for __pascal to LLVM.
59 case CC_X86Pascal: return llvm::CallingConv::C;
60 // TODO: Add support for __vectorcall to LLVM.
61 case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
62 case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
63 case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
64 case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
65 case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
66 case CC_Swift: return llvm::CallingConv::Swift;
67 }
68}
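
As context for the switch above: a calling-convention attribute written in the source is what ultimately becomes the convention printed on the emitted IR declaration. A small illustrative sketch (example code and approximate IR, not part of this file):

    // C++ input (targeting 32-bit x86, where __fastcall is meaningful)
    void __attribute__((fastcall)) callee(int x);

    // Sema records CC_X86FastCall on the function type; the switch above maps it
    // to llvm::CallingConv::X86_FastCall, so the declaration is emitted roughly as:
    //   declare x86_fastcallcc void @callee(i32)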
69
70/// Derives the 'this' type for codegen purposes, i.e. ignoring method
71/// qualification.
72/// FIXME: address space qualification?
73static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
74 QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
75 return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
76}
77
78/// Returns the canonical formal type of the given C++ method.
79static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
80 return MD->getType()->getCanonicalTypeUnqualified()
81 .getAs<FunctionProtoType>();
82}
83
84/// Returns the "extra-canonicalized" return type, which discards
85/// qualifiers on the return type. Codegen doesn't care about them,
86/// and it makes ABI code a little easier to be able to assume that
87/// all parameter and return types are top-level unqualified.
88static CanQualType GetReturnType(QualType RetTy) {
89 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
90}
91
92/// Arrange the argument and result information for a value of the given
93/// unprototyped freestanding function type.
94const CGFunctionInfo &
95CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
96 // When translating an unprototyped function type, always use a
97 // variadic type.
98 return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
99 /*instanceMethod=*/false,
100 /*chainCall=*/false, None,
101 FTNP->getExtInfo(), {}, RequiredArgs(0));
102}
103
104static void addExtParameterInfosForCall(
105 llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
106 const FunctionProtoType *proto,
107 unsigned prefixArgs,
108 unsigned totalArgs) {
109 assert(proto->hasExtParameterInfos());
110 assert(paramInfos.size() <= prefixArgs);
111 assert(proto->getNumParams() + prefixArgs <= totalArgs);
112
113 paramInfos.reserve(totalArgs);
114
115 // Add default infos for any prefix args that don't already have infos.
116 paramInfos.resize(prefixArgs);
117
118 // Add infos for the prototype.
119 for (const auto &ParamInfo : proto->getExtParameterInfos()) {
120 paramInfos.push_back(ParamInfo);
121 // pass_object_size params have no parameter info.
122 if (ParamInfo.hasPassObjectSize())
123 paramInfos.emplace_back();
124 }
125
126 assert(paramInfos.size() <= totalArgs &&
127 "Did we forget to insert pass_object_size args?");
128 // Add default infos for the variadic and/or suffix arguments.
129 paramInfos.resize(totalArgs);
130}
131
132/// Adds the formal parameters in FPT to the given prefix. If any parameter in
133/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
134static void appendParameterTypes(const CodeGenTypes &CGT,
135 SmallVectorImpl<CanQualType> &prefix,
136 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
137 CanQual<FunctionProtoType> FPT) {
138 // Fast path: don't touch param info if we don't need to.
139 if (!FPT->hasExtParameterInfos()) {
140 assert(paramInfos.empty() &&
141 "We have paramInfos, but the prototype doesn't?");
142 prefix.append(FPT->param_type_begin(), FPT->param_type_end());
143 return;
144 }
145
146 unsigned PrefixSize = prefix.size();
147 // In the vast majority of cases, we'll have precisely FPT->getNumParams()
148 // parameters; the only thing that can change this is the presence of
149 // pass_object_size. So, we preallocate for the common case.
150 prefix.reserve(prefix.size() + FPT->getNumParams());
151
152 auto ExtInfos = FPT->getExtParameterInfos();
153 assert(ExtInfos.size() == FPT->getNumParams());
154 for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
155 prefix.push_back(FPT->getParamType(I));
156 if (ExtInfos[I].hasPassObjectSize())
157 prefix.push_back(CGT.getContext().getSizeType());
158 }
159
160 addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
161 prefix.size());
162}
163
164/// Arrange the LLVM function layout for a value of the given function
165/// type, on top of any implicit parameters already stored.
166static const CGFunctionInfo &
167arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
168 SmallVectorImpl<CanQualType> &prefix,
169 CanQual<FunctionProtoType> FTP,
170 const FunctionDecl *FD) {
171 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
172 RequiredArgs Required =
173 RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
174 // FIXME: Kill copy.
175 appendParameterTypes(CGT, prefix, paramInfos, FTP);
176 CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
177
178 return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
179 /*chainCall=*/false, prefix,
180 FTP->getExtInfo(), paramInfos,
181 Required);
182}
183
184/// Arrange the argument and result information for a value of the
185/// given freestanding function type.
186const CGFunctionInfo &
187CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
188 const FunctionDecl *FD) {
189 SmallVector<CanQualType, 16> argTypes;
190 return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
191 FTP, FD);
192}
193
194static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
195 // Set the appropriate calling convention for the Function.
196 if (D->hasAttr<StdCallAttr>())
197 return CC_X86StdCall;
198
199 if (D->hasAttr<FastCallAttr>())
200 return CC_X86FastCall;
201
202 if (D->hasAttr<RegCallAttr>())
203 return CC_X86RegCall;
204
205 if (D->hasAttr<ThisCallAttr>())
206 return CC_X86ThisCall;
207
208 if (D->hasAttr<VectorCallAttr>())
209 return CC_X86VectorCall;
210
211 if (D->hasAttr<PascalAttr>())
212 return CC_X86Pascal;
213
214 if (PcsAttr *PCS = D->getAttr<PcsAttr>())
215 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
216
217 if (D->hasAttr<IntelOclBiccAttr>())
218 return CC_IntelOclBicc;
219
220 if (D->hasAttr<MSABIAttr>())
221 return IsWindows ? CC_C : CC_Win64;
222
223 if (D->hasAttr<SysVABIAttr>())
224 return IsWindows ? CC_X86_64SysV : CC_C;
225
226 if (D->hasAttr<PreserveMostAttr>())
227 return CC_PreserveMost;
228
229 if (D->hasAttr<PreserveAllAttr>())
230 return CC_PreserveAll;
231
232 return CC_C;
233}
234
235/// Arrange the argument and result information for a call to an
236/// unknown C++ non-static member function of the given abstract type.
237/// (Zero value of RD means we don't have any meaningful "this" argument type,
238/// so fall back to a generic pointer type).
239/// The member function must be an ordinary function, i.e. not a
240/// constructor or destructor.
241const CGFunctionInfo &
242CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
243 const FunctionProtoType *FTP,
244 const CXXMethodDecl *MD) {
245 SmallVector<CanQualType, 16> argTypes;
246
247 // Add the 'this' pointer.
248 if (RD)
249 argTypes.push_back(GetThisType(Context, RD));
250 else
251 argTypes.push_back(Context.VoidPtrTy);
252
253 return ::arrangeLLVMFunctionInfo(
254 *this, true, argTypes,
255 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
256}
257
258/// Set calling convention for CUDA/HIP kernel.
259static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
260 const FunctionDecl *FD) {
261 if (FD->hasAttr<CUDAGlobalAttr>()) {
262 const FunctionType *FT = FTy->getAs<FunctionType>();
263 CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
264 FTy = FT->getCanonicalTypeUnqualified();
265 }
266}
267
268/// Arrange the argument and result information for a declaration or
269/// definition of the given C++ non-static member function. The
270/// member function must be an ordinary function, i.e. not a
271/// constructor or destructor.
272const CGFunctionInfo &
273CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
274 assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
275 assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
276
277 CanQualType FT = GetFormalType(MD).getAs<Type>();
278 setCUDAKernelCallingConvention(FT, CGM, MD);
279 auto prototype = FT.getAs<FunctionProtoType>();
280
281 if (MD->isInstance()) {
282 // The abstract case is perfectly fine.
283 const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
284 return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
285 }
286
287 return arrangeFreeFunctionType(prototype, MD);
288}
289
290bool CodeGenTypes::inheritingCtorHasParams(
291 const InheritedConstructor &Inherited, CXXCtorType Type) {
292 // Parameters are unnecessary if we're constructing a base class subobject
293 // and the inherited constructor lives in a virtual base.
294 return Type == Ctor_Complete ||
295 !Inherited.getShadowDecl()->constructsVirtualBase() ||
296 !Target.getCXXABI().hasConstructorVariants();
297 }
298
299const CGFunctionInfo &
300CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
301 StructorType Type) {
302
303 SmallVector<CanQualType, 16> argTypes;
304 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
305 argTypes.push_back(GetThisType(Context, MD->getParent()));
306
307 bool PassParams = true;
308
309 GlobalDecl GD;
310 if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
311 GD = GlobalDecl(CD, toCXXCtorType(Type));
312
313 // A base class inheriting constructor doesn't get forwarded arguments
314 // needed to construct a virtual base (or base class thereof).
315 if (auto Inherited = CD->getInheritedConstructor())
316 PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
317 } else {
318 auto *DD = dyn_cast<CXXDestructorDecl>(MD);
319 GD = GlobalDecl(DD, toCXXDtorType(Type));
320 }
321
322 CanQual<FunctionProtoType> FTP = GetFormalType(MD);
323
324 // Add the formal parameters.
325 if (PassParams)
326 appendParameterTypes(*this, argTypes, paramInfos, FTP);
327
328 CGCXXABI::AddedStructorArgs AddedArgs =
329 TheCXXABI.buildStructorSignature(MD, Type, argTypes);
330 if (!paramInfos.empty()) {
331 // Note: prefix implies after the first param.
332 if (AddedArgs.Prefix)
333 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
334 FunctionProtoType::ExtParameterInfo{});
335 if (AddedArgs.Suffix)
336 paramInfos.append(AddedArgs.Suffix,
337 FunctionProtoType::ExtParameterInfo{});
338 }
339
340 RequiredArgs required =
341 (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
342 : RequiredArgs::All);
343
344 FunctionType::ExtInfo extInfo = FTP->getExtInfo();
345 CanQualType resultType = TheCXXABI.HasThisReturn(GD)
346 ? argTypes.front()
347 : TheCXXABI.hasMostDerivedReturn(GD)
348 ? CGM.getContext().VoidPtrTy
349 : Context.VoidTy;
350 return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
351 /*chainCall=*/false, argTypes, extInfo,
352 paramInfos, required);
353}
354
355static SmallVector<CanQualType, 16>
356getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
357 SmallVector<CanQualType, 16> argTypes;
358 for (auto &arg : args)
359 argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
360 return argTypes;
361}
362
363static SmallVector<CanQualType, 16>
364getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
365 SmallVector<CanQualType, 16> argTypes;
366 for (auto &arg : args)
367 argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
368 return argTypes;
369}
370
371static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
372getExtParameterInfosForCall(const FunctionProtoType *proto,
373 unsigned prefixArgs, unsigned totalArgs) {
374 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
375 if (proto->hasExtParameterInfos()) {
376 addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
377 }
378 return result;
379}
380
381/// Arrange a call to a C++ method, passing the given arguments.
382///
383/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
384/// parameter.
385/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
386/// args.
387/// PassProtoArgs indicates whether `args` has args for the parameters in the
388/// given CXXConstructorDecl.
389const CGFunctionInfo &
390CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
391 const CXXConstructorDecl *D,
392 CXXCtorType CtorKind,
393 unsigned ExtraPrefixArgs,
394 unsigned ExtraSuffixArgs,
395 bool PassProtoArgs) {
396 // FIXME: Kill copy.
397 SmallVector<CanQualType, 16> ArgTypes;
398 for (const auto &Arg : args)
399 ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
400
401 // +1 for implicit this, which should always be args[0].
402 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
403
404 CanQual<FunctionProtoType> FPT = GetFormalType(D);
405 RequiredArgs Required =
406 RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
407 GlobalDecl GD(D, CtorKind);
408 CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
409 ? ArgTypes.front()
410 : TheCXXABI.hasMostDerivedReturn(GD)
411 ? CGM.getContext().VoidPtrTy
412 : Context.VoidTy;
413
414 FunctionType::ExtInfo Info = FPT->getExtInfo();
415 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
416 // If the prototype args are elided, we should only have ABI-specific args,
417 // which never have param info.
418 if (PassProtoArgs && FPT->hasExtParameterInfos()) {
419 // ABI-specific suffix arguments are treated the same as variadic arguments.
420 addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
421 ArgTypes.size());
422 }
423 return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
424 /*chainCall=*/false, ArgTypes, Info,
425 ParamInfos, Required);
426}
427
428/// Arrange the argument and result information for the declaration or
429/// definition of the given function.
430const CGFunctionInfo &
431CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
432 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
433 if (MD->isInstance())
434 return arrangeCXXMethodDeclaration(MD);
435
436 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
437
438 assert(isa<FunctionType>(FTy));
439 setCUDAKernelCallingConvention(FTy, CGM, FD);
440
441 // When declaring a function without a prototype, always use a
442 // non-variadic type.
443 if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
444 return arrangeLLVMFunctionInfo(
445 noProto->getReturnType(), /*instanceMethod=*/false,
446 /*chainCall=*/false, None, noProto->getExtInfo(), {},RequiredArgs::All);
447 }
448
449 return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>(), FD);
450}
451
452/// Arrange the argument and result information for the declaration or
453/// definition of an Objective-C method.
454const CGFunctionInfo &
455CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
456 // It happens that this is the same as a call with no optional
457 // arguments, except also using the formal 'self' type.
458 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
459}
460
461/// Arrange the argument and result information for the function type
462/// through which to perform a send to the given Objective-C method,
463/// using the given receiver type. The receiver type is not always
464/// the 'self' type of the method or even an Objective-C pointer type.
465/// This is *not* the right method for actually performing such a
466/// message send, due to the possibility of optional arguments.
467const CGFunctionInfo &
468CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
469 QualType receiverType) {
470 SmallVector<CanQualType, 16> argTys;
471 SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
472 argTys.push_back(Context.getCanonicalParamType(receiverType));
473 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
474 // FIXME: Kill copy?
475 for (const auto *I : MD->parameters()) {
476 argTys.push_back(Context.getCanonicalParamType(I->getType()));
477 auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
478 I->hasAttr<NoEscapeAttr>());
479 extParamInfos.push_back(extParamInfo);
480 }
481
482 FunctionType::ExtInfo einfo;
483 bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
484 einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
485
486 if (getContext().getLangOpts().ObjCAutoRefCount &&
487 MD->hasAttr<NSReturnsRetainedAttr>())
488 einfo = einfo.withProducesResult(true);
489
490 RequiredArgs required =
491 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
492
493 return arrangeLLVMFunctionInfo(
494 GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
495 /*chainCall=*/false, argTys, einfo, extParamInfos, required);
496}
497
498const CGFunctionInfo &
499CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
500 const CallArgList &args) {
501 auto argTypes = getArgTypesForCall(Context, args);
502 FunctionType::ExtInfo einfo;
503
504 return arrangeLLVMFunctionInfo(
505 GetReturnType(returnType), /*instanceMethod=*/false,
506 /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
507}
508
509const CGFunctionInfo &
510CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
511 // FIXME: Do we need to handle ObjCMethodDecl?
512 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
513
514 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
515 return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));
516
517 if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
518 return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));
519
520 return arrangeFunctionDeclaration(FD);
521}
522
523/// Arrange a thunk that takes 'this' as the first parameter followed by
524/// varargs. Return a void pointer, regardless of the actual return type.
525/// The body of the thunk will end in a musttail call to a function of the
526/// correct type, and the caller will bitcast the function to the correct
527/// prototype.
528const CGFunctionInfo &
529CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
530 assert(MD->isVirtual() && "only methods have thunks");
531 CanQual<FunctionProtoType> FTP = GetFormalType(MD);
532 CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
533 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
534 /*chainCall=*/false, ArgTys,
535 FTP->getExtInfo(), {}, RequiredArgs(1));
536}
537
538const CGFunctionInfo &
539CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
540 CXXCtorType CT) {
541 assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
542
543 CanQual<FunctionProtoType> FTP = GetFormalType(CD);
544 SmallVector<CanQualType, 2> ArgTys;
545 const CXXRecordDecl *RD = CD->getParent();
546 ArgTys.push_back(GetThisType(Context, RD));
547 if (CT == Ctor_CopyingClosure)
548 ArgTys.push_back(*FTP->param_type_begin());
549 if (RD->getNumVBases() > 0)
550 ArgTys.push_back(Context.IntTy);
551 CallingConv CC = Context.getDefaultCallingConvention(
552 /*IsVariadic=*/false, /*IsCXXMethod=*/true);
553 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
554 /*chainCall=*/false, ArgTys,
555 FunctionType::ExtInfo(CC), {},
556 RequiredArgs::All);
557}
558
559/// Arrange a call as unto a free function, except possibly with an
560/// additional number of formal parameters considered required.
561static const CGFunctionInfo &
562arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
563 CodeGenModule &CGM,
564 const CallArgList &args,
565 const FunctionType *fnType,
566 unsigned numExtraRequiredArgs,
567 bool chainCall) {
568 assert(args.size() >= numExtraRequiredArgs);
569
570 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
571
572 // In most cases, there are no optional arguments.
573 RequiredArgs required = RequiredArgs::All;
574
575 // If we have a variadic prototype, the required arguments are the
576 // extra prefix plus the arguments in the prototype.
577 if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
578 if (proto->isVariadic())
579 required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
580
581 if (proto->hasExtParameterInfos())
582 addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
583 args.size());
584
585 // If we don't have a prototype at all, but we're supposed to
586 // explicitly use the variadic convention for unprototyped calls,
587 // treat all of the arguments as required but preserve the nominal
588 // possibility of variadics.
589 } else if (CGM.getTargetCodeGenInfo()
590 .isNoProtoCallVariadic(args,
591 cast<FunctionNoProtoType>(fnType))) {
592 required = RequiredArgs(args.size());
593 }
594
595 // FIXME: Kill copy.
596 SmallVector<CanQualType, 16> argTypes;
597 for (const auto &arg : args)
598 argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
599 return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
600 /*instanceMethod=*/false, chainCall,
601 argTypes, fnType->getExtInfo(), paramInfos,
602 required);
603}
604
605/// Figure out the rules for calling a function with the given formal
606/// type using the given arguments. The arguments are necessary
607/// because the function might be unprototyped, in which case it's
608/// target-dependent in crazy ways.
609const CGFunctionInfo &
610CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
611 const FunctionType *fnType,
612 bool chainCall) {
613 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
614 chainCall ? 1 : 0, chainCall);
615}
616
617/// A block function is essentially a free function with an
618/// extra implicit argument.
619const CGFunctionInfo &
620CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
621 const FunctionType *fnType) {
622 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
623 /*chainCall=*/false);
624}
625
626const CGFunctionInfo &
627CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
628 const FunctionArgList &params) {
629 auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
630 auto argTypes = getArgTypesForDeclaration(Context, params);
631
632 return arrangeLLVMFunctionInfo(
633 GetReturnType(proto->getReturnType()),
634 /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
635 proto->getExtInfo(), paramInfos,
636 RequiredArgs::forPrototypePlus(proto, 1, nullptr));
637}
638
639const CGFunctionInfo &
640CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
641 const CallArgList &args) {
642 // FIXME: Kill copy.
643 SmallVector<CanQualType, 16> argTypes;
644 for (const auto &Arg : args)
645 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
646 return arrangeLLVMFunctionInfo(
647 GetReturnType(resultType), /*instanceMethod=*/false,
648 /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
649 /*paramInfos=*/ {}, RequiredArgs::All);
650}
651
652const CGFunctionInfo &
653CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
654 const FunctionArgList &args) {
655 auto argTypes = getArgTypesForDeclaration(Context, args);
656
657 return arrangeLLVMFunctionInfo(
658 GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
659 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
660}
661
662const CGFunctionInfo &
663CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
664 ArrayRef<CanQualType> argTypes) {
665 return arrangeLLVMFunctionInfo(
666 resultType, /*instanceMethod=*/false, /*chainCall=*/false,
667 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
668}
669
670/// Arrange a call to a C++ method, passing the given arguments.
671///
672/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
673/// does not count `this`.
674const CGFunctionInfo &
675CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
676 const FunctionProtoType *proto,
677 RequiredArgs required,
678 unsigned numPrefixArgs) {
679 assert(numPrefixArgs + 1 <= args.size() &&
680 "Emitting a call with less args than the required prefix?");
681 // Add one to account for `this`. It's a bit awkward here, but we don't count
682 // `this` in similar places elsewhere.
683 auto paramInfos =
684 getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
685
686 // FIXME: Kill copy.
687 auto argTypes = getArgTypesForCall(Context, args);
688
689 FunctionType::ExtInfo info = proto->getExtInfo();
690 return arrangeLLVMFunctionInfo(
691 GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
692 /*chainCall=*/false, argTypes, info, paramInfos, required);
693}
694
695const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
696 return arrangeLLVMFunctionInfo(
697 getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
698 None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
699}
700
701const CGFunctionInfo &
702CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
703 const CallArgList &args) {
704 assert(signature.arg_size() <= args.size());
705 if (signature.arg_size() == args.size())
706 return signature;
707
708 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
709 auto sigParamInfos = signature.getExtParameterInfos();
710 if (!sigParamInfos.empty()) {
711 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
712 paramInfos.resize(args.size());
713 }
714
715 auto argTypes = getArgTypesForCall(Context, args);
716
717 assert(signature.getRequiredArgs().allowsOptionalArgs());
718 return arrangeLLVMFunctionInfo(signature.getReturnType(),
719 signature.isInstanceMethod(),
720 signature.isChainCall(),
721 argTypes,
722 signature.getExtInfo(),
723 paramInfos,
724 signature.getRequiredArgs());
725}
726
727namespace clang {
728namespace CodeGen {
729void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
730}
731}
732
733/// Arrange the argument and result information for an abstract value
734/// of a given function type. This is the method which all of the
735/// above functions ultimately defer to.
736const CGFunctionInfo &
737CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
738 bool instanceMethod,
739 bool chainCall,
740 ArrayRef<CanQualType> argTypes,
741 FunctionType::ExtInfo info,
742 ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
743 RequiredArgs required) {
744 assert(std::all_of(argTypes.begin(), argTypes.end(),
745 [](CanQualType T) { return T.isCanonicalAsParam(); }));
746
747 // Lookup or create unique function info.
748 llvm::FoldingSetNodeID ID;
749 CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
750 required, resultType, argTypes);
751
752 void *insertPos = nullptr;
753 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
754 if (FI)
755 return *FI;
756
757 unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
758
759 // Construct the function info. We co-allocate the ArgInfos.
760 FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
761 paramInfos, resultType, argTypes, required);
762 FunctionInfos.InsertNode(FI, insertPos);
763
764 bool inserted = FunctionsBeingProcessed.insert(FI).second;
765 (void)inserted;
766 assert(inserted && "Recursively being processed?");
767
768 // Compute ABI information.
769 if (CC == llvm::CallingConv::SPIR_KERNEL) {
770 // Force target independent argument handling for the host visible
771 // kernel functions.
772 computeSPIRKernelABIInfo(CGM, *FI);
773 } else if (info.getCC() == CC_Swift) {
774 swiftcall::computeABIInfo(CGM, *FI);
775 } else {
776 getABIInfo().computeInfo(*FI);
777 }
778
779 // Loop over all of the computed argument and return value info. If any of
780 // them are direct or extend without a specified coerce type, specify the
781 // default now.
782 ABIArgInfo &retInfo = FI->getReturnInfo();
783 if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
784 retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
785
786 for (auto &I : FI->arguments())
787 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
788 I.info.setCoerceToType(ConvertType(I.type));
789
790 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
791 assert(erased && "Not in set?");
792
793 return *FI;
794}
795
796CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
797 bool instanceMethod,
798 bool chainCall,
799 const FunctionType::ExtInfo &info,
800 ArrayRef<ExtParameterInfo> paramInfos,
801 CanQualType resultType,
802 ArrayRef<CanQualType> argTypes,
803 RequiredArgs required) {
804 assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
805
806 void *buffer =
807 operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
808 argTypes.size() + 1, paramInfos.size()));
809
810 CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
811 FI->CallingConvention = llvmCC;
812 FI->EffectiveCallingConvention = llvmCC;
813 FI->ASTCallingConvention = info.getCC();
814 FI->InstanceMethod = instanceMethod;
815 FI->ChainCall = chainCall;
816 FI->NoReturn = info.getNoReturn();
817 FI->ReturnsRetained = info.getProducesResult();
818 FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
819 FI->NoCfCheck = info.getNoCfCheck();
820 FI->Required = required;
821 FI->HasRegParm = info.getHasRegParm();
822 FI->RegParm = info.getRegParm();
823 FI->ArgStruct = nullptr;
824 FI->ArgStructAlign = 0;
825 FI->NumArgs = argTypes.size();
826 FI->HasExtParameterInfos = !paramInfos.empty();
827 FI->getArgsBuffer()[0].type = resultType;
828 for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
829 FI->getArgsBuffer()[i + 1].type = argTypes[i];
830 for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
831 FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
832 return FI;
833}
834
835/***/
836
837namespace {
838// ABIArgInfo::Expand implementation.
839
840// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
841struct TypeExpansion {
842 enum TypeExpansionKind {
843 // Elements of constant arrays are expanded recursively.
844 TEK_ConstantArray,
845 // Record fields are expanded recursively (but if record is a union, only
846 // the field with the largest size is expanded).
847 TEK_Record,
848 // For complex types, real and imaginary parts are expanded recursively.
849 TEK_Complex,
850 // All other types are not expandable.
851 TEK_None
852 };
853
854 const TypeExpansionKind Kind;
855
856 TypeExpansion(TypeExpansionKind K) : Kind(K) {}
857 virtual ~TypeExpansion() {}
858};
859
860struct ConstantArrayExpansion : TypeExpansion {
861 QualType EltTy;
862 uint64_t NumElts;
863
864 ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
865 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
866 static bool classof(const TypeExpansion *TE) {
867 return TE->Kind == TEK_ConstantArray;
868 }
869};
870
871struct RecordExpansion : TypeExpansion {
872 SmallVector<const CXXBaseSpecifier *, 1> Bases;
873
874 SmallVector<const FieldDecl *, 1> Fields;
875
876 RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
877 SmallVector<const FieldDecl *, 1> &&Fields)
878 : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
879 Fields(std::move(Fields)) {}
880 static bool classof(const TypeExpansion *TE) {
881 return TE->Kind == TEK_Record;
882 }
883};
884
885struct ComplexExpansion : TypeExpansion {
886 QualType EltTy;
887
888 ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
889 static bool classof(const TypeExpansion *TE) {
890 return TE->Kind == TEK_Complex;
891 }
892};
893
894struct NoExpansion : TypeExpansion {
895 NoExpansion() : TypeExpansion(TEK_None) {}
896 static bool classof(const TypeExpansion *TE) {
897 return TE->Kind == TEK_None;
898 }
899};
900} // namespace
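
To make the expansion kinds above concrete: with ABIArgInfo::Expand, an aggregate argument is decomposed into one IR argument per leaf scalar, following the TEK_* classification. An illustrative example (hypothetical types, not from this file):

    struct Inner { int a; float b; };        // TEK_Record: expanded into its fields
    struct Outer { Inner i; double d[2]; };  // d is TEK_ConstantArray: expanded per element

    // Passing Outer as an Expand-classified argument conceptually becomes the
    // flattened IR parameter list (i32, float, double, double).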
901
902static std::unique_ptr<TypeExpansion>
903getTypeExpansion(QualType Ty, const ASTContext &Context) {
904 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
905 return llvm::make_unique<ConstantArrayExpansion>(
906 AT->getElementType(), AT->getSize().getZExtValue());
907 }
908 if (const RecordType *RT = Ty->getAs<RecordType>()) {
909 SmallVector<const CXXBaseSpecifier *, 1> Bases;
910 SmallVector<const FieldDecl *, 1> Fields;
911 const RecordDecl *RD = RT->getDecl();
912 assert(!RD->hasFlexibleArrayMember() &&
913 "Cannot expand structure with flexible array.");
914 if (RD->isUnion()) {
915 // Unions can be here only in degenerative cases - all the fields are same
916 // after flattening. Thus we have to use the "largest" field.
917 const FieldDecl *LargestFD = nullptr;
918 CharUnits UnionSize = CharUnits::Zero();
919
920 for (const auto *FD : RD->fields()) {
921 if (FD->isZeroLengthBitField(Context))
922 continue;
923 assert(!FD->isBitField() &&
924 "Cannot expand structure with bit-field members.");
925 CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
926 if (UnionSize < FieldSize) {
927 UnionSize = FieldSize;
928 LargestFD = FD;
929 }
930 }
931 if (LargestFD)
932 Fields.push_back(LargestFD);
933 } else {
934 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
935 assert(!CXXRD->isDynamicClass() &&
936 "cannot expand vtable pointers in dynamic classes");
937 for (const CXXBaseSpecifier &BS : CXXRD->bases())
938 Bases.push_back(&BS);
939 }
940
941 for (const auto *FD : RD->fields()) {
942 if (FD->isZeroLengthBitField(Context))
943 continue;
944 assert(!FD->isBitField() &&
945 "Cannot expand structure with bit-field members.");
946 Fields.push_back(FD);
947 }
948 }
949 return llvm::make_unique<RecordExpansion>(std::move(Bases),
950 std::move(Fields));
951 }
952 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
953 return llvm::make_unique<ComplexExpansion>(CT->getElementType());
954 }
955 return llvm::make_unique<NoExpansion>();
956}
957
958static int getExpansionSize(QualType Ty, const ASTContext &Context) {
959 auto Exp = getTypeExpansion(Ty, Context);
960 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
961 return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
962 }
963 if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
964 int Res = 0;
965 for (auto BS : RExp->Bases)
966 Res += getExpansionSize(BS->getType(), Context);
967 for (auto FD : RExp->Fields)
968 Res += getExpansionSize(FD->getType(), Context);
969 return Res;
970 }
971 if (isa<ComplexExpansion>(Exp.get()))
972 return 2;
973 assert(isa<NoExpansion>(Exp.get()));
974 return 1;
975}
976
977void
978CodeGenTypes::getExpandedTypes(QualType Ty,
979 SmallVectorImpl<llvm::Type *>::iterator &TI) {
980 auto Exp = getTypeExpansion(Ty, Context);
981 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
982 for (int i = 0, n = CAExp->NumElts; i < n; i++) {
983 getExpandedTypes(CAExp->EltTy, TI);
984 }
985 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
986 for (auto BS : RExp->Bases)
987 getExpandedTypes(BS->getType(), TI);
988 for (auto FD : RExp->Fields)
989 getExpandedTypes(FD->getType(), TI);
990 } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
991 llvm::Type *EltTy = ConvertType(CExp->EltTy);
992 *TI++ = EltTy;
993 *TI++ = EltTy;
994 } else {
995 assert(isa<NoExpansion>(Exp.get()));
996 *TI++ = ConvertType(Ty);
997 }
998}
999
1000static void forConstantArrayExpansion(CodeGenFunction &CGF,
1001 ConstantArrayExpansion *CAE,
1002 Address BaseAddr,
1003 llvm::function_ref<void(Address)> Fn) {
1004 CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
1005 CharUnits EltAlign =
1006 BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
1007
1008 for (int i = 0, n = CAE->NumElts; i < n; i++) {
1009 llvm::Value *EltAddr =
1010 CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
1011 Fn(Address(EltAddr, EltAlign));
1012 }
1013}
1014
1015void CodeGenFunction::ExpandTypeFromArgs(
1016 QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
1017 assert(LV.isSimple() &&
1018 "Unexpected non-simple lvalue during struct expansion.");
1019
1020 auto Exp = getTypeExpansion(Ty, getContext());
1021 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1022 forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
1023 [&](Address EltAddr) {
1024 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1025 ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1026 });
1027 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1028 Address This = LV.getAddress();
1029 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1030 // Perform a single step derived-to-base conversion.
1031 Address Base =
1032 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1033 /*NullCheckValue=*/false, SourceLocation());
1034 LValue SubLV = MakeAddrLValue(Base, BS->getType());
1035
1036 // Recurse onto bases.
1037 ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1038 }
1039 for (auto FD : RExp->Fields) {
1040 // FIXME: What are the right qualifiers here?
1041 LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1042 ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1043 }
1044 } else if (isa<ComplexExpansion>(Exp.get())) {
1045 auto realValue = *AI++;
1046 auto imagValue = *AI++;
1047 EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1048 } else {
1049 assert(isa<NoExpansion>(Exp.get()));
1050 EmitStoreThroughLValue(RValue::get(*AI++), LV);
1051 }
1052}
1053
1054void CodeGenFunction::ExpandTypeToArgs(
1055 QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1056 SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1057 auto Exp = getTypeExpansion(Ty, getContext());
1058 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1059 Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1060 : Arg.getKnownRValue().getAggregateAddress();
1061 forConstantArrayExpansion(
1062 *this, CAExp, Addr, [&](Address EltAddr) {
1063 CallArg EltArg = CallArg(
1064 convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1065 CAExp->EltTy);
1066 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1067 IRCallArgPos);
1068 });
1069 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1070 Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1071 : Arg.getKnownRValue().getAggregateAddress();
1072 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1073 // Perform a single step derived-to-base conversion.
1074 Address Base =
1075 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1076 /*NullCheckValue=*/false, SourceLocation());
1077 CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1078
1079 // Recurse onto bases.
1080 ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1081 IRCallArgPos);
1082 }
1083
1084 LValue LV = MakeAddrLValue(This, Ty);
1085 for (auto FD : RExp->Fields) {
1086 CallArg FldArg =
1087 CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1088 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1089 IRCallArgPos);
1090 }
1091 } else if (isa<ComplexExpansion>(Exp.get())) {
1092 ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
1093 IRCallArgs[IRCallArgPos++] = CV.first;
1094 IRCallArgs[IRCallArgPos++] = CV.second;
1095 } else {
1096 assert(isa<NoExpansion>(Exp.get()));
1097 auto RV = Arg.getKnownRValue();
1098 assert(RV.isScalar() &&
1099 "Unexpected non-scalar rvalue during struct expansion.");
1100
1101 // Insert a bitcast as needed.
1102 llvm::Value *V = RV.getScalarVal();
1103 if (IRCallArgPos < IRFuncTy->getNumParams() &&
1104 V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1105 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1106
1107 IRCallArgs[IRCallArgPos++] = V;
1108 }
1109}
1110
1111/// Create a temporary allocation for the purposes of coercion.
1112static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1113 CharUnits MinAlign) {
1114 // Don't use an alignment that's worse than what LLVM would prefer.
1115 auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1116 CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1117
1118 return CGF.CreateTempAlloca(Ty, Align);
1119}
1120
1121/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1122/// accessing some number of bytes out of it, try to gep into the struct to get
1123/// at its inner goodness. Dive as deep as possible without entering an element
1124/// with an in-memory size smaller than DstSize.
1125static Address
1126EnterStructPointerForCoercedAccess(Address SrcPtr,
1127 llvm::StructType *SrcSTy,
1128 uint64_t DstSize, CodeGenFunction &CGF) {
1129 // We can't dive into a zero-element struct.
1130 if (SrcSTy->getNumElements() == 0) return SrcPtr;
1131
1132 llvm::Type *FirstElt = SrcSTy->getElementType(0);
1133
1134 // If the first elt is at least as large as what we're looking for, or if the
1135 // first element is the same size as the whole struct, we can enter it. The
1136 // comparison must be made on the store size and not the alloca size. Using
1137 // the alloca size may overstate the size of the load.
1138 uint64_t FirstEltSize =
1139 CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1140 if (FirstEltSize < DstSize &&
1141 FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1142 return SrcPtr;
1143
1144 // GEP into the first element.
1145 SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");
1146
1147 // If the first element is a struct, recurse.
1148 llvm::Type *SrcTy = SrcPtr.getElementType();
1149 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1150 return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1151
1152 return SrcPtr;
1153}
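// [Editor's illustration, not part of CGCall.cpp] A minimal sketch of the
// "dive" above, assuming a typical 64-bit data layout:
//
//   %inner = type { i64 }
//   %outer = type { %inner, i32 }
//
// With DstSize == 8, the first element of %outer is %inner with store size
// 8 >= DstSize, so we GEP into it ("coerce.dive") and recurse; %inner's
// first element is the i64 itself, so the returned address points at that
// i64. For %small = type { i16, i16, i16 } and DstSize == 8 we stop at
// %small, because the first element's store size (2) is smaller than both
// DstSize and the struct's own store size.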
1154
1155/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1156/// are either integers or pointers. This does a truncation of the value if it
1157/// is too large or a zero extension if it is too small.
1158///
1159/// This behaves as if the value were coerced through memory, so on big-endian
1160/// targets the high bits are preserved in a truncation, while little-endian
1161/// targets preserve the low bits.
1162static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1163 llvm::Type *Ty,
1164 CodeGenFunction &CGF) {
1165 if (Val->getType() == Ty)
1166 return Val;
1167
1168 if (isa<llvm::PointerType>(Val->getType())) {
1169 // If this is Pointer->Pointer avoid conversion to and from int.
1170 if (isa<llvm::PointerType>(Ty))
1171 return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1172
1173 // Convert the pointer to an integer so we can play with its width.
1174 Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1175 }
1176
1177 llvm::Type *DestIntTy = Ty;
1178 if (isa<llvm::PointerType>(DestIntTy))
1179 DestIntTy = CGF.IntPtrTy;
1180
1181 if (Val->getType() != DestIntTy) {
1182 const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1183 if (DL.isBigEndian()) {
1184 // Preserve the high bits on big-endian targets.
1185 // That is what memory coercion does.
1186 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1187 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1188
1189 if (SrcSize > DstSize) {
1190 Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1191 Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1192 } else {
1193 Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1194 Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1195 }
1196 } else {
1197 // Little-endian targets preserve the low bits. No shifts required.
1198 Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1199 }
1200 }
1201
1202 if (isa<llvm::PointerType>(Ty))
1203 Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1204 return Val;
1205}
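// [Editor's illustration, not part of CGCall.cpp] Sketch of the shift
// handling above for a hypothetical coercion of Val : i64 to Ty : i32:
//
//   big-endian:     %hi = lshr i64 %val, 32           ; "coerce.highbits"
//                   %r  = trunc i64 %hi to i32        ; "coerce.val.ii"
//   little-endian:  %r  = trunc i64 %val to i32       ; plain IntCast
//
// Widening (i32 to i64) on a big-endian target zero-extends and then shifts
// left by 32 so the value lands in the high bits, matching what a
// store-then-load through memory would produce.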
1206
1207
1208
1209/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1210/// a pointer to an object of type \arg Ty, known to be aligned to
1211/// \arg SrcAlign bytes.
1212///
1213/// This safely handles the case when the src type is smaller than the
1214/// destination type; in this situation the values of bits which are not
1215/// present in the src are undefined.
1216static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1217 CodeGenFunction &CGF) {
1218 llvm::Type *SrcTy = Src.getElementType();
1219
1220 // If SrcTy and Ty are the same, just do a load.
1221 if (SrcTy == Ty)
1222 return CGF.Builder.CreateLoad(Src);
1223
1224 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1225
1226 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1227 Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
1228 SrcTy = Src.getType()->getElementType();
1229 }
1230
1231 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1232
1233 // If the source and destination are integer or pointer types, just do an
1234 // extension or truncation to the desired type.
1235 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1236 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1237 llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1238 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1239 }
1240
1241 // If load is legal, just bitcast the src pointer.
1242 if (SrcSize >= DstSize) {
1243 // Generally SrcSize is never greater than DstSize, since this means we are
1244 // losing bits. However, this can happen in cases where the structure has
1245 // additional padding, for example due to a user specified alignment.
1246 //
1247 // FIXME: Assert that we aren't truncating non-padding bits when we have
1248 // access to that information.
1249 Src = CGF.Builder.CreateBitCast(Src,
1250 Ty->getPointerTo(Src.getAddressSpace()));
1251 return CGF.Builder.CreateLoad(Src);
1252 }
1253
1254 // Otherwise do coercion through memory. This is stupid, but simple.
1255 Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
1256 Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
1257 Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.AllocaInt8PtrTy);
1258 CGF.Builder.CreateMemCpy(Casted, SrcCasted,
1259 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
1260 false);
1261 return CGF.Builder.CreateLoad(Tmp);
1262}
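// [Editor's illustration, not part of CGCall.cpp] Possible outcomes of the
// routine above for a source of IR type {i32, i32} (8 bytes under a typical
// 64-bit data layout):
//
//   Ty == {i32, i32}  -> the types match; emit a plain load.
//   Ty == i64         -> SrcSize (8) >= DstSize (8); bitcast the pointer to
//                        i64* and load through it.
//   Ty == [3 x i32]   -> DstSize (12) > SrcSize (8); memcpy the 8 source
//                        bytes into a 12-byte temporary and load that, with
//                        the remaining bits undefined as documented above.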
1263
1264// Function to store a first-class aggregate into memory. We prefer to
1265// store the elements rather than the aggregate to be more friendly to
1266// fast-isel.
1267// FIXME: Do we need to recurse here?
1268static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
1269 Address Dest, bool DestIsVolatile) {
1270 // Prefer scalar stores to first-class aggregate stores.
1271 if (llvm::StructType *STy =
1272 dyn_cast<llvm::StructType>(Val->getType())) {
1273 const llvm::StructLayout *Layout =
1274 CGF.CGM.getDataLayout().getStructLayout(STy);
1275
1276 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1277 auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
1278 Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
1279 llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
1280 CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1281 }
1282 } else {
1283 CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
1284 }
1285}
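// [Editor's illustration, not part of CGCall.cpp] For Val : {i32, i64} the
// loop above emits element-wise stores rather than one aggregate store:
//
//   %e0 = extractvalue { i32, i64 } %val, 0
//   store i32 %e0, i32* %dest.elt0
//   %e1 = extractvalue { i32, i64 } %val, 1
//   store i64 %e1, i64* %dest.elt1
//
// (%dest.elt0 and %dest.elt1 are hypothetical names for the struct GEPs.)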
1286
1287/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1288/// where the source and destination may have different types. The
1289/// destination is known to be aligned to \arg DstAlign bytes.
1290///
1291/// This safely handles the case when the src type is larger than the
1292/// destination type; the upper bits of the src will be lost.
1293static void CreateCoercedStore(llvm::Value *Src,
1294 Address Dst,
1295 bool DstIsVolatile,
1296 CodeGenFunction &CGF) {
1297 llvm::Type *SrcTy = Src->getType();
1298 llvm::Type *DstTy = Dst.getType()->getElementType();
1299 if (SrcTy == DstTy) {
1300 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1301 return;
1302 }
1303
1304 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1305
1306 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1307 Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
1308 DstTy = Dst.getType()->getElementType();
1309 }
1310
1311 // If the source and destination are integer or pointer types, just do an
1312 // extension or truncation to the desired type.
1313 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1314 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1315 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1316 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1317 return;
1318 }
1319
1320 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1321
1322 // If store is legal, just bitcast the src pointer.
1323 if (SrcSize <= DstSize) {
1324 Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1325 BuildAggStore(CGF, Src, Dst, DstIsVolatile);
1326 } else {
1327 // Otherwise do coercion through memory. This is stupid, but
1328 // simple.
1329
1330 // Generally SrcSize is never greater than DstSize, since this means we are
1331 // losing bits. However, this can happen in cases where the structure has
1332 // additional padding, for example due to a user specified alignment.
1333 //
1334 // FIXME: Assert that we aren't truncating non-padding bits when we have
1335 // access to that information.
1336 Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1337 CGF.Builder.CreateStore(Src, Tmp);
1338 Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
1339 Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.AllocaInt8PtrTy);
1340 CGF.Builder.CreateMemCpy(DstCasted, Casted,
1341 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
1342 false);
1343 }
1344}
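// [Editor's illustration, not part of CGCall.cpp] Mirror image of
// CreateCoercedLoad: storing an i64 coerced value into a destination of IR
// type {i16, i16} (4 bytes) takes the memory path above. The i64 is stored
// into an 8-byte temporary, and only DstSize (4) bytes are memcpy'd into the
// destination, so the upper bits of the source are dropped as the function
// comment promises.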
1345
1346static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1347 const ABIArgInfo &info) {
1348 if (unsigned offset = info.getDirectOffset()) {
1349 addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1350 addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1351 CharUnits::fromQuantity(offset));
1352 addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1353 }
1354 return addr;
1355}
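// [Editor's illustration, not part of CGCall.cpp] With a hypothetical
// ABIArgInfo whose direct offset is 8 and whose coerce-to type is i64, the
// helper above puns 'addr' to i8*, advances it by 8 bytes with an in-bounds
// GEP, and re-puns the result to i64*; a zero offset leaves 'addr' untouched.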
1356
1357namespace {
1358
1359/// Encapsulates information about the way function arguments from
1360/// CGFunctionInfo should be passed to the actual LLVM IR function.
1361class ClangToLLVMArgMapping {
1362 static const unsigned InvalidIndex = ~0U;
1363 unsigned InallocaArgNo;
1364 unsigned SRetArgNo;
1365 unsigned TotalIRArgs;
1366
1367 /// Arguments of the LLVM IR function corresponding to a single Clang argument.
1368 struct IRArgs {
1369 unsigned PaddingArgIndex;
1370 // Argument is expanded to IR arguments at positions
1371 // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1372 unsigned FirstArgIndex;
1373 unsigned NumberOfArgs;
1374
1375 IRArgs()
1376 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1377 NumberOfArgs(0) {}
1378 };
1379
1380 SmallVector<IRArgs, 8> ArgInfo;
1381
1382public:
1383 ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1384 bool OnlyRequiredArgs = false)
1385 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1386 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1387 construct(Context, FI, OnlyRequiredArgs);
1388 }
1389
1390 bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1391 unsigned getInallocaArgNo() const {
1392 assert(hasInallocaArg());
1393 return InallocaArgNo;
1394 }
1395
1396 bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1397 unsigned getSRetArgNo() const {
1398 assert(hasSRetArg());
1399 return SRetArgNo;
1400 }
1401
1402 unsigned totalIRArgs() const { return TotalIRArgs; }
1403
1404 bool hasPaddingArg(unsigned ArgNo) const {
1405 assert(ArgNo < ArgInfo.size());
1406 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1407 }
1408 unsigned getPaddingArgNo(unsigned ArgNo) const {
1409 assert(hasPaddingArg(ArgNo));
1410 return ArgInfo[ArgNo].PaddingArgIndex;
1411 }
1412
1413 /// Returns the index of the first IR argument corresponding to ArgNo, and
1414 /// the number of IR arguments it expands to.
1415 std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1416 assert(ArgNo < ArgInfo.size());
1417 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1418 ArgInfo[ArgNo].NumberOfArgs);
1419 }
1420
1421private:
1422 void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1423 bool OnlyRequiredArgs);
1424};
1425
1426void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1427 const CGFunctionInfo &FI,
1428 bool OnlyRequiredArgs) {
1429 unsigned IRArgNo = 0;
1430 bool SwapThisWithSRet = false;
1431 const ABIArgInfo &RetAI = FI.getReturnInfo();
1432
1433 if (RetAI.getKind() == ABIArgInfo::Indirect) {
1434 SwapThisWithSRet = RetAI.isSRetAfterThis();
1435 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1436 }
1437
1438 unsigned ArgNo = 0;
1439 unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1440 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1441 ++I, ++ArgNo) {
1442 assert(I != FI.arg_end());
1443 QualType ArgType = I->type;
1444 const ABIArgInfo &AI = I->info;
1445 // Collect data about IR arguments corresponding to Clang argument ArgNo.
1446 auto &IRArgs = ArgInfo[ArgNo];
1447
1448 if (AI.getPaddingType())
1449 IRArgs.PaddingArgIndex = IRArgNo++;
1450
1451 switch (AI.getKind()) {
1452 case ABIArgInfo::Extend:
1453 case ABIArgInfo::Direct: {
1454 // FIXME: handle sseregparm someday...
1455 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1456 if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1457 IRArgs.NumberOfArgs = STy->getNumElements();
1458 } else {
1459 IRArgs.NumberOfArgs = 1;
1460 }
1461 break;
1462 }
1463 case ABIArgInfo::Indirect:
1464 IRArgs.NumberOfArgs = 1;
1465 break;
1466 case ABIArgInfo::Ignore:
1467 case ABIArgInfo::InAlloca:
1468 // ignore and inalloca don't have matching LLVM parameters.
1469 IRArgs.NumberOfArgs = 0;
1470 break;
1471 case ABIArgInfo::CoerceAndExpand:
1472 IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1473 break;
1474 case ABIArgInfo::Expand:
1475 IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1476 break;
1477 }
1478
1479 if (IRArgs.NumberOfArgs > 0) {
1480 IRArgs.FirstArgIndex = IRArgNo;
1481 IRArgNo += IRArgs.NumberOfArgs;
1482 }
1483
1484 // Skip over the sret parameter when it comes second. We already handled it
1485 // above.
1486 if (IRArgNo == 1 && SwapThisWithSRet)
1487 IRArgNo++;
1488 }
1489 assert(ArgNo == ArgInfo.size());
1490
1491 if (FI.usesInAlloca())
1492 InallocaArgNo = IRArgNo++;
1493
1494 TotalIRArgs = IRArgNo;
1495}
1496} // namespace
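// [Editor's illustration, not part of CGCall.cpp] A hypothetical mapping the
// class above could produce for the x86-64 SysV ABI:
//
//   struct Big { char buf[64]; };
//   Big make(int a, Big b);
//
// The return is Indirect, so IR argument 0 is the sret pointer
// (getSRetArgNo() == 0). 'a' is Direct and maps to IR argument 1; 'b' is
// Indirect byval and maps to IR argument 2. Hence totalIRArgs() == 3,
// getIRArgs(0) == {1, 1} and getIRArgs(1) == {2, 1}.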
1497
1498/***/
1499
1500bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1501 const auto &RI = FI.getReturnInfo();
1502 return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1503}
1504
1505bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1506 return ReturnTypeUsesSRet(FI) &&
1507 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1508}
1509
1510bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1511 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1512 switch (BT->getKind()) {
1513 default:
1514 return false;
1515 case BuiltinType::Float:
1516 return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1517 case BuiltinType::Double:
1518 return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1519 case BuiltinType::LongDouble:
1520 return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1521 }
1522 }
1523
1524 return false;
1525}
1526
1527bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1528 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1529 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1530 if (BT->getKind() == BuiltinType::LongDouble)
1531 return getTarget().useObjCFP2RetForComplexLongDouble();
1532 }
1533 }
1534
1535 return false;
1536}
1537
1538llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1539 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1540 return GetFunctionType(FI);
1541}
1542
1543llvm::FunctionType *
1544CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1545
1546 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1547 (void)Inserted;
1548 assert(Inserted && "Recursively being processed?");
1549
1550 llvm::Type *resultType = nullptr;
1551 const ABIArgInfo &retAI = FI.getReturnInfo();
1552 switch (retAI.getKind()) {
1553 case ABIArgInfo::Expand:
1554 llvm_unreachable("Invalid ABI kind for return argument");
1555
1556 case ABIArgInfo::Extend:
1557 case ABIArgInfo::Direct:
1558 resultType = retAI.getCoerceToType();
1559 break;
1560
1561 case ABIArgInfo::InAlloca:
1562 if (retAI.getInAllocaSRet()) {
1563 // sret things on win32 aren't void; they return the sret pointer.
1564 QualType ret = FI.getReturnType();
1565 llvm::Type *ty = ConvertType(ret);
1566 unsigned addressSpace = Context.getTargetAddressSpace(ret);
1567 resultType = llvm::PointerType::get(ty, addressSpace);
1568 } else {
1569 resultType = llvm::Type::getVoidTy(getLLVMContext());
1570 }
1571 break;
1572
1573 case ABIArgInfo::Indirect:
1574 case ABIArgInfo::Ignore:
1575 resultType = llvm::Type::getVoidTy(getLLVMContext());
1576 break;
1577
1578 case ABIArgInfo::CoerceAndExpand:
1579 resultType = retAI.getUnpaddedCoerceAndExpandType();
1580 break;
1581 }
1582
1583 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1584 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1585
1586 // Add type for sret argument.
1587 if (IRFunctionArgs.hasSRetArg()) {
1588 QualType Ret = FI.getReturnType();
1589 llvm::Type *Ty = ConvertType(Ret);
1590 unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1591 ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1592 llvm::PointerType::get(Ty, AddressSpace);
1593 }
1594
1595 // Add type for inalloca argument.
1596 if (IRFunctionArgs.hasInallocaArg()) {
1597 auto ArgStruct = FI.getArgStruct();
1598 assert(ArgStruct);
1599 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1600 }
1601
1602 // Add in all of the required arguments.
1603 unsigned ArgNo = 0;
1604 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1605 ie = it + FI.getNumRequiredArgs();
1606 for (; it != ie; ++it, ++ArgNo) {
1607 const ABIArgInfo &ArgInfo = it->info;
1608
1609 // Insert a padding type to ensure proper alignment.
1610 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1611 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1612 ArgInfo.getPaddingType();
1613
1614 unsigned FirstIRArg, NumIRArgs;
1615 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1616
1617 switch (ArgInfo.getKind()) {
1618 case ABIArgInfo::Ignore:
1619 case ABIArgInfo::InAlloca:
1620 assert(NumIRArgs == 0);
1621 break;
1622
1623 case ABIArgInfo::Indirect: {
1624 assert(NumIRArgs == 1);
1625 // Indirect arguments are always on the stack, which is the alloca address space.
1626 llvm::Type *LTy = ConvertTypeForMem(it->type);
1627 ArgTypes[FirstIRArg] = LTy->getPointerTo(
1628 CGM.getDataLayout().getAllocaAddrSpace());
1629 break;
1630 }
1631
1632 case ABIArgInfo::Extend:
1633 case ABIArgInfo::Direct: {
1634 // Fast-isel and the optimizer generally like scalar values better than
1635 // FCAs, so we flatten them if this is safe to do for this argument.
1636 llvm::Type *argType = ArgInfo.getCoerceToType();
1637 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1638 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1639 assert(NumIRArgs == st->getNumElements());
1640 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1641 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1642 } else {
1643 assert(NumIRArgs == 1);
1644 ArgTypes[FirstIRArg] = argType;
1645 }
1646 break;
1647 }
1648
1649 case ABIArgInfo::CoerceAndExpand: {
1650 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1651 for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1652 *ArgTypesIter++ = EltTy;
1653 }
1654 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1655 break;
1656 }
1657
1658 case ABIArgInfo::Expand:
1659 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1660 getExpandedTypes(it->type, ArgTypesIter);
1661 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1662 break;
1663 }
1664 }
1665
1666 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1667 assert(Erased && "Not in set?");
1668
1669 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1670}
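// [Editor's illustration, not part of CGCall.cpp] Continuing the
// hypothetical 'make' example above, GetFunctionType would build roughly
//
//   void (%struct.Big*, i32, %struct.Big*)
//
// The Indirect return becomes a void result plus the leading sret pointer
// slot, 'a' stays an i32, and the byval 'b' is passed as a pointer. The
// sret/byval attributes themselves are attached later by
// ConstructAttributeList, not here.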
1671
1672llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1673 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1674 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1675
1676 if (!isFuncTypeConvertible(FPT))
1677 return llvm::StructType::get(getLLVMContext());
1678
1679 const CGFunctionInfo *Info;
1680 if (isa<CXXDestructorDecl>(MD))
1681 Info =
1682 &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
1683 else
1684 Info = &arrangeCXXMethodDeclaration(MD);
1685 return GetFunctionType(*Info);
1686}
1687
1688static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1689 llvm::AttrBuilder &FuncAttrs,
1690 const FunctionProtoType *FPT) {
1691 if (!FPT)
1692 return;
1693
1694 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1695 FPT->isNothrow())
1696 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1697}
1698
1699void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1700 bool AttrOnCallSite,
1701 llvm::AttrBuilder &FuncAttrs) {
1702 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1703 if (!HasOptnone) {
1704 if (CodeGenOpts.OptimizeSize)
1705 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1706 if (CodeGenOpts.OptimizeSize == 2)
1707 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1708 }
1709
1710 if (CodeGenOpts.DisableRedZone)
1711 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1712 if (CodeGenOpts.NoImplicitFloat)
1713 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1714
1715 if (AttrOnCallSite) {
1716 // Attributes that should go on the call site only.
1717 if (!CodeGenOpts.SimplifyLibCalls ||
1718 CodeGenOpts.isNoBuiltinFunc(Name.data()))
1719 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1720 if (!CodeGenOpts.TrapFuncName.empty())
1721 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1722 } else {
1723 // Attributes that should go on the function, but not the call site.
1724 if (!CodeGenOpts.DisableFPElim) {
1725 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1726 } else if (CodeGenOpts.OmitLeafFramePointer) {
1727 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1728 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1729 } else {
1730 FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1731 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1732 }
1733
1734 FuncAttrs.addAttribute("less-precise-fpmad",
1735 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1736
1737 if (!CodeGenOpts.FPDenormalMode.empty())
1738 FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
1739
1740 FuncAttrs.addAttribute("no-trapping-math",
1741 llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1742
1743 // Strict (compliant) code is the default, so only add this attribute to
1744 // indicate that we are trying to work around a problem case.
1745 if (!CodeGenOpts.StrictFloatCastOverflow)
1746 FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1747
1748 // TODO: Are these all needed?
1749 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1750 FuncAttrs.addAttribute("no-infs-fp-math",
1751 llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1752 FuncAttrs.addAttribute("no-nans-fp-math",
1753 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1754 FuncAttrs.addAttribute("unsafe-fp-math",
1755 llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1756 FuncAttrs.addAttribute("use-soft-float",
1757 llvm::toStringRef(CodeGenOpts.SoftFloat));
1758 FuncAttrs.addAttribute("stack-protector-buffer-size",
1759 llvm::utostr(CodeGenOpts.SSPBufferSize));
1760 FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1761 llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1762 FuncAttrs.addAttribute(
1763 "correctly-rounded-divide-sqrt-fp-math",
1764 llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1765
1766 if (getLangOpts().OpenCL)
1767 FuncAttrs.addAttribute("denorms-are-zero",
1768 llvm::toStringRef(CodeGenOpts.FlushDenorm));
1769
1770 // TODO: Reciprocal estimate codegen options should apply to instructions?
1771 const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1772 if (!Recips.empty())
1773 FuncAttrs.addAttribute("reciprocal-estimates",
1774 llvm::join(Recips, ","));
1775
1776 if (!CodeGenOpts.PreferVectorWidth.empty() &&
1777 CodeGenOpts.PreferVectorWidth != "none")
1778 FuncAttrs.addAttribute("prefer-vector-width",
1779 CodeGenOpts.PreferVectorWidth);
1780
1781 if (CodeGenOpts.StackRealignment)
1782 FuncAttrs.addAttribute("stackrealign");
1783 if (CodeGenOpts.Backchain)
1784 FuncAttrs.addAttribute("backchain");
1785 }
1786
1787 if (getLangOpts().assumeFunctionsAreConvergent()) {
1788 // Conservatively, mark all functions and calls in CUDA and OpenCL as
1789 // convergent (meaning, they may call an intrinsically convergent op, such
1790 // as __syncthreads() / barrier(), and so can't have certain optimizations
1791 // applied around them). LLVM will remove this attribute where it safely
1792 // can.
1793 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1794 }
1795
1796 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1797 // Exceptions aren't supported in CUDA device code.
1798 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1799
1800 // Respect -fcuda-flush-denormals-to-zero.
1801 if (getLangOpts().CUDADeviceFlushDenormalsToZero)
1802 FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1803 }
1804}
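// [Editor's illustration, not part of CGCall.cpp] Sketch of the call-site /
// function split above: building with -fno-builtin-printf makes each call to
// printf carry the nobuiltin attribute (the AttrOnCallSite branch), whereas
// the frame-pointer, FP-math and stack-protector string attributes land only
// on function definitions, since they describe how the callee body was
// compiled.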
1805
1806void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1807 llvm::AttrBuilder FuncAttrs;
1808 ConstructDefaultFnAttrList(F.getName(),
1809 F.hasFnAttribute(llvm::Attribute::OptimizeNone),
1810 /* AttrOnCallsite = */ false, FuncAttrs);
1811 F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1812}
1813
1814void CodeGenModule::ConstructAttributeList(
1815 StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1816 llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1817 llvm::AttrBuilder FuncAttrs;
1818 llvm::AttrBuilder RetAttrs;
1819
1820 CallingConv = FI.getEffectiveCallingConvention();
1821 if (FI.isNoReturn())
1822 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1823
1824 // If we have information about the function prototype, we can learn
1825 // attributes from there.
1826 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1827 CalleeInfo.getCalleeFunctionProtoType());
1828
1829 const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
1830
1831 bool HasOptnone = false;
1832 // FIXME: handle sseregparm someday...
1833 if (TargetDecl) {
1834 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1835 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1836 if (TargetDecl->hasAttr<NoThrowAttr>())
1837 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1838 if (TargetDecl->hasAttr<NoReturnAttr>())
1839 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1840 if (TargetDecl->hasAttr<ColdAttr>())
1841 FuncAttrs.addAttribute(llvm::Attribute::Cold);
1842 if (TargetDecl->hasAttr<NoDuplicateAttr>())
1843 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1844 if (TargetDecl->hasAttr<ConvergentAttr>())
1845 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1846
1847 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1848 AddAttributesFromFunctionProtoType(
1849 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1850 // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1851 // These attributes are not inherited by overloads.
1852 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1853 if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1854 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1855 }
1856
1857 // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1858 if (TargetDecl->hasAttr<ConstAttr>()) {
1859 FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1860 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1861 } else if (TargetDecl->hasAttr<PureAttr>()) {
1862 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1863 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1864 } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1865 FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1866 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1867 }
1868 if (TargetDecl->hasAttr<RestrictAttr>())
1869 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1870 if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
1871 RetAttrs.addAttribute(llvm::Attribute::NonNull);
1872 if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1873 FuncAttrs.addAttribute("no_caller_saved_registers");
1874 if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1875 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1876
1877 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1878 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1879 Optional<unsigned> NumElemsParam;
1880 if (AllocSize->getNumElemsParam().isValid())
1881 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
1882 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
1883 NumElemsParam);
1884 }
1885 }
1886
1887 ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1888
1889 if (CodeGenOpts.EnableSegmentedStacks &&
1890 !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1891 FuncAttrs.addAttribute("split-stack");
1892
1893 // Add NonLazyBind attribute to function declarations when -fno-plt
1894 // is used.
1895 if (TargetDecl && CodeGenOpts.NoPLT) {
1896 if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1897 if (!Fn->isDefined() && !AttrOnCallSite) {
1898 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1899 }
1900 }
1901 }
1902
1903 if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
1904 if (getLangOpts().OpenCLVersion <= 120) {
1905 // In OpenCL v1.2, work groups are always uniform.
1906 FuncAttrs.addAttribute("uniform-work-group-size", "true");
1907 } else {
1908 // In OpenCL v2.0, work groups may be either uniform or non-uniform.
1909 // The '-cl-uniform-work-group-size' compile option gives the compiler
1910 // a hint that the global work-size is a multiple of the work-group
1911 // size specified to clEnqueueNDRangeKernel (i.e. work groups are
1912 // uniform).
1913 FuncAttrs.addAttribute("uniform-work-group-size",
1914 llvm::toStringRef(CodeGenOpts.UniformWGSize));
1915 }
1916 }
1917
1918 if (!AttrOnCallSite) {
1919 bool DisableTailCalls = false;
1920
1921 if (CodeGenOpts.DisableTailCalls)
1922 DisableTailCalls = true;
1923 else if (TargetDecl) {
1924 if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1925 TargetDecl->hasAttr<AnyX86InterruptAttr>())
1926 DisableTailCalls = true;
1927 else if (CodeGenOpts.NoEscapingBlockTailCalls) {
1928 if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
1929 if (!BD->doesNotEscape())
1930 DisableTailCalls = true;
1931 }
1932 }
1933
1934 FuncAttrs.addAttribute("disable-tail-calls",
1935 llvm::toStringRef(DisableTailCalls));
1936 GetCPUAndFeaturesAttributes(TargetDecl, FuncAttrs);
1937 }
1938
1939 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1940
1941 QualType RetTy = FI.getReturnType();
1942 const ABIArgInfo &RetAI = FI.getReturnInfo();
1943 switch (RetAI.getKind()) {
1944 case ABIArgInfo::Extend:
1945 if (RetAI.isSignExt())
1946 RetAttrs.addAttribute(llvm::Attribute::SExt);
1947 else
1948 RetAttrs.addAttribute(llvm::Attribute::ZExt);
1949 LLVM_FALLTHROUGH;
1950 case ABIArgInfo::Direct:
1951 if (RetAI.getInReg())
1952 RetAttrs.addAttribute(llvm::Attribute::InReg);
1953 break;
1954 case ABIArgInfo::Ignore:
1955 break;
1956
1957 case ABIArgInfo::InAlloca:
1958 case ABIArgInfo::Indirect: {
1959 // inalloca and sret disable readnone and readonly
1960 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1961 .removeAttribute(llvm::Attribute::ReadNone);
1962 break;
1963 }
1964
1965 case ABIArgInfo::CoerceAndExpand:
1966 break;
1967
1968 case ABIArgInfo::Expand:
1969 llvm_unreachable("Invalid ABI kind for return argument");
1970 }
1971
1972 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1973 QualType PTy = RefTy->getPointeeType();
1974 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1975 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1976 .getQuantity());
1977 else if (getContext().getTargetAddressSpace(PTy) == 0)
1978 RetAttrs.addAttribute(llvm::Attribute::NonNull);
1979 }
1980
1981 bool hasUsedSRet = false;
1982 SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
1983
1984 // Attach attributes to sret.
1985 if (IRFunctionArgs.hasSRetArg()) {
1986 llvm::AttrBuilder SRETAttrs;
1987 SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1988 hasUsedSRet = true;
1989 if (RetAI.getInReg())
1990 SRETAttrs.addAttribute(llvm::Attribute::InReg);
1991 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
1992 llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
1993 }
1994
1995 // Attach attributes to inalloca argument.
1996 if (IRFunctionArgs.hasInallocaArg()) {
1997 llvm::AttrBuilder Attrs;
1998 Attrs.addAttribute(llvm::Attribute::InAlloca);
1999 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2000 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2001 }
2002
2003 unsigned ArgNo = 0;
2004 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2005 E = FI.arg_end();
2006 I != E; ++I, ++ArgNo) {
2007 QualType ParamType = I->type;
2008 const ABIArgInfo &AI = I->info;
2009 llvm::AttrBuilder Attrs;
2010
2011 // Add attribute for padding argument, if necessary.
2012 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2013 if (AI.getPaddingInReg()) {
2014 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2015 llvm::AttributeSet::get(
2016 getLLVMContext(),
2017 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2018 }
2019 }
2020
2021 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2022 // have the corresponding parameter variable. It doesn't make
2023 // sense to do it here because parameters are so messed up.
2024 switch (AI.getKind()) {
2025 case ABIArgInfo::Extend:
2026 if (AI.isSignExt())
2027 Attrs.addAttribute(llvm::Attribute::SExt);
2028 else
2029 Attrs.addAttribute(llvm::Attribute::ZExt);
2030 LLVM_FALLTHROUGH;
2031 case ABIArgInfo::Direct:
2032 if (ArgNo == 0 && FI.isChainCall())
2033 Attrs.addAttribute(llvm::Attribute::Nest);
2034 else if (AI.getInReg())
2035 Attrs.addAttribute(llvm::Attribute::InReg);
2036 break;
2037
2038 case ABIArgInfo::Indirect: {
2039 if (AI.getInReg())
2040 Attrs.addAttribute(llvm::Attribute::InReg);
2041
2042 if (AI.getIndirectByVal())
2043 Attrs.addAttribute(llvm::Attribute::ByVal);
2044
2045 CharUnits Align = AI.getIndirectAlign();
2046
2047 // In a byval argument, it is important that the required
2048 // alignment of the type is honored, as LLVM might be creating a
2049 // *new* stack object, and needs to know what alignment to give
2050 // it. (Sometimes it can deduce a sensible alignment on its own,
2051 // but not if clang decides it must emit a packed struct, or the
2052 // user specifies increased alignment requirements.)
2053 //
2054 // This is different from indirect *not* byval, where the object
2055 // exists already, and the align attribute is purely
2056 // informative.
2057 assert(!Align.isZero());
2058
2059 // For now, only add this when we have a byval argument.
2060 // TODO: be less lazy about updating test cases.
2061 if (AI.getIndirectByVal())
2062 Attrs.addAlignmentAttr(Align.getQuantity());
2063
2064 // byval disables readnone and readonly.
2065 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2066 .removeAttribute(llvm::Attribute::ReadNone);
2067 break;
2068 }
2069 case ABIArgInfo::Ignore:
2070 case ABIArgInfo::Expand:
2071 case ABIArgInfo::CoerceAndExpand:
2072 break;
2073
2074 case ABIArgInfo::InAlloca:
2075 // inalloca disables readnone and readonly.
2076 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2077 .removeAttribute(llvm::Attribute::ReadNone);
2078 continue;
2079 }
2080
2081 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2082 QualType PTy = RefTy->getPointeeType();
2083 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2084 Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2085 .getQuantity());
2086 else if (getContext().getTargetAddressSpace(PTy) == 0)
2087 Attrs.addAttribute(llvm::Attribute::NonNull);
2088 }
2089
2090 switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2091 case ParameterABI::Ordinary:
2092 break;
2093
2094 case ParameterABI::SwiftIndirectResult: {
2095 // Add 'sret' if we haven't already used it for something, but
2096 // only if the result is void.
2097 if (!hasUsedSRet && RetTy->isVoidType()) {
2098 Attrs.addAttribute(llvm::Attribute::StructRet);
2099 hasUsedSRet = true;
2100 }
2101
2102 // Add 'noalias' in either case.
2103 Attrs.addAttribute(llvm::Attribute::NoAlias);
2104
2105 // Add 'dereferenceable' and 'alignment'.
2106 auto PTy = ParamType->getPointeeType();
2107 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2108 auto info = getContext().getTypeInfoInChars(PTy);
2109 Attrs.addDereferenceableAttr(info.first.getQuantity());
2110 Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
2111 info.second.getQuantity()));
2112 }
2113 break;
2114 }
2115
2116 case ParameterABI::SwiftErrorResult:
2117 Attrs.addAttribute(llvm::Attribute::SwiftError);
2118 break;
2119
2120 case ParameterABI::SwiftContext:
2121 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2122 break;
2123 }
2124
2125 if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2126 Attrs.addAttribute(llvm::Attribute::NoCapture);
2127
2128 if (Attrs.hasAttributes()) {
2129 unsigned FirstIRArg, NumIRArgs;
2130 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2131 for (unsigned i = 0; i < NumIRArgs; i++)
2132 ArgAttrs[FirstIRArg + i] =
2133 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2134 }
2135 }
2136 assert(ArgNo == FI.arg_size());
2137
2138 AttrList = llvm::AttributeList::get(
2139 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2140 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2141}
2142
2143/// An argument came in as a promoted argument; demote it back to its
2144/// declared type.
2145static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2146 const VarDecl *var,
2147 llvm::Value *value) {
2148 llvm::Type *varType = CGF.ConvertType(var->getType());
2149
2150 // This can happen with promotions that actually don't change the
2151 // underlying type, like the enum promotions.
2152 if (value->getType() == varType) return value;
2153
2154 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2155 && "unexpected promotion type");
2156
2157 if (isa<llvm::IntegerType>(varType))
2158 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2159
2160 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2161}
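// [Editor's illustration, not part of CGCall.cpp] A K&R-style definition
//
//   void f(c) char c; { /* ... */ }
//
// receives 'c' promoted to int, so the demotion above emits a trunc from
// i32 back to i8 ("arg.unpromote"); a float parameter promoted to double is
// demoted with an fptrunc via CreateFPCast instead.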
2162
2163/// Returns the attribute (either parameter attribute, or function
2164/// attribute), which declares argument ArgNo to be non-null.
2165static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2166 QualType ArgType, unsigned ArgNo) {
2167 // FIXME: __attribute__((nonnull)) can also be applied to:
2168 // - references to pointers, where the pointee is known to be
2169 // nonnull (apparently a Clang extension)
2170 // - transparent unions containing pointers
2171 // In the former case, LLVM IR cannot represent the constraint. In
2172 // the latter case, we have no guarantee that the transparent union
2173 // is in fact passed as a pointer.
2174 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2175 return nullptr;
2176 // First, check attribute on parameter itself.
2177 if (PVD) {
2178 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2179 return ParmNNAttr;
2180 }
2181 // Check function attributes.
2182 if (!FD)
2183 return nullptr;
2184 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2185 if (NNAttr->isNonNull(ArgNo))
2186 return NNAttr;
2187 }
2188 return nullptr;
2189}
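// [Editor's illustration, not part of CGCall.cpp] Both spellings are found
// by the helper above:
//
//   void g(int *p __attribute__((nonnull)));      // attribute on the parameter
//   __attribute__((nonnull(1))) void h(int *p);   // attribute on the function
//
// For g the ParmVarDecl carries the NonNullAttr itself; for h the
// FunctionDecl's attribute is consulted through isNonNull(ArgNo).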
2190
2191namespace {
2192 struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2193 Address Temp;
2194 Address Arg;
2195 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2196 void Emit(CodeGenFunction &CGF, Flags flags) override {
2197 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2198 CGF.Builder.CreateStore(errorValue, Arg);
2199 }
2200 };
2201}
2202
2203void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2204 llvm::Function *Fn,
2205 const FunctionArgList &Args) {
2206 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2207 // Naked functions don't have prologues.
2208 return;
2209
2210 // If this is an implicit-return-zero function, go ahead and
2211 // initialize the return value. TODO: it might be nice to have
2212 // a more general mechanism for this that didn't require synthesized
2213 // return statements.
2214 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2215 if (FD->hasImplicitReturnZero()) {
2216 QualType RetTy = FD->getReturnType().getUnqualifiedType();
2217 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2218 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2219 Builder.CreateStore(Zero, ReturnValue);
2220 }
2221 }
2222
2223 // FIXME: We no longer need the types from FunctionArgList; lift up and
2224 // simplify.
2225
2226 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2227 // Flattened function arguments.
2228 SmallVector<llvm::Value *, 16> FnArgs;
2229 FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2230 for (auto &Arg : Fn->args()) {
2231 FnArgs.push_back(&Arg);
2232 }
2233 assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2234
2235 // If we're using inalloca, all the memory arguments are GEPs off of the last
2236 // parameter, which is a pointer to the complete memory area.
2237 Address ArgStruct = Address::invalid();
2238 const llvm::StructLayout *ArgStructLayout = nullptr;
2239 if (IRFunctionArgs.hasInallocaArg()) {
2240 ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2241 ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2242 FI.getArgStructAlignment());
2243
2244 assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2245 }
2246
2247 // Name the struct return parameter.
2248 if (IRFunctionArgs.hasSRetArg()) {
2249 auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2250 AI->setName("agg.result");
2251 AI->addAttr(llvm::Attribute::NoAlias);
2252 }
2253
2254 // Track if we received the parameter as a pointer (indirect, byval, or
2255 // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
2256 // into a local alloca for us.
2257 SmallVector<ParamValue, 16> ArgVals;
2258 ArgVals.reserve(Args.size());
2259
2260 // Create a pointer value for every parameter declaration. This usually
2261 // entails copying one or more LLVM IR arguments into an alloca. Don't push
2262 // any cleanups or do anything that might unwind. We do that separately, so
2263 // we can push the cleanups in the correct order for the ABI.
2264 assert(FI.arg_size() == Args.size() &&
2265 "Mismatch between function signature & arguments.");
2266 unsigned ArgNo = 0;
2267 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2268 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2269 i != e; ++i, ++info_it, ++ArgNo) {
2270 const VarDecl *Arg = *i;
2271 const ABIArgInfo &ArgI = info_it->info;
2272
2273 bool isPromoted =
2274 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2275 // We are converting from ABIArgInfo type to VarDecl type directly, unless
2276 // the parameter is promoted. In this case we convert to
2277 // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2278 QualType Ty = isPromoted ? info_it->type : Arg->getType();
2279 assert(hasScalarEvaluationKind(Ty) ==
2280 hasScalarEvaluationKind(Arg->getType()));
2281
2282 unsigned FirstIRArg, NumIRArgs;
2283 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2284
2285 switch (ArgI.getKind()) {
2286 case ABIArgInfo::InAlloca: {
2287 assert(NumIRArgs == 0);
2288 auto FieldIndex = ArgI.getInAllocaFieldIndex();
2289 CharUnits FieldOffset =
2290 CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
2291 Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
2292 Arg->getName());
2293 ArgVals.push_back(ParamValue::forIndirect(V));
2294 break;
2295 }
2296
2297 case ABIArgInfo::Indirect: {
2298 assert(NumIRArgs == 1);
2299 Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2300
2301 if (!hasScalarEvaluationKind(Ty)) {
2302 // Aggregates and complex variables are accessed by reference. All we
2303 // need to do is realign the value, if requested.
2304 Address V = ParamAddr;
2305 if (ArgI.getIndirectRealign()) {
2306 Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2307
2308 // Copy from the incoming argument pointer to the temporary with the
2309 // appropriate alignment.
2310 //
2311 // FIXME: We should have a common utility for generating an aggregate
2312 // copy.
2313 CharUnits Size = getContext().getTypeSizeInChars(Ty);
2314 auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2315 Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2316 Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2317 Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2318 V = AlignedTemp;
2319 }
2320 ArgVals.push_back(ParamValue::forIndirect(V));
2321 } else {
2322 // Load scalar value from indirect argument.
2323 llvm::Value *V =
2324 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());
2325
2326 if (isPromoted)
2327 V = emitArgumentDemotion(*this, Arg, V);
2328 ArgVals.push_back(ParamValue::forDirect(V));
2329 }
2330 break;
2331 }
2332
2333 case ABIArgInfo::Extend:
2334 case ABIArgInfo::Direct: {
2335
2336 // If we have the trivial case, handle it with no muss and fuss.
2337 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2338 ArgI.getCoerceToType() == ConvertType(Ty) &&
2339 ArgI.getDirectOffset() == 0) {
2340 assert(NumIRArgs == 1);
2341 llvm::Value *V = FnArgs[FirstIRArg];
2342 auto AI = cast<llvm::Argument>(V);
2343
2344 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2345 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2346 PVD->getFunctionScopeIndex()))
2347 AI->addAttr(llvm::Attribute::NonNull);
2348
2349 QualType OTy = PVD->getOriginalType();
2350 if (const auto *ArrTy =
2351 getContext().getAsConstantArrayType(OTy)) {
2352 // A C99 array parameter declaration with the static keyword also
2353 // indicates dereferenceability, and if the size is constant we can
2354 // use the dereferenceable attribute (which requires the size in
2355 // bytes).
2356 if (ArrTy->getSizeModifier() == ArrayType::Static) {
2357 QualType ETy = ArrTy->getElementType();
2358 uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2359 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2360 ArrSize) {
2361 llvm::AttrBuilder Attrs;
2362 Attrs.addDereferenceableAttr(
2363 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2364 AI->addAttrs(Attrs);
2365 } else if (getContext().getTargetAddressSpace(ETy) == 0) {
2366 AI->addAttr(llvm::Attribute::NonNull);
2367 }
2368 }
2369 } else if (const auto *ArrTy =
2370 getContext().getAsVariableArrayType(OTy)) {
2371 // For C99 VLAs with the static keyword, we don't know the size so
2372 // we can't use the dereferenceable attribute, but in addrspace(0)
2373 // we know that it must be nonnull.
2374 if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2375 !getContext().getTargetAddressSpace(ArrTy->getElementType()))
2376 AI->addAttr(llvm::Attribute::NonNull);
2377 }
2378
2379 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2380 if (!AVAttr)
2381 if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2382 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2383 if (AVAttr) {
2384 llvm::Value *AlignmentValue =
2385 EmitScalarExpr(AVAttr->getAlignment());
2386 llvm::ConstantInt *AlignmentCI =
2387 cast<llvm::ConstantInt>(AlignmentValue);
2388 unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2389 +llvm::Value::MaximumAlignment);
2390 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2391 }
2392 }
2393
2394 if (Arg->getType().isRestrictQualified())
2395 AI->addAttr(llvm::Attribute::NoAlias);
2396
2397 // LLVM expects swifterror parameters to be used in very restricted
2398 // ways. Copy the value into a less-restricted temporary.
2399 if (FI.getExtParameterInfo(ArgNo).getABI()
2400 == ParameterABI::SwiftErrorResult) {
2401 QualType pointeeTy = Ty->getPointeeType();
2402 assert(pointeeTy->isPointerType());
2403 Address temp =
2404 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2405 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2406 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2407 Builder.CreateStore(incomingErrorValue, temp);
2408 V = temp.getPointer();
2409
2410 // Push a cleanup to copy the value back at the end of the function.
2411 // The convention does not guarantee that the value will be written
2412 // back if the function exits with an unwind exception.
2413 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2414 }
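// Illustrative note (not in the original source, simplified): for a
// swifterror parameter the incoming pointer is not used directly. The
// pointee is copied into "swifterror.temp", the function body works on the
// temporary, and the CopyBackSwiftError cleanup pushed above copies the
// value back through the original pointer on normal exits; the convention
// does not promise a write-back on unwind paths.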
2415
2416 // Ensure the argument is the correct type.
2417 if (V->getType() != ArgI.getCoerceToType())
2418 V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2419
2420 if (isPromoted)
2421 V = emitArgumentDemotion(*this, Arg, V);
2422
2423 // Because of merging of function types from multiple decls it is
2424 // possible for the type of an argument to not match the corresponding
2425 // type in the function type. Since we are generating code for the
2426 // callee here, add a cast to the argument type.
2427 llvm::Type *LTy = ConvertType(Arg->getType());
2428 if (V->getType() != LTy)
2429 V = Builder.CreateBitCast(V, LTy);
2430
2431 ArgVals.push_back(ParamValue::forDirect(V));
2432 break;
2433 }
2434
2435 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2436 Arg->getName());
2437
2438 // Pointer to store into.
2439 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2440
2441 // Fast-isel and the optimizer generally like scalar values better than
2442 // FCAs, so we flatten them if this is safe to do for this argument.
2443 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2444 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2445 STy->getNumElements() > 1) {
2446 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2447 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2448 llvm::Type *DstTy = Ptr.getElementType();
2449 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2450
2451 Address AddrToStoreInto = Address::invalid();
2452 if (SrcSize <= DstSize) {
2453 AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2454 } else {
2455 AddrToStoreInto =
2456 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2457 }
2458
2459 assert(STy->getNumElements() == NumIRArgs);
2460 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2461 auto AI = FnArgs[FirstIRArg + i];
2462 AI->setName(Arg->getName() + ".coerce" + Twine(i));
2463 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2464 Address EltPtr =
2465 Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2466 Builder.CreateStore(AI, EltPtr);
2467 }
2468
2469 if (SrcSize > DstSize) {
2470 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2471 }
2472
2473 } else {
2474 // Simple case, just do a coerced store of the argument into the alloca.
2475 assert(NumIRArgs == 1);
2476 auto AI = FnArgs[FirstIRArg];
2477 AI->setName(Arg->getName() + ".coerce");
2478 CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2479 }
2480
2481 // Match to what EmitParmDecl is expecting for this type.
2482 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2483 llvm::Value *V =
2484 EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
2485 if (isPromoted)
2486 V = emitArgumentDemotion(*this, Arg, V);
2487 ArgVals.push_back(ParamValue::forDirect(V));
2488 } else {
2489 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2490 }
2491 break;
2492 }
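// Illustrative note (not in the original source; assumes an x86-64-style
// coercion): a parameter "s" of type
//   struct S { double x, y; };
// coerced to the IR struct { double, double } arrives as two scalar IR
// arguments. The flattening loop above renames them "s.coerce0" and
// "s.coerce1" and stores each into the matching element of the alloca, so
// the function body sees an ordinary in-memory S.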
2493
2494 case ABIArgInfo::CoerceAndExpand: {
2495 // Reconstruct into a temporary.
2496 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2497 ArgVals.push_back(ParamValue::forIndirect(alloca));
2498
2499 auto coercionType = ArgI.getCoerceAndExpandType();
2500 alloca = Builder.CreateElementBitCast(alloca, coercionType);
2501 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2502
2503 unsigned argIndex = FirstIRArg;
2504 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2505 llvm::Type *eltType = coercionType->getElementType(i);
2506 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2507 continue;
2508
2509 auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2510 auto elt = FnArgs[argIndex++];
2511 Builder.CreateStore(elt, eltAddr);
2512 }
2513 assert(argIndex == FirstIRArg + NumIRArgs);
2514 break;
2515 }
2516
2517 case ABIArgInfo::Expand: {
2518 // If this structure was expanded into multiple arguments then
2519 // we need to create a temporary and reconstruct it from the
2520 // arguments.
2521 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2522 LValue LV = MakeAddrLValue(Alloca, Ty);
2523 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2524
2525 auto FnArgIter = FnArgs.begin() + FirstIRArg;
2526 ExpandTypeFromArgs(Ty, LV, FnArgIter);
2527 assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2528 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2529 auto AI = FnArgs[FirstIRArg + i];
2530 AI->setName(Arg->getName() + "." + Twine(i));
2531 }
2532 break;
2533 }
2534
2535 case ABIArgInfo::Ignore:
2536 assert(NumIRArgs == 0);
2537 // Initialize the local variable appropriately.
2538 if (!hasScalarEvaluationKind(Ty)) {
2539 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2540 } else {
2541 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2542 ArgVals.push_back(ParamValue::forDirect(U));
2543 }
2544 break;
2545 }
2546 }
2547
2548 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2549 for (int I = Args.size() - 1; I >= 0; --I)
2550 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2551 } else {
2552 for (unsigned I = 0, E = Args.size(); I != E; ++I)
2553 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2554 }
2555}
2556
2557static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2558 while (insn->use_empty()) {
2559 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2560 if (!bitcast) return;
2561
2562 // This is "safe" because we would have used a ConstantExpr otherwise.
2563 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2564 bitcast->eraseFromParent();
2565 }
2566}
2567
2568/// Try to emit a fused autorelease of a return result.
2569static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2570 llvm::Value *result) {
2571 // We must be immediately followed by the cast.
2572 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2573 if (BB->empty()) return nullptr;
2574 if (&BB->back() != result) return nullptr;
2575
2576 llvm::Type *resultType = result->getType();
2577
2578 // result is in a BasicBlock and is therefore an Instruction.
2579 llvm::Instruction *generator = cast<llvm::Instruction>(result);
2580
2581 SmallVector<llvm::Instruction *, 4> InstsToKill;
2582
2583 // Look for:
2584 // %generator = bitcast %type1* %generator2 to %type2*
2585 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2586 // We would have emitted this as a constant if the operand weren't
2587 // an Instruction.
2588 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2589
2590 // Require the generator to be immediately followed by the cast.
2591 if (generator->getNextNode() != bitcast)
2592 return nullptr;
2593
2594 InstsToKill.push_back(bitcast);
2595 }
2596
2597 // Look for:
2598 // %generator = call i8* @objc_retain(i8* %originalResult)
2599 // or
2600 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2601 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2602 if (!call) return nullptr;
2603
2604 bool doRetainAutorelease;
2605
2606 if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2607 doRetainAutorelease = true;
2608 } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2609 .objc_retainAutoreleasedReturnValue) {
2610 doRetainAutorelease = false;
2611
2612 // If we emitted an assembly marker for this call (and the
2613 // ARCEntrypoints field should have been set if so), go looking
2614 // for that call. If we can't find it, we can't do this
2615 // optimization. But it should always be the immediately previous
2616 // instruction, unless we needed bitcasts around the call.
2617 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2618 llvm::Instruction *prev = call->getPrevNode();
2619 assert(prev);
2620 if (isa<llvm::BitCastInst>(prev)) {
2621 prev = prev->getPrevNode();
2622 assert(prev);
2623 }
2624 assert(isa<llvm::CallInst>(prev));
2625 assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2626 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2627 InstsToKill.push_back(prev);
2628 }
2629 } else {
2630 return nullptr;
2631 }
2632
2633 result = call->getArgOperand(0);
2634 InstsToKill.push_back(call);
2635
2636 // Keep killing bitcasts, for sanity. Note that we no longer care
2637 // about precise ordering as long as there's exactly one use.
2638 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2639 if (!bitcast->hasOneUse()) break;
2640 InstsToKill.push_back(bitcast);
2641 result = bitcast->getOperand(0);
2642 }
2643
2644 // Delete all the unnecessary instructions, from latest to earliest.
2645 for (auto *I : InstsToKill)
2646 I->eraseFromParent();
2647
2648 // Do the fused retain/autorelease if we were asked to.
2649 if (doRetainAutorelease)
2650 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2651
2652 // Cast back to the result type.
2653 return CGF.Builder.CreateBitCast(result, resultType);
2654}
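// Illustrative note (not in the original source, simplified): one pattern
// this routine rewrites is a return value produced by
//   %retained = call i8* @objc_retain(i8* %value)
// that would otherwise be autoreleased on return; the retain and any
// intervening bitcasts are deleted and replaced by a single fused
// objc_retainAutoreleaseReturnValue of %value.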
2655
2656/// If this is a +1 of the value of an immutable 'self', remove it.
2657static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2658 llvm::Value *result) {
2659 // This is only applicable to a method with an immutable 'self'.
2660 const ObjCMethodDecl *method =
2661 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2662 if (!method) return nullptr;
2663 const VarDecl *self = method->getSelfDecl();
2664 if (!self->getType().isConstQualified()) return nullptr;
2665
2666 // Look for a retain call.
2667 llvm::CallInst *retainCall =
2668 dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2669 if (!retainCall ||
2670 retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2671 return nullptr;
2672
2673 // Look for an ordinary load of 'self'.
2674 llvm::Value *retainedValue = retainCall->getArgOperand(0);
2675 llvm::LoadInst *load =
2676 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2677 if (!load || load->isAtomic() || load->isVolatile() ||
2678 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2679 return nullptr;
2680
2681 // Okay! Burn it all down. This relies for correctness on the
2682 // assumption that the retain is emitted as part of the return and
2683 // that thereafter everything is used "linearly".
2684 llvm::Type *resultType = result->getType();
2685 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2686 assert(retainCall->use_empty());
2687 retainCall->eraseFromParent();
2688 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2689
2690 return CGF.Builder.CreateBitCast(load, resultType);
2691}
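// Illustrative note (not in the original source): the typical trigger is an
// Objective-C method with an immutable 'self' that ends in "return self;".
// The emitted load of 'self' plus objc_retain feeding the return value is
// removed here, so the loaded 'self' is returned directly and the caller of
// this helper can skip the autorelease, which matters during -dealloc.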
2692
2693/// Emit an ARC autorelease of the result of a function.
2694///
2695/// \return the value to actually return from the function
2696static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2697 llvm::Value *result) {
2698 // If we're returning 'self', kill the initial retain. This is a
2699 // heuristic attempt to "encourage correctness" in the really unfortunate
2700 // case where we have a return of self during a dealloc and we desperately
2701 // need to avoid the possible autorelease.
2702 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2703 return self;
2704
2705 // At -O0, try to emit a fused retain/autorelease.
2706 if (CGF.shouldUseFusedARCCalls())
2707 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2708 return fused;
2709
2710 return CGF.EmitARCAutoreleaseReturnValue(result);
2711}
2712
2713/// Heuristically search for a dominating store to the return-value slot.
2714static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2715 // Check whether a User is a store whose pointer operand is the ReturnValue.
2716 // We are looking for stores to the ReturnValue, not for stores of the
2717 // ReturnValue to some other location.
2718 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2719 auto *SI = dyn_cast<llvm::StoreInst>(U);
2720 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2721 return nullptr;
2722 // These aren't actually possible for non-coerced returns, and we
2723 // only care about non-coerced returns on this code path.
2724 assert(!SI->isAtomic() && !SI->isVolatile());
2725 return SI;
2726 };
2727 // If there are multiple uses of the return-value slot, just check
2728 // for something immediately preceding the IP. Sometimes this can
2729 // happen with how we generate implicit-returns; it can also happen
2730 // with noreturn cleanups.
2731 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2732 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2733 if (IP->empty()) return nullptr;
2734 llvm::Instruction *I = &IP->back();
2735
2736 // Skip lifetime markers
2737 for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2738 IE = IP->rend();
2739 II != IE; ++II) {
2740 if (llvm::IntrinsicInst *Intrinsic =
2741 dyn_cast<llvm::IntrinsicInst>(&*II)) {
2742 if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2743 const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2744 ++II;
2745 if (II == IE)
2746 break;
2747 if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2748 continue;
2749 }
2750 }
2751 I = &*II;
2752 break;
2753 }
2754
2755 return GetStoreIfValid(I);
2756 }
2757
2758 llvm::StoreInst *store =
2759 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2760 if (!store) return nullptr;
2761
2762 // Now do a quick-and-dirty dominance check: just walk up the
2763 // single-predecessors chain from the current insertion point.
2764 llvm::BasicBlock *StoreBB = store->getParent();
2765 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2766 while (IP != StoreBB) {
2767 if (!(IP = IP->getSinglePredecessor()))
2768 return nullptr;
2769 }
2770
2771 // Okay, the store's basic block dominates the insertion point; we
2772 // can do our thing.
2773 return store;
2774}
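// Illustrative note (not in the original source): the "dominance check" here
// is deliberately cheap. Starting from the insertion point's block, it walks
// getSinglePredecessor() repeatedly; if it reaches the store's block, the
// store dominates the return and can be elided, otherwise (a block with zero
// or multiple predecessors) it gives up and the caller keeps the plain load
// of ReturnValue.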
2775
2776void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2777 bool EmitRetDbgLoc,
2778 SourceLocation EndLoc) {
2779 if (FI.isNoReturn()) {
2780 // Noreturn functions don't return.
2781 EmitUnreachable(EndLoc);
2782 return;
2783 }
2784
2785 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2786 // Naked functions don't have epilogues.
2787 Builder.CreateUnreachable();
2788 return;
2789 }
2790
2791 // Functions with no result always return void.
2792 if (!ReturnValue.isValid()) {
2793 Builder.CreateRetVoid();
2794 return;
2795 }
2796
2797 llvm::DebugLoc RetDbgLoc;
2798 llvm::Value *RV = nullptr;
2799 QualType RetTy = FI.getReturnType();
2800 const ABIArgInfo &RetAI = FI.getReturnInfo();
2801
2802 switch (RetAI.getKind()) {
2803 case ABIArgInfo::InAlloca:
2804 // Aggregates get evaluated directly into the destination. Sometimes we
2805 // need to return the sret value in a register, though.
2806 assert(hasAggregateEvaluationKind(RetTy));
2807 if (RetAI.getInAllocaSRet()) {
2808 llvm::Function::arg_iterator EI = CurFn->arg_end();
2809 --EI;
2810 llvm::Value *ArgStruct = &*EI;
2811 llvm::Value *SRet = Builder.CreateStructGEP(
2812 nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2813 RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2814 }
2815 break;
2816
2817 case ABIArgInfo::Indirect: {
2818 auto AI = CurFn->arg_begin();
2819 if (RetAI.isSRetAfterThis())
2820 ++AI;
2821 switch (getEvaluationKind(RetTy)) {
2822 case TEK_Complex: {
2823 ComplexPairTy RT =
2824 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2825 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2826 /*isInit*/ true);
2827 break;
2828 }
2829 case TEK_Aggregate:
2830 // Do nothing; aggregates get evaluated directly into the destination.
2831 break;
2832 case TEK_Scalar:
2833 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2834 MakeNaturalAlignAddrLValue(&*AI, RetTy),
2835 /*isInit*/ true);
2836 break;
2837 }
2838 break;
2839 }
2840
2841 case ABIArgInfo::Extend:
2842 case ABIArgInfo::Direct:
2843 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2844 RetAI.getDirectOffset() == 0) {
2845 // The internal return value temp will always have pointer-to-return-type
2846 // type; just do a load.
2847
2848 // If there is a dominating store to ReturnValue, we can elide
2849 // the load, zap the store, and usually zap the alloca.
2850 if (llvm::StoreInst *SI =
2851 findDominatingStoreToReturnValue(*this)) {
2852 // Reuse the debug location from the store unless there is
2853 // cleanup code to be emitted between the store and return
2854 // instruction.
2855 if (EmitRetDbgLoc && !AutoreleaseResult)
2856 RetDbgLoc = SI->getDebugLoc();
2857 // Get the stored value and nuke the now-dead store.
2858 RV = SI->getValueOperand();
2859 SI->eraseFromParent();
2860
2861 // If that was the only use of the return value, nuke it as well now.
2862 auto returnValueInst = ReturnValue.getPointer();
2863 if (returnValueInst->use_empty()) {
2864 if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2865 alloca->eraseFromParent();
2866 ReturnValue = Address::invalid();
2867 }
2868 }
2869
2870 // Otherwise, we have to do a simple load.
2871 } else {
2872 RV = Builder.CreateLoad(ReturnValue);
2873 }
2874 } else {
2875 // If the value is offset in memory, apply the offset now.
2876 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2877
2878 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2879 }
2880
2881 // In ARC, end functions that return a retainable type with a call
2882 // to objc_autoreleaseReturnValue.
2883 if (AutoreleaseResult) {
2884#ifndef NDEBUG
2885 // Type::isObjCRetainableType has to be called on a QualType that hasn't
2886 // been stripped of its typedefs, so we cannot use RetTy here. Get the
2887 // original return type from the FunctionDecl, ObjCMethodDecl, or
2888 // BlockDecl, via CurCodeDecl or BlockInfo.
2889 QualType RT;
2890
2891 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2892 RT = FD->getReturnType();
2893 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2894 RT = MD->getReturnType();
2895 else if (isa<BlockDecl>(CurCodeDecl))
2896 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2897 else
2898 llvm_unreachable("Unexpected function/method type")::llvm::llvm_unreachable_internal("Unexpected function/method type"
, "/build/llvm-toolchain-snapshot-7~svn337204/tools/clang/lib/CodeGen/CGCall.cpp"
, 2898)
;
2899
2900 assert(getLangOpts().ObjCAutoRefCount &&
2901 !FI.isReturnsRetained() &&
2902 RT->isObjCRetainableType());
2903#endif
2904 RV = emitAutoreleaseOfResult(*this, RV);
2905 }
2906
2907 break;
2908
2909 case ABIArgInfo::Ignore:
2910 break;
2911
2912 case ABIArgInfo::CoerceAndExpand: {
2913 auto coercionType = RetAI.getCoerceAndExpandType();
2914 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2915
2916 // Load all of the coerced elements out into results.
2917 llvm::SmallVector<llvm::Value*, 4> results;
2918 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2919 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2920 auto coercedEltType = coercionType->getElementType(i);
2921 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2922 continue;
2923
2924 auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
2925 auto elt = Builder.CreateLoad(eltAddr);
2926 results.push_back(elt);
2927 }
2928
2929 // If we have one result, it's the single direct result type.
2930 if (results.size() == 1) {
2931 RV = results[0];
2932
2933 // Otherwise, we need to make a first-class aggregate.
2934 } else {
2935 // Construct a return type that lacks padding elements.
2936 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2937
2938 RV = llvm::UndefValue::get(returnType);
2939 for (unsigned i = 0, e = results.size(); i != e; ++i) {
2940 RV = Builder.CreateInsertValue(RV, results[i], i);
2941 }
2942 }
2943 break;
2944 }
2945
2946 case ABIArgInfo::Expand:
2947 llvm_unreachable("Invalid ABI kind for return argument")::llvm::llvm_unreachable_internal("Invalid ABI kind for return argument"
, "/build/llvm-toolchain-snapshot-7~svn337204/tools/clang/lib/CodeGen/CGCall.cpp"
, 2947)
;
2948 }
2949
2950 llvm::Instruction *Ret;
2951 if (RV) {
2952 EmitReturnValueCheck(RV);
2953 Ret = Builder.CreateRet(RV);
2954 } else {
2955 Ret = Builder.CreateRetVoid();
2956 }
2957
2958 if (RetDbgLoc)
2959 Ret->setDebugLoc(std::move(RetDbgLoc));
2960}
2961
2962void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
2963 // A current decl may not be available when emitting vtable thunks.
2964 if (!CurCodeDecl)
2965 return;
2966
2967 ReturnsNonNullAttr *RetNNAttr = nullptr;
2968 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
2969 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
2970
2971 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
2972 return;
2973
2974 // Prefer the returns_nonnull attribute if it's present.
2975 SourceLocation AttrLoc;
2976 SanitizerMask CheckKind;
2977 SanitizerHandler Handler;
2978 if (RetNNAttr) {
2979 assert(!requiresReturnValueNullabilityCheck() &&
2980 "Cannot check nullability and the nonnull attribute");
2981 AttrLoc = RetNNAttr->getLocation();
2982 CheckKind = SanitizerKind::ReturnsNonnullAttribute;
2983 Handler = SanitizerHandler::NonnullReturn;
2984 } else {
2985 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
2986 if (auto *TSI = DD->getTypeSourceInfo())
2987 if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
2988 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
2989 CheckKind = SanitizerKind::NullabilityReturn;
2990 Handler = SanitizerHandler::NullabilityReturn;
2991 }
2992
2993 SanitizerScope SanScope(this);
2994
2995 // Make sure the "return" source location is valid. If we're checking a
2996 // nullability annotation, make sure the preconditions for the check are met.
2997 llvm::BasicBlock *Check = createBasicBlock("nullcheck");
2998 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
2999 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3000 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3001 if (requiresReturnValueNullabilityCheck())
3002 CanNullCheck =
3003 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3004 Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3005 EmitBlock(Check);
3006
3007 // Now do the null check.
3008 llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3009 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3010 llvm::Value *DynamicData[] = {SLocPtr};
3011 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3012
3013 EmitBlock(NoCheck);
3014
3015#ifndef NDEBUG
3016 // The return location should not be used after the check has been emitted.
3017 ReturnLocation = Address::invalid();
3018#endif
3019}
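// Rough sketch of the emitted check (an approximation, not the exact IR):
//   %sloc   = load of the saved return source location
//   %canchk = icmp ne %sloc, null   ; and-ed with the nullability precondition
//   br i1 %canchk, label %nullcheck, label %no.nullcheck
// nullcheck:
//   %nonnull = icmp ne %RV, null
//   ; failure calls the UBSan nonnull-return / nullability-return handler
//   ; with the attribute location and the loaded return location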
3020
3021static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3022 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3023 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3024}
3025
3026static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3027 QualType Ty) {
3028 // FIXME: Generate IR in one pass, rather than going back and fixing up these
3029 // placeholders.
3030 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3031 llvm::Type *IRPtrTy = IRTy->getPointerTo();
3032 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3033
3034 // FIXME: When we generate this IR in one pass, we shouldn't need
3035 // this win32-specific alignment hack.
3036 CharUnits Align = CharUnits::fromQuantity(4);
3037 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3038
3039 return AggValueSlot::forAddr(Address(Placeholder, Align),
3040 Ty.getQualifiers(),
3041 AggValueSlot::IsNotDestructed,
3042 AggValueSlot::DoesNotNeedGCBarriers,
3043 AggValueSlot::IsNotAliased,
3044 AggValueSlot::DoesNotOverlap);
3045}
3046
3047void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3048 const VarDecl *param,
3049 SourceLocation loc) {
3050 // StartFunction converted the ABI-lowered parameter(s) into a
3051 // local alloca. We need to turn that into an r-value suitable
3052 // for EmitCall.
3053 Address local = GetAddrOfLocalVar(param);
3054
3055 QualType type = param->getType();
3056
3057 assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
3058 "cannot emit delegate call arguments for inalloca arguments!");
3059
3060 // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3061 // but the argument needs to be the original pointer.
3062 if (type->isReferenceType()) {
3063 args.add(RValue::get(Builder.CreateLoad(local)), type);
3064
3065 // In ARC, move out of consumed arguments so that the release cleanup
3066 // entered by StartFunction doesn't cause an over-release. This isn't
3067 // optimal -O0 code generation, but it should get cleaned up when
3068 // optimization is enabled. This also assumes that delegate calls are
3069 // performed exactly once for a set of arguments, but that should be safe.
3070 } else if (getLangOpts().ObjCAutoRefCount &&
3071 param->hasAttr<NSConsumedAttr>() &&
3072 type->isObjCRetainableType()) {
3073 llvm::Value *ptr = Builder.CreateLoad(local);
3074 auto null =
3075 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3076 Builder.CreateStore(null, local);
3077 args.add(RValue::get(ptr), type);
3078
3079 // For the most part, we just need to load the alloca, except that
3080 // aggregate r-values are actually pointers to temporaries.
3081 } else {
3082 args.add(convertTempToRValue(local, type, loc), type);
3083 }
3084
3085 // Deactivate the cleanup for the callee-destructed param that was pushed.
3086 if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3087 type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3088 type.isDestructedType()) {
3089 EHScopeStack::stable_iterator cleanup =
3090 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3091 assert(cleanup.isValid() &&
3092 "cleanup for callee-destructed param not recorded");
3093 // This unreachable is a temporary marker which will be removed later.
3094 llvm::Instruction *isActive = Builder.CreateUnreachable();
3095 args.addArgCleanupDeactivation(cleanup, isActive);
3096 }
3097}
3098
3099static bool isProvablyNull(llvm::Value *addr) {
3100 return isa<llvm::ConstantPointerNull>(addr);
3101}
3102
3103/// Emit the actual writing-back of a writeback.
3104static void emitWriteback(CodeGenFunction &CGF,
3105 const CallArgList::Writeback &writeback) {
3106 const LValue &srcLV = writeback.Source;
3107 Address srcAddr = srcLV.getAddress();
3108 assert(!isProvablyNull(srcAddr.getPointer()) &&
3109 "shouldn't have writeback for provably null argument");
3110
3111 llvm::BasicBlock *contBB = nullptr;
3112
3113 // If the argument wasn't provably non-null, we need to null check
3114 // before doing the store.
3115 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3116 CGF.CGM.getDataLayout());
3117 if (!provablyNonNull) {
3118 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3119 contBB = CGF.createBasicBlock("icr.done");
3120
3121 llvm::Value *isNull =
3122 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3123 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3124 CGF.EmitBlock(writebackBB);
3125 }
3126
3127 // Load the value to writeback.
3128 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3129
3130 // Cast it back, in case we're writing an id to a Foo* or something.
3131 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3132 "icr.writeback-cast");
3133
3134 // Perform the writeback.
3135
3136 // If we have a "to use" value, it's something we need to emit a use
3137 // of. This has to be carefully threaded in: if it's done after the
3138 // release it's potentially undefined behavior (and the optimizer
3139 // will ignore it), and if it happens before the retain then the
3140 // optimizer could move the release there.
3141 if (writeback.ToUse) {
3142 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3143
3144 // Retain the new value. No need to block-copy here: the block's
3145 // being passed up the stack.
3146 value = CGF.EmitARCRetainNonBlock(value);
3147
3148 // Emit the intrinsic use here.
3149 CGF.EmitARCIntrinsicUse(writeback.ToUse);
3150
3151 // Load the old value (primitively).
3152 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3153
3154 // Put the new value in place (primitively).
3155 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3156
3157 // Release the old value.
3158 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3159
3160 // Otherwise, we can just do a normal lvalue store.
3161 } else {
3162 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3163 }
3164
3165 // Jump to the continuation block.
3166 if (!provablyNonNull)
3167 CGF.EmitBlock(contBB);
3168}
3169
3170static void emitWritebacks(CodeGenFunction &CGF,
3171 const CallArgList &args) {
3172 for (const auto &I : args.writebacks())
3173 emitWriteback(CGF, I);
3174}
3175
3176static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3177 const CallArgList &CallArgs) {
3178 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3179 CallArgs.getCleanupsToDeactivate();
3180 // Iterate in reverse to increase the likelihood of popping the cleanup.
3181 for (const auto &I : llvm::reverse(Cleanups)) {
3182 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3183 I.IsActiveIP->eraseFromParent();
3184 }
3185}
3186
3187static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3188 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3189 if (uop->getOpcode() == UO_AddrOf)
3190 return uop->getSubExpr();
3191 return nullptr;
3192}
3193
3194/// Emit an argument that's being passed call-by-writeback. That is,
3195/// we are passing the address of an __autoreleased temporary; it
3196/// might be copy-initialized with the current value of the given
3197/// address, but it will definitely be copied out of after the call.
3198static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3199 const ObjCIndirectCopyRestoreExpr *CRE) {
3200 LValue srcLV;
3201
3202 // Make an optimistic effort to emit the address as an l-value.
3203 // This can fail if the argument expression is more complicated.
3204 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3205 srcLV = CGF.EmitLValue(lvExpr);
3206
3207 // Otherwise, just emit it as a scalar.
3208 } else {
3209 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3210
3211 QualType srcAddrType =
3212 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3213 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3214 }
3215 Address srcAddr = srcLV.getAddress();
3216
3217 // The dest and src types don't necessarily match in LLVM terms
3218 // because of the crazy ObjC compatibility rules.
3219
3220 llvm::PointerType *destType =
3221 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3222
3223 // If the address is a constant null, just pass the appropriate null.
3224 if (isProvablyNull(srcAddr.getPointer())) {
3225 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3226 CRE->getType());
3227 return;
3228 }
3229
3230 // Create the temporary.
3231 Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3232 CGF.getPointerAlign(),
3233 "icr.temp");
3234 // Loading an l-value can introduce a cleanup if the l-value is __weak,
3235 // and that cleanup will be conditional if we can't prove that the l-value
3236 // isn't null, so we need to register a dominating point so that the cleanups
3237 // system will make valid IR.
3238 CodeGenFunction::ConditionalEvaluation condEval(CGF);
3239
3240 // Zero-initialize it if we're not doing a copy-initialization.
3241 bool shouldCopy = CRE->shouldCopy();
3242 if (!shouldCopy) {
3243 llvm::Value *null =
3244 llvm::ConstantPointerNull::get(
3245 cast<llvm::PointerType>(destType->getElementType()));
3246 CGF.Builder.CreateStore(null, temp);
3247 }
3248
3249 llvm::BasicBlock *contBB = nullptr;
3250 llvm::BasicBlock *originBB = nullptr;
3251
3252 // If the address is *not* known to be non-null, we need to switch.
3253 llvm::Value *finalArgument;
3254
3255 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3256 CGF.CGM.getDataLayout());
3257 if (provablyNonNull) {
3258 finalArgument = temp.getPointer();
3259 } else {
3260 llvm::Value *isNull =
3261 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3262
3263 finalArgument = CGF.Builder.CreateSelect(isNull,
3264 llvm::ConstantPointerNull::get(destType),
3265 temp.getPointer(), "icr.argument");
3266
3267 // If we need to copy, then the load has to be conditional, which
3268 // means we need control flow.
3269 if (shouldCopy) {
3270 originBB = CGF.Builder.GetInsertBlock();
3271 contBB = CGF.createBasicBlock("icr.cont");
3272 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3273 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3274 CGF.EmitBlock(copyBB);
3275 condEval.begin(CGF);
3276 }
3277 }
3278
3279 llvm::Value *valueToUse = nullptr;
3280
3281 // Perform a copy if necessary.
3282 if (shouldCopy) {
3283 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3284 assert(srcRV.isScalar());
3285
3286 llvm::Value *src = srcRV.getScalarVal();
3287 src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3288 "icr.cast");
3289
3290 // Use an ordinary store, not a store-to-lvalue.
3291 CGF.Builder.CreateStore(src, temp);
3292
3293 // If optimization is enabled, and the value was held in a
3294 // __strong variable, we need to tell the optimizer that this
3295 // value has to stay alive until we're doing the store back.
3296 // This is because the temporary is effectively unretained,
3297 // and so otherwise we can violate the high-level semantics.
3298 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3299 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3300 valueToUse = src;
3301 }
3302 }
3303
3304 // Finish the control flow if we needed it.
3305 if (shouldCopy && !provablyNonNull) {
3306 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3307 CGF.EmitBlock(contBB);
3308
3309 // Make a phi for the value to intrinsically use.
3310 if (valueToUse) {
3311 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3312 "icr.to-use");
3313 phiToUse->addIncoming(valueToUse, copyBB);
3314 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3315 originBB);
3316 valueToUse = phiToUse;
3317 }
3318
3319 condEval.end(CGF);
3320 }
3321
3322 args.addWriteback(srcLV, temp, valueToUse);
3323 args.add(RValue::get(finalArgument), CRE->getType());
3324}
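// Illustrative note (not in the original source; assumed declarations): a
// call such as report(&err) against a parameter declared
//   NSError * __autoreleasing *outError
// under ARC goes through this path: an "icr.temp" temporary is created and
// optionally copy-initialized from the original value, the argument becomes
// either the temporary's address or null (selected via "icr.isnull"), and a
// writeback is registered so the temporary is copied back into err after the
// call.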
3325
3326void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3327 assert(!StackBase);
3328
3329 // Save the stack.
3330 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3331 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3332}
3333
3334void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3335 if (StackBase) {
3336 // Restore the stack after the call.
3337 llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3338 CGF.Builder.CreateCall(F, StackBase);
3339 }
3340}
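// Illustrative note (not in the original source): these two hooks pair up.
// allocateArgumentMemory emits
//   %inalloca.save = call i8* @llvm.stacksave()
// before the inalloca argument block is set up, and freeArgumentMemory emits
// the matching
//   call void @llvm.stackrestore(i8* %inalloca.save)
// after the call, releasing the argument area.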
3341
3342void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3343 SourceLocation ArgLoc,
3344 AbstractCallee AC,
3345 unsigned ParmNum) {
3346 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3347 SanOpts.has(SanitizerKind::NullabilityArg)))
3348 return;
3349
3350 // The param decl may be missing in a variadic function.
3351 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3352 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3353
3354 // Prefer the nonnull attribute if it's present.
3355 const NonNullAttr *NNAttr = nullptr;
3356 if (SanOpts.has(SanitizerKind::NonnullAttribute))
3357 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3358
3359 bool CanCheckNullability = false;
3360 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3361 auto Nullability = PVD->getType()->getNullability(getContext());
3362 CanCheckNullability = Nullability &&
3363 *Nullability == NullabilityKind::NonNull &&
3364 PVD->getTypeSourceInfo();
3365 }
3366
3367 if (!NNAttr && !CanCheckNullability)
3368 return;
3369
3370 SourceLocation AttrLoc;
3371 SanitizerMask CheckKind;
3372 SanitizerHandler Handler;
3373 if (NNAttr) {
3374 AttrLoc = NNAttr->getLocation();
3375 CheckKind = SanitizerKind::NonnullAttribute;
3376 Handler = SanitizerHandler::NonnullArg;
3377 } else {
3378 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3379 CheckKind = SanitizerKind::NullabilityArg;
3380 Handler = SanitizerHandler::NullabilityArg;
3381 }
3382
3383 SanitizerScope SanScope(this);
3384 assert(RV.isScalar());
3385 llvm::Value *V = RV.getScalarVal();
3386 llvm::Value *Cond =
3387 Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3388 llvm::Constant *StaticData[] = {
3389 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3390 llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3391 };
3392 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3393}
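// Illustrative note (not in the original source; assumed declaration): with
//   void use(void *p) __attribute__((nonnull));
// and -fsanitize=nonnull-attribute, each call site compares the emitted
// argument against null and, on failure, invokes the UBSan nonnull-argument
// handler with the call-site location, the attribute location, and the
// 1-based argument index collected above.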
3394
3395void CodeGenFunction::EmitCallArgs(
3396 CallArgList &Args, ArrayRef<QualType> ArgTypes,
3397 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3398 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3399 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3400
3401 // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3402 // because arguments are destroyed left to right in the callee. As a special
3403 // case, there are certain language constructs that require left-to-right
3404 // evaluation, and in those cases we consider the evaluation order requirement
3405 // to trump the "destruction order is reverse construction order" guarantee.
3406 bool LeftToRight =
3407 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3408 ? Order == EvaluationOrder::ForceLeftToRight
3409 : Order != EvaluationOrder::ForceRightToLeft;
3410
3411 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3412 RValue EmittedArg) {
3413 if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3414 return;
3415 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3416 if (PS == nullptr)
3417 return;
3418
3419 const auto &Context = getContext();
3420 auto SizeTy = Context.getSizeType();
3421 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3422 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?")(static_cast <bool> (EmittedArg.getScalarVal() &&
"We emitted nothing for the arg?") ? void (0) : __assert_fail
("EmittedArg.getScalarVal() && \"We emitted nothing for the arg?\""
, "/build/llvm-toolchain-snapshot-7~svn337204/tools/clang/lib/CodeGen/CGCall.cpp"
, 3422, __extension__ __PRETTY_FUNCTION__))
;
3423 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3424 EmittedArg.getScalarVal());
3425 Args.add(RValue::get(V), SizeTy);
3426 // If we're emitting args in reverse, be sure to do so with
3427 // pass_object_size, as well.
3428 if (!LeftToRight)
3429 std::swap(Args.back(), *(&Args.back() - 1));
3430 };
3431
3432 // Insert a stack save if we're going to need any inalloca args.
3433 bool HasInAllocaArgs = false;
3434 if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3435 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3436 I != E && !HasInAllocaArgs; ++I)
3437 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3438 if (HasInAllocaArgs) {
3439 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3440 Args.allocateArgumentMemory(*this);
3441 }
3442 }
3443
3444 // Evaluate each argument in the appropriate order.
3445 size_t CallArgsStart = Args.size();
3446 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3447 unsigned Idx = LeftToRight ? I : E - I - 1;
3448 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3449 unsigned InitialArgSize = Args.size();
3450 // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3451 // the argument and parameter match or the objc method is parameterized.
3452 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3453 getContext().hasSameUnqualifiedType((*Arg)->getType(),
3454 ArgTypes[Idx]) ||
3455 (isa<ObjCMethodDecl>(AC.getDecl()) &&
3456 isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3457 "Argument and parameter types don't match");
3458 EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3459 // In particular, we depend on it being the last arg in Args, and the
3460 // objectsize bits depend on there only being one arg if !LeftToRight.
3461 assert(InitialArgSize + 1 == Args.size() &&
3462 "The code below depends on only adding one arg per EmitCallArg");
3463 (void)InitialArgSize;
3464 // Since pointer arguments are never emitted as LValues, it is safe to emit
3465 // the non-null argument check for r-values only.
3466 if (!Args.back().hasLValue()) {
3467 RValue RVArg = Args.back().getKnownRValue();
3468 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3469 ParamsToSkip + Idx);
3470 // @llvm.objectsize should never have side-effects and shouldn't need
3471 // destruction/cleanups, so we can safely "emit" it after its arg,
3472 // regardless of right-to-leftness
3473 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3474 }
3475 }
3476
3477 if (!LeftToRight) {
3478 // Un-reverse the arguments we just evaluated so they match up with the LLVM
3479 // IR function.
3480 std::reverse(Args.begin() + CallArgsStart, Args.end());
3481 }
3482}
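
The non-null check a few lines above (EmitNonNullArgCheck) only fires when the callee parameter is annotated as non-null and the matching sanitizer is enabled. A minimal source-level trigger, written purely for illustration (the function names are made up and do not appear in CGCall.cpp):

    // Compile with -fsanitize=nonnull-attribute to see the emitted check.
    void sink(int *p) __attribute__((nonnull));
    void caller(int *q) { sink(q); }  // q is tested for null right before the call
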
3483
3484namespace {
3485
3486struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3487 DestroyUnpassedArg(Address Addr, QualType Ty)
3488 : Addr(Addr), Ty(Ty) {}
3489
3490 Address Addr;
3491 QualType Ty;
3492
3493 void Emit(CodeGenFunction &CGF, Flags flags) override {
3494 QualType::DestructionKind DtorKind = Ty.isDestructedType();
3495 if (DtorKind == QualType::DK_cxx_destructor) {
3496 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3497      assert(!Dtor->isTrivial());
3498 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3499 /*Delegating=*/false, Addr);
3500 } else {
3501 CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3502 }
3503 }
3504};
3505
3506struct DisableDebugLocationUpdates {
3507 CodeGenFunction &CGF;
3508 bool disabledDebugInfo;
3509 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3510 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3511 CGF.disableDebugInfo();
3512 }
3513 ~DisableDebugLocationUpdates() {
3514 if (disabledDebugInfo)
3515 CGF.enableDebugInfo();
3516 }
3517};
3518
3519} // end anonymous namespace
3520
3521RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3522 if (!HasLV)
3523 return RV;
3524 LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3525 CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3526 LV.isVolatile());
3527 IsUsed = true;
3528 return RValue::getAggregate(Copy.getAddress());
3529}
3530
3531void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3532 LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3533 if (!HasLV && RV.isScalar())
3534 CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*init=*/true);
3535 else if (!HasLV && RV.isComplex())
3536 CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3537 else {
3538 auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
3539 LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3540 // We assume that call args are never copied into subobjects.
3541 CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3542 HasLV ? LV.isVolatileQualified()
3543 : RV.isVolatileQualified());
3544 }
3545 IsUsed = true;
3546}
3547
3548void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3549 QualType type) {
3550 DisableDebugLocationUpdates Dis(*this, E);
3551 if (const ObjCIndirectCopyRestoreExpr *CRE
3552 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3553    assert(getLangOpts().ObjCAutoRefCount);
3554 return emitWritebackArg(*this, args, CRE);
3555 }
3556
3557    assert(type->isReferenceType() == E->isGLValue() &&
3558           "reference binding to unmaterialized r-value!");
3559
3560 if (E->isGLValue()) {
3561      assert(E->getObjectKind() == OK_Ordinary);
3562 return args.add(EmitReferenceBindingToExpr(E), type);
3563 }
3564
3565 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3566
3567 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3568 // However, we still have to push an EH-only cleanup in case we unwind before
3569 // we make it to the call.
3570 if (HasAggregateEvalKind &&
3571 type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
3572 // If we're using inalloca, use the argument memory. Otherwise, use a
3573 // temporary.
3574 AggValueSlot Slot;
3575 if (args.isUsingInAlloca())
3576 Slot = createPlaceholderSlot(*this, type);
3577 else
3578 Slot = CreateAggTemp(type, "agg.tmp");
3579
3580 bool DestroyedInCallee = true, NeedsEHCleanup = true;
3581 if (const auto *RD = type->getAsCXXRecordDecl())
3582 DestroyedInCallee = RD->hasNonTrivialDestructor();
3583 else
3584 NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3585
3586 if (DestroyedInCallee)
3587 Slot.setExternallyDestructed();
3588
3589 EmitAggExpr(E, Slot);
3590 RValue RV = Slot.asRValue();
3591 args.add(RV, type);
3592
3593 if (DestroyedInCallee && NeedsEHCleanup) {
3594 // Create a no-op GEP between the placeholder and the cleanup so we can
3595 // RAUW it successfully. It also serves as a marker of the first
3596 // instruction where the cleanup is active.
3597 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3598 type);
3599 // This unreachable is a temporary marker which will be removed later.
3600 llvm::Instruction *IsActive = Builder.CreateUnreachable();
3601 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3602 }
3603 return;
3604 }
3605
3606 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3607 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3608 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3609      assert(L.isSimple());
3610 args.addUncopiedAggregate(L, type);
3611 return;
3612 }
3613
3614 args.add(EmitAnyExprToTemp(E), type);
3615}
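
The callee-destroyed-aggregate branch above (isParamDestroyedInCallee) is what a Microsoft C++ ABI call like the following exercises; the sketch is illustrative only, with made-up names, and does not appear in the source:

    struct Token { ~Token(); };      // non-trivial destructor
    void consume(Token t);           // MS C++ ABI: the callee destroys 't'
    void caller() {
      consume(Token{});              // the caller keeps an EH-only cleanup alive until the call is made
    }
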
3616
3617QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3618 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3619 // implicitly widens null pointer constants that are arguments to varargs
3620 // functions to pointer-sized ints.
3621 if (!getTarget().getTriple().isOSWindows())
3622 return Arg->getType();
3623
3624 if (Arg->getType()->isIntegerType() &&
3625 getContext().getTypeSize(Arg->getType()) <
3626 getContext().getTargetInfo().getPointerWidth(0) &&
3627 Arg->isNullPointerConstant(getContext(),
3628 Expr::NPC_ValueDependentIsNotNull)) {
3629 return getContext().getIntPtrType();
3630 }
3631
3632 return Arg->getType();
3633}
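
getVarArgType exists because of calls such as the following on Win64, where NULL expands to a plain int 0 yet MSVC still passes a pointer-sized variadic slot; the example is illustrative and not taken from the source:

    #include <stdio.h>
    #include <stddef.h>
    void demo(void) {
      printf("%p\n", NULL);   // the 32-bit 0 literal is widened to a pointer-sized integer for the variadic slot
    }
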
3634
3635// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3636// optimizer it can aggressively ignore unwind edges.
3637void
3638CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3639 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3640 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3641 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3642 CGM.getNoObjCARCExceptionsMetadata());
3643}
3644
3645/// Emits a call to the given no-arguments nounwind runtime function.
3646llvm::CallInst *
3647CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3648 const llvm::Twine &name) {
3649 return EmitNounwindRuntimeCall(callee, None, name);
3650}
3651
3652/// Emits a call to the given nounwind runtime function.
3653llvm::CallInst *
3654CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3655 ArrayRef<llvm::Value*> args,
3656 const llvm::Twine &name) {
3657 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3658 call->setDoesNotThrow();
3659 return call;
3660}
3661
3662/// Emits a simple call (never an invoke) to the given no-arguments
3663/// runtime function.
3664llvm::CallInst *
3665CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3666 const llvm::Twine &name) {
3667 return EmitRuntimeCall(callee, None, name);
3668}
3669
3670// Calls which may throw must have operand bundles indicating which funclet
3671// they are nested within.
3672SmallVector<llvm::OperandBundleDef, 1>
3673CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
3674 SmallVector<llvm::OperandBundleDef, 1> BundleList;
3675 // There is no need for a funclet operand bundle if we aren't inside a
3676 // funclet.
3677 if (!CurrentFuncletPad)
3678 return BundleList;
3679
3680 // Skip intrinsics which cannot throw.
3681 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3682 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3683 return BundleList;
3684
3685 BundleList.emplace_back("funclet", CurrentFuncletPad);
3686 return BundleList;
3687}
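
The "funclet" bundle attached above matters for MSVC-style exception handling, where code emitted inside a handler or cleanup funclet must name its parent pad. A source-level situation that produces such calls (illustrative only; the type name is made up):

    struct Guard { ~Guard(); };
    void handler() {
      try { throw 1; }
      catch (int) {
        Guard g;   // calls emitted for this handler body live inside a catchpad funclet
      }
    }
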
3688
3689/// Emits a simple call (never an invoke) to the given runtime function.
3690llvm::CallInst *
3691CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3692 ArrayRef<llvm::Value*> args,
3693 const llvm::Twine &name) {
3694 llvm::CallInst *call =
3695 Builder.CreateCall(callee, args, getBundlesForFunclet(callee), name);
3696 call->setCallingConv(getRuntimeCC());
3697 return call;
3698}
3699
3700/// Emits a call or invoke to the given noreturn runtime function.
3701void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3702 ArrayRef<llvm::Value*> args) {
3703 SmallVector<llvm::OperandBundleDef, 1> BundleList =
3704 getBundlesForFunclet(callee);
3705
3706 if (getInvokeDest()) {
3707 llvm::InvokeInst *invoke =
3708 Builder.CreateInvoke(callee,
3709 getUnreachableBlock(),
3710 getInvokeDest(),
3711 args,
3712 BundleList);
3713 invoke->setDoesNotReturn();
3714 invoke->setCallingConv(getRuntimeCC());
3715 } else {
3716 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3717 call->setDoesNotReturn();
3718 call->setCallingConv(getRuntimeCC());
3719 Builder.CreateUnreachable();
3720 }
3721}
3722
3723/// Emits a call or invoke instruction to the given nullary runtime function.
3724llvm::CallSite
3725CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3726 const Twine &name) {
3727 return EmitRuntimeCallOrInvoke(callee, None, name);
3728}
3729
3730/// Emits a call or invoke instruction to the given runtime function.
3731llvm::CallSite
3732CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3733 ArrayRef<llvm::Value*> args,
3734 const Twine &name) {
3735 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3736 callSite.setCallingConv(getRuntimeCC());
3737 return callSite;
3738}
3739
3740/// Emits a call or invoke instruction to the given function, depending
3741/// on the current state of the EH stack.
3742llvm::CallSite
3743CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3744 ArrayRef<llvm::Value *> Args,
3745 const Twine &Name) {
3746 llvm::BasicBlock *InvokeDest = getInvokeDest();
3747 SmallVector<llvm::OperandBundleDef, 1> BundleList =
3748 getBundlesForFunclet(Callee);
3749
3750 llvm::Instruction *Inst;
3751 if (!InvokeDest)
3752 Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3753 else {
3754 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3755 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3756 Name);
3757 EmitBlock(ContBB);
3758 }
3759
3760 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3761 // optimizer it can aggressively ignore unwind edges.
3762 if (CGM.getLangOpts().ObjCAutoRefCount)
3763 AddObjCARCExceptionMetadata(Inst);
3764
3765 return llvm::CallSite(Inst);
3766}
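
Whether EmitCallOrInvoke produces a plain call or an invoke depends only on getInvokeDest(), i.e. on whether any EH cleanup or handler is active at the point of emission. A rough source-level analogue (sketch only; Guard and may_throw are hypothetical):

    struct Guard { ~Guard(); };
    void f() {
      Guard g;       // active cleanup: calls below get an unwind edge (invoke)
      may_throw();   // unwinding from here must still run ~Guard()
    }
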
3767
3768void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3769 llvm::Value *New) {
3770 DeferredReplacements.push_back(std::make_pair(Old, New));
3771}
3772
3773RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3774 const CGCallee &Callee,
3775 ReturnValueSlot ReturnValue,
3776 const CallArgList &CallArgs,
3777 llvm::Instruction **callOrInvoke,
3778 SourceLocation Loc) {
3779 // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3780
3781    assert(Callee.isOrdinary() || Callee.isVirtual());
3782
3783 // Handle struct-return functions by passing a pointer to the
3784 // location that we would like to return into.
3785 QualType RetTy = CallInfo.getReturnType();
3786 const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3787
3788 llvm::FunctionType *IRFuncTy = Callee.getFunctionType();
3789
3790 // 1. Set up the arguments.
3791
3792 // If we're using inalloca, insert the allocation after the stack save.
3793 // FIXME: Do this earlier rather than hacking it in here!
3794 Address ArgMemory = Address::invalid();
3795 const llvm::StructLayout *ArgMemoryLayout = nullptr;
1. 'ArgMemoryLayout' initialized to a null pointer value
3796 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
2. Assuming 'ArgStruct' is null
3. Taking false branch
3797 const llvm::DataLayout &DL = CGM.getDataLayout();
3798 ArgMemoryLayout = DL.getStructLayout(ArgStruct);
3799 llvm::Instruction *IP = CallArgs.getStackBase();
3800 llvm::AllocaInst *AI;
3801 if (IP) {
3802 IP = IP->getNextNode();
3803 AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3804 "argmem", IP);
3805 } else {
3806 AI = CreateTempAlloca(ArgStruct, "argmem");
3807 }
3808 auto Align = CallInfo.getArgStructAlignment();
3809 AI->setAlignment(Align.getQuantity());
3810 AI->setUsedWithInAlloca(true);
3811      assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3812 ArgMemory = Address(AI, Align);
3813 }
3814
3815 // Helper function to drill into the inalloca allocation.
3816 auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3817 auto FieldOffset =
3818 CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
8. Called C++ object pointer is null
3819 return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3820 };
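
On the path the analyzer reports, CallInfo.getArgStruct() is assumed null at step 2, so ArgMemoryLayout keeps its null initializer, yet step 6 still enters the RetAI.isInAlloca() branch and step 8 dereferences the null layout at line 3818. In practice an inalloca return field should imply a non-null argument struct, so the path may be infeasible; a defensive sketch that documents that invariant (hypothetical, not the upstream code or its fix) would be:

    auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
      assert(ArgMemoryLayout && "inalloca field requested but no argument struct was laid out");
      auto FieldOffset =
          CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
      return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
    };
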
3821
3822 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3823 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3824
3825 // If the call returns a temporary with struct return, create a temporary
3826 // alloca to hold the result, unless one is given to us.
3827 Address SRetPtr = Address::invalid();
3828 Address SRetAlloca = Address::invalid();
3829 llvm::Value *UnusedReturnSizePtr = nullptr;
3830 if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3831 if (!ReturnValue.isNull()) {
4. Taking true branch
3832 SRetPtr = ReturnValue.getValue();
3833 } else {
3834 SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
3835 if (HaveInsertPoint() && ReturnValue.isUnused()) {
3836 uint64_t size =
3837 CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3838 UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
3839 }
3840 }
3841 if (IRFunctionArgs.hasSRetArg()) {
5. Taking false branch
3842 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3843 } else if (RetAI.isInAlloca()) {
6. Taking true branch
3844 Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
7. Calling 'operator()'
3845 Builder.CreateStore(SRetPtr.getPointer(), Addr);
3846 }
3847 }
3848
3849 Address swiftErrorTemp = Address::invalid();
3850 Address swiftErrorArg = Address::invalid();
3851
3852 // Translate all of the arguments as necessary to match the IR lowering.
3853    assert(CallInfo.arg_size() == CallArgs.size() &&
3854           "Mismatch between function signature & arguments.");
3855 unsigned ArgNo = 0;
3856 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3857 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3858 I != E; ++I, ++info_it, ++ArgNo) {
3859 const ABIArgInfo &ArgInfo = info_it->info;
3860
3861 // Insert a padding argument to ensure proper alignment.
3862 if (IRFunctionArgs.hasPaddingArg(ArgNo))
3863 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3864 llvm::UndefValue::get(ArgInfo.getPaddingType());
3865
3866 unsigned FirstIRArg, NumIRArgs;
3867 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3868
3869 switch (ArgInfo.getKind()) {
3870 case ABIArgInfo::InAlloca: {
3871      assert(NumIRArgs == 0);
3872      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3873 if (I->isAggregate()) {
3874 // Replace the placeholder with the appropriate argument slot GEP.
3875 Address Addr = I->hasLValue()
3876 ? I->getKnownLValue().getAddress()
3877 : I->getKnownRValue().getAggregateAddress();
3878 llvm::Instruction *Placeholder =
3879 cast<llvm::Instruction>(Addr.getPointer());
3880 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3881 Builder.SetInsertPoint(Placeholder);
3882 Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3883 Builder.restoreIP(IP);
3884 deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3885 } else {
3886 // Store the RValue into the argument struct.
3887 Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3888 unsigned AS = Addr.getType()->getPointerAddressSpace();
3889 llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3890 // There are some cases where a trivial bitcast is not avoidable. The
3891      // definition of a type later in a translation unit may change its type
3892 // from {}* to (%struct.foo*)*.
3893 if (Addr.getType() != MemType)
3894 Addr = Builder.CreateBitCast(Addr, MemType);
3895 I->copyInto(*this, Addr);
3896 }
3897 break;
3898 }
3899
3900 case ABIArgInfo::Indirect: {
3901      assert(NumIRArgs == 1);
3902 if (!I->isAggregate()) {
3903 // Make a temporary alloca to pass the argument.
3904 Address Addr = CreateMemTempWithoutCast(
3905 I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
3906 IRCallArgs[FirstIRArg] = Addr.getPointer();
3907
3908 I->copyInto(*this, Addr);
3909 } else {
3910 // We want to avoid creating an unnecessary temporary+copy here;
3911 // however, we need one in three cases:
3912 // 1. If the argument is not byval, and we are required to copy the
3913 // source. (This case doesn't occur on any common architecture.)
3914 // 2. If the argument is byval, RV is not sufficiently aligned, and
3915 // we cannot force it to be sufficiently aligned.
3916 // 3. If the argument is byval, but RV is not located in default
3917 // or alloca address space.
3918 Address Addr = I->hasLValue()
3919 ? I->getKnownLValue().getAddress()
3920 : I->getKnownRValue().getAggregateAddress();
3921 llvm::Value *V = Addr.getPointer();
3922 CharUnits Align = ArgInfo.getIndirectAlign();
3923 const llvm::DataLayout *TD = &CGM.getDataLayout();
3924
3925        assert((FirstIRArg >= IRFuncTy->getNumParams() ||
3926                IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
3927                    TD->getAllocaAddrSpace()) &&
3928               "indirect argument must be in alloca address space");
3929
3930 bool NeedCopy = false;
3931
3932 if (Addr.getAlignment() < Align &&
3933 llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
3934 Align.getQuantity()) {
3935 NeedCopy = true;
3936 } else if (I->hasLValue()) {
3937 auto LV = I->getKnownLValue();
3938 auto AS = LV.getAddressSpace();
3939 if ((!ArgInfo.getIndirectByVal() &&
3940 (LV.getAlignment() >=
3941 getContext().getTypeAlignInChars(I->Ty))) ||
3942 (ArgInfo.getIndirectByVal() &&
3943 ((AS != LangAS::Default && AS != LangAS::opencl_private &&
3944 AS != CGM.getASTAllocaAddressSpace())))) {
3945 NeedCopy = true;
3946 }
3947 }
3948 if (NeedCopy) {
3949 // Create an aligned temporary, and copy to it.
3950 Address AI = CreateMemTempWithoutCast(
3951 I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
3952 IRCallArgs[FirstIRArg] = AI.getPointer();
3953 I->copyInto(*this, AI);
3954 } else {
3955 // Skip the extra memcpy call.
3956 auto *T = V->getType()->getPointerElementType()->getPointerTo(
3957 CGM.getDataLayout().getAllocaAddrSpace());
3958 IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
3959 *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
3960 true);
3961 }
3962 }
3963 break;
3964 }
3965
3966 case ABIArgInfo::Ignore:
3967      assert(NumIRArgs == 0);
3968 break;
3969
3970 case ABIArgInfo::Extend:
3971 case ABIArgInfo::Direct: {
3972 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
3973 ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
3974 ArgInfo.getDirectOffset() == 0) {
3975        assert(NumIRArgs == 1);
3976 llvm::Value *V;
3977 if (!I->isAggregate())
3978 V = I->getKnownRValue().getScalarVal();
3979 else
3980 V = Builder.CreateLoad(
3981 I->hasLValue() ? I->getKnownLValue().getAddress()
3982 : I->getKnownRValue().getAggregateAddress());
3983
3984 // Implement swifterror by copying into a new swifterror argument.
3985 // We'll write back in the normal path out of the call.
3986 if (CallInfo.getExtParameterInfo(ArgNo).getABI()
3987 == ParameterABI::SwiftErrorResult) {
3988          assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
3989
3990 QualType pointeeTy = I->Ty->getPointeeType();
3991 swiftErrorArg =
3992 Address(V, getContext().getTypeAlignInChars(pointeeTy));
3993
3994 swiftErrorTemp =
3995 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
3996 V = swiftErrorTemp.getPointer();
3997 cast<llvm::AllocaInst>(V)->setSwiftError(true);
3998
3999 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4000 Builder.CreateStore(errorValue, swiftErrorTemp);
4001 }
4002
4003 // We might have to widen integers, but we should never truncate.
4004 if (ArgInfo.getCoerceToType() != V->getType() &&
4005 V->getType()->isIntegerTy())
4006 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4007
4008 // If the argument doesn't match, perform a bitcast to coerce it. This
4009 // can happen due to trivial type mismatches.
4010 if (FirstIRArg < IRFuncTy->getNumParams() &&
4011 V->getType() != IRFuncTy->getParamType(FirstIRArg))
4012 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4013
4014 IRCallArgs[FirstIRArg] = V;
4015 break;
4016 }
4017
4018 // FIXME: Avoid the conversion through memory if possible.
4019 Address Src = Address::invalid();
4020 if (!I->isAggregate()) {
4021 Src = CreateMemTemp(I->Ty, "coerce");
4022 I->copyInto(*this, Src);
4023 } else {
4024 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
4025 : I->getKnownRValue().getAggregateAddress();
4026 }
4027
4028 // If the value is offset in memory, apply the offset now.
4029 Src = emitAddressAtOffset(*this, Src, ArgInfo);
4030
4031 // Fast-isel and the optimizer generally like scalar values better than
4032 // FCAs, so we flatten them if this is safe to do for this argument.
4033 llvm::StructType *STy =
4034 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4035 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4036 llvm::Type *SrcTy = Src.getType()->getElementType();
4037 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4038 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4039
4040 // If the source type is smaller than the destination type of the
4041 // coerce-to logic, copy the source value into a temp alloca the size
4042 // of the destination type to allow loading all of it. The bits past
4043 // the source value are left undef.
4044 if (SrcSize < DstSize) {
4045 Address TempAlloca
4046 = CreateTempAlloca(STy, Src.getAlignment(),
4047 Src.getName() + ".coerce");
4048 Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4049 Src = TempAlloca;
4050 } else {
4051 Src = Builder.CreateBitCast(Src,
4052 STy->getPointerTo(Src.getAddressSpace()));
4053 }
4054
4055 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
4056        assert(NumIRArgs == STy->getNumElements());
4057 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4058 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
4059 Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
4060 llvm::Value *LI = Builder.CreateLoad(EltPtr);
4061 IRCallArgs[FirstIRArg + i] = LI;
4062 }
4063 } else {
4064 // In the simple case, just pass the coerced loaded value.
4065        assert(NumIRArgs == 1);
4066 IRCallArgs[FirstIRArg] =
4067 CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4068 }
4069
4070 break;
4071 }
4072
4073 case ABIArgInfo::CoerceAndExpand: {
4074 auto coercionType = ArgInfo.getCoerceAndExpandType();
4075 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4076
4077 llvm::Value *tempSize = nullptr;
4078 Address addr = Address::invalid();
4079 Address AllocaAddr = Address::invalid();
4080 if (I->isAggregate()) {
4081 addr = I->hasLValue() ? I->getKnownLValue().getAddress()
4082 : I->getKnownRValue().getAggregateAddress();
4083
4084 } else {
4085 RValue RV = I->getKnownRValue();
4086        assert(RV.isScalar()); // complex should always just be direct
4087
4088 llvm::Type *scalarType = RV.getScalarVal()->getType();
4089 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4090 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4091
4092 // Materialize to a temporary.
4093 addr = CreateTempAlloca(RV.getScalarVal()->getType(),
4094 CharUnits::fromQuantity(std::max(
4095 layout->getAlignment(), scalarAlign)),
4096 "tmp",
4097 /*ArraySize=*/nullptr, &AllocaAddr);
4098 tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4099
4100 Builder.CreateStore(RV.getScalarVal(), addr);
4101 }
4102
4103 addr = Builder.CreateElementBitCast(addr, coercionType);
4104
4105 unsigned IRArgPos = FirstIRArg;
4106 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4107 llvm::Type *eltType = coercionType->getElementType(i);
4108 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4109 Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4110 llvm::Value *elt = Builder.CreateLoad(eltAddr);
4111 IRCallArgs[IRArgPos++] = elt;
4112 }
4113      assert(IRArgPos == FirstIRArg + NumIRArgs);
4114
4115 if (tempSize) {
4116 EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4117 }
4118
4119 break;
4120 }
4121
4122 case ABIArgInfo::Expand:
4123 unsigned IRArgPos = FirstIRArg;
4124 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4125      assert(IRArgPos == FirstIRArg + NumIRArgs);
4126 break;
4127 }
4128 }
4129
4130 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4131 llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4132
4133 // If we're using inalloca, set up that argument.
4134 if (ArgMemory.isValid()) {
4135 llvm::Value *Arg = ArgMemory.getPointer();
4136 if (CallInfo.isVariadic()) {
4137 // When passing non-POD arguments by value to variadic functions, we will
4138 // end up with a variadic prototype and an inalloca call site. In such
4139 // cases, we can't do any parameter mismatch checks. Give up and bitcast
4140 // the callee.
4141 unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4142 auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
4143 CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
4144 } else {
4145 llvm::Type *LastParamTy =
4146 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4147 if (Arg->getType() != LastParamTy) {
4148#ifndef NDEBUG
4149 // Assert that these structs have equivalent element types.
4150 llvm::StructType *FullTy = CallInfo.getArgStruct();
4151 llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4152 cast<llvm::PointerType>(LastParamTy)->getElementType());
4153        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4154 for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4155 DE = DeclaredTy->element_end(),
4156 FI = FullTy->element_begin();
4157 DI != DE; ++DI, ++FI)
4158          assert(*DI == *FI);
4159#endif
4160 Arg = Builder.CreateBitCast(Arg, LastParamTy);
4161 }
4162 }
4163    assert(IRFunctionArgs.hasInallocaArg());
4164 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4165 }
4166
4167 // 2. Prepare the function pointer.
4168
4169 // If the callee is a bitcast of a non-variadic function to have a
4170 // variadic function pointer type, check to see if we can remove the
4171 // bitcast. This comes up with unprototyped functions.
4172 //
4173 // This makes the IR nicer, but more importantly it ensures that we
4174 // can inline the function at -O0 if it is marked always_inline.
4175 auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
4176 llvm::FunctionType *CalleeFT =
4177 cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
4178 if (!CalleeFT->isVarArg())
4179 return Ptr;
4180
4181 llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
4182 if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
4183 return Ptr;
4184
4185 llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
4186 if (!OrigFn)
4187 return Ptr;
4188
4189 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4190
4191 // If the original type is variadic, or if any of the component types
4192 // disagree, we cannot remove the cast.
4193 if (OrigFT->isVarArg() ||
4194 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4195 OrigFT->getReturnType() != CalleeFT->getReturnType())
4196 return Ptr;
4197
4198 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4199 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4200 return Ptr;
4201
4202 return OrigFn;
4203 };
4204 CalleePtr = simplifyVariadicCallee(CalleePtr);
4205
4206 // 3. Perform the actual call.
4207
4208 // Deactivate any cleanups that we're supposed to do immediately before
4209 // the call.
4210 if (!CallArgs.getCleanupsToDeactivate().empty())
4211 deactivateArgCleanupsBeforeCall(*this, CallArgs);
4212
4213 // Assert that the arguments we computed match up. The IR verifier
4214 // will catch this, but this is a common enough source of problems
4215 // during IRGen changes that it's way better for debugging to catch
4216 // it ourselves here.
4217#ifndef NDEBUG
4218  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4219 for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4220 // Inalloca argument can have different type.
4221 if (IRFunctionArgs.hasInallocaArg() &&
4222 i == IRFunctionArgs.getInallocaArgNo())
4223 continue;
4224 if (i < IRFuncTy->getNumParams())
4225      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4226 }
4227#endif
4228
4229 // Compute the calling convention and attributes.
4230 unsigned CallingConv;
4231 llvm::AttributeList Attrs;
4232 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4233 Callee.getAbstractInfo(), Attrs, CallingConv,
4234 /*AttrOnCallSite=*/true);
4235
4236 // Apply some call-site-specific attributes.
4237 // TODO: work this into building the attribute set.
4238
4239 // Apply always_inline to all calls within flatten functions.
4240 // FIXME: should this really take priority over __try, below?
4241 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4242 !(Callee.getAbstractInfo().getCalleeDecl() &&
4243 Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) {
4244 Attrs =
4245 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4246 llvm::Attribute::AlwaysInline);
4247 }
4248
4249 // Disable inlining inside SEH __try blocks.
4250 if (isSEHTryScope()) {
4251 Attrs =
4252 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4253 llvm::Attribute::NoInline);
4254 }
4255
4256 // Decide whether to use a call or an invoke.
4257 bool CannotThrow;
4258 if (currentFunctionUsesSEHTry()) {
4259 // SEH cares about asynchronous exceptions, so everything can "throw."
4260 CannotThrow = false;
4261 } else if (isCleanupPadScope() &&
4262 EHPersonality::get(*this).isMSVCXXPersonality()) {
4263 // The MSVC++ personality will implicitly terminate the program if an
4264 // exception is thrown during a cleanup outside of a try/catch.
4265 // We don't need to model anything in IR to get this behavior.
4266 CannotThrow = true;
4267 } else {
4268 // Otherwise, nounwind call sites will never throw.
4269 CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4270 llvm::Attribute::NoUnwind);
4271 }
4272
4273 // If we made a temporary, be sure to clean up after ourselves. Note that we
4274 // can't depend on being inside of an ExprWithCleanups, so we need to manually
4275 // pop this cleanup later on. Being eager about this is OK, since this
4276 // temporary is 'invisible' outside of the callee.
4277 if (UnusedReturnSizePtr)
4278 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
4279 UnusedReturnSizePtr);
4280
4281 llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4282
4283 SmallVector<llvm::OperandBundleDef, 1> BundleList =
4284 getBundlesForFunclet(CalleePtr);
4285
4286 // Emit the actual call/invoke instruction.
4287 llvm::CallSite CS;
4288 if (!InvokeDest) {
4289 CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
4290 } else {
4291 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4292 CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
4293 BundleList);
4294 EmitBlock(Cont);
4295 }
4296 llvm::Instruction *CI = CS.getInstruction();
4297 if (callOrInvoke)
4298 *callOrInvoke = CI;
4299
4300 // Apply the attributes and calling convention.
4301 CS.setAttributes(Attrs);
4302 CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4303
4304 // Apply various metadata.
4305
4306 if (!CI->getType()->isVoidTy())
4307 CI->setName("call");
4308
4309 // Insert instrumentation or attach profile metadata at indirect call sites.
4310 // For more details, see the comment before the definition of
4311 // IPVK_IndirectCallTarget in InstrProfData.inc.
4312 if (!CS.getCalledFunction())
4313 PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4314 CI, CalleePtr);
4315
4316 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4317 // optimizer it can aggressively ignore unwind edges.
4318 if (CGM.getLangOpts().ObjCAutoRefCount)
4319 AddObjCARCExceptionMetadata(CI);
4320
4321 // Suppress tail calls if requested.
4322 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4323 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4324 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4325 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4326 }
4327
4328 // 4. Finish the call.
4329
4330 // If the call doesn't return, finish the basic block and clear the
4331 // insertion point; this allows the rest of IRGen to discard
4332 // unreachable code.
4333 if (CS.doesNotReturn()) {
4334 if (UnusedReturnSizePtr)
4335 PopCleanupBlock();
4336
4337 // Strip away the noreturn attribute to better diagnose unreachable UB.
4338 if (SanOpts.has(SanitizerKind::Unreachable)) {
4339 if (auto *F = CS.getCalledFunction())
4340 F->removeFnAttr(llvm::Attribute::NoReturn);
4341 CS.removeAttribute(llvm::AttributeList::FunctionIndex,
4342 llvm::Attribute::NoReturn);
4343 }
4344
4345 EmitUnreachable(Loc);
4346 Builder.ClearInsertionPoint();
4347
4348 // FIXME: For now, emit a dummy basic block because expr emitters in
4349 // generally are not ready to handle emitting expressions at unreachable
4350 // points.
4351 EnsureInsertPoint();
4352
4353 // Return a reasonable RValue.
4354 return GetUndefRValue(RetTy);
4355 }
4356
4357 // Perform the swifterror writeback.
4358 if (swiftErrorTemp.isValid()) {
4359 llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4360 Builder.CreateStore(errorResult, swiftErrorArg);
4361 }
4362
4363 // Emit any call-associated writebacks immediately. Arguably this
4364 // should happen after any return-value munging.
4365 if (CallArgs.hasWritebacks())
4366 emitWritebacks(*this, CallArgs);
4367
4368 // The stack cleanup for inalloca arguments has to run out of the normal
4369 // lexical order, so deactivate it and run it manually here.
4370 CallArgs.freeArgumentMemory(*this);
4371
4372 // Extract the return value.
4373 RValue Ret = [&] {
4374 switch (RetAI.getKind()) {
4375 case ABIArgInfo::CoerceAndExpand: {
4376 auto coercionType = RetAI.getCoerceAndExpandType();
4377 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4378
4379 Address addr = SRetPtr;
4380 addr = Builder.CreateElementBitCast(addr, coercionType);
4381
4382      assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4383 bool requiresExtract = isa<llvm::StructType>(CI->getType());
4384
4385 unsigned unpaddedIndex = 0;
4386 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4387 llvm::Type *eltType = coercionType->getElementType(i);
4388 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4389 Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4390 llvm::Value *elt = CI;
4391 if (requiresExtract)
4392 elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4393 else
4394        assert(unpaddedIndex == 0);
4395 Builder.CreateStore(elt, eltAddr);
4396 }
4397 // FALLTHROUGH
4398      LLVM_FALLTHROUGH;
4399 }
4400
4401 case ABIArgInfo::InAlloca:
4402 case ABIArgInfo::Indirect: {
4403 RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4404 if (UnusedReturnSizePtr)
4405 PopCleanupBlock();
4406 return ret;
4407 }
4408
4409 case ABIArgInfo::Ignore:
4410 // If we are ignoring an argument that had a result, make sure to
4411 // construct the appropriate return value for our caller.
4412 return GetUndefRValue(RetTy);
4413
4414 case ABIArgInfo::Extend:
4415 case ABIArgInfo::Direct: {
4416 llvm::Type *RetIRTy = ConvertType(RetTy);
4417 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4418 switch (getEvaluationKind(RetTy)) {
4419 case TEK_Complex: {
4420 llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4421 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4422 return RValue::getComplex(std::make_pair(Real, Imag));
4423 }
4424 case TEK_Aggregate: {
4425 Address DestPtr = ReturnValue.getValue();
4426 bool DestIsVolatile = ReturnValue.isVolatile();
4427
4428 if (!DestPtr.isValid()) {
4429 DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4430 DestIsVolatile = false;
4431 }
4432 BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4433 return RValue::getAggregate(DestPtr);
4434 }
4435 case TEK_Scalar: {
4436 // If the argument doesn't match, perform a bitcast to coerce it. This
4437 // can happen due to trivial type mismatches.
4438 llvm::Value *V = CI;
4439 if (V->getType() != RetIRTy)
4440 V = Builder.CreateBitCast(V, RetIRTy);
4441 return RValue::get(V);
4442 }
4443 }
4444      llvm_unreachable("bad evaluation kind");
4445 }
4446
4447 Address DestPtr = ReturnValue.getValue();
4448 bool DestIsVolatile = ReturnValue.isVolatile();
4449
4450 if (!DestPtr.isValid()) {
4451 DestPtr = CreateMemTemp(RetTy, "coerce");
4452 DestIsVolatile = false;
4453 }
4454
4455 // If the value is offset in memory, apply the offset now.
4456 Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4457 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4458
4459 return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4460 }
4461
4462 case ABIArgInfo::Expand:
4463      llvm_unreachable("Invalid ABI kind for return argument");
4464 }
4465
4466    llvm_unreachable("Unhandled ABIArgInfo::Kind");
4467 } ();
4468
4469 // Emit the assume_aligned check on the return value.
4470 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4471 if (Ret.isScalar() && TargetDecl) {
4472 if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4473 llvm::Value *OffsetValue = nullptr;
4474 if (const auto *Offset = AA->getOffset())
4475 OffsetValue = EmitScalarExpr(Offset);
4476
4477 llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4478 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4479 EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
4480 OffsetValue);
4481 } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4482 llvm::Value *ParamVal =
4483 CallArgs[AA->getParamIndex().getLLVMIndex()].getRValue(
4484 *this).getScalarVal();
4485 EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal);
4486 }
4487 }
4488
4489 return Ret;
4490}
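
The assume_aligned / alloc_align handling at the end of EmitCall corresponds to declarations like the following (illustrative sketch; the function names are made up, the attribute spellings are the GNU ones Clang accepts):

    void *pool_alloc(unsigned n) __attribute__((assume_aligned(64)));                    // return value assumed 64-byte aligned
    void *aligned_alloc2(unsigned align, unsigned n) __attribute__((alloc_align(1)));    // alignment taken from parameter 1
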
4491
4492CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
4493 if (isVirtual()) {
4494 const CallExpr *CE = getVirtualCallExpr();
4495 return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
4496 CGF, getVirtualMethodDecl(), getThisAddress(),
4497 getFunctionType(), CE ? CE->getLocStart() : SourceLocation());
4498 }
4499
4500 return *this;
4501}
4502
4503/* VarArg handling */
4504
4505Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4506 VAListAddr = VE->isMicrosoftABI()
4507 ? EmitMSVAListRef(VE->getSubExpr())
4508 : EmitVAListRef(VE->getSubExpr());
4509 QualType Ty = VE->getType();
4510 if (VE->isMicrosoftABI())
4511 return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4512 return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4513}
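
EmitVAArg is the hook behind each source-level va_arg; a minimal user of it, for illustration only:

    #include <stdarg.h>
    int sum(int n, ...) {
      va_list ap;
      va_start(ap, n);
      int total = 0;
      for (int i = 0; i < n; ++i)
        total += va_arg(ap, int);   // each va_arg lowers through EmitVAArg
      va_end(ap);
      return total;
    }
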