Bug Summary

File: tools/clang/lib/CodeGen/CGCall.cpp
Warning: line 2309, column 33
Called C++ object pointer is null
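
For orientation, a minimal sketch of the defect class this checker reports (hypothetical types, not taken from CGCall.cpp): a member function called through a pointer that the analyzer can prove is null on at least one path.

    struct Widget { void draw(); };

    void render(Widget *w) {
      if (w) { /* ... */ }  // the check teaches the analyzer that w may be null
      w->draw();            // warning: Called C++ object pointer is null
    }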

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGCall.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-config-compatibility-mode=true -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -relaxed-aliasing -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D CLANG_VENDOR="Debian " -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn348900/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-8~svn348900/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-8~svn348900/tools/clang/include -I /build/llvm-toolchain-snapshot-8~svn348900/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-8~svn348900/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn348900/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-command-line-argument -Wno-unknown-warning-option -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn348900/build-llvm/tools/clang/lib/CodeGen -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fno-common -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-12-12-042652-12204-1 -x c++ /build/llvm-toolchain-snapshot-8~svn348900/tools/clang/lib/CodeGen/CGCall.cpp -faddrsig
1//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// These classes wrap the information about a call or function
11// definition used to handle ABI compliancy.
12//
13//===----------------------------------------------------------------------===//
14
15#include "CGCall.h"
16#include "ABIInfo.h"
17#include "CGBlocks.h"
18#include "CGCXXABI.h"
19#include "CGCleanup.h"
20#include "CodeGenFunction.h"
21#include "CodeGenModule.h"
22#include "TargetInfo.h"
23#include "clang/AST/Decl.h"
24#include "clang/AST/DeclCXX.h"
25#include "clang/AST/DeclObjC.h"
26#include "clang/Basic/CodeGenOptions.h"
27#include "clang/Basic/TargetBuiltins.h"
28#include "clang/Basic/TargetInfo.h"
29#include "clang/CodeGen/CGFunctionInfo.h"
30#include "clang/CodeGen/SwiftCallingConv.h"
31#include "llvm/ADT/StringExtras.h"
32#include "llvm/Transforms/Utils/Local.h"
33#include "llvm/Analysis/ValueTracking.h"
34#include "llvm/IR/Attributes.h"
35#include "llvm/IR/CallSite.h"
36#include "llvm/IR/CallingConv.h"
37#include "llvm/IR/DataLayout.h"
38#include "llvm/IR/InlineAsm.h"
39#include "llvm/IR/IntrinsicInst.h"
40#include "llvm/IR/Intrinsics.h"
41using namespace clang;
42using namespace CodeGen;
43
44/***/
45
46unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
47 switch (CC) {
48 default: return llvm::CallingConv::C;
49 case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
50 case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
51 case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
52 case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
53 case CC_Win64: return llvm::CallingConv::Win64;
54 case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
55 case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
56 case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
57 case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
58 // TODO: Add support for __pascal to LLVM.
59 case CC_X86Pascal: return llvm::CallingConv::C;
60 // TODO: Add support for __vectorcall to LLVM.
61 case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
62 case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
63 case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
64 case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
65 case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
66 case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
67 case CC_Swift: return llvm::CallingConv::Swift;
68 }
69}
70
71/// Derives the 'this' type for codegen purposes, i.e. ignoring method
72/// qualification.
73/// FIXME: address space qualification?
74static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
75 QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
76 return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
77}
78
79/// Returns the canonical formal type of the given C++ method.
80static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
81 return MD->getType()->getCanonicalTypeUnqualified()
82 .getAs<FunctionProtoType>();
83}
84
85/// Returns the "extra-canonicalized" return type, which discards
86/// qualifiers on the return type. Codegen doesn't care about them,
87/// and it makes ABI code a little easier to be able to assume that
88/// all parameter and return types are top-level unqualified.
89static CanQualType GetReturnType(QualType RetTy) {
90 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
91}
92
93/// Arrange the argument and result information for a value of the given
94/// unprototyped freestanding function type.
95const CGFunctionInfo &
96CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
97 // When translating an unprototyped function type, always use a
98 // variadic type.
99 return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
100 /*instanceMethod=*/false,
101 /*chainCall=*/false, None,
102 FTNP->getExtInfo(), {}, RequiredArgs(0));
103}
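
As a hedged source-level illustration of "unprototyped" (plain C declarations, not from this file): an empty parameter list in C declares no prototype, so the callee may receive any arguments and codegen must arrange the type as variadic.

    int legacy();      /* unprototyped: parameter types unknown to callers */
    int modern(void);  /* prototyped: takes exactly zero arguments */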
104
105static void addExtParameterInfosForCall(
106 llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
107 const FunctionProtoType *proto,
108 unsigned prefixArgs,
109 unsigned totalArgs) {
110 assert(proto->hasExtParameterInfos());
111 assert(paramInfos.size() <= prefixArgs);
112 assert(proto->getNumParams() + prefixArgs <= totalArgs);
113
114 paramInfos.reserve(totalArgs);
115
116 // Add default infos for any prefix args that don't already have infos.
117 paramInfos.resize(prefixArgs);
118
119 // Add infos for the prototype.
120 for (const auto &ParamInfo : proto->getExtParameterInfos()) {
121 paramInfos.push_back(ParamInfo);
122 // pass_object_size params have no parameter info.
123 if (ParamInfo.hasPassObjectSize())
124 paramInfos.emplace_back();
125 }
126
127 assert(paramInfos.size() <= totalArgs &&
128 "Did we forget to insert pass_object_size args?");
129 // Add default infos for the variadic and/or suffix arguments.
130 paramInfos.resize(totalArgs);
131}
132
133/// Adds the formal parameters in FPT to the given prefix. If any parameter in
134/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
135static void appendParameterTypes(const CodeGenTypes &CGT,
136 SmallVectorImpl<CanQualType> &prefix,
137 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
138 CanQual<FunctionProtoType> FPT) {
139 // Fast path: don't touch param info if we don't need to.
140 if (!FPT->hasExtParameterInfos()) {
141 assert(paramInfos.empty() &&
142 "We have paramInfos, but the prototype doesn't?");
143 prefix.append(FPT->param_type_begin(), FPT->param_type_end());
144 return;
145 }
146
147 unsigned PrefixSize = prefix.size();
148 // In the vast majority of cases, we'll have precisely FPT->getNumParams()
149 // parameters; the only thing that can change this is the presence of
150 // pass_object_size. So, we preallocate for the common case.
151 prefix.reserve(prefix.size() + FPT->getNumParams());
152
153 auto ExtInfos = FPT->getExtParameterInfos();
154 assert(ExtInfos.size() == FPT->getNumParams());
155 for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
156 prefix.push_back(FPT->getParamType(I));
157 if (ExtInfos[I].hasPassObjectSize())
158 prefix.push_back(CGT.getContext().getSizeType());
159 }
160
161 addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
162 prefix.size());
163}
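
For context, a hedged sketch of the pass_object_size extension handled above (illustrative declaration, not from this file). Each such parameter is lowered with an extra implicit size argument, which is why a getSizeType() entry is pushed right after the parameter type.

    void fill(char *buf __attribute__((pass_object_size(0))), char v);
    // lowered roughly as: void fill(char *buf, size_t buf_size, char v);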
164
165/// Arrange the LLVM function layout for a value of the given function
166/// type, on top of any implicit parameters already stored.
167static const CGFunctionInfo &
168arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
169 SmallVectorImpl<CanQualType> &prefix,
170 CanQual<FunctionProtoType> FTP,
171 const FunctionDecl *FD) {
172 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
173 RequiredArgs Required =
174 RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
175 // FIXME: Kill copy.
176 appendParameterTypes(CGT, prefix, paramInfos, FTP);
177 CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
178
179 return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
180 /*chainCall=*/false, prefix,
181 FTP->getExtInfo(), paramInfos,
182 Required);
183}
184
185/// Arrange the argument and result information for a value of the
186/// given freestanding function type.
187const CGFunctionInfo &
188CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
189 const FunctionDecl *FD) {
190 SmallVector<CanQualType, 16> argTypes;
191 return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
192 FTP, FD);
193}
194
195static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
196 // Set the appropriate calling convention for the Function.
197 if (D->hasAttr<StdCallAttr>())
198 return CC_X86StdCall;
199
200 if (D->hasAttr<FastCallAttr>())
201 return CC_X86FastCall;
202
203 if (D->hasAttr<RegCallAttr>())
204 return CC_X86RegCall;
205
206 if (D->hasAttr<ThisCallAttr>())
207 return CC_X86ThisCall;
208
209 if (D->hasAttr<VectorCallAttr>())
210 return CC_X86VectorCall;
211
212 if (D->hasAttr<PascalAttr>())
213 return CC_X86Pascal;
214
215 if (PcsAttr *PCS = D->getAttr<PcsAttr>())
216 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
217
218 if (D->hasAttr<AArch64VectorPcsAttr>())
219 return CC_AArch64VectorCall;
220
221 if (D->hasAttr<IntelOclBiccAttr>())
222 return CC_IntelOclBicc;
223
224 if (D->hasAttr<MSABIAttr>())
225 return IsWindows ? CC_C : CC_Win64;
226
227 if (D->hasAttr<SysVABIAttr>())
228 return IsWindows ? CC_X86_64SysV : CC_C;
229
230 if (D->hasAttr<PreserveMostAttr>())
231 return CC_PreserveMost;
232
233 if (D->hasAttr<PreserveAllAttr>())
234 return CC_PreserveAll;
235
236 return CC_C;
237}
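
As a hedged example of the mapping above (illustrative declaration): a function carrying __attribute__((fastcall)) yields CC_X86FastCall here, which ClangCallConvToLLVMCallConv in turn lowers to llvm::CallingConv::X86_FastCall.

    void f(int) __attribute__((fastcall));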
238
239/// Arrange the argument and result information for a call to an
240/// unknown C++ non-static member function of the given abstract type.
241/// (Zero value of RD means we don't have any meaningful "this" argument type,
242/// so fall back to a generic pointer type).
243/// The member function must be an ordinary function, i.e. not a
244/// constructor or destructor.
245const CGFunctionInfo &
246CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
247 const FunctionProtoType *FTP,
248 const CXXMethodDecl *MD) {
249 SmallVector<CanQualType, 16> argTypes;
250
251 // Add the 'this' pointer.
252 if (RD)
253 argTypes.push_back(GetThisType(Context, RD));
254 else
255 argTypes.push_back(Context.VoidPtrTy);
256
257 return ::arrangeLLVMFunctionInfo(
258 *this, true, argTypes,
259 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
260}
261
262/// Set calling convention for CUDA/HIP kernel.
263static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
264 const FunctionDecl *FD) {
265 if (FD->hasAttr<CUDAGlobalAttr>()) {
266 const FunctionType *FT = FTy->getAs<FunctionType>();
267 CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
268 FTy = FT->getCanonicalTypeUnqualified();
269 }
270}
271
272/// Arrange the argument and result information for a declaration or
273/// definition of the given C++ non-static member function. The
274/// member function must be an ordinary function, i.e. not a
275/// constructor or destructor.
276const CGFunctionInfo &
277CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
278 assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
279 assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
280
281 CanQualType FT = GetFormalType(MD).getAs<Type>();
282 setCUDAKernelCallingConvention(FT, CGM, MD);
283 auto prototype = FT.getAs<FunctionProtoType>();
284
285 if (MD->isInstance()) {
286 // The abstract case is perfectly fine.
287 const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
288 return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
289 }
290
291 return arrangeFreeFunctionType(prototype, MD);
292}
293
294bool CodeGenTypes::inheritingCtorHasParams(
295 const InheritedConstructor &Inherited, CXXCtorType Type) {
296 // Parameters are unnecessary if we're constructing a base class subobject
297 // and the inherited constructor lives in a virtual base.
298 return Type == Ctor_Complete ||
299 !Inherited.getShadowDecl()->constructsVirtualBase() ||
300 !Target.getCXXABI().hasConstructorVariants();
301 }
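
A hedged sketch of the virtual-base case tested above (hypothetical types): the base-object variant of an inherited constructor never constructs its virtual bases, so parameters that would only be forwarded to the virtual base's constructor can be dropped.

    struct A { A(int); };
    struct B : virtual A { using A::A; };  // base variant of B(int) skips A(int)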
302
303const CGFunctionInfo &
304CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
305 StructorType Type) {
306
307 SmallVector<CanQualType, 16> argTypes;
308 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
309 argTypes.push_back(GetThisType(Context, MD->getParent()));
310
311 bool PassParams = true;
312
313 GlobalDecl GD;
314 if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
315 GD = GlobalDecl(CD, toCXXCtorType(Type));
316
317 // A base class inheriting constructor doesn't get forwarded arguments
318 // needed to construct a virtual base (or base class thereof).
319 if (auto Inherited = CD->getInheritedConstructor())
320 PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
321 } else {
322 auto *DD = dyn_cast<CXXDestructorDecl>(MD);
323 GD = GlobalDecl(DD, toCXXDtorType(Type));
324 }
325
326 CanQual<FunctionProtoType> FTP = GetFormalType(MD);
327
328 // Add the formal parameters.
329 if (PassParams)
330 appendParameterTypes(*this, argTypes, paramInfos, FTP);
331
332 CGCXXABI::AddedStructorArgs AddedArgs =
333 TheCXXABI.buildStructorSignature(MD, Type, argTypes);
334 if (!paramInfos.empty()) {
335 // Note: prefix implies after the first param.
336 if (AddedArgs.Prefix)
337 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
338 FunctionProtoType::ExtParameterInfo{});
339 if (AddedArgs.Suffix)
340 paramInfos.append(AddedArgs.Suffix,
341 FunctionProtoType::ExtParameterInfo{});
342 }
343
344 RequiredArgs required =
345 (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
346 : RequiredArgs::All);
347
348 FunctionType::ExtInfo extInfo = FTP->getExtInfo();
349 CanQualType resultType = TheCXXABI.HasThisReturn(GD)
350 ? argTypes.front()
351 : TheCXXABI.hasMostDerivedReturn(GD)
352 ? CGM.getContext().VoidPtrTy
353 : Context.VoidTy;
354 return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
355 /*chainCall=*/false, argTypes, extInfo,
356 paramInfos, required);
357}
358
359static SmallVector<CanQualType, 16>
360getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
361 SmallVector<CanQualType, 16> argTypes;
362 for (auto &arg : args)
363 argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
364 return argTypes;
365}
366
367static SmallVector<CanQualType, 16>
368getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
369 SmallVector<CanQualType, 16> argTypes;
370 for (auto &arg : args)
371 argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
372 return argTypes;
373}
374
375static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
376getExtParameterInfosForCall(const FunctionProtoType *proto,
377 unsigned prefixArgs, unsigned totalArgs) {
378 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
379 if (proto->hasExtParameterInfos()) {
380 addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
381 }
382 return result;
383}
384
385/// Arrange a call to a C++ method, passing the given arguments.
386///
387/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
388/// parameter.
389/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
390/// args.
391/// PassProtoArgs indicates whether `args` has args for the parameters in the
392/// given CXXConstructorDecl.
393const CGFunctionInfo &
394CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
395 const CXXConstructorDecl *D,
396 CXXCtorType CtorKind,
397 unsigned ExtraPrefixArgs,
398 unsigned ExtraSuffixArgs,
399 bool PassProtoArgs) {
400 // FIXME: Kill copy.
401 SmallVector<CanQualType, 16> ArgTypes;
402 for (const auto &Arg : args)
403 ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
404
405 // +1 for implicit this, which should always be args[0].
406 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
407
408 CanQual<FunctionProtoType> FPT = GetFormalType(D);
409 RequiredArgs Required =
410 RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
411 GlobalDecl GD(D, CtorKind);
412 CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
413 ? ArgTypes.front()
414 : TheCXXABI.hasMostDerivedReturn(GD)
415 ? CGM.getContext().VoidPtrTy
416 : Context.VoidTy;
417
418 FunctionType::ExtInfo Info = FPT->getExtInfo();
419 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
420 // If the prototype args are elided, we should only have ABI-specific args,
421 // which never have param info.
422 if (PassProtoArgs && FPT->hasExtParameterInfos()) {
423 // ABI-specific suffix arguments are treated the same as variadic arguments.
424 addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
425 ArgTypes.size());
426 }
427 return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
428 /*chainCall=*/false, ArgTypes, Info,
429 ParamInfos, Required);
430}
431
432/// Arrange the argument and result information for the declaration or
433/// definition of the given function.
434const CGFunctionInfo &
435CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
436 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
437 if (MD->isInstance())
438 return arrangeCXXMethodDeclaration(MD);
439
440 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
441
442 assert(isa<FunctionType>(FTy));
443 setCUDAKernelCallingConvention(FTy, CGM, FD);
444
445 // When declaring a function without a prototype, always use a
446 // non-variadic type.
447 if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
448 return arrangeLLVMFunctionInfo(
449 noProto->getReturnType(), /*instanceMethod=*/false,
450 /*chainCall=*/false, None, noProto->getExtInfo(), {},RequiredArgs::All);
451 }
452
453 return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>(), FD);
454}
455
456/// Arrange the argument and result information for the declaration or
457/// definition of an Objective-C method.
458const CGFunctionInfo &
459CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
460 // It happens that this is the same as a call with no optional
461 // arguments, except also using the formal 'self' type.
462 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
463}
464
465/// Arrange the argument and result information for the function type
466/// through which to perform a send to the given Objective-C method,
467/// using the given receiver type. The receiver type is not always
468/// the 'self' type of the method or even an Objective-C pointer type.
469/// This is *not* the right method for actually performing such a
470/// message send, due to the possibility of optional arguments.
471const CGFunctionInfo &
472CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
473 QualType receiverType) {
474 SmallVector<CanQualType, 16> argTys;
475 SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
476 argTys.push_back(Context.getCanonicalParamType(receiverType));
477 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
478 // FIXME: Kill copy?
479 for (const auto *I : MD->parameters()) {
480 argTys.push_back(Context.getCanonicalParamType(I->getType()));
481 auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
482 I->hasAttr<NoEscapeAttr>());
483 extParamInfos.push_back(extParamInfo);
484 }
485
486 FunctionType::ExtInfo einfo;
487 bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
488 einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
489
490 if (getContext().getLangOpts().ObjCAutoRefCount &&
491 MD->hasAttr<NSReturnsRetainedAttr>())
492 einfo = einfo.withProducesResult(true);
493
494 RequiredArgs required =
495 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
496
497 return arrangeLLVMFunctionInfo(
498 GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
499 /*chainCall=*/false, argTys, einfo, extParamInfos, required);
500}
501
502const CGFunctionInfo &
503CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
504 const CallArgList &args) {
505 auto argTypes = getArgTypesForCall(Context, args);
506 FunctionType::ExtInfo einfo;
507
508 return arrangeLLVMFunctionInfo(
509 GetReturnType(returnType), /*instanceMethod=*/false,
510 /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
511}
512
513const CGFunctionInfo &
514CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
515 // FIXME: Do we need to handle ObjCMethodDecl?
516 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
517
518 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
519 return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));
520
521 if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
522 return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));
523
524 return arrangeFunctionDeclaration(FD);
525}
526
527/// Arrange a thunk that takes 'this' as the first parameter followed by
528/// varargs. Return a void pointer, regardless of the actual return type.
529/// The body of the thunk will end in a musttail call to a function of the
530/// correct type, and the caller will bitcast the function to the correct
531/// prototype.
532const CGFunctionInfo &
533CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
534 assert(MD->isVirtual() && "only methods have thunks");
535 CanQual<FunctionProtoType> FTP = GetFormalType(MD);
536 CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
537 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
538 /*chainCall=*/false, ArgTys,
539 FTP->getExtInfo(), {}, RequiredArgs(1));
540}
541
542const CGFunctionInfo &
543CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
544 CXXCtorType CT) {
545 assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
546
547 CanQual<FunctionProtoType> FTP = GetFormalType(CD);
548 SmallVector<CanQualType, 2> ArgTys;
549 const CXXRecordDecl *RD = CD->getParent();
550 ArgTys.push_back(GetThisType(Context, RD));
551 if (CT == Ctor_CopyingClosure)
552 ArgTys.push_back(*FTP->param_type_begin());
553 if (RD->getNumVBases() > 0)
554 ArgTys.push_back(Context.IntTy);
555 CallingConv CC = Context.getDefaultCallingConvention(
556 /*IsVariadic=*/false, /*IsCXXMethod=*/true);
557 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
558 /*chainCall=*/false, ArgTys,
559 FunctionType::ExtInfo(CC), {},
560 RequiredArgs::All);
561}
562
563/// Arrange a call as unto a free function, except possibly with an
564/// additional number of formal parameters considered required.
565static const CGFunctionInfo &
566arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
567 CodeGenModule &CGM,
568 const CallArgList &args,
569 const FunctionType *fnType,
570 unsigned numExtraRequiredArgs,
571 bool chainCall) {
572 assert(args.size() >= numExtraRequiredArgs);
573
574 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
575
576 // In most cases, there are no optional arguments.
577 RequiredArgs required = RequiredArgs::All;
578
579 // If we have a variadic prototype, the required arguments are the
580 // extra prefix plus the arguments in the prototype.
581 if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
582 if (proto->isVariadic())
583 required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
584
585 if (proto->hasExtParameterInfos())
586 addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
587 args.size());
588
589 // If we don't have a prototype at all, but we're supposed to
590 // explicitly use the variadic convention for unprototyped calls,
591 // treat all of the arguments as required but preserve the nominal
592 // possibility of variadics.
593 } else if (CGM.getTargetCodeGenInfo()
594 .isNoProtoCallVariadic(args,
595 cast<FunctionNoProtoType>(fnType))) {
596 required = RequiredArgs(args.size());
597 }
598
599 // FIXME: Kill copy.
600 SmallVector<CanQualType, 16> argTypes;
601 for (const auto &arg : args)
602 argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
603 return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
604 /*instanceMethod=*/false, chainCall,
605 argTypes, fnType->getExtInfo(), paramInfos,
606 required);
607}
608
609/// Figure out the rules for calling a function with the given formal
610/// type using the given arguments. The arguments are necessary
611/// because the function might be unprototyped, in which case it's
612/// target-dependent in crazy ways.
613const CGFunctionInfo &
614CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
615 const FunctionType *fnType,
616 bool chainCall) {
617 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
618 chainCall ? 1 : 0, chainCall);
619}
620
621/// A block function is essentially a free function with an
622/// extra implicit argument.
623const CGFunctionInfo &
624CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
625 const FunctionType *fnType) {
626 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
627 /*chainCall=*/false);
628}
629
630const CGFunctionInfo &
631CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
632 const FunctionArgList &params) {
633 auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
634 auto argTypes = getArgTypesForDeclaration(Context, params);
635
636 return arrangeLLVMFunctionInfo(
637 GetReturnType(proto->getReturnType()),
638 /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
639 proto->getExtInfo(), paramInfos,
640 RequiredArgs::forPrototypePlus(proto, 1, nullptr));
641}
642
643const CGFunctionInfo &
644CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
645 const CallArgList &args) {
646 // FIXME: Kill copy.
647 SmallVector<CanQualType, 16> argTypes;
648 for (const auto &Arg : args)
649 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
650 return arrangeLLVMFunctionInfo(
651 GetReturnType(resultType), /*instanceMethod=*/false,
652 /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
653 /*paramInfos=*/ {}, RequiredArgs::All);
654}
655
656const CGFunctionInfo &
657CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
658 const FunctionArgList &args) {
659 auto argTypes = getArgTypesForDeclaration(Context, args);
660
661 return arrangeLLVMFunctionInfo(
662 GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
663 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
664}
665
666const CGFunctionInfo &
667CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
668 ArrayRef<CanQualType> argTypes) {
669 return arrangeLLVMFunctionInfo(
670 resultType, /*instanceMethod=*/false, /*chainCall=*/false,
671 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
672}
673
674/// Arrange a call to a C++ method, passing the given arguments.
675///
676/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
677/// does not count `this`.
678const CGFunctionInfo &
679CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
680 const FunctionProtoType *proto,
681 RequiredArgs required,
682 unsigned numPrefixArgs) {
683 assert(numPrefixArgs + 1 <= args.size() &&
684 "Emitting a call with less args than the required prefix?");
685 // Add one to account for `this`. It's a bit awkward here, but we don't count
686 // `this` in similar places elsewhere.
687 auto paramInfos =
688 getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
689
690 // FIXME: Kill copy.
691 auto argTypes = getArgTypesForCall(Context, args);
692
693 FunctionType::ExtInfo info = proto->getExtInfo();
694 return arrangeLLVMFunctionInfo(
695 GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
696 /*chainCall=*/false, argTypes, info, paramInfos, required);
697}
698
699const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
700 return arrangeLLVMFunctionInfo(
701 getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
702 None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
703}
704
705const CGFunctionInfo &
706CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
707 const CallArgList &args) {
708 assert(signature.arg_size() <= args.size());
709 if (signature.arg_size() == args.size())
710 return signature;
711
712 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
713 auto sigParamInfos = signature.getExtParameterInfos();
714 if (!sigParamInfos.empty()) {
715 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
716 paramInfos.resize(args.size());
717 }
718
719 auto argTypes = getArgTypesForCall(Context, args);
720
721 assert(signature.getRequiredArgs().allowsOptionalArgs());
722 return arrangeLLVMFunctionInfo(signature.getReturnType(),
723 signature.isInstanceMethod(),
724 signature.isChainCall(),
725 argTypes,
726 signature.getExtInfo(),
727 paramInfos,
728 signature.getRequiredArgs());
729}
730
731namespace clang {
732namespace CodeGen {
733void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
734}
735}
736
737/// Arrange the argument and result information for an abstract value
738/// of a given function type. This is the method which all of the
739/// above functions ultimately defer to.
740const CGFunctionInfo &
741CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
742 bool instanceMethod,
743 bool chainCall,
744 ArrayRef<CanQualType> argTypes,
745 FunctionType::ExtInfo info,
746 ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
747 RequiredArgs required) {
748 assert(llvm::all_of(argTypes,
749 [](CanQualType T) { return T.isCanonicalAsParam(); }));
750
751 // Lookup or create unique function info.
752 llvm::FoldingSetNodeID ID;
753 CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
754 required, resultType, argTypes);
755
756 void *insertPos = nullptr;
757 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
758 if (FI)
759 return *FI;
760
761 unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
762
763 // Construct the function info. We co-allocate the ArgInfos.
764 FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
765 paramInfos, resultType, argTypes, required);
766 FunctionInfos.InsertNode(FI, insertPos);
767
768 bool inserted = FunctionsBeingProcessed.insert(FI).second;
769 (void)inserted;
770 assert(inserted && "Recursively being processed?");
771
772 // Compute ABI information.
773 if (CC == llvm::CallingConv::SPIR_KERNEL) {
774 // Force target independent argument handling for the host visible
775 // kernel functions.
776 computeSPIRKernelABIInfo(CGM, *FI);
777 } else if (info.getCC() == CC_Swift) {
778 swiftcall::computeABIInfo(CGM, *FI);
779 } else {
780 getABIInfo().computeInfo(*FI);
781 }
782
783 // Loop over all of the computed argument and return value info. If any of
784 // them are direct or extend without a specified coerce type, specify the
785 // default now.
786 ABIArgInfo &retInfo = FI->getReturnInfo();
787 if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
788 retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
789
790 for (auto &I : FI->arguments())
791 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
792 I.info.setCoerceToType(ConvertType(I.type));
793
794 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
795 assert(erased && "Not in set?");
796
797 return *FI;
798}
799
800CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
801 bool instanceMethod,
802 bool chainCall,
803 const FunctionType::ExtInfo &info,
804 ArrayRef<ExtParameterInfo> paramInfos,
805 CanQualType resultType,
806 ArrayRef<CanQualType> argTypes,
807 RequiredArgs required) {
808 assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
809
810 void *buffer =
811 operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
812 argTypes.size() + 1, paramInfos.size()));
813
814 CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
815 FI->CallingConvention = llvmCC;
816 FI->EffectiveCallingConvention = llvmCC;
817 FI->ASTCallingConvention = info.getCC();
818 FI->InstanceMethod = instanceMethod;
819 FI->ChainCall = chainCall;
820 FI->NoReturn = info.getNoReturn();
821 FI->ReturnsRetained = info.getProducesResult();
822 FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
823 FI->NoCfCheck = info.getNoCfCheck();
824 FI->Required = required;
825 FI->HasRegParm = info.getHasRegParm();
826 FI->RegParm = info.getRegParm();
827 FI->ArgStruct = nullptr;
828 FI->ArgStructAlign = 0;
829 FI->NumArgs = argTypes.size();
830 FI->HasExtParameterInfos = !paramInfos.empty();
831 FI->getArgsBuffer()[0].type = resultType;
832 for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
833 FI->getArgsBuffer()[i + 1].type = argTypes[i];
834 for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
835 FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
836 return FI;
837}
838
839/***/
840
841namespace {
842// ABIArgInfo::Expand implementation.
843
844// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
845struct TypeExpansion {
846 enum TypeExpansionKind {
847 // Elements of constant arrays are expanded recursively.
848 TEK_ConstantArray,
849 // Record fields are expanded recursively (but if record is a union, only
850 // the field with the largest size is expanded).
851 TEK_Record,
852 // For complex types, real and imaginary parts are expanded recursively.
853 TEK_Complex,
854 // All other types are not expandable.
855 TEK_None
856 };
857
858 const TypeExpansionKind Kind;
859
860 TypeExpansion(TypeExpansionKind K) : Kind(K) {}
861 virtual ~TypeExpansion() {}
862};
863
864struct ConstantArrayExpansion : TypeExpansion {
865 QualType EltTy;
866 uint64_t NumElts;
867
868 ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
869 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
870 static bool classof(const TypeExpansion *TE) {
871 return TE->Kind == TEK_ConstantArray;
872 }
873};
874
875struct RecordExpansion : TypeExpansion {
876 SmallVector<const CXXBaseSpecifier *, 1> Bases;
877
878 SmallVector<const FieldDecl *, 1> Fields;
879
880 RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
881 SmallVector<const FieldDecl *, 1> &&Fields)
882 : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
883 Fields(std::move(Fields)) {}
884 static bool classof(const TypeExpansion *TE) {
885 return TE->Kind == TEK_Record;
886 }
887};
888
889struct ComplexExpansion : TypeExpansion {
890 QualType EltTy;
891
892 ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
893 static bool classof(const TypeExpansion *TE) {
894 return TE->Kind == TEK_Complex;
895 }
896};
897
898struct NoExpansion : TypeExpansion {
899 NoExpansion() : TypeExpansion(TEK_None) {}
900 static bool classof(const TypeExpansion *TE) {
901 return TE->Kind == TEK_None;
902 }
903};
904} // namespace
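
A hedged source-level illustration of the expansion kinds above (hypothetical declarations):

    struct P { int x; float y; };  // TEK_Record: expands to (int, float)
    int a[3];                      // TEK_ConstantArray: expands to (int, int, int)
    _Complex double z;             // TEK_Complex: expands to (double, double)
    long n;                        // TEK_None: not expandable, passed as-is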
905
906static std::unique_ptr<TypeExpansion>
907getTypeExpansion(QualType Ty, const ASTContext &Context) {
908 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
909 return llvm::make_unique<ConstantArrayExpansion>(
910 AT->getElementType(), AT->getSize().getZExtValue());
911 }
912 if (const RecordType *RT = Ty->getAs<RecordType>()) {
913 SmallVector<const CXXBaseSpecifier *, 1> Bases;
914 SmallVector<const FieldDecl *, 1> Fields;
915 const RecordDecl *RD = RT->getDecl();
916 assert(!RD->hasFlexibleArrayMember() &&
917 "Cannot expand structure with flexible array.");
918 if (RD->isUnion()) {
919 // Unions can be here only in degenerative cases - all the fields are same
920 // after flattening. Thus we have to use the "largest" field.
921 const FieldDecl *LargestFD = nullptr;
922 CharUnits UnionSize = CharUnits::Zero();
923
924 for (const auto *FD : RD->fields()) {
925 if (FD->isZeroLengthBitField(Context))
926 continue;
927 assert(!FD->isBitField() &&
928 "Cannot expand structure with bit-field members.");
929 CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
930 if (UnionSize < FieldSize) {
931 UnionSize = FieldSize;
932 LargestFD = FD;
933 }
934 }
935 if (LargestFD)
936 Fields.push_back(LargestFD);
937 } else {
938 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
939 assert(!CXXRD->isDynamicClass() &&
940 "cannot expand vtable pointers in dynamic classes");
941 for (const CXXBaseSpecifier &BS : CXXRD->bases())
942 Bases.push_back(&BS);
943 }
944
945 for (const auto *FD : RD->fields()) {
946 if (FD->isZeroLengthBitField(Context))
947 continue;
948 assert(!FD->isBitField() &&
949 "Cannot expand structure with bit-field members.");
950 Fields.push_back(FD);
951 }
952 }
953 return llvm::make_unique<RecordExpansion>(std::move(Bases),
954 std::move(Fields));
955 }
956 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
957 return llvm::make_unique<ComplexExpansion>(CT->getElementType());
958 }
959 return llvm::make_unique<NoExpansion>();
960}
961
962static int getExpansionSize(QualType Ty, const ASTContext &Context) {
963 auto Exp = getTypeExpansion(Ty, Context);
964 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
965 return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
966 }
967 if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
968 int Res = 0;
969 for (auto BS : RExp->Bases)
970 Res += getExpansionSize(BS->getType(), Context);
971 for (auto FD : RExp->Fields)
972 Res += getExpansionSize(FD->getType(), Context);
973 return Res;
974 }
975 if (isa<ComplexExpansion>(Exp.get()))
976 return 2;
977 assert(isa<NoExpansion>(Exp.get()));
978 return 1;
979}
980
981void
982CodeGenTypes::getExpandedTypes(QualType Ty,
983 SmallVectorImpl<llvm::Type *>::iterator &TI) {
984 auto Exp = getTypeExpansion(Ty, Context);
985 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
986 for (int i = 0, n = CAExp->NumElts; i < n; i++) {
987 getExpandedTypes(CAExp->EltTy, TI);
988 }
989 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
990 for (auto BS : RExp->Bases)
991 getExpandedTypes(BS->getType(), TI);
992 for (auto FD : RExp->Fields)
993 getExpandedTypes(FD->getType(), TI);
994 } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
995 llvm::Type *EltTy = ConvertType(CExp->EltTy);
996 *TI++ = EltTy;
997 *TI++ = EltTy;
998 } else {
999 assert(isa<NoExpansion>(Exp.get()));
1000 *TI++ = ConvertType(Ty);
1001 }
1002}
1003
1004static void forConstantArrayExpansion(CodeGenFunction &CGF,
1005 ConstantArrayExpansion *CAE,
1006 Address BaseAddr,
1007 llvm::function_ref<void(Address)> Fn) {
1008 CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
1009 CharUnits EltAlign =
1010 BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
1011
1012 for (int i = 0, n = CAE->NumElts; i < n; i++) {
1013 llvm::Value *EltAddr =
1014 CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
1015 Fn(Address(EltAddr, EltAlign));
1016 }
1017}
1018
1019void CodeGenFunction::ExpandTypeFromArgs(
1020 QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
1021 assert(LV.isSimple() &&
1022 "Unexpected non-simple lvalue during struct expansion.");
1023
1024 auto Exp = getTypeExpansion(Ty, getContext());
1025 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1026 forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
1027 [&](Address EltAddr) {
1028 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1029 ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1030 });
1031 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1032 Address This = LV.getAddress();
1033 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1034 // Perform a single step derived-to-base conversion.
1035 Address Base =
1036 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1037 /*NullCheckValue=*/false, SourceLocation());
1038 LValue SubLV = MakeAddrLValue(Base, BS->getType());
1039
1040 // Recurse onto bases.
1041 ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1042 }
1043 for (auto FD : RExp->Fields) {
1044 // FIXME: What are the right qualifiers here?
1045 LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1046 ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1047 }
1048 } else if (isa<ComplexExpansion>(Exp.get())) {
1049 auto realValue = *AI++;
1050 auto imagValue = *AI++;
1051 EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1052 } else {
1053 assert(isa<NoExpansion>(Exp.get()));
1054 EmitStoreThroughLValue(RValue::get(*AI++), LV);
1055 }
1056}
1057
1058void CodeGenFunction::ExpandTypeToArgs(
1059 QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1060 SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1061 auto Exp = getTypeExpansion(Ty, getContext());
1062 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1063 Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1064 : Arg.getKnownRValue().getAggregateAddress();
1065 forConstantArrayExpansion(
1066 *this, CAExp, Addr, [&](Address EltAddr) {
1067 CallArg EltArg = CallArg(
1068 convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1069 CAExp->EltTy);
1070 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1071 IRCallArgPos);
1072 });
1073 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1074 Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1075 : Arg.getKnownRValue().getAggregateAddress();
1076 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1077 // Perform a single step derived-to-base conversion.
1078 Address Base =
1079 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1080 /*NullCheckValue=*/false, SourceLocation());
1081 CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1082
1083 // Recurse onto bases.
1084 ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1085 IRCallArgPos);
1086 }
1087
1088 LValue LV = MakeAddrLValue(This, Ty);
1089 for (auto FD : RExp->Fields) {
1090 CallArg FldArg =
1091 CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1092 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1093 IRCallArgPos);
1094 }
1095 } else if (isa<ComplexExpansion>(Exp.get())) {
1096 ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
1097 IRCallArgs[IRCallArgPos++] = CV.first;
1098 IRCallArgs[IRCallArgPos++] = CV.second;
1099 } else {
1100 assert(isa<NoExpansion>(Exp.get()));
1101 auto RV = Arg.getKnownRValue();
1102 assert(RV.isScalar() &&
1103 "Unexpected non-scalar rvalue during struct expansion.");
1104
1105 // Insert a bitcast as needed.
1106 llvm::Value *V = RV.getScalarVal();
1107 if (IRCallArgPos < IRFuncTy->getNumParams() &&
1108 V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1109 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1110
1111 IRCallArgs[IRCallArgPos++] = V;
1112 }
1113}
1114
1115/// Create a temporary allocation for the purposes of coercion.
1116static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1117 CharUnits MinAlign) {
1118 // Don't use an alignment that's worse than what LLVM would prefer.
1119 auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1120 CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1121
1122 return CGF.CreateTempAlloca(Ty, Align);
1123}
1124
1125/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
1126/// are accessing some number of bytes, try to gep into the struct to get
1127/// at its inner goodness. Dive as deep as possible without entering an element
1128/// with an in-memory size smaller than DstSize.
1129static Address
1130EnterStructPointerForCoercedAccess(Address SrcPtr,
1131 llvm::StructType *SrcSTy,
1132 uint64_t DstSize, CodeGenFunction &CGF) {
1133 // We can't dive into a zero-element struct.
1134 if (SrcSTy->getNumElements() == 0) return SrcPtr;
1135
1136 llvm::Type *FirstElt = SrcSTy->getElementType(0);
1137
1138 // If the first elt is at least as large as what we're looking for, or if the
1139 // first element is the same size as the whole struct, we can enter it. The
1140 // comparison must be made on the store size and not the alloca size. Using
1141 // the alloca size may overstate the size of the load.
1142 uint64_t FirstEltSize =
1143 CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1144 if (FirstEltSize < DstSize &&
1145 FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1146 return SrcPtr;
1147
1148 // GEP into the first element.
1149 SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");
1150
1151 // If the first element is a struct, recurse.
1152 llvm::Type *SrcTy = SrcPtr.getElementType();
1153 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1154 return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1155
1156 return SrcPtr;
1157}
1158
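// Illustrative example (not part of the original file): with nested types
// equivalent to
//   struct Inner { long long V; };        // store size 8
//   struct Outer { Inner I; char Tail; }; // first element spans 8 bytes
// a coerced access of DstSize == 8 through an Outer* dives twice, emitting
// two "coerce.dive" GEPs and returning the address of the i64 element. Had
// the first element been only 4 bytes (and smaller than the whole struct),
// the function would have stopped at the Outer* itself.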
1159/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1160/// are either integers or pointers. This does a truncation of the value if it
1161/// is too large or a zero extension if it is too small.
1162///
1163/// This behaves as if the value were coerced through memory, so on big-endian
1164/// targets the high bits are preserved in a truncation, while little-endian
1165/// targets preserve the low bits.
1166static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1167 llvm::Type *Ty,
1168 CodeGenFunction &CGF) {
1169 if (Val->getType() == Ty)
1170 return Val;
1171
1172 if (isa<llvm::PointerType>(Val->getType())) {
1173 // If this is Pointer->Pointer avoid conversion to and from int.
1174 if (isa<llvm::PointerType>(Ty))
1175 return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1176
1177 // Convert the pointer to an integer so we can play with its width.
1178 Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1179 }
1180
1181 llvm::Type *DestIntTy = Ty;
1182 if (isa<llvm::PointerType>(DestIntTy))
1183 DestIntTy = CGF.IntPtrTy;
1184
1185 if (Val->getType() != DestIntTy) {
1186 const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1187 if (DL.isBigEndian()) {
1188 // Preserve the high bits on big-endian targets.
1189 // That is what memory coercion does.
1190 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1191 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1192
1193 if (SrcSize > DstSize) {
1194 Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1195 Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1196 } else {
1197 Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1198 Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1199 }
1200 } else {
1201 // Little-endian targets preserve the low bits. No shifts required.
1202 Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1203 }
1204 }
1205
1206 if (isa<llvm::PointerType>(Ty))
1207 Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1208 return Val;
1209}
1210
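// A minimal host-side sketch (not part of the original file; hypothetical
// helper names) of the big-endian arithmetic above: narrowing keeps the
// high bits, widening places the value into the high bits, just as a
// store+reload at a different width would behave on a big-endian target.
#include <cstdint>
static inline uint16_t coerceNarrowBE(uint32_t V) {
  // SrcSize(32) > DstSize(16): lshr by the size difference, then trunc.
  return static_cast<uint16_t>(V >> 16);
}
static inline uint32_t coerceWidenBE(uint16_t V) {
  // SrcSize(16) < DstSize(32): zext, then shl into the high bits.
  return static_cast<uint32_t>(V) << 16;
}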
1211
1212
1213/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1214/// a pointer to an object of type \arg Ty, known to be aligned to
1215/// \arg SrcAlign bytes.
1216///
1217/// This safely handles the case when the src type is smaller than the
1218/// destination type; in this situation the values of bits which are not
1219/// present in the src are undefined.
1220static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1221 CodeGenFunction &CGF) {
1222 llvm::Type *SrcTy = Src.getElementType();
1223
1224 // If SrcTy and Ty are the same, just do a load.
1225 if (SrcTy == Ty)
1226 return CGF.Builder.CreateLoad(Src);
1227
1228 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1229
1230 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1231 Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
1232 SrcTy = Src.getType()->getElementType();
1233 }
1234
1235 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1236
1237 // If the source and destination are integer or pointer types, just do an
1238 // extension or truncation to the desired type.
1239 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1240 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1241 llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1242 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1243 }
1244
1245 // If load is legal, just bitcast the src pointer.
1246 if (SrcSize >= DstSize) {
1247 // Generally SrcSize is never greater than DstSize, since this means we are
1248 // losing bits. However, this can happen in cases where the structure has
1249 // additional padding, for example due to a user specified alignment.
1250 //
1251 // FIXME: Assert that we aren't truncating non-padding bits when we have
1252 // access to that information.
1253 Src = CGF.Builder.CreateBitCast(Src,
1254 Ty->getPointerTo(Src.getAddressSpace()));
1255 return CGF.Builder.CreateLoad(Src);
1256 }
1257
1258 // Otherwise do coercion through memory. This is stupid, but simple.
1259 Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
1260 Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty);
1261 Address SrcCasted = CGF.Builder.CreateElementBitCast(Src,CGF.Int8Ty);
1262 CGF.Builder.CreateMemCpy(Casted, SrcCasted,
1263 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
1264 false);
1265 return CGF.Builder.CreateLoad(Tmp);
1266}
1267
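// A minimal sketch (not part of the original file) of the memory fallback
// above, assuming trivially copyable types with sizeof(Src) < sizeof(Dst);
// only the first sizeof(Src) bytes of the result are defined, matching the
// comment that the bits not present in the source are undefined.
#include <cstring>
template <class Dst, class Src>
static Dst coerceLoadViaMemory(const Src &S) {
  static_assert(sizeof(Src) <= sizeof(Dst), "fallback used when Src is smaller");
  Dst D;                            // bytes past sizeof(Src) remain undefined
  std::memcpy(&D, &S, sizeof(Src)); // mirrors the IR memcpy of SrcSize bytes
  return D;
}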
1268// Function to store a first-class aggregate into memory. We prefer to
1269// store the elements rather than the aggregate to be more friendly to
1270// fast-isel.
1271// FIXME: Do we need to recurse here?
1272static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
1273 Address Dest, bool DestIsVolatile) {
1274 // Prefer scalar stores to first-class aggregate stores.
1275 if (llvm::StructType *STy =
1276 dyn_cast<llvm::StructType>(Val->getType())) {
1277 const llvm::StructLayout *Layout =
1278 CGF.CGM.getDataLayout().getStructLayout(STy);
1279
1280 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1281 auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
1282 Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
1283 llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
1284 CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1285 }
1286 } else {
1287 CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
1288 }
1289}
1290
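// Illustrative example (not part of the original file): for a value of LLVM
// type { i32, float }, the loop above emits one extractvalue + store pair
// per element at its StructLayout offset, instead of a single aggregate
// store -- the scalar stores are what fast-isel handles well.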
1291/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1292/// where the source and destination may have different types. The
1293/// destination is known to be aligned to \arg DstAlign bytes.
1294///
1295/// This safely handles the case when the src type is larger than the
1296/// destination type; the upper bits of the src will be lost.
1297static void CreateCoercedStore(llvm::Value *Src,
1298 Address Dst,
1299 bool DstIsVolatile,
1300 CodeGenFunction &CGF) {
1301 llvm::Type *SrcTy = Src->getType();
1302 llvm::Type *DstTy = Dst.getType()->getElementType();
1303 if (SrcTy == DstTy) {
1304 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1305 return;
1306 }
1307
1308 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1309
1310 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1311 Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
1312 DstTy = Dst.getType()->getElementType();
1313 }
1314
1315 // If the source and destination are integer or pointer types, just do an
1316 // extension or truncation to the desired type.
1317 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1318 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1319 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1320 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1321 return;
1322 }
1323
1324 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1325
1326 // If store is legal, just bitcast the src pointer.
1327 if (SrcSize <= DstSize) {
1328 Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1329 BuildAggStore(CGF, Src, Dst, DstIsVolatile);
1330 } else {
1331 // Otherwise do coercion through memory. This is stupid, but
1332 // simple.
1333
1334 // Generally SrcSize is never greater than DstSize, since this means we are
1335 // losing bits. However, this can happen in cases where the structure has
1336 // additional padding, for example due to a user specified alignment.
1337 //
1338 // FIXME: Assert that we aren't truncating non-padding bits when we have
1339 // access to that information.
1340 Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1341 CGF.Builder.CreateStore(Src, Tmp);
1342 Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty);
1343 Address DstCasted = CGF.Builder.CreateElementBitCast(Dst,CGF.Int8Ty);
1344 CGF.Builder.CreateMemCpy(DstCasted, Casted,
1345 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
1346 false);
1347 }
1348}
1349
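// A minimal sketch (not part of the original file) of the store-side memory
// fallback above, assuming trivially copyable types with
// sizeof(Src) > sizeof(Dst) and that the excess source bytes are padding;
// only sizeof(Dst) bytes are copied, so those padding bits are dropped.
#include <cstring>
template <class Dst, class Src>
static void coerceStoreViaMemory(Dst &D, const Src &S) {
  static_assert(sizeof(Src) >= sizeof(Dst), "fallback used when Src is larger");
  std::memcpy(&D, &S, sizeof(Dst)); // mirrors the IR memcpy of DstSize bytes
}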
1350static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1351 const ABIArgInfo &info) {
1352 if (unsigned offset = info.getDirectOffset()) {
1353 addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1354 addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1355 CharUnits::fromQuantity(offset));
1356 addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1357 }
1358 return addr;
1359}
1360
1361namespace {
1362
1363/// Encapsulates information about the way function arguments from
1364/// CGFunctionInfo should be passed to the actual LLVM IR function.
1365class ClangToLLVMArgMapping {
1366 static const unsigned InvalidIndex = ~0U;
1367 unsigned InallocaArgNo;
1368 unsigned SRetArgNo;
1369 unsigned TotalIRArgs;
1370
1371 /// Arguments of the LLVM IR function corresponding to a single Clang argument.
1372 struct IRArgs {
1373 unsigned PaddingArgIndex;
1374 // Argument is expanded to IR arguments at positions
1375 // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1376 unsigned FirstArgIndex;
1377 unsigned NumberOfArgs;
1378
1379 IRArgs()
1380 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1381 NumberOfArgs(0) {}
1382 };
1383
1384 SmallVector<IRArgs, 8> ArgInfo;
1385
1386public:
1387 ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1388 bool OnlyRequiredArgs = false)
1389 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1390 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1391 construct(Context, FI, OnlyRequiredArgs);
1392 }
1393
1394 bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1395 unsigned getInallocaArgNo() const {
1396 assert(hasInallocaArg());
1397 return InallocaArgNo;
1398 }
1399
1400 bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1401 unsigned getSRetArgNo() const {
1402 assert(hasSRetArg());
1403 return SRetArgNo;
1404 }
1405
1406 unsigned totalIRArgs() const { return TotalIRArgs; }
1407
1408 bool hasPaddingArg(unsigned ArgNo) const {
1409 assert(ArgNo < ArgInfo.size());
1410 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1411 }
1412 unsigned getPaddingArgNo(unsigned ArgNo) const {
1413 assert(hasPaddingArg(ArgNo));
1414 return ArgInfo[ArgNo].PaddingArgIndex;
1415 }
1416
1417 /// Returns the index of the first IR argument corresponding to ArgNo, and
1418 /// the number of such IR arguments.
1419 std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1420 assert(ArgNo < ArgInfo.size());
1421 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1422 ArgInfo[ArgNo].NumberOfArgs);
1423 }
1424
1425private:
1426 void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1427 bool OnlyRequiredArgs);
1428};
1429
1430void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1431 const CGFunctionInfo &FI,
1432 bool OnlyRequiredArgs) {
1433 unsigned IRArgNo = 0;
1434 bool SwapThisWithSRet = false;
1435 const ABIArgInfo &RetAI = FI.getReturnInfo();
1436
1437 if (RetAI.getKind() == ABIArgInfo::Indirect) {
1438 SwapThisWithSRet = RetAI.isSRetAfterThis();
1439 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1440 }
1441
1442 unsigned ArgNo = 0;
1443 unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1444 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1445 ++I, ++ArgNo) {
1446 assert(I != FI.arg_end());
1447 QualType ArgType = I->type;
1448 const ABIArgInfo &AI = I->info;
1449 // Collect data about IR arguments corresponding to Clang argument ArgNo.
1450 auto &IRArgs = ArgInfo[ArgNo];
1451
1452 if (AI.getPaddingType())
1453 IRArgs.PaddingArgIndex = IRArgNo++;
1454
1455 switch (AI.getKind()) {
1456 case ABIArgInfo::Extend:
1457 case ABIArgInfo::Direct: {
1458 // FIXME: handle sseregparm someday...
1459 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1460 if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1461 IRArgs.NumberOfArgs = STy->getNumElements();
1462 } else {
1463 IRArgs.NumberOfArgs = 1;
1464 }
1465 break;
1466 }
1467 case ABIArgInfo::Indirect:
1468 IRArgs.NumberOfArgs = 1;
1469 break;
1470 case ABIArgInfo::Ignore:
1471 case ABIArgInfo::InAlloca:
1472 // ignore and inalloca don't have matching LLVM parameters.
1473 IRArgs.NumberOfArgs = 0;
1474 break;
1475 case ABIArgInfo::CoerceAndExpand:
1476 IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1477 break;
1478 case ABIArgInfo::Expand:
1479 IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1480 break;
1481 }
1482
1483 if (IRArgs.NumberOfArgs > 0) {
1484 IRArgs.FirstArgIndex = IRArgNo;
1485 IRArgNo += IRArgs.NumberOfArgs;
1486 }
1487
1488 // Skip over the sret parameter when it comes second. We already handled it
1489 // above.
1490 if (IRArgNo == 1 && SwapThisWithSRet)
1491 IRArgNo++;
1492 }
1493 assert(ArgNo == ArgInfo.size());
1494
1495 if (FI.usesInAlloca())
1496 InallocaArgNo = IRArgNo++;
1497
1498 TotalIRArgs = IRArgNo;
1499}
1500} // namespace
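// Illustrative layout (not part of the original file): for a hypothetical
// function whose return is Indirect (sret first) and whose two Clang
// arguments are classified Direct and Expand (two leaves) respectively,
// construct() produces:
//   IR slot 0    : sret pointer       (SRetArgNo == 0)
//   IR slot 1    : Clang arg 0        (FirstArgIndex 1, NumberOfArgs 1)
//   IR slots 2-3 : Clang arg 1 leaves (FirstArgIndex 2, NumberOfArgs 2)
//   totalIRArgs() == 4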
1501
1502/***/
1503
1504bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1505 const auto &RI = FI.getReturnInfo();
1506 return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1507}
1508
1509bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1510 return ReturnTypeUsesSRet(FI) &&
1511 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1512}
1513
1514bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1515 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1516 switch (BT->getKind()) {
1517 default:
1518 return false;
1519 case BuiltinType::Float:
1520 return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1521 case BuiltinType::Double:
1522 return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1523 case BuiltinType::LongDouble:
1524 return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1525 }
1526 }
1527
1528 return false;
1529}
1530
1531bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1532 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1533 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1534 if (BT->getKind() == BuiltinType::LongDouble)
1535 return getTarget().useObjCFP2RetForComplexLongDouble();
1536 }
1537 }
1538
1539 return false;
1540}
1541
1542llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1543 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1544 return GetFunctionType(FI);
1545}
1546
1547llvm::FunctionType *
1548CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1549
1550 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1551 (void)Inserted;
1552 assert(Inserted && "Recursively being processed?");
1553
1554 llvm::Type *resultType = nullptr;
1555 const ABIArgInfo &retAI = FI.getReturnInfo();
1556 switch (retAI.getKind()) {
1557 case ABIArgInfo::Expand:
1558 llvm_unreachable("Invalid ABI kind for return argument");
1559
1560 case ABIArgInfo::Extend:
1561 case ABIArgInfo::Direct:
1562 resultType = retAI.getCoerceToType();
1563 break;
1564
1565 case ABIArgInfo::InAlloca:
1566 if (retAI.getInAllocaSRet()) {
1567 // sret things on win32 aren't void, they return the sret pointer.
1568 QualType ret = FI.getReturnType();
1569 llvm::Type *ty = ConvertType(ret);
1570 unsigned addressSpace = Context.getTargetAddressSpace(ret);
1571 resultType = llvm::PointerType::get(ty, addressSpace);
1572 } else {
1573 resultType = llvm::Type::getVoidTy(getLLVMContext());
1574 }
1575 break;
1576
1577 case ABIArgInfo::Indirect:
1578 case ABIArgInfo::Ignore:
1579 resultType = llvm::Type::getVoidTy(getLLVMContext());
1580 break;
1581
1582 case ABIArgInfo::CoerceAndExpand:
1583 resultType = retAI.getUnpaddedCoerceAndExpandType();
1584 break;
1585 }
1586
1587 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1588 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1589
1590 // Add type for sret argument.
1591 if (IRFunctionArgs.hasSRetArg()) {
1592 QualType Ret = FI.getReturnType();
1593 llvm::Type *Ty = ConvertType(Ret);
1594 unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1595 ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1596 llvm::PointerType::get(Ty, AddressSpace);
1597 }
1598
1599 // Add type for inalloca argument.
1600 if (IRFunctionArgs.hasInallocaArg()) {
1601 auto ArgStruct = FI.getArgStruct();
1602 assert(ArgStruct);
1603 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1604 }
1605
1606 // Add in all of the required arguments.
1607 unsigned ArgNo = 0;
1608 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1609 ie = it + FI.getNumRequiredArgs();
1610 for (; it != ie; ++it, ++ArgNo) {
1611 const ABIArgInfo &ArgInfo = it->info;
1612
1613 // Insert a padding type to ensure proper alignment.
1614 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1615 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1616 ArgInfo.getPaddingType();
1617
1618 unsigned FirstIRArg, NumIRArgs;
1619 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1620
1621 switch (ArgInfo.getKind()) {
1622 case ABIArgInfo::Ignore:
1623 case ABIArgInfo::InAlloca:
1624 assert(NumIRArgs == 0);
1625 break;
1626
1627 case ABIArgInfo::Indirect: {
1628 assert(NumIRArgs == 1);
1629 // indirect arguments are always on the stack, which is in the alloca address space.
1630 llvm::Type *LTy = ConvertTypeForMem(it->type);
1631 ArgTypes[FirstIRArg] = LTy->getPointerTo(
1632 CGM.getDataLayout().getAllocaAddrSpace());
1633 break;
1634 }
1635
1636 case ABIArgInfo::Extend:
1637 case ABIArgInfo::Direct: {
1638 // Fast-isel and the optimizer generally like scalar values better than
1639 // FCAs, so we flatten them if this is safe to do for this argument.
1640 llvm::Type *argType = ArgInfo.getCoerceToType();
1641 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1642 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1643 assert(NumIRArgs == st->getNumElements());
1644 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1645 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1646 } else {
1647 assert(NumIRArgs == 1);
1648 ArgTypes[FirstIRArg] = argType;
1649 }
1650 break;
1651 }
1652
1653 case ABIArgInfo::CoerceAndExpand: {
1654 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1655 for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1656 *ArgTypesIter++ = EltTy;
1657 }
1658 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1659 break;
1660 }
1661
1662 case ABIArgInfo::Expand:
1663 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1664 getExpandedTypes(it->type, ArgTypesIter);
1665 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1666 break;
1667 }
1668 }
1669
1670 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1671 assert(Erased && "Not in set?");
1672
1673 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1674}
1675
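// Illustrative example (not part of the original file): for
//   struct S { int a, b; };
//   struct S f(struct S x);
// on a target that classifies S as Direct with a flattenable coercion type
// of { i32, i32 }, the loop above assigns the single Clang parameter two IR
// slots, yielding roughly { i32, i32 } (i32, i32); had S been classified
// Indirect instead, the type would be void (%struct.S* sret, %struct.S*).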
1676llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1677 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1678 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1679
1680 if (!isFuncTypeConvertible(FPT))
1681 return llvm::StructType::get(getLLVMContext());
1682
1683 const CGFunctionInfo *Info;
1684 if (isa<CXXDestructorDecl>(MD))
1685 Info =
1686 &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
1687 else
1688 Info = &arrangeCXXMethodDeclaration(MD);
1689 return GetFunctionType(*Info);
1690}
1691
1692static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1693 llvm::AttrBuilder &FuncAttrs,
1694 const FunctionProtoType *FPT) {
1695 if (!FPT)
1696 return;
1697
1698 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1699 FPT->isNothrow())
1700 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1701}
1702
1703void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1704 bool AttrOnCallSite,
1705 llvm::AttrBuilder &FuncAttrs) {
1706 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1707 if (!HasOptnone) {
1708 if (CodeGenOpts.OptimizeSize)
1709 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1710 if (CodeGenOpts.OptimizeSize == 2)
1711 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1712 }
1713
1714 if (CodeGenOpts.DisableRedZone)
1715 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1716 if (CodeGenOpts.IndirectTlsSegRefs)
1717 FuncAttrs.addAttribute("indirect-tls-seg-refs");
1718 if (CodeGenOpts.NoImplicitFloat)
1719 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1720
1721 if (AttrOnCallSite) {
1722 // Attributes that should go on the call site only.
1723 if (!CodeGenOpts.SimplifyLibCalls ||
1724 CodeGenOpts.isNoBuiltinFunc(Name.data()))
1725 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1726 if (!CodeGenOpts.TrapFuncName.empty())
1727 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1728 } else {
1729 // Attributes that should go on the function, but not the call site.
1730 if (!CodeGenOpts.DisableFPElim) {
1731 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1732 } else if (CodeGenOpts.OmitLeafFramePointer) {
1733 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1734 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1735 } else {
1736 FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1737 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1738 }
1739
1740 FuncAttrs.addAttribute("less-precise-fpmad",
1741 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1742
1743 if (CodeGenOpts.NullPointerIsValid)
1744 FuncAttrs.addAttribute("null-pointer-is-valid", "true");
1745 if (!CodeGenOpts.FPDenormalMode.empty())
1746 FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
1747
1748 FuncAttrs.addAttribute("no-trapping-math",
1749 llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1750
1751 // Strict (compliant) code is the default, so only add this attribute to
1752 // indicate that we are trying to work around a problem case.
1753 if (!CodeGenOpts.StrictFloatCastOverflow)
1754 FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1755
1756 // TODO: Are these all needed?
1757 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1758 FuncAttrs.addAttribute("no-infs-fp-math",
1759 llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1760 FuncAttrs.addAttribute("no-nans-fp-math",
1761 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1762 FuncAttrs.addAttribute("unsafe-fp-math",
1763 llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1764 FuncAttrs.addAttribute("use-soft-float",
1765 llvm::toStringRef(CodeGenOpts.SoftFloat));
1766 FuncAttrs.addAttribute("stack-protector-buffer-size",
1767 llvm::utostr(CodeGenOpts.SSPBufferSize));
1768 FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1769 llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1770 FuncAttrs.addAttribute(
1771 "correctly-rounded-divide-sqrt-fp-math",
1772 llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1773
1774 if (getLangOpts().OpenCL)
1775 FuncAttrs.addAttribute("denorms-are-zero",
1776 llvm::toStringRef(CodeGenOpts.FlushDenorm));
1777
1778 // TODO: Reciprocal estimate codegen options should apply to instructions?
1779 const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1780 if (!Recips.empty())
1781 FuncAttrs.addAttribute("reciprocal-estimates",
1782 llvm::join(Recips, ","));
1783
1784 if (!CodeGenOpts.PreferVectorWidth.empty() &&
1785 CodeGenOpts.PreferVectorWidth != "none")
1786 FuncAttrs.addAttribute("prefer-vector-width",
1787 CodeGenOpts.PreferVectorWidth);
1788
1789 if (CodeGenOpts.StackRealignment)
1790 FuncAttrs.addAttribute("stackrealign");
1791 if (CodeGenOpts.Backchain)
1792 FuncAttrs.addAttribute("backchain");
1793
1794 // FIXME: The interaction of this attribute with the SLH command line flag
1795 // has not been determined.
1796 if (CodeGenOpts.SpeculativeLoadHardening)
1797 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1798 }
1799
1800 if (getLangOpts().assumeFunctionsAreConvergent()) {
1801 // Conservatively, mark all functions and calls in CUDA and OpenCL as
1802 // convergent (meaning, they may call an intrinsically convergent op, such
1803 // as __syncthreads() / barrier(), and so can't have certain optimizations
1804 // applied around them). LLVM will remove this attribute where it safely
1805 // can.
1806 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1807 }
1808
1809 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1810 // Exceptions aren't supported in CUDA device code.
1811 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1812
1813 // Respect -fcuda-flush-denormals-to-zero.
1814 if (CodeGenOpts.FlushDenorm)
1815 FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1816 }
1817}
1818
1819void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1820 llvm::AttrBuilder FuncAttrs;
1821 ConstructDefaultFnAttrList(F.getName(),
1822 F.hasFnAttribute(llvm::Attribute::OptimizeNone),
1823 /* AttrOnCallsite = */ false, FuncAttrs);
1824 F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1825}
1826
1827void CodeGenModule::ConstructAttributeList(
1828 StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1829 llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1830 llvm::AttrBuilder FuncAttrs;
1831 llvm::AttrBuilder RetAttrs;
1832
1833 CallingConv = FI.getEffectiveCallingConvention();
1834 if (FI.isNoReturn())
1835 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1836
1837 // If we have information about the function prototype, we can learn
1838 // attributes from there.
1839 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1840 CalleeInfo.getCalleeFunctionProtoType());
1841
1842 const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
1843
1844 bool HasOptnone = false;
1845 // FIXME: handle sseregparm someday...
1846 if (TargetDecl) {
1847 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1848 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1849 if (TargetDecl->hasAttr<NoThrowAttr>())
1850 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1851 if (TargetDecl->hasAttr<NoReturnAttr>())
1852 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1853 if (TargetDecl->hasAttr<ColdAttr>())
1854 FuncAttrs.addAttribute(llvm::Attribute::Cold);
1855 if (TargetDecl->hasAttr<NoDuplicateAttr>())
1856 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1857 if (TargetDecl->hasAttr<ConvergentAttr>())
1858 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1859 if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
1860 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1861
1862 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1863 AddAttributesFromFunctionProtoType(
1864 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1865 // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1866 // These attributes are not inherited by overloads.
1867 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1868 if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1869 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1870 }
1871
1872 // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1873 if (TargetDecl->hasAttr<ConstAttr>()) {
1874 FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1875 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1876 } else if (TargetDecl->hasAttr<PureAttr>()) {
1877 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1878 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1879 } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1880 FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1881 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1882 }
1883 if (TargetDecl->hasAttr<RestrictAttr>())
1884 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1885 if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
1886 !CodeGenOpts.NullPointerIsValid)
1887 RetAttrs.addAttribute(llvm::Attribute::NonNull);
1888 if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1889 FuncAttrs.addAttribute("no_caller_saved_registers");
1890 if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1891 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1892
1893 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1894 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1895 Optional<unsigned> NumElemsParam;
1896 if (AllocSize->getNumElemsParam().isValid())
1897 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
1898 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
1899 NumElemsParam);
1900 }
1901 }
1902
1903 ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1904
1905 if (CodeGenOpts.EnableSegmentedStacks &&
1906 !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1907 FuncAttrs.addAttribute("split-stack");
1908
1909 // Add NonLazyBind attribute to function declarations when -fno-plt
1910 // is used.
1911 if (TargetDecl && CodeGenOpts.NoPLT) {
1912 if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1913 if (!Fn->isDefined() && !AttrOnCallSite) {
1914 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1915 }
1916 }
1917 }
1918
1919 if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
1920 if (getLangOpts().OpenCLVersion <= 120) {
1921 // OpenCL v1.2: work groups are always uniform.
1922 FuncAttrs.addAttribute("uniform-work-group-size", "true");
1923 } else {
1924 // OpenCL v2.0: work groups may or may not be uniform.
1925 // The '-cl-uniform-work-group-size' compile option gives the
1926 // compiler a hint that the global work size is a multiple of
1927 // the work-group size specified to clEnqueueNDRangeKernel
1928 // (i.e. work groups are uniform).
1929 FuncAttrs.addAttribute("uniform-work-group-size",
1930 llvm::toStringRef(CodeGenOpts.UniformWGSize));
1931 }
1932 }
1933
1934 if (!AttrOnCallSite) {
1935 bool DisableTailCalls = false;
1936
1937 if (CodeGenOpts.DisableTailCalls)
1938 DisableTailCalls = true;
1939 else if (TargetDecl) {
1940 if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1941 TargetDecl->hasAttr<AnyX86InterruptAttr>())
1942 DisableTailCalls = true;
1943 else if (CodeGenOpts.NoEscapingBlockTailCalls) {
1944 if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
1945 if (!BD->doesNotEscape())
1946 DisableTailCalls = true;
1947 }
1948 }
1949
1950 FuncAttrs.addAttribute("disable-tail-calls",
1951 llvm::toStringRef(DisableTailCalls));
1952 GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
1953 }
1954
1955 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1956
1957 QualType RetTy = FI.getReturnType();
1958 const ABIArgInfo &RetAI = FI.getReturnInfo();
1959 switch (RetAI.getKind()) {
1960 case ABIArgInfo::Extend:
1961 if (RetAI.isSignExt())
1962 RetAttrs.addAttribute(llvm::Attribute::SExt);
1963 else
1964 RetAttrs.addAttribute(llvm::Attribute::ZExt);
1965 LLVM_FALLTHROUGH;
1966 case ABIArgInfo::Direct:
1967 if (RetAI.getInReg())
1968 RetAttrs.addAttribute(llvm::Attribute::InReg);
1969 break;
1970 case ABIArgInfo::Ignore:
1971 break;
1972
1973 case ABIArgInfo::InAlloca:
1974 case ABIArgInfo::Indirect: {
1975 // inalloca and sret disable readnone and readonly
1976 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1977 .removeAttribute(llvm::Attribute::ReadNone);
1978 break;
1979 }
1980
1981 case ABIArgInfo::CoerceAndExpand:
1982 break;
1983
1984 case ABIArgInfo::Expand:
1985 llvm_unreachable("Invalid ABI kind for return argument");
1986 }
1987
1988 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1989 QualType PTy = RefTy->getPointeeType();
1990 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1991 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1992 .getQuantity());
1993 else if (getContext().getTargetAddressSpace(PTy) == 0 &&
1994 !CodeGenOpts.NullPointerIsValid)
1995 RetAttrs.addAttribute(llvm::Attribute::NonNull);
1996 }
1997
1998 bool hasUsedSRet = false;
1999 SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2000
2001 // Attach attributes to sret.
2002 if (IRFunctionArgs.hasSRetArg()) {
2003 llvm::AttrBuilder SRETAttrs;
2004 if (!RetAI.getSuppressSRet())
2005 SRETAttrs.addAttribute(llvm::Attribute::StructRet);
2006 hasUsedSRet = true;
2007 if (RetAI.getInReg())
2008 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2009 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2010 llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2011 }
2012
2013 // Attach attributes to inalloca argument.
2014 if (IRFunctionArgs.hasInallocaArg()) {
2015 llvm::AttrBuilder Attrs;
2016 Attrs.addAttribute(llvm::Attribute::InAlloca);
2017 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2018 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2019 }
2020
2021 unsigned ArgNo = 0;
2022 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2023 E = FI.arg_end();
2024 I != E; ++I, ++ArgNo) {
2025 QualType ParamType = I->type;
2026 const ABIArgInfo &AI = I->info;
2027 llvm::AttrBuilder Attrs;
2028
2029 // Add attribute for padding argument, if necessary.
2030 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2031 if (AI.getPaddingInReg()) {
2032 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2033 llvm::AttributeSet::get(
2034 getLLVMContext(),
2035 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2036 }
2037 }
2038
2039 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2040 // have the corresponding parameter variable. It doesn't make
2041 // sense to do it here because parameters are so messed up.
2042 switch (AI.getKind()) {
2043 case ABIArgInfo::Extend:
2044 if (AI.isSignExt())
2045 Attrs.addAttribute(llvm::Attribute::SExt);
2046 else
2047 Attrs.addAttribute(llvm::Attribute::ZExt);
2048 LLVM_FALLTHROUGH;
2049 case ABIArgInfo::Direct:
2050 if (ArgNo == 0 && FI.isChainCall())
2051 Attrs.addAttribute(llvm::Attribute::Nest);
2052 else if (AI.getInReg())
2053 Attrs.addAttribute(llvm::Attribute::InReg);
2054 break;
2055
2056 case ABIArgInfo::Indirect: {
2057 if (AI.getInReg())
2058 Attrs.addAttribute(llvm::Attribute::InReg);
2059
2060 if (AI.getIndirectByVal())
2061 Attrs.addAttribute(llvm::Attribute::ByVal);
2062
2063 CharUnits Align = AI.getIndirectAlign();
2064
2065 // In a byval argument, it is important that the required
2066 // alignment of the type is honored, as LLVM might be creating a
2067 // *new* stack object, and needs to know what alignment to give
2068 // it. (Sometimes it can deduce a sensible alignment on its own,
2069 // but not if clang decides it must emit a packed struct, or the
2070 // user specifies increased alignment requirements.)
2071 //
2072 // This is different from indirect *not* byval, where the object
2073 // exists already, and the align attribute is purely
2074 // informative.
2075 assert(!Align.isZero());
2076
2077 // For now, only add this when we have a byval argument.
2078 // TODO: be less lazy about updating test cases.
2079 if (AI.getIndirectByVal())
2080 Attrs.addAlignmentAttr(Align.getQuantity());
2081
2082 // byval disables readnone and readonly.
2083 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2084 .removeAttribute(llvm::Attribute::ReadNone);
2085 break;
2086 }
2087 case ABIArgInfo::Ignore:
2088 case ABIArgInfo::Expand:
2089 case ABIArgInfo::CoerceAndExpand:
2090 break;
2091
2092 case ABIArgInfo::InAlloca:
2093 // inalloca disables readnone and readonly.
2094 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2095 .removeAttribute(llvm::Attribute::ReadNone);
2096 continue;
2097 }
2098
2099 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2100 QualType PTy = RefTy->getPointeeType();
2101 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2102 Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2103 .getQuantity());
2104 else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2105 !CodeGenOpts.NullPointerIsValid)
2106 Attrs.addAttribute(llvm::Attribute::NonNull);
2107 }
2108
2109 switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2110 case ParameterABI::Ordinary:
2111 break;
2112
2113 case ParameterABI::SwiftIndirectResult: {
2114 // Add 'sret' if we haven't already used it for something, but
2115 // only if the result is void.
2116 if (!hasUsedSRet && RetTy->isVoidType()) {
2117 Attrs.addAttribute(llvm::Attribute::StructRet);
2118 hasUsedSRet = true;
2119 }
2120
2121 // Add 'noalias' in either case.
2122 Attrs.addAttribute(llvm::Attribute::NoAlias);
2123
2124 // Add 'dereferenceable' and 'alignment'.
2125 auto PTy = ParamType->getPointeeType();
2126 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2127 auto info = getContext().getTypeInfoInChars(PTy);
2128 Attrs.addDereferenceableAttr(info.first.getQuantity());
2129 Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
2130 info.second.getQuantity()));
2131 }
2132 break;
2133 }
2134
2135 case ParameterABI::SwiftErrorResult:
2136 Attrs.addAttribute(llvm::Attribute::SwiftError);
2137 break;
2138
2139 case ParameterABI::SwiftContext:
2140 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2141 break;
2142 }
2143
2144 if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2145 Attrs.addAttribute(llvm::Attribute::NoCapture);
2146
2147 if (Attrs.hasAttributes()) {
2148 unsigned FirstIRArg, NumIRArgs;
2149 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2150 for (unsigned i = 0; i < NumIRArgs; i++)
2151 ArgAttrs[FirstIRArg + i] =
2152 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2153 }
2154 }
2155 assert(ArgNo == FI.arg_size());
2156
2157 AttrList = llvm::AttributeList::get(
2158 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2159 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2160}
2161
2162/// An argument came in as a promoted argument; demote it back to its
2163/// declared type.
2164static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2165 const VarDecl *var,
2166 llvm::Value *value) {
2167 llvm::Type *varType = CGF.ConvertType(var->getType());
2168
2169 // This can happen with promotions that actually don't change the
2170 // underlying type, like the enum promotions.
2171 if (value->getType() == varType) return value;
2172
2173 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2174 && "unexpected promotion type");
2175
2176 if (isa<llvm::IntegerType>(varType))
2177 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2178
2179 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2180}
2181
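// Illustrative example (not part of the original file): the classic case is
// a K&R-style definition, where parameters undergo the default argument
// promotions and must be demoted back in the prologue:
//   float f(x) float x; { return x; }  /* C; x arrives promoted to double */
// Here the incoming value is a double and emitArgumentDemotion emits the
// fptrunc back to float ("arg.unpromote"); integer promotions take the
// CreateTrunc path instead.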
2182/// Returns the attribute (either a parameter attribute or a function
2183/// attribute) that declares argument ArgNo to be non-null.
2184static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2185 QualType ArgType, unsigned ArgNo) {
2186 // FIXME: __attribute__((nonnull)) can also be applied to:
2187 // - references to pointers, where the pointee is known to be
2188 // nonnull (apparently a Clang extension)
2189 // - transparent unions containing pointers
2190 // In the former case, LLVM IR cannot represent the constraint. In
2191 // the latter case, we have no guarantee that the transparent union
2192 // is in fact passed as a pointer.
2193 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2194 return nullptr;
2195 // First, check attribute on parameter itself.
2196 if (PVD) {
2197 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2198 return ParmNNAttr;
2199 }
2200 // Check function attributes.
2201 if (!FD)
2202 return nullptr;
2203 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2204 if (NNAttr->isNonNull(ArgNo))
2205 return NNAttr;
2206 }
2207 return nullptr;
2208}
2209
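// Illustrative example (not part of the original file): the two spellings
// this helper recognizes, in C:
//   void g(int *p __attribute__((nonnull)));            // on the parameter
//   void h(int *p, int *q) __attribute__((nonnull(2))); // on the function;
//                                                       // 1-based, covers q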
2210namespace {
2211 struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2212 Address Temp;
2213 Address Arg;
2214 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2215 void Emit(CodeGenFunction &CGF, Flags flags) override {
2216 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2217 CGF.Builder.CreateStore(errorValue, Arg);
2218 }
2219 };
2220}
2221
2222void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2223 llvm::Function *Fn,
2224 const FunctionArgList &Args) {
2225 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
[1] Assuming the condition is false
2226 // Naked functions don't have prologues.
2227 return;
2228
2229 // If this is an implicit-return-zero function, go ahead and
2230 // initialize the return value. TODO: it might be nice to have
2231 // a more general mechanism for this that didn't require synthesized
2232 // return statements.
2233 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
[2] Taking false branch
2234 if (FD->hasImplicitReturnZero()) {
2235 QualType RetTy = FD->getReturnType().getUnqualifiedType();
2236 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2237 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2238 Builder.CreateStore(Zero, ReturnValue);
2239 }
2240 }
2241
2242 // FIXME: We no longer need the types from FunctionArgList; lift up and
2243 // simplify.
2244
2245 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2246 // Flattened function arguments.
2247 SmallVector<llvm::Value *, 16> FnArgs;
2248 FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2249 for (auto &Arg : Fn->args()) {
[3] Assuming '__begin1' is equal to '__end1'
2250 FnArgs.push_back(&Arg);
2251 }
2252 assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
[4] Assuming the condition is true
[5] '?' condition is true
2253
2254 // If we're using inalloca, all the memory arguments are GEPs off of the last
2255 // parameter, which is a pointer to the complete memory area.
2256 Address ArgStruct = Address::invalid();
2257 const llvm::StructLayout *ArgStructLayout = nullptr;
[6] 'ArgStructLayout' initialized to a null pointer value
2258 if (IRFunctionArgs.hasInallocaArg()) {
[7] Taking false branch
2259 ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2260 ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2261 FI.getArgStructAlignment());
2262
2263 assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2264 }
2265
2266 // Name the struct return parameter.
2267 if (IRFunctionArgs.hasSRetArg()) {
[8] Taking false branch
2268 auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2269 AI->setName("agg.result");
2270 AI->addAttr(llvm::Attribute::NoAlias);
2271 }
2272
2273 // Track if we received the parameter as a pointer (indirect, byval, or
2274 // inalloca). If we already have a pointer, EmitParmDecl doesn't need to
2275 // copy it into a local alloca for us.
2276 SmallVector<ParamValue, 16> ArgVals;
2277 ArgVals.reserve(Args.size());
2278
2279 // Create a pointer value for every parameter declaration. This usually
2280 // entails copying one or more LLVM IR arguments into an alloca. Don't push
2281 // any cleanups or do anything that might unwind. We do that separately, so
2282 // we can push the cleanups in the correct order for the ABI.
2283 assert(FI.arg_size() == Args.size() &&
[9] Assuming the condition is true
[10] '?' condition is true
2284 "Mismatch between function signature & arguments.");
2285 unsigned ArgNo = 0;
2286 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2287 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
[12] Loop condition is true. Entering loop body
2288 i != e; ++i, ++info_it, ++ArgNo) {
[11] Assuming 'i' is not equal to 'e'
2289 const VarDecl *Arg = *i;
2290 const ABIArgInfo &ArgI = info_it->info;
2291
2292 bool isPromoted =
2293 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2294 // We are converting from ABIArgInfo type to VarDecl type directly, unless
2295 // the parameter is promoted. In this case we convert to
2296 // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2297 QualType Ty = isPromoted ? info_it->type : Arg->getType();
[13] '?' condition is false
2298 assert(hasScalarEvaluationKind(Ty) ==
[14] '?' condition is true
2299 hasScalarEvaluationKind(Arg->getType()));
2300
2301 unsigned FirstIRArg, NumIRArgs;
2302 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2303
2304 switch (ArgI.getKind()) {
[15] Control jumps to 'case InAlloca:' at line 2305
2305 case ABIArgInfo::InAlloca: {
2306 assert(NumIRArgs == 0);
[16] Assuming 'NumIRArgs' is equal to 0
[17] '?' condition is true
2307 auto FieldIndex = ArgI.getInAllocaFieldIndex();
2308 CharUnits FieldOffset =
2309 CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
[18] Called C++ object pointer is null
2310 Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
2311 Arg->getName());
2312 ArgVals.push_back(ParamValue::forIndirect(V));
2313 break;
2314 }
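// Editorial note on the warning above: the path assumed hasInallocaArg()
// was false at step [7], leaving ArgStructLayout null, yet still entered
// the InAlloca case at step [15]. For a self-consistent CGFunctionInfo an
// inalloca parameter implies an inalloca IR argument, so this looks like a
// checker false positive; one hedged way to document the invariant (and
// silence the diagnostic) would be an assertion before the use:
//   assert(ArgStructLayout && "inalloca argument without argument struct");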
2315
2316 case ABIArgInfo::Indirect: {
2317 assert(NumIRArgs == 1);
2318 Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2319
2320 if (!hasScalarEvaluationKind(Ty)) {
2321 // Aggregates and complex variables are accessed by reference. All we
2322 // need to do is realign the value, if requested.
2323 Address V = ParamAddr;
2324 if (ArgI.getIndirectRealign()) {
2325 Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2326
2327 // Copy from the incoming argument pointer to the temporary with the
2328 // appropriate alignment.
2329 //
2330 // FIXME: We should have a common utility for generating an aggregate
2331 // copy.
2332 CharUnits Size = getContext().getTypeSizeInChars(Ty);
2333 auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2334 Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2335 Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2336 Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2337 V = AlignedTemp;
2338 }
2339 ArgVals.push_back(ParamValue::forIndirect(V));
2340 } else {
2341 // Load scalar value from indirect argument.
2342 llvm::Value *V =
2343 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2344
2345 if (isPromoted)
2346 V = emitArgumentDemotion(*this, Arg, V);
2347 ArgVals.push_back(ParamValue::forDirect(V));
2348 }
2349 break;
2350 }
2351
2352 case ABIArgInfo::Extend:
2353 case ABIArgInfo::Direct: {
2354
2355 // If we have the trivial case, handle it with no muss and fuss.
2356 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2357 ArgI.getCoerceToType() == ConvertType(Ty) &&
2358 ArgI.getDirectOffset() == 0) {
2359 assert(NumIRArgs == 1);
2360 llvm::Value *V = FnArgs[FirstIRArg];
2361 auto AI = cast<llvm::Argument>(V);
2362
2363 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2364 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2365 PVD->getFunctionScopeIndex()) &&
2366 !CGM.getCodeGenOpts().NullPointerIsValid)
2367 AI->addAttr(llvm::Attribute::NonNull);
2368
2369 QualType OTy = PVD->getOriginalType();
2370 if (const auto *ArrTy =
2371 getContext().getAsConstantArrayType(OTy)) {
2372 // A C99 array parameter declaration with the static keyword also
2373 // indicates dereferenceability, and if the size is constant we can
2374 // use the dereferenceable attribute (which requires the size in
2375 // bytes).
2376 if (ArrTy->getSizeModifier() == ArrayType::Static) {
2377 QualType ETy = ArrTy->getElementType();
2378 uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2379 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2380 ArrSize) {
2381 llvm::AttrBuilder Attrs;
2382 Attrs.addDereferenceableAttr(
2383 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2384 AI->addAttrs(Attrs);
2385 } else if (getContext().getTargetAddressSpace(ETy) == 0 &&
2386 !CGM.getCodeGenOpts().NullPointerIsValid) {
2387 AI->addAttr(llvm::Attribute::NonNull);
2388 }
2389 }
2390 } else if (const auto *ArrTy =
2391 getContext().getAsVariableArrayType(OTy)) {
2392 // For C99 VLAs with the static keyword, we don't know the size so
2393 // we can't use the dereferenceable attribute, but in addrspace(0)
2394 // we know that it must be nonnull.
2395 if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2396 !getContext().getTargetAddressSpace(ArrTy->getElementType()) &&
2397 !CGM.getCodeGenOpts().NullPointerIsValid)
2398 AI->addAttr(llvm::Attribute::NonNull);
2399 }
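// Illustrative example for the two branches above (hypothetical
// declarations, not from this file):
//   void f(int a[static 4]);        // constant size: the parameter gets
//                                   // dereferenceable(16), i.e. 4 * sizeof(int)
//   void g(int n, int a[static n]); // VLA: byte size unknown, so only
//                                   // nonnull (addrspace 0, null invalid)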
2400
2401 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2402 if (!AVAttr)
2403 if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2404 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2405 if (AVAttr) {
2406 llvm::Value *AlignmentValue =
2407 EmitScalarExpr(AVAttr->getAlignment());
2408 llvm::ConstantInt *AlignmentCI =
2409 cast<llvm::ConstantInt>(AlignmentValue);
2410 unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2411 +llvm::Value::MaximumAlignment);
2412 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2413 }
2414 }
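// Sketch of the align_value path above (hypothetical typedef, not from
// this file):
//   typedef double *aligned_double __attribute__((align_value(64)));
//   void h(aligned_double p);
// The attribute is found through the TypedefType, its alignment expression
// folds to the constant 64, and the IR argument is annotated "align 64",
// clamped to llvm::Value::MaximumAlignment.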
2415
2416 if (Arg->getType().isRestrictQualified())
2417 AI->addAttr(llvm::Attribute::NoAlias);
2418
2419 // LLVM expects swifterror parameters to be used in very restricted
2420 // ways. Copy the value into a less-restricted temporary.
2421 if (FI.getExtParameterInfo(ArgNo).getABI()
2422 == ParameterABI::SwiftErrorResult) {
2423 QualType pointeeTy = Ty->getPointeeType();
2424 assert(pointeeTy->isPointerType());
2425 Address temp =
2426 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2427 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2428 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2429 Builder.CreateStore(incomingErrorValue, temp);
2430 V = temp.getPointer();
2431
2432 // Push a cleanup to copy the value back at the end of the function.
2433 // The convention does not guarantee that the value will be written
2434 // back if the function exits with an unwind exception.
2435 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2436 }
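// Rough shape of the IR produced for a swifterror parameter (sketch, value
// names hypothetical):
//   %swifterror.temp = alloca i8*              ; less-restricted copy
//   %incoming = load i8*, i8** %arg
//   store i8* %incoming, i8** %swifterror.temp
//   ; the body uses %swifterror.temp instead of %arg; a normal cleanup
//   ; copies the value back into %arg on function exit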
2437
2438 // Ensure the argument is the correct type.
2439 if (V->getType() != ArgI.getCoerceToType())
2440 V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2441
2442 if (isPromoted)
2443 V = emitArgumentDemotion(*this, Arg, V);
2444
2445 // Because of merging of function types from multiple decls it is
2446 // possible for the type of an argument to not match the corresponding
2447 // type in the function type. Since we are codegening the callee
2448 // in here, add a cast to the argument type.
2449 llvm::Type *LTy = ConvertType(Arg->getType());
2450 if (V->getType() != LTy)
2451 V = Builder.CreateBitCast(V, LTy);
2452
2453 ArgVals.push_back(ParamValue::forDirect(V));
2454 break;
2455 }
2456
2457 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2458 Arg->getName());
2459
2460 // Pointer to store into.
2461 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2462
2463 // Fast-isel and the optimizer generally like scalar values better than
2464 // FCAs, so we flatten them if this is safe to do for this argument.
2465 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2466 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2467 STy->getNumElements() > 1) {
2468 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2469 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2470 llvm::Type *DstTy = Ptr.getElementType();
2471 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2472
2473 Address AddrToStoreInto = Address::invalid();
2474 if (SrcSize <= DstSize) {
2475 AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2476 } else {
2477 AddrToStoreInto =
2478 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2479 }
2480
2481 assert(STy->getNumElements() == NumIRArgs);
2482 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2483 auto AI = FnArgs[FirstIRArg + i];
2484 AI->setName(Arg->getName() + ".coerce" + Twine(i));
2485 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2486 Address EltPtr =
2487 Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2488 Builder.CreateStore(AI, EltPtr);
2489 }
2490
2491 if (SrcSize > DstSize) {
2492 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2493 }
2494
2495 } else {
2496 // Simple case, just do a coerced store of the argument into the alloca.
2497 assert(NumIRArgs == 1);
2498 auto AI = FnArgs[FirstIRArg];
2499 AI->setName(Arg->getName() + ".coerce");
2500 CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2501 }
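// Illustrative flattening (hypothetical argument named "x"): a struct
// coerced to { i64, i64 } arrives as two IR arguments, stored as
// "x.coerce0" and "x.coerce1" into the matching fields of the alloca; if
// the coerced type is larger than the destination, the elements are first
// stored to a temporary and only DstSize bytes are memcpy'd over.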
2502
2503 // Match to what EmitParmDecl is expecting for this type.
2504 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2505 llvm::Value *V =
2506 EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2507 if (isPromoted)
2508 V = emitArgumentDemotion(*this, Arg, V);
2509 ArgVals.push_back(ParamValue::forDirect(V));
2510 } else {
2511 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2512 }
2513 break;
2514 }
2515
2516 case ABIArgInfo::CoerceAndExpand: {
2517 // Reconstruct into a temporary.
2518 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2519 ArgVals.push_back(ParamValue::forIndirect(alloca));
2520
2521 auto coercionType = ArgI.getCoerceAndExpandType();
2522 alloca = Builder.CreateElementBitCast(alloca, coercionType);
2523 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2524
2525 unsigned argIndex = FirstIRArg;
2526 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2527 llvm::Type *eltType = coercionType->getElementType(i);
2528 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2529 continue;
2530
2531 auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2532 auto elt = FnArgs[argIndex++];
2533 Builder.CreateStore(elt, eltAddr);
2534 }
2535 assert(argIndex == FirstIRArg + NumIRArgs);
2536 break;
2537 }
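// Illustrative coerce-and-expand layout (assumed, not from this file): for
// a coercion type like { i32, [4 x i8], i32 } where the array is padding,
// only the two i32 elements correspond to IR arguments, so the loop above
// stores exactly NumIRArgs values and skips the padding slot.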
2538
2539 case ABIArgInfo::Expand: {
2540 // If this structure was expanded into multiple arguments then
2541 // we need to create a temporary and reconstruct it from the
2542 // arguments.
2543 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2544 LValue LV = MakeAddrLValue(Alloca, Ty);
2545 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2546
2547 auto FnArgIter = FnArgs.begin() + FirstIRArg;
2548 ExpandTypeFromArgs(Ty, LV, FnArgIter);
2549 assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2550 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2551 auto AI = FnArgs[FirstIRArg + i];
2552 AI->setName(Arg->getName() + "." + Twine(i));
2553 }
2554 break;
2555 }
2556
2557 case ABIArgInfo::Ignore:
2558 assert(NumIRArgs == 0);
2559 // Initialize the local variable appropriately.
2560 if (!hasScalarEvaluationKind(Ty)) {
2561 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2562 } else {
2563 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2564 ArgVals.push_back(ParamValue::forDirect(U));
2565 }
2566 break;
2567 }
2568 }
2569
2570 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2571 for (int I = Args.size() - 1; I >= 0; --I)
2572 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2573 } else {
2574 for (unsigned I = 0, E = Args.size(); I != E; ++I)
2575 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2576 }
2577}
2578
2579static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2580 while (insn->use_empty()) {
2581 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2582 if (!bitcast) return;
2583
2584 // This is "safe" because we would have used a ConstantExpr otherwise.
2585 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2586 bitcast->eraseFromParent();
2587 }
2588}
2589
2590/// Try to emit a fused autorelease of a return result.
2591static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2592 llvm::Value *result) {
2593 // We must be immediately following the cast.
2594 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2595 if (BB->empty()) return nullptr;
2596 if (&BB->back() != result) return nullptr;
2597
2598 llvm::Type *resultType = result->getType();
2599
2600 // result is in a BasicBlock and is therefore an Instruction.
2601 llvm::Instruction *generator = cast<llvm::Instruction>(result);
2602
2603 SmallVector<llvm::Instruction *, 4> InstsToKill;
2604
2605 // Look for:
2606 // %generator = bitcast %type1* %generator2 to %type2*
2607 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2608 // We would have emitted this as a constant if the operand weren't
2609 // an Instruction.
2610 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2611
2612 // Require the generator to be immediately followed by the cast.
2613 if (generator->getNextNode() != bitcast)
2614 return nullptr;
2615
2616 InstsToKill.push_back(bitcast);
2617 }
2618
2619 // Look for:
2620 // %generator = call i8* @objc_retain(i8* %originalResult)
2621 // or
2622 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2623 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2624 if (!call) return nullptr;
2625
2626 bool doRetainAutorelease;
2627
2628 if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2629 doRetainAutorelease = true;
2630 } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2631 .objc_retainAutoreleasedReturnValue) {
2632 doRetainAutorelease = false;
2633
2634 // If we emitted an assembly marker for this call (and the
2635 // ARCEntrypoints field should have been set if so), go looking
2636 // for that call. If we can't find it, we can't do this
2637 // optimization. But it should always be the immediately previous
2638 // instruction, unless we needed bitcasts around the call.
2639 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2640 llvm::Instruction *prev = call->getPrevNode();
2641 assert(prev);
2642 if (isa<llvm::BitCastInst>(prev)) {
2643 prev = prev->getPrevNode();
2644 assert(prev);
2645 }
2646 assert(isa<llvm::CallInst>(prev));
2647 assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2648        CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2649 InstsToKill.push_back(prev);
2650 }
2651 } else {
2652 return nullptr;
2653 }
2654
2655 result = call->getArgOperand(0);
2656 InstsToKill.push_back(call);
2657
2658 // Keep killing bitcasts, for sanity. Note that we no longer care
2659 // about precise ordering as long as there's exactly one use.
2660 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2661 if (!bitcast->hasOneUse()) break;
2662 InstsToKill.push_back(bitcast);
2663 result = bitcast->getOperand(0);
2664 }
2665
2666 // Delete all the unnecessary instructions, from latest to earliest.
2667 for (auto *I : InstsToKill)
2668 I->eraseFromParent();
2669
2670 // Do the fused retain/autorelease if we were asked to.
2671 if (doRetainAutorelease)
2672 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2673
2674 // Cast back to the result type.
2675 return CGF.Builder.CreateBitCast(result, resultType);
2676}
2677
2678/// If this is a +1 of the value of an immutable 'self', remove it.
2679static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2680 llvm::Value *result) {
2681 // This is only applicable to a method with an immutable 'self'.
2682 const ObjCMethodDecl *method =
2683 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2684 if (!method) return nullptr;
2685 const VarDecl *self = method->getSelfDecl();
2686 if (!self->getType().isConstQualified()) return nullptr;
2687
2688 // Look for a retain call.
2689 llvm::CallInst *retainCall =
2690 dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2691 if (!retainCall ||
2692 retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2693 return nullptr;
2694
2695 // Look for an ordinary load of 'self'.
2696 llvm::Value *retainedValue = retainCall->getArgOperand(0);
2697 llvm::LoadInst *load =
2698 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2699 if (!load || load->isAtomic() || load->isVolatile() ||
2700 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2701 return nullptr;
2702
2703 // Okay! Burn it all down. This relies for correctness on the
2704 // assumption that the retain is emitted as part of the return and
2705 // that thereafter everything is used "linearly".
2706 llvm::Type *resultType = result->getType();
2707 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2708 assert(retainCall->use_empty());
2709 retainCall->eraseFromParent();
2710 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2711
2712 return CGF.Builder.CreateBitCast(load, resultType);
2713}
2714
2715/// Emit an ARC autorelease of the result of a function.
2716///
2717/// \return the value to actually return from the function
2718static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2719 llvm::Value *result) {
2720 // If we're returning 'self', kill the initial retain. This is a
2721 // heuristic attempt to "encourage correctness" in the really unfortunate
2722 // case where we have a return of self during a dealloc and we desperately
2723 // need to avoid the possible autorelease.
2724 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2725 return self;
2726
2727 // At -O0, try to emit a fused retain/autorelease.
2728 if (CGF.shouldUseFusedARCCalls())
2729 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2730 return fused;
2731
2732 return CGF.EmitARCAutoreleaseReturnValue(result);
2733}
2734
2735/// Heuristically search for a dominating store to the return-value slot.
2736static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2737 // Check whether a User is a store whose pointer operand is the ReturnValue.
2738 // We are looking for stores to the ReturnValue, not for stores of the
2739 // ReturnValue to some other location.
2740 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2741 auto *SI = dyn_cast<llvm::StoreInst>(U);
2742 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2743 return nullptr;
2744 // These aren't actually possible for non-coerced returns, and we
2745 // only care about non-coerced returns on this code path.
2746 assert(!SI->isAtomic() && !SI->isVolatile());
2747 return SI;
2748 };
2749 // If there are multiple uses of the return-value slot, just check
2750 // for something immediately preceding the IP. Sometimes this can
2751 // happen with how we generate implicit-returns; it can also happen
2752 // with noreturn cleanups.
2753 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2754 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2755 if (IP->empty()) return nullptr;
2756 llvm::Instruction *I = &IP->back();
2757
2758 // Skip lifetime markers
2759 for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2760 IE = IP->rend();
2761 II != IE; ++II) {
2762 if (llvm::IntrinsicInst *Intrinsic =
2763 dyn_cast<llvm::IntrinsicInst>(&*II)) {
2764 if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2765 const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2766 ++II;
2767 if (II == IE)
2768 break;
2769 if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2770 continue;
2771 }
2772 }
2773 I = &*II;
2774 break;
2775 }
2776
2777 return GetStoreIfValid(I);
2778 }
2779
2780 llvm::StoreInst *store =
2781 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2782 if (!store) return nullptr;
2783
2784 // Now do a quick-and-dirty dominance check: just walk up the
2785 // single-predecessor chain from the current insertion point.
2786 llvm::BasicBlock *StoreBB = store->getParent();
2787 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2788 while (IP != StoreBB) {
2789 if (!(IP = IP->getSinglePredecessor()))
2790 return nullptr;
2791 }
2792
2793 // Okay, the store's basic block dominates the insertion point; we
2794 // can do our thing.
2795 return store;
2796}
2797
2798void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2799 bool EmitRetDbgLoc,
2800 SourceLocation EndLoc) {
2801 if (FI.isNoReturn()) {
2802 // Noreturn functions don't return.
2803 EmitUnreachable(EndLoc);
2804 return;
2805 }
2806
2807 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2808 // Naked functions don't have epilogues.
2809 Builder.CreateUnreachable();
2810 return;
2811 }
2812
2813 // Functions with no result always return void.
2814 if (!ReturnValue.isValid()) {
2815 Builder.CreateRetVoid();
2816 return;
2817 }
2818
2819 llvm::DebugLoc RetDbgLoc;
2820 llvm::Value *RV = nullptr;
2821 QualType RetTy = FI.getReturnType();
2822 const ABIArgInfo &RetAI = FI.getReturnInfo();
2823
2824 switch (RetAI.getKind()) {
2825 case ABIArgInfo::InAlloca:
2826 // Aggregates get evaluated directly into the destination. Sometimes we
2827 // need to return the sret value in a register, though.
2828 assert(hasAggregateEvaluationKind(RetTy));
2829 if (RetAI.getInAllocaSRet()) {
2830 llvm::Function::arg_iterator EI = CurFn->arg_end();
2831 --EI;
2832 llvm::Value *ArgStruct = &*EI;
2833 llvm::Value *SRet = Builder.CreateStructGEP(
2834 nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2835 RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2836 }
2837 break;
2838
2839 case ABIArgInfo::Indirect: {
2840 auto AI = CurFn->arg_begin();
2841 if (RetAI.isSRetAfterThis())
2842 ++AI;
2843 switch (getEvaluationKind(RetTy)) {
2844 case TEK_Complex: {
2845 ComplexPairTy RT =
2846 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2847 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2848 /*isInit*/ true);
2849 break;
2850 }
2851 case TEK_Aggregate:
2852 // Do nothing; aggregates get evaluated directly into the destination.
2853 break;
2854 case TEK_Scalar:
2855 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2856 MakeNaturalAlignAddrLValue(&*AI, RetTy),
2857 /*isInit*/ true);
2858 break;
2859 }
2860 break;
2861 }
2862
2863 case ABIArgInfo::Extend:
2864 case ABIArgInfo::Direct:
2865 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2866 RetAI.getDirectOffset() == 0) {
2867 // The internal return value temp will always have pointer-to-return-type
2868 // type, so just do a load.
2869
2870 // If there is a dominating store to ReturnValue, we can elide
2871 // the load, zap the store, and usually zap the alloca.
2872 if (llvm::StoreInst *SI =
2873 findDominatingStoreToReturnValue(*this)) {
2874 // Reuse the debug location from the store unless there is
2875 // cleanup code to be emitted between the store and return
2876 // instruction.
2877 if (EmitRetDbgLoc && !AutoreleaseResult)
2878 RetDbgLoc = SI->getDebugLoc();
2879 // Get the stored value and nuke the now-dead store.
2880 RV = SI->getValueOperand();
2881 SI->eraseFromParent();
2882
2883 // If that was the only use of the return value, nuke it as well now.
2884 auto returnValueInst = ReturnValue.getPointer();
2885 if (returnValueInst->use_empty()) {
2886 if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2887 alloca->eraseFromParent();
2888 ReturnValue = Address::invalid();
2889 }
2890 }
2891
2892 // Otherwise, we have to do a simple load.
2893 } else {
2894 RV = Builder.CreateLoad(ReturnValue);
2895 }
2896 } else {
2897 // If the value is offset in memory, apply the offset now.
2898 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2899
2900 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2901 }
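// Sketch of the elision above (illustrative IR):
//   store i32 %x, i32* %retval
//   %0 = load i32, i32* %retval
//   ret i32 %0
// becomes "ret i32 %x": the load is never emitted, the store is erased,
// and if nothing else uses %retval its alloca is deleted too.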
2902
2903 // In ARC, end functions that return a retainable type with a call
2904 // to objc_autoreleaseReturnValue.
2905 if (AutoreleaseResult) {
2906#ifndef NDEBUG
2907 // Type::isObjCRetainableType has to be called on a QualType that hasn't
2908 // been stripped of the typedefs, so we cannot use RetTy here. Get the
2909 // original return type of the FunctionDecl, ObjCMethodDecl, or BlockDecl
2910 // from CurCodeDecl or BlockInfo.
2911 QualType RT;
2912
2913 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2914 RT = FD->getReturnType();
2915 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2916 RT = MD->getReturnType();
2917 else if (isa<BlockDecl>(CurCodeDecl))
2918 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2919 else
2920 llvm_unreachable("Unexpected function/method type")::llvm::llvm_unreachable_internal("Unexpected function/method type"
, "/build/llvm-toolchain-snapshot-8~svn348900/tools/clang/lib/CodeGen/CGCall.cpp"
, 2920)
;
2921
2922 assert(getLangOpts().ObjCAutoRefCount &&
2923        !FI.isReturnsRetained() &&
2924        RT->isObjCRetainableType());
2925#endif
2926 RV = emitAutoreleaseOfResult(*this, RV);
2927 }
2928
2929 break;
2930
2931 case ABIArgInfo::Ignore:
2932 break;
2933
2934 case ABIArgInfo::CoerceAndExpand: {
2935 auto coercionType = RetAI.getCoerceAndExpandType();
2936 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2937
2938 // Load all of the coerced elements out into results.
2939 llvm::SmallVector<llvm::Value*, 4> results;
2940 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2941 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2942 auto coercedEltType = coercionType->getElementType(i);
2943 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2944 continue;
2945
2946 auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
2947 auto elt = Builder.CreateLoad(eltAddr);
2948 results.push_back(elt);
2949 }
2950
2951 // If we have one result, it's the single direct result type.
2952 if (results.size() == 1) {
2953 RV = results[0];
2954
2955 // Otherwise, we need to make a first-class aggregate.
2956 } else {
2957 // Construct a return type that lacks padding elements.
2958 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2959
2960 RV = llvm::UndefValue::get(returnType);
2961 for (unsigned i = 0, e = results.size(); i != e; ++i) {
2962 RV = Builder.CreateInsertValue(RV, results[i], i);
2963 }
2964 }
2965 break;
2966 }
2967
2968 case ABIArgInfo::Expand:
2969 llvm_unreachable("Invalid ABI kind for return argument")::llvm::llvm_unreachable_internal("Invalid ABI kind for return argument"
, "/build/llvm-toolchain-snapshot-8~svn348900/tools/clang/lib/CodeGen/CGCall.cpp"
, 2969)
;
2970 }
2971
2972 llvm::Instruction *Ret;
2973 if (RV) {
2974 EmitReturnValueCheck(RV);
2975 Ret = Builder.CreateRet(RV);
2976 } else {
2977 Ret = Builder.CreateRetVoid();
2978 }
2979
2980 if (RetDbgLoc)
2981 Ret->setDebugLoc(std::move(RetDbgLoc));
2982}
2983
2984void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
2985 // A current decl may not be available when emitting vtable thunks.
2986 if (!CurCodeDecl)
2987 return;
2988
2989 ReturnsNonNullAttr *RetNNAttr = nullptr;
2990 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
2991 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
2992
2993 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
2994 return;
2995
2996 // Prefer the returns_nonnull attribute if it's present.
2997 SourceLocation AttrLoc;
2998 SanitizerMask CheckKind;
2999 SanitizerHandler Handler;
3000 if (RetNNAttr) {
3001 assert(!requiresReturnValueNullabilityCheck() &&
3002        "Cannot check nullability and the nonnull attribute");
3003 AttrLoc = RetNNAttr->getLocation();
3004 CheckKind = SanitizerKind::ReturnsNonnullAttribute;
3005 Handler = SanitizerHandler::NonnullReturn;
3006 } else {
3007 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3008 if (auto *TSI = DD->getTypeSourceInfo())
3009 if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
3010 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3011 CheckKind = SanitizerKind::NullabilityReturn;
3012 Handler = SanitizerHandler::NullabilityReturn;
3013 }
3014
3015 SanitizerScope SanScope(this);
3016
3017 // Make sure the "return" source location is valid. If we're checking a
3018 // nullability annotation, make sure the preconditions for the check are met.
3019 llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3020 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3021 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3022 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3023 if (requiresReturnValueNullabilityCheck())
3024 CanNullCheck =
3025 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3026 Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3027 EmitBlock(Check);
3028
3029 // Now do the null check.
3030 llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3031 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3032 llvm::Value *DynamicData[] = {SLocPtr};
3033 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3034
3035 EmitBlock(NoCheck);
3036
3037#ifndef NDEBUG
3038 // The return location should not be used after the check has been emitted.
3039 ReturnLocation = Address::invalid();
3040#endif
3041}
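// Illustrative trigger for this check (hypothetical declaration): under
// -fsanitize=returns-nonnull-attribute, a function like
//   __attribute__((returns_nonnull)) char *get(void);
// gets a branch on an "is not null" test of the return value; on failure
// the handler receives the attribute location (static) and the return
// statement's location (dynamic).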
3042
3043static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3044 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3045 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3046}
3047
3048static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3049 QualType Ty) {
3050 // FIXME: Generate IR in one pass, rather than going back and fixing up these
3051 // placeholders.
3052 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3053 llvm::Type *IRPtrTy = IRTy->getPointerTo();
3054 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3055
3056 // FIXME: When we generate this IR in one pass, we shouldn't need
3057 // this win32-specific alignment hack.
3058 CharUnits Align = CharUnits::fromQuantity(4);
3059 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3060
3061 return AggValueSlot::forAddr(Address(Placeholder, Align),
3062 Ty.getQualifiers(),
3063 AggValueSlot::IsNotDestructed,
3064 AggValueSlot::DoesNotNeedGCBarriers,
3065 AggValueSlot::IsNotAliased,
3066 AggValueSlot::DoesNotOverlap);
3067}
3068
3069void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3070 const VarDecl *param,
3071 SourceLocation loc) {
3072 // StartFunction converted the ABI-lowered parameter(s) into a
3073 // local alloca. We need to turn that into an r-value suitable
3074 // for EmitCall.
3075 Address local = GetAddrOfLocalVar(param);
3076
3077 QualType type = param->getType();
3078
3079 assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
3080        "cannot emit delegate call arguments for inalloca arguments!");
3081
3082 // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3083 // but the argument needs to be the original pointer.
3084 if (type->isReferenceType()) {
3085 args.add(RValue::get(Builder.CreateLoad(local)), type);
3086
3087 // In ARC, move out of consumed arguments so that the release cleanup
3088 // entered by StartFunction doesn't cause an over-release. This isn't
3089 // optimal -O0 code generation, but it should get cleaned up when
3090 // optimization is enabled. This also assumes that delegate calls are
3091 // performed exactly once for a set of arguments, but that should be safe.
3092 } else if (getLangOpts().ObjCAutoRefCount &&
3093 param->hasAttr<NSConsumedAttr>() &&
3094 type->isObjCRetainableType()) {
3095 llvm::Value *ptr = Builder.CreateLoad(local);
3096 auto null =
3097 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3098 Builder.CreateStore(null, local);
3099 args.add(RValue::get(ptr), type);
3100
3101 // For the most part, we just need to load the alloca, except that
3102 // aggregate r-values are actually pointers to temporaries.
3103 } else {
3104 args.add(convertTempToRValue(local, type, loc), type);
3105 }
3106
3107 // Deactivate the cleanup for the callee-destructed param that was pushed.
3108 if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3109 type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3110 type.isDestructedType()) {
3111 EHScopeStack::stable_iterator cleanup =
3112 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3113 assert(cleanup.isValid() &&
3114        "cleanup for callee-destructed param not recorded");
3115 // This unreachable is a temporary marker which will be removed later.
3116 llvm::Instruction *isActive = Builder.CreateUnreachable();
3117 args.addArgCleanupDeactivation(cleanup, isActive);
3118 }
3119}
3120
3121static bool isProvablyNull(llvm::Value *addr) {
3122 return isa<llvm::ConstantPointerNull>(addr);
3123}
3124
3125/// Emit the actual writing-back of a writeback.
3126static void emitWriteback(CodeGenFunction &CGF,
3127 const CallArgList::Writeback &writeback) {
3128 const LValue &srcLV = writeback.Source;
3129 Address srcAddr = srcLV.getAddress();
3130 assert(!isProvablyNull(srcAddr.getPointer()) &&
3131        "shouldn't have writeback for provably null argument");
3132
3133 llvm::BasicBlock *contBB = nullptr;
3134
3135 // If the argument wasn't provably non-null, we need to null check
3136 // before doing the store.
3137 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3138 CGF.CGM.getDataLayout());
3139 if (!provablyNonNull) {
3140 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3141 contBB = CGF.createBasicBlock("icr.done");
3142
3143 llvm::Value *isNull =
3144 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3145 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3146 CGF.EmitBlock(writebackBB);
3147 }
3148
3149 // Load the value to write back.
3150 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3151
3152 // Cast it back, in case we're writing an id to a Foo* or something.
3153 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3154 "icr.writeback-cast");
3155
3156 // Perform the writeback.
3157
3158 // If we have a "to use" value, it's something we need to emit a use
3159 // of. This has to be carefully threaded in: if it's done after the
3160 // release it's potentially undefined behavior (and the optimizer
3161 // will ignore it), and if it happens before the retain then the
3162 // optimizer could move the release there.
3163 if (writeback.ToUse) {
3164 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3165
3166 // Retain the new value. No need to block-copy here: the block's
3167 // being passed up the stack.
3168 value = CGF.EmitARCRetainNonBlock(value);
3169
3170 // Emit the intrinsic use here.
3171 CGF.EmitARCIntrinsicUse(writeback.ToUse);
3172
3173 // Load the old value (primitively).
3174 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3175
3176 // Put the new value in place (primitively).
3177 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3178
3179 // Release the old value.
3180 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3181
3182 // Otherwise, we can just do a normal lvalue store.
3183 } else {
3184 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3185 }
3186
3187 // Jump to the continuation block.
3188 if (!provablyNonNull)
3189 CGF.EmitBlock(contBB);
3190}
3191
3192static void emitWritebacks(CodeGenFunction &CGF,
3193 const CallArgList &args) {
3194 for (const auto &I : args.writebacks())
3195 emitWriteback(CGF, I);
3196}
3197
3198static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3199 const CallArgList &CallArgs) {
3200 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3201 CallArgs.getCleanupsToDeactivate();
3202 // Iterate in reverse to increase the likelihood of popping the cleanup.
3203 for (const auto &I : llvm::reverse(Cleanups)) {
3204 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3205 I.IsActiveIP->eraseFromParent();
3206 }
3207}
3208
3209static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3210 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3211 if (uop->getOpcode() == UO_AddrOf)
3212 return uop->getSubExpr();
3213 return nullptr;
3214}
3215
3216/// Emit an argument that's being passed call-by-writeback. That is,
3217/// we are passing the address of an __autoreleased temporary; it
3218/// might be copy-initialized with the current value of the given
3219/// address, but it will definitely be copied out of after the call.
3220static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3221 const ObjCIndirectCopyRestoreExpr *CRE) {
3222 LValue srcLV;
3223
3224 // Make an optimistic effort to emit the address as an l-value.
3225 // This can fail if the argument expression is more complicated.
3226 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3227 srcLV = CGF.EmitLValue(lvExpr);
3228
3229 // Otherwise, just emit it as a scalar.
3230 } else {
3231 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3232
3233 QualType srcAddrType =
3234 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3235 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3236 }
3237 Address srcAddr = srcLV.getAddress();
3238
3239 // The dest and src types don't necessarily match in LLVM terms
3240 // because of the crazy ObjC compatibility rules.
3241
3242 llvm::PointerType *destType =
3243 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3244
3245 // If the address is a constant null, just pass the appropriate null.
3246 if (isProvablyNull(srcAddr.getPointer())) {
3247 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3248 CRE->getType());
3249 return;
3250 }
3251
3252 // Create the temporary.
3253 Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3254 CGF.getPointerAlign(),
3255 "icr.temp");
3256 // Loading an l-value can introduce a cleanup if the l-value is __weak,
3257 // and that cleanup will be conditional if we can't prove that the l-value
3258 // isn't null, so we need to register a dominating point so that the cleanups
3259 // system will make valid IR.
3260 CodeGenFunction::ConditionalEvaluation condEval(CGF);
3261
3262 // Zero-initialize it if we're not doing a copy-initialization.
3263 bool shouldCopy = CRE->shouldCopy();
3264 if (!shouldCopy) {
3265 llvm::Value *null =
3266 llvm::ConstantPointerNull::get(
3267 cast<llvm::PointerType>(destType->getElementType()));
3268 CGF.Builder.CreateStore(null, temp);
3269 }
3270
3271 llvm::BasicBlock *contBB = nullptr;
3272 llvm::BasicBlock *originBB = nullptr;
3273
3274 // If the address is *not* known to be non-null, we need to switch.
3275 llvm::Value *finalArgument;
3276
3277 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3278 CGF.CGM.getDataLayout());
3279 if (provablyNonNull) {
3280 finalArgument = temp.getPointer();
3281 } else {
3282 llvm::Value *isNull =
3283 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3284
3285 finalArgument = CGF.Builder.CreateSelect(isNull,
3286 llvm::ConstantPointerNull::get(destType),
3287 temp.getPointer(), "icr.argument");
3288
3289 // If we need to copy, then the load has to be conditional, which
3290 // means we need control flow.
3291 if (shouldCopy) {
3292 originBB = CGF.Builder.GetInsertBlock();
3293 contBB = CGF.createBasicBlock("icr.cont");
3294 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3295 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3296 CGF.EmitBlock(copyBB);
3297 condEval.begin(CGF);
3298 }
3299 }
3300
3301 llvm::Value *valueToUse = nullptr;
3302
3303 // Perform a copy if necessary.
3304 if (shouldCopy) {
3305 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3306 assert(srcRV.isScalar());
3307
3308 llvm::Value *src = srcRV.getScalarVal();
3309 src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3310 "icr.cast");
3311
3312 // Use an ordinary store, not a store-to-lvalue.
3313 CGF.Builder.CreateStore(src, temp);
3314
3315 // If optimization is enabled, and the value was held in a
3316 // __strong variable, we need to tell the optimizer that this
3317 // value has to stay alive until we're doing the store back.
3318 // This is because the temporary is effectively unretained,
3319 // and so otherwise we can violate the high-level semantics.
3320 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3321 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3322 valueToUse = src;
3323 }
3324 }
3325
3326 // Finish the control flow if we needed it.
3327 if (shouldCopy && !provablyNonNull) {
3328 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3329 CGF.EmitBlock(contBB);
3330
3331 // Make a phi for the value to intrinsically use.
3332 if (valueToUse) {
3333 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3334 "icr.to-use");
3335 phiToUse->addIncoming(valueToUse, copyBB);
3336 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3337 originBB);
3338 valueToUse = phiToUse;
3339 }
3340
3341 condEval.end(CGF);
3342 }
3343
3344 args.addWriteback(srcLV, temp, valueToUse);
3345 args.add(RValue::get(finalArgument), CRE->getType());
3346}
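// Typical source pattern reaching this path (illustrative ARC code):
//   NSError *err = nil;
//   [obj doThing:&err];   // parameter type NSError *__autoreleasing *
// A temporary is passed in place of &err (selected against null when the
// address isn't provably non-null), and after the call the temporary's
// value is written back, retaining the new value and releasing the old
// one when the source is __strong.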
3347
3348void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3349 assert(!StackBase);
3350
3351 // Save the stack.
3352 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3353 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3354}
3355
3356void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3357 if (StackBase) {
3358 // Restore the stack after the call.
3359 llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3360 CGF.Builder.CreateCall(F, StackBase);
3361 }
3362}
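// The pair above brackets inalloca argument memory (sketch of the
// resulting IR, names hypothetical):
//   %inalloca.save = call i8* @llvm.stacksave()
//   %argmem = alloca inalloca <{ ... }>
//   call void @f(<{ ... }>* inalloca %argmem)
//   call void @llvm.stackrestore(i8* %inalloca.save)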
3363
3364void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3365 SourceLocation ArgLoc,
3366 AbstractCallee AC,
3367 unsigned ParmNum) {
3368 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3369 SanOpts.has(SanitizerKind::NullabilityArg)))
3370 return;
3371
3372 // The param decl may be missing in a variadic function.
3373 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3374 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3375
3376 // Prefer the nonnull attribute if it's present.
3377 const NonNullAttr *NNAttr = nullptr;
3378 if (SanOpts.has(SanitizerKind::NonnullAttribute))
3379 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3380
3381 bool CanCheckNullability = false;
3382 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3383 auto Nullability = PVD->getType()->getNullability(getContext());
3384 CanCheckNullability = Nullability &&
3385 *Nullability == NullabilityKind::NonNull &&
3386 PVD->getTypeSourceInfo();
3387 }
3388
3389 if (!NNAttr && !CanCheckNullability)
3390 return;
3391
3392 SourceLocation AttrLoc;
3393 SanitizerMask CheckKind;
3394 SanitizerHandler Handler;
3395 if (NNAttr) {
3396 AttrLoc = NNAttr->getLocation();
3397 CheckKind = SanitizerKind::NonnullAttribute;
3398 Handler = SanitizerHandler::NonnullArg;
3399 } else {
3400 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3401 CheckKind = SanitizerKind::NullabilityArg;
3402 Handler = SanitizerHandler::NullabilityArg;
3403 }
3404
3405 SanitizerScope SanScope(this);
3406 assert(RV.isScalar());
3407 llvm::Value *V = RV.getScalarVal();
3408 llvm::Value *Cond =
3409 Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3410 llvm::Constant *StaticData[] = {
3411 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3412 llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3413 };
3414 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3415}
3416
3417void CodeGenFunction::EmitCallArgs(
3418 CallArgList &Args, ArrayRef<QualType> ArgTypes,
3419 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3420 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3421 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3422
3423 // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3424 // because arguments are destroyed left to right in the callee. As a special
3425 // case, there are certain language constructs that require left-to-right
3426 // evaluation, and in those cases we consider the evaluation order requirement
3427 // to trump the "destruction order is reverse construction order" guarantee.
3428 bool LeftToRight =
3429 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3430 ? Order == EvaluationOrder::ForceLeftToRight
3431 : Order != EvaluationOrder::ForceRightToLeft;
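// Concrete consequence (illustrative): for f(a(), b()) under the Microsoft
// C++ ABI, b() is evaluated before a() so the callee can destroy arguments
// left to right; constructs with a mandated left-to-right order arrive here
// as ForceLeftToRight and win out.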
3432
3433 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3434 RValue EmittedArg) {
3435 if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3436 return;
3437 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3438 if (PS == nullptr)
3439 return;
3440
3441 const auto &Context = getContext();
3442 auto SizeTy = Context.getSizeType();
3443 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3444 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3445 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3446 EmittedArg.getScalarVal());
3447 Args.add(RValue::get(V), SizeTy);
3448 // If we're emitting args in reverse, be sure to do so with
3449 // pass_object_size, as well.
3450 if (!LeftToRight)
3451 std::swap(Args.back(), *(&Args.back() - 1));
3452 };
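// Example of the implicit argument this lambda appends (hypothetical
// declaration, not from this file):
//   void use(char *p __attribute__((pass_object_size(0))));
//   use(buf);
// lowers to use(buf, n), where n is a constant if the size can be folded,
// or the result of @llvm.objectsize otherwise; the swap above keeps the
// size paired with its pointer when arguments are emitted right to left.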
3453
3454 // Insert a stack save if we're going to need any inalloca args.
3455 bool HasInAllocaArgs = false;
3456 if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3457 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3458 I != E && !HasInAllocaArgs; ++I)
3459 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3460 if (HasInAllocaArgs) {
3461 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3462 Args.allocateArgumentMemory(*this);
3463 }
3464 }
3465
3466 // Evaluate each argument in the appropriate order.
3467 size_t CallArgsStart = Args.size();
3468 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3469 unsigned Idx = LeftToRight ? I : E - I - 1;
3470 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3471 unsigned InitialArgSize = Args.size();
3472 // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3473 // the argument and parameter match or the objc method is parameterized.
3474 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3475         getContext().hasSameUnqualifiedType((*Arg)->getType(),
3476                                             ArgTypes[Idx]) ||
3477         (isa<ObjCMethodDecl>(AC.getDecl()) &&
3478          isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3479        "Argument and parameter types don't match");
3480 EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3481 // In particular, we depend on it being the last arg in Args, and the
3482 // objectsize bits depend on there only being one arg if !LeftToRight.
3483 assert(InitialArgSize + 1 == Args.size() &&
3484        "The code below depends on only adding one arg per EmitCallArg");
3485 (void)InitialArgSize;
3486 // Since pointer arguments are never emitted as LValues, it is safe to emit
3487 // the non-null argument check for r-values only.
3488 if (!Args.back().hasLValue()) {
3489 RValue RVArg = Args.back().getKnownRValue();
3490 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3491 ParamsToSkip + Idx);
3492 // @llvm.objectsize should never have side-effects and shouldn't need
3493 // destruction/cleanups, so we can safely "emit" it after its arg,
3494 // regardless of right-to-leftness
3495 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3496 }
3497 }
3498
3499 if (!LeftToRight) {
3500 // Un-reverse the arguments we just evaluated so they match up with the LLVM
3501 // IR function.
3502 std::reverse(Args.begin() + CallArgsStart, Args.end());
3503 }
3504}
3505
3506namespace {
3507
3508struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3509 DestroyUnpassedArg(Address Addr, QualType Ty)
3510 : Addr(Addr), Ty(Ty) {}
3511
3512 Address Addr;
3513 QualType Ty;
3514
3515 void Emit(CodeGenFunction &CGF, Flags flags) override {
3516 QualType::DestructionKind DtorKind = Ty.isDestructedType();
3517 if (DtorKind == QualType::DK_cxx_destructor) {
3518 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3519 assert(!Dtor->isTrivial());
3520 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3521 /*Delegating=*/false, Addr);
3522 } else {
3523 CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3524 }
3525 }
3526};
3527
3528struct DisableDebugLocationUpdates {
3529 CodeGenFunction &CGF;
3530 bool disabledDebugInfo;
3531 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3532 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3533 CGF.disableDebugInfo();
3534 }
3535 ~DisableDebugLocationUpdates() {
3536 if (disabledDebugInfo)
3537 CGF.enableDebugInfo();
3538 }
3539};
3540
3541} // end anonymous namespace
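DisableDebugLocationUpdates is the standard RAII toggle-guard shape: flip state in the constructor only when a condition holds, restore it in the destructor. A minimal sketch, assuming a hypothetical Emitter in place of CodeGenFunction:

struct Emitter {
  bool DebugInfo = true;
  void disableDebugInfo() { DebugInfo = false; }
  void enableDebugInfo() { DebugInfo = true; }
};

struct DebugInfoGuard {
  Emitter &E;
  bool Disabled;
  DebugInfoGuard(Emitter &E, bool ShouldDisable)
      : E(E), Disabled(ShouldDisable && E.DebugInfo) {
    if (Disabled)
      E.disableDebugInfo(); // suppressed for the guard's lifetime
  }
  ~DebugInfoGuard() {
    if (Disabled)
      E.enableDebugInfo(); // restored even on early returns
  }
};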
3542
3543RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3544 if (!HasLV)
3545 return RV;
3546 LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3547 CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3548 LV.isVolatile());
3549 IsUsed = true;
3550 return RValue::getAggregate(Copy.getAddress());
3551}
3552
3553void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3554 LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3555 if (!HasLV && RV.isScalar())
3556 CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*init=*/true);
3557 else if (!HasLV && RV.isComplex())
3558 CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3559 else {
3560 auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
3561 LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3562 // We assume that call args are never copied into subobjects.
3563 CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3564 HasLV ? LV.isVolatileQualified()
3565 : RV.isVolatileQualified());
3566 }
3567 IsUsed = true;
3568}
3569
3570void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3571 QualType type) {
3572 DisableDebugLocationUpdates Dis(*this, E);
3573 if (const ObjCIndirectCopyRestoreExpr *CRE
3574 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3575 assert(getLangOpts().ObjCAutoRefCount);
3576 return emitWritebackArg(*this, args, CRE);
3577 }
3578
3579 assert(type->isReferenceType() == E->isGLValue() &&
3580 "reference binding to unmaterialized r-value!");
3581
3582 if (E->isGLValue()) {
3583 assert(E->getObjectKind() == OK_Ordinary);
3584 return args.add(EmitReferenceBindingToExpr(E), type);
3585 }
3586
3587 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3588
3589 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3590 // However, we still have to push an EH-only cleanup in case we unwind before
3591 // we make it to the call.
3592 if (HasAggregateEvalKind &&
3593 type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
3594 // If we're using inalloca, use the argument memory. Otherwise, use a
3595 // temporary.
3596 AggValueSlot Slot;
3597 if (args.isUsingInAlloca())
3598 Slot = createPlaceholderSlot(*this, type);
3599 else
3600 Slot = CreateAggTemp(type, "agg.tmp");
3601
3602 bool DestroyedInCallee = true, NeedsEHCleanup = true;
3603 if (const auto *RD = type->getAsCXXRecordDecl())
3604 DestroyedInCallee = RD->hasNonTrivialDestructor();
3605 else
3606 NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3607
3608 if (DestroyedInCallee)
3609 Slot.setExternallyDestructed();
3610
3611 EmitAggExpr(E, Slot);
3612 RValue RV = Slot.asRValue();
3613 args.add(RV, type);
3614
3615 if (DestroyedInCallee && NeedsEHCleanup) {
3616 // Create a no-op GEP between the placeholder and the cleanup so we can
3617 // RAUW it successfully. It also serves as a marker of the first
3618 // instruction where the cleanup is active.
3619 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3620 type);
3621 // This unreachable is a temporary marker which will be removed later.
3622 llvm::Instruction *IsActive = Builder.CreateUnreachable();
3623 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3624 }
3625 return;
3626 }
3627
3628 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3629 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3630 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3631 assert(L.isSimple());
3632 args.addUncopiedAggregate(L, type);
3633 return;
3634 }
3635
3636 args.add(EmitAnyExprToTemp(E), type);
3637}
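The callee-destruction branch of EmitCallArg is easiest to see with a concrete caller. A hypothetical sketch of the Microsoft C++ ABI case:

struct S {
  ~S() {}                // user-provided, hence non-trivial
};
void takeByValue(S s) {} // under the MS ABI, the callee destroys s
void caller() {
  takeByValue(S{});      // the caller's cleanup is EH-only: it fires only
                         // if unwinding occurs before the call is entered
}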
3638
3639QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3640 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3641 // implicitly widens null pointer constants that are arguments to varargs
3642 // functions to pointer-sized ints.
3643 if (!getTarget().getTriple().isOSWindows())
3644 return Arg->getType();
3645
3646 if (Arg->getType()->isIntegerType() &&
3647 getContext().getTypeSize(Arg->getType()) <
3648 getContext().getTargetInfo().getPointerWidth(0) &&
3649 Arg->isNullPointerConstant(getContext(),
3650 Expr::NPC_ValueDependentIsNotNull)) {
3651 return getContext().getIntPtrType();
3652 }
3653
3654 return Arg->getType();
3655}
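A hedged illustration of the caller pattern getVarArgType compensates for; log_ptrs and the call below are hypothetical example code, and the widening only applies on Win64 targets:

#include <cstdarg>
#include <cstdio>

void log_ptrs(int n, ...) {
  va_list ap;
  va_start(ap, n);
  for (int i = 0; i < n; ++i)
    printf("%p\n", va_arg(ap, void *)); // reads a pointer-sized slot
  va_end(ap);
}

int main() {
  // NULL may expand to plain 0 (an int); CodeGen widens such null pointer
  // constants to intptr_t so the full pointer-sized slot is written.
  log_ptrs(1, NULL);
}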
3656
3657// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3658// optimizer it can aggressively ignore unwind edges.
3659void
3660CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3661 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3662 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3663 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3664 CGM.getNoObjCARCExceptionsMetadata());
3665}
3666
3667/// Emits a call to the given no-arguments nounwind runtime function.
3668llvm::CallInst *
3669CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3670 const llvm::Twine &name) {
3671 return EmitNounwindRuntimeCall(callee, None, name);
3672}
3673
3674/// Emits a call to the given nounwind runtime function.
3675llvm::CallInst *
3676CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3677 ArrayRef<llvm::Value*> args,
3678 const llvm::Twine &name) {
3679 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3680 call->setDoesNotThrow();
3681 return call;
3682}
3683
3684/// Emits a simple call (never an invoke) to the given no-arguments
3685/// runtime function.
3686llvm::CallInst *
3687CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3688 const llvm::Twine &name) {
3689 return EmitRuntimeCall(callee, None, name);
3690}
3691
3692// Calls which may throw must have operand bundles indicating which funclet
3693// they are nested within.
3694SmallVector<llvm::OperandBundleDef, 1>
3695CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
3696 SmallVector<llvm::OperandBundleDef, 1> BundleList;
3697 // There is no need for a funclet operand bundle if we aren't inside a
3698 // funclet.
3699 if (!CurrentFuncletPad)
3700 return BundleList;
3701
3702 // Skip intrinsics which cannot throw.
3703 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3704 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3705 return BundleList;
3706
3707 BundleList.emplace_back("funclet", CurrentFuncletPad);
3708 return BundleList;
3709}
3710
3711/// Emits a simple call (never an invoke) to the given runtime function.
3712llvm::CallInst *
3713CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3714 ArrayRef<llvm::Value*> args,
3715 const llvm::Twine &name) {
3716 llvm::CallInst *call =
3717 Builder.CreateCall(callee, args, getBundlesForFunclet(callee), name);
3718 call->setCallingConv(getRuntimeCC());
3719 return call;
3720}
3721
3722/// Emits a call or invoke to the given noreturn runtime function.
3723void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3724 ArrayRef<llvm::Value*> args) {
3725 SmallVector<llvm::OperandBundleDef, 1> BundleList =
3726 getBundlesForFunclet(callee);
3727
3728 if (getInvokeDest()) {
3729 llvm::InvokeInst *invoke =
3730 Builder.CreateInvoke(callee,
3731 getUnreachableBlock(),
3732 getInvokeDest(),
3733 args,
3734 BundleList);
3735 invoke->setDoesNotReturn();
3736 invoke->setCallingConv(getRuntimeCC());
3737 } else {
3738 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3739 call->setDoesNotReturn();
3740 call->setCallingConv(getRuntimeCC());
3741 Builder.CreateUnreachable();
3742 }
3743}
3744
3745/// Emits a call or invoke instruction to the given nullary runtime function.
3746llvm::CallSite
3747CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3748 const Twine &name) {
3749 return EmitRuntimeCallOrInvoke(callee, None, name);
3750}
3751
3752/// Emits a call or invoke instruction to the given runtime function.
3753llvm::CallSite
3754CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3755 ArrayRef<llvm::Value*> args,
3756 const Twine &name) {
3757 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3758 callSite.setCallingConv(getRuntimeCC());
3759 return callSite;
3760}
3761
3762/// Emits a call or invoke instruction to the given function, depending
3763/// on the current state of the EH stack.
3764llvm::CallSite
3765CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3766 ArrayRef<llvm::Value *> Args,
3767 const Twine &Name) {
3768 llvm::BasicBlock *InvokeDest = getInvokeDest();
3769 SmallVector<llvm::OperandBundleDef, 1> BundleList =
3770 getBundlesForFunclet(Callee);
3771
3772 llvm::Instruction *Inst;
3773 if (!InvokeDest)
3774 Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3775 else {
3776 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3777 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3778 Name);
3779 EmitBlock(ContBB);
3780 }
3781
3782 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3783 // optimizer it can aggressively ignore unwind edges.
3784 if (CGM.getLangOpts().ObjCAutoRefCount)
3785 AddObjCARCExceptionMetadata(Inst);
3786
3787 return llvm::CallSite(Inst);
3788}
3789
3790void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3791 llvm::Value *New) {
3792 DeferredReplacements.push_back(std::make_pair(Old, New));
3793}
3794
3795RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3796 const CGCallee &Callee,
3797 ReturnValueSlot ReturnValue,
3798 const CallArgList &CallArgs,
3799 llvm::Instruction **callOrInvoke,
3800 SourceLocation Loc) {
3801 // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3802
3803 assert(Callee.isOrdinary() || Callee.isVirtual());
3804
3805 // Handle struct-return functions by passing a pointer to the
3806 // location that we would like to return into.
3807 QualType RetTy = CallInfo.getReturnType();
3808 const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3809
3810 llvm::FunctionType *IRFuncTy = Callee.getFunctionType();
3811
3812 // 1. Set up the arguments.
3813
3814 // If we're using inalloca, insert the allocation after the stack save.
3815 // FIXME: Do this earlier rather than hacking it in here!
3816 Address ArgMemory = Address::invalid();
3817 const llvm::StructLayout *ArgMemoryLayout = nullptr;
3818 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3819 const llvm::DataLayout &DL = CGM.getDataLayout();
3820 ArgMemoryLayout = DL.getStructLayout(ArgStruct);
3821 llvm::Instruction *IP = CallArgs.getStackBase();
3822 llvm::AllocaInst *AI;
3823 if (IP) {
3824 IP = IP->getNextNode();
3825 AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3826 "argmem", IP);
3827 } else {
3828 AI = CreateTempAlloca(ArgStruct, "argmem");
3829 }
3830 auto Align = CallInfo.getArgStructAlignment();
3831 AI->setAlignment(Align.getQuantity());
3832 AI->setUsedWithInAlloca(true);
3833 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3834 ArgMemory = Address(AI, Align);
3835 }
3836
3837 // Helper function to drill into the inalloca allocation.
3838 auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3839 auto FieldOffset =
3840 CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
3841 return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3842 };
3843
3844 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3845 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3846
3847 // If the call returns a temporary with struct return, create a temporary
3848 // alloca to hold the result, unless one is given to us.
3849 Address SRetPtr = Address::invalid();
3850 Address SRetAlloca = Address::invalid();
3851 llvm::Value *UnusedReturnSizePtr = nullptr;
3852 if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3853 if (!ReturnValue.isNull()) {
3854 SRetPtr = ReturnValue.getValue();
3855 } else {
3856 SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
3857 if (HaveInsertPoint() && ReturnValue.isUnused()) {
3858 uint64_t size =
3859 CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3860 UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
3861 }
3862 }
3863 if (IRFunctionArgs.hasSRetArg()) {
3864 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3865 } else if (RetAI.isInAlloca()) {
3866 Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3867 Builder.CreateStore(SRetPtr.getPointer(), Addr);
3868 }
3869 }
3870
3871 Address swiftErrorTemp = Address::invalid();
3872 Address swiftErrorArg = Address::invalid();
3873
3874 // Translate all of the arguments as necessary to match the IR lowering.
3875 assert(CallInfo.arg_size() == CallArgs.size() &&
3876 "Mismatch between function signature & arguments.");
3877 unsigned ArgNo = 0;
3878 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3879 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3880 I != E; ++I, ++info_it, ++ArgNo) {
3881 const ABIArgInfo &ArgInfo = info_it->info;
3882
3883 // Insert a padding argument to ensure proper alignment.
3884 if (IRFunctionArgs.hasPaddingArg(ArgNo))
3885 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3886 llvm::UndefValue::get(ArgInfo.getPaddingType());
3887
3888 unsigned FirstIRArg, NumIRArgs;
3889 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3890
3891 switch (ArgInfo.getKind()) {
3892 case ABIArgInfo::InAlloca: {
3893 assert(NumIRArgs == 0);
3894 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3895 if (I->isAggregate()) {
3896 // Replace the placeholder with the appropriate argument slot GEP.
3897 Address Addr = I->hasLValue()
3898 ? I->getKnownLValue().getAddress()
3899 : I->getKnownRValue().getAggregateAddress();
3900 llvm::Instruction *Placeholder =
3901 cast<llvm::Instruction>(Addr.getPointer());
3902 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3903 Builder.SetInsertPoint(Placeholder);
3904 Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3905 Builder.restoreIP(IP);
3906 deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3907 } else {
3908 // Store the RValue into the argument struct.
3909 Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3910 unsigned AS = Addr.getType()->getPointerAddressSpace();
3911 llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3912 // There are some cases where a trivial bitcast is not avoidable. The
3913 // definition of a type later in a translation unit may change its type
3914 // from {}* to (%struct.foo*)*.
3915 if (Addr.getType() != MemType)
3916 Addr = Builder.CreateBitCast(Addr, MemType);
3917 I->copyInto(*this, Addr);
3918 }
3919 break;
3920 }
3921
3922 case ABIArgInfo::Indirect: {
3923 assert(NumIRArgs == 1);
3924 if (!I->isAggregate()) {
3925 // Make a temporary alloca to pass the argument.
3926 Address Addr = CreateMemTempWithoutCast(
3927 I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
3928 IRCallArgs[FirstIRArg] = Addr.getPointer();
3929
3930 I->copyInto(*this, Addr);
3931 } else {
3932 // We want to avoid creating an unnecessary temporary+copy here;
3933 // however, we need one in three cases:
3934 // 1. If the argument is not byval, and we are required to copy the
3935 // source. (This case doesn't occur on any common architecture.)
3936 // 2. If the argument is byval, RV is not sufficiently aligned, and
3937 // we cannot force it to be sufficiently aligned.
3938 // 3. If the argument is byval, but RV is not located in default
3939 // or alloca address space.
3940 Address Addr = I->hasLValue()
3941 ? I->getKnownLValue().getAddress()
3942 : I->getKnownRValue().getAggregateAddress();
3943 llvm::Value *V = Addr.getPointer();
3944 CharUnits Align = ArgInfo.getIndirectAlign();
3945 const llvm::DataLayout *TD = &CGM.getDataLayout();
3946
3947 assert((FirstIRArg >= IRFuncTy->getNumParams() ||
3948 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
3949 TD->getAllocaAddrSpace()) &&
3950 "indirect argument must be in alloca address space");
3951
3952 bool NeedCopy = false;
3953
3954 if (Addr.getAlignment() < Align &&
3955 llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
3956 Align.getQuantity()) {
3957 NeedCopy = true;
3958 } else if (I->hasLValue()) {
3959 auto LV = I->getKnownLValue();
3960 auto AS = LV.getAddressSpace();
3961
3962 if ((!ArgInfo.getIndirectByVal() &&
3963 (LV.getAlignment() >=
3964 getContext().getTypeAlignInChars(I->Ty)))) {
3965 NeedCopy = true;
3966 }
3967 if (!getLangOpts().OpenCL) {
3968 if ((ArgInfo.getIndirectByVal() &&
3969 (AS != LangAS::Default &&
3970 AS != CGM.getASTAllocaAddressSpace()))) {
3971 NeedCopy = true;
3972 }
3973 }
3974 // For OpenCL, even if RV is located in the default or alloca address
3975 // space, we don't want to perform an address space cast for it.
3976 else if ((ArgInfo.getIndirectByVal() &&
3977 Addr.getType()->getAddressSpace() != IRFuncTy->
3978 getParamType(FirstIRArg)->getPointerAddressSpace())) {
3979 NeedCopy = true;
3980 }
3981 }
3982
3983 if (NeedCopy) {
3984 // Create an aligned temporary, and copy to it.
3985 Address AI = CreateMemTempWithoutCast(
3986 I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
3987 IRCallArgs[FirstIRArg] = AI.getPointer();
3988 I->copyInto(*this, AI);
3989 } else {
3990 // Skip the extra memcpy call.
3991 auto *T = V->getType()->getPointerElementType()->getPointerTo(
3992 CGM.getDataLayout().getAllocaAddrSpace());
3993 IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
3994 *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
3995 true);
3996 }
3997 }
3998 break;
3999 }
4000
4001 case ABIArgInfo::Ignore:
4002 assert(NumIRArgs == 0);
4003 break;
4004
4005 case ABIArgInfo::Extend:
4006 case ABIArgInfo::Direct: {
4007 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
4008 ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
4009 ArgInfo.getDirectOffset() == 0) {
4010 assert(NumIRArgs == 1);
4011 llvm::Value *V;
4012 if (!I->isAggregate())
4013 V = I->getKnownRValue().getScalarVal();
4014 else
4015 V = Builder.CreateLoad(
4016 I->hasLValue() ? I->getKnownLValue().getAddress()
4017 : I->getKnownRValue().getAggregateAddress());
4018
4019 // Implement swifterror by copying into a new swifterror argument.
4020 // We'll write back in the normal path out of the call.
4021 if (CallInfo.getExtParameterInfo(ArgNo).getABI()
4022 == ParameterABI::SwiftErrorResult) {
4023 assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4024
4025 QualType pointeeTy = I->Ty->getPointeeType();
4026 swiftErrorArg =
4027 Address(V, getContext().getTypeAlignInChars(pointeeTy));
4028
4029 swiftErrorTemp =
4030 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4031 V = swiftErrorTemp.getPointer();
4032 cast<llvm::AllocaInst>(V)->setSwiftError(true);
4033
4034 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4035 Builder.CreateStore(errorValue, swiftErrorTemp);
4036 }
4037
4038 // We might have to widen integers, but we should never truncate.
4039 if (ArgInfo.getCoerceToType() != V->getType() &&
4040 V->getType()->isIntegerTy())
4041 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4042
4043 // If the argument doesn't match, perform a bitcast to coerce it. This
4044 // can happen due to trivial type mismatches.
4045 if (FirstIRArg < IRFuncTy->getNumParams() &&
4046 V->getType() != IRFuncTy->getParamType(FirstIRArg))
4047 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4048
4049 IRCallArgs[FirstIRArg] = V;
4050 break;
4051 }
4052
4053 // FIXME: Avoid the conversion through memory if possible.
4054 Address Src = Address::invalid();
4055 if (!I->isAggregate()) {
4056 Src = CreateMemTemp(I->Ty, "coerce");
4057 I->copyInto(*this, Src);
4058 } else {
4059 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
4060 : I->getKnownRValue().getAggregateAddress();
4061 }
4062
4063 // If the value is offset in memory, apply the offset now.
4064 Src = emitAddressAtOffset(*this, Src, ArgInfo);
4065
4066 // Fast-isel and the optimizer generally like scalar values better than
4067 // FCAs, so we flatten them if this is safe to do for this argument.
4068 llvm::StructType *STy =
4069 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4070 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4071 llvm::Type *SrcTy = Src.getType()->getElementType();
4072 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4073 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4074
4075 // If the source type is smaller than the destination type of the
4076 // coerce-to logic, copy the source value into a temp alloca the size
4077 // of the destination type to allow loading all of it. The bits past
4078 // the source value are left undef.
4079 if (SrcSize < DstSize) {
4080 Address TempAlloca
4081 = CreateTempAlloca(STy, Src.getAlignment(),
4082 Src.getName() + ".coerce");
4083 Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4084 Src = TempAlloca;
4085 } else {
4086 Src = Builder.CreateBitCast(Src,
4087 STy->getPointerTo(Src.getAddressSpace()));
4088 }
4089
4090 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
4091 assert(NumIRArgs == STy->getNumElements());
4092 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4093 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
4094 Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
4095 llvm::Value *LI = Builder.CreateLoad(EltPtr);
4096 IRCallArgs[FirstIRArg + i] = LI;
4097 }
4098 } else {
4099 // In the simple case, just pass the coerced loaded value.
4100 assert(NumIRArgs == 1);
4101 IRCallArgs[FirstIRArg] =
4102 CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4103 }
4104
4105 break;
4106 }
4107
4108 case ABIArgInfo::CoerceAndExpand: {
4109 auto coercionType = ArgInfo.getCoerceAndExpandType();
4110 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4111
4112 llvm::Value *tempSize = nullptr;
4113 Address addr = Address::invalid();
4114 Address AllocaAddr = Address::invalid();
4115 if (I->isAggregate()) {
4116 addr = I->hasLValue() ? I->getKnownLValue().getAddress()
4117 : I->getKnownRValue().getAggregateAddress();
4118
4119 } else {
4120 RValue RV = I->getKnownRValue();
4121 assert(RV.isScalar()); // complex should always just be direct
4122
4123 llvm::Type *scalarType = RV.getScalarVal()->getType();
4124 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4125 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4126
4127 // Materialize to a temporary.
4128 addr = CreateTempAlloca(RV.getScalarVal()->getType(),
4129 CharUnits::fromQuantity(std::max(
4130 layout->getAlignment(), scalarAlign)),
4131 "tmp",
4132 /*ArraySize=*/nullptr, &AllocaAddr);
4133 tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4134
4135 Builder.CreateStore(RV.getScalarVal(), addr);
4136 }
4137
4138 addr = Builder.CreateElementBitCast(addr, coercionType);
4139
4140 unsigned IRArgPos = FirstIRArg;
4141 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4142 llvm::Type *eltType = coercionType->getElementType(i);
4143 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4144 Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4145 llvm::Value *elt = Builder.CreateLoad(eltAddr);
4146 IRCallArgs[IRArgPos++] = elt;
4147 }
4148 assert(IRArgPos == FirstIRArg + NumIRArgs);
4149
4150 if (tempSize) {
4151 EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4152 }
4153
4154 break;
4155 }
4156
4157 case ABIArgInfo::Expand:
4158 unsigned IRArgPos = FirstIRArg;
4159 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4160 assert(IRArgPos == FirstIRArg + NumIRArgs);
4161 break;
4162 }
4163 }
4164
4165 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4166 llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4167
4168 // If we're using inalloca, set up that argument.
4169 if (ArgMemory.isValid()) {
4170 llvm::Value *Arg = ArgMemory.getPointer();
4171 if (CallInfo.isVariadic()) {
4172 // When passing non-POD arguments by value to variadic functions, we will
4173 // end up with a variadic prototype and an inalloca call site. In such
4174 // cases, we can't do any parameter mismatch checks. Give up and bitcast
4175 // the callee.
4176 unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4177 auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
4178 CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
4179 } else {
4180 llvm::Type *LastParamTy =
4181 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4182 if (Arg->getType() != LastParamTy) {
4183#ifndef NDEBUG
4184 // Assert that these structs have equivalent element types.
4185 llvm::StructType *FullTy = CallInfo.getArgStruct();
4186 llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4187 cast<llvm::PointerType>(LastParamTy)->getElementType());
4188 assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4189 for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4190 DE = DeclaredTy->element_end(),
4191 FI = FullTy->element_begin();
4192 DI != DE; ++DI, ++FI)
4193 assert(*DI == *FI);
4194#endif
4195 Arg = Builder.CreateBitCast(Arg, LastParamTy);
4196 }
4197 }
4198 assert(IRFunctionArgs.hasInallocaArg());
4199 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4200 }
4201
4202 // 2. Prepare the function pointer.
4203
4204 // If the callee is a bitcast of a non-variadic function to have a
4205 // variadic function pointer type, check to see if we can remove the
4206 // bitcast. This comes up with unprototyped functions.
4207 //
4208 // This makes the IR nicer, but more importantly it ensures that we
4209 // can inline the function at -O0 if it is marked always_inline.
4210 auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
4211 llvm::FunctionType *CalleeFT =
4212 cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
4213 if (!CalleeFT->isVarArg())
4214 return Ptr;
4215
4216 llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
4217 if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
4218 return Ptr;
4219
4220 llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
4221 if (!OrigFn)
4222 return Ptr;
4223
4224 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4225
4226 // If the original type is variadic, or if any of the component types
4227 // disagree, we cannot remove the cast.
4228 if (OrigFT->isVarArg() ||
4229 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4230 OrigFT->getReturnType() != CalleeFT->getReturnType())
4231 return Ptr;
4232
4233 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4234 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4235 return Ptr;
4236
4237 return OrigFn;
4238 };
4239 CalleePtr = simplifyVariadicCallee(CalleePtr);
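// Example (hypothetical C input, roughly) of when this simplification
// fires: an unprototyped declaration gives the call site a variadic
// callee type, and the real function is reached through a bitcast:
//
//   void f();              /* unprototyped: callee type is void (...) */
//   void g(void) { f(); }  /* call goes through a bitcast of @f */
//
// Stripping the bitcast lets always_inline inlining succeed even at -O0.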
4240
4241 // 3. Perform the actual call.
4242
4243 // Deactivate any cleanups that we're supposed to do immediately before
4244 // the call.
4245 if (!CallArgs.getCleanupsToDeactivate().empty())
4246 deactivateArgCleanupsBeforeCall(*this, CallArgs);
4247
4248 // Assert that the arguments we computed match up. The IR verifier
4249 // will catch this, but this is a common enough source of problems
4250 // during IRGen changes that it's way better for debugging to catch
4251 // it ourselves here.
4252#ifndef NDEBUG
4253 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4254 for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4255 // The inalloca argument can have a different type.
4256 if (IRFunctionArgs.hasInallocaArg() &&
4257 i == IRFunctionArgs.getInallocaArgNo())
4258 continue;
4259 if (i < IRFuncTy->getNumParams())
4260 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4261 }
4262#endif
4263
4264 // Update the largest vector width if any arguments have vector types.
4265 for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4266 if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
4267 LargestVectorWidth = std::max(LargestVectorWidth,
4268 VT->getPrimitiveSizeInBits());
4269 }
4270
4271 // Compute the calling convention and attributes.
4272 unsigned CallingConv;
4273 llvm::AttributeList Attrs;
4274 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4275 Callee.getAbstractInfo(), Attrs, CallingConv,
4276 /*AttrOnCallSite=*/true);
4277
4278 // Apply some call-site-specific attributes.
4279 // TODO: work this into building the attribute set.
4280
4281 // Apply always_inline to all calls within flatten functions.
4282 // FIXME: should this really take priority over __try, below?
4283 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4284 !(Callee.getAbstractInfo().getCalleeDecl().getDecl() &&
4285 Callee.getAbstractInfo()
4286 .getCalleeDecl()
4287 .getDecl()
4288 ->hasAttr<NoInlineAttr>())) {
4289 Attrs =
4290 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4291 llvm::Attribute::AlwaysInline);
4292 }
4293
4294 // Disable inlining inside SEH __try blocks.
4295 if (isSEHTryScope()) {
4296 Attrs =
4297 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4298 llvm::Attribute::NoInline);
4299 }
4300
4301 // Decide whether to use a call or an invoke.
4302 bool CannotThrow;
4303 if (currentFunctionUsesSEHTry()) {
4304 // SEH cares about asynchronous exceptions, so everything can "throw."
4305 CannotThrow = false;
4306 } else if (isCleanupPadScope() &&
4307 EHPersonality::get(*this).isMSVCXXPersonality()) {
4308 // The MSVC++ personality will implicitly terminate the program if an
4309 // exception is thrown during a cleanup outside of a try/catch.
4310 // We don't need to model anything in IR to get this behavior.
4311 CannotThrow = true;
4312 } else {
4313 // Otherwise, nounwind call sites will never throw.
4314 CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4315 llvm::Attribute::NoUnwind);
4316 }
4317
4318 // If we made a temporary, be sure to clean up after ourselves. Note that we
4319 // can't depend on being inside of an ExprWithCleanups, so we need to manually
4320 // pop this cleanup later on. Being eager about this is OK, since this
4321 // temporary is 'invisible' outside of the callee.
4322 if (UnusedReturnSizePtr)
4323 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
4324 UnusedReturnSizePtr);
4325
4326 llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4327
4328 SmallVector<llvm::OperandBundleDef, 1> BundleList =
4329 getBundlesForFunclet(CalleePtr);
4330
4331 // Emit the actual call/invoke instruction.
4332 llvm::CallSite CS;
4333 if (!InvokeDest) {
4334 CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
4335 } else {
4336 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4337 CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
4338 BundleList);
4339 EmitBlock(Cont);
4340 }
4341 llvm::Instruction *CI = CS.getInstruction();
4342 if (callOrInvoke)
4343 *callOrInvoke = CI;
4344
4345 // Apply the attributes and calling convention.
4346 CS.setAttributes(Attrs);
4347 CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4348
4349 // Apply various metadata.
4350
4351 if (!CI->getType()->isVoidTy())
4352 CI->setName("call");
4353
4354 // Update largest vector width from the return type.
4355 if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
4356 LargestVectorWidth = std::max(LargestVectorWidth,
4357 VT->getPrimitiveSizeInBits());
4358
4359 // Insert instrumentation or attach profile metadata at indirect call sites.
4360 // For more details, see the comment before the definition of
4361 // IPVK_IndirectCallTarget in InstrProfData.inc.
4362 if (!CS.getCalledFunction())
4363 PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4364 CI, CalleePtr);
4365
4366 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4367 // optimizer it can aggressively ignore unwind edges.
4368 if (CGM.getLangOpts().ObjCAutoRefCount)
4369 AddObjCARCExceptionMetadata(CI);
4370
4371 // Suppress tail calls if requested.
4372 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4373 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4374 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4375 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4376 }
4377
4378 // 4. Finish the call.
4379
4380 // If the call doesn't return, finish the basic block and clear the
4381 // insertion point; this allows the rest of IRGen to discard
4382 // unreachable code.
4383 if (CS.doesNotReturn()) {
4384 if (UnusedReturnSizePtr)
4385 PopCleanupBlock();
4386
4387 // Strip away the noreturn attribute to better diagnose unreachable UB.
4388 if (SanOpts.has(SanitizerKind::Unreachable)) {
4389 if (auto *F = CS.getCalledFunction())
4390 F->removeFnAttr(llvm::Attribute::NoReturn);
4391 CS.removeAttribute(llvm::AttributeList::FunctionIndex,
4392 llvm::Attribute::NoReturn);
4393 }
4394
4395 EmitUnreachable(Loc);
4396 Builder.ClearInsertionPoint();
4397
4398 // FIXME: For now, emit a dummy basic block because expr emitters in
4399 // general are not ready to handle emitting expressions at unreachable
4400 // points.
4401 EnsureInsertPoint();
4402
4403 // Return a reasonable RValue.
4404 return GetUndefRValue(RetTy);
4405 }
4406
4407 // Perform the swifterror writeback.
4408 if (swiftErrorTemp.isValid()) {
4409 llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4410 Builder.CreateStore(errorResult, swiftErrorArg);
4411 }
4412
4413 // Emit any call-associated writebacks immediately. Arguably this
4414 // should happen after any return-value munging.
4415 if (CallArgs.hasWritebacks())
4416 emitWritebacks(*this, CallArgs);
4417
4418 // The stack cleanup for inalloca arguments has to run out of the normal
4419 // lexical order, so deactivate it and run it manually here.
4420 CallArgs.freeArgumentMemory(*this);
4421
4422 // Extract the return value.
4423 RValue Ret = [&] {
4424 switch (RetAI.getKind()) {
4425 case ABIArgInfo::CoerceAndExpand: {
4426 auto coercionType = RetAI.getCoerceAndExpandType();
4427 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4428
4429 Address addr = SRetPtr;
4430 addr = Builder.CreateElementBitCast(addr, coercionType);
4431
4432 assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4433 bool requiresExtract = isa<llvm::StructType>(CI->getType());
4434
4435 unsigned unpaddedIndex = 0;
4436 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4437 llvm::Type *eltType = coercionType->getElementType(i);
4438 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4439 Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4440 llvm::Value *elt = CI;
4441 if (requiresExtract)
4442 elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4443 else
4444 assert(unpaddedIndex == 0);
4445 Builder.CreateStore(elt, eltAddr);
4446 }
4447 // FALLTHROUGH
4448 LLVM_FALLTHROUGH;
4449 }
4450
4451 case ABIArgInfo::InAlloca:
4452 case ABIArgInfo::Indirect: {
4453 RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4454 if (UnusedReturnSizePtr)
4455 PopCleanupBlock();
4456 return ret;
4457 }
4458
4459 case ABIArgInfo::Ignore:
4460 // If we are ignoring an argument that had a result, make sure to
4461 // construct the appropriate return value for our caller.
4462 return GetUndefRValue(RetTy);
4463
4464 case ABIArgInfo::Extend:
4465 case ABIArgInfo::Direct: {
4466 llvm::Type *RetIRTy = ConvertType(RetTy);
4467 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4468 switch (getEvaluationKind(RetTy)) {
4469 case TEK_Complex: {
4470 llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4471 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4472 return RValue::getComplex(std::make_pair(Real, Imag));
4473 }
4474 case TEK_Aggregate: {
4475 Address DestPtr = ReturnValue.getValue();
4476 bool DestIsVolatile = ReturnValue.isVolatile();
4477
4478 if (!DestPtr.isValid()) {
4479 DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4480 DestIsVolatile = false;
4481 }
4482 BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4483 return RValue::getAggregate(DestPtr);
4484 }
4485 case TEK_Scalar: {
4486 // If the argument doesn't match, perform a bitcast to coerce it. This
4487 // can happen due to trivial type mismatches.
4488 llvm::Value *V = CI;
4489 if (V->getType() != RetIRTy)
4490 V = Builder.CreateBitCast(V, RetIRTy);
4491 return RValue::get(V);
4492 }
4493 }
4494 llvm_unreachable("bad evaluation kind")::llvm::llvm_unreachable_internal("bad evaluation kind", "/build/llvm-toolchain-snapshot-8~svn348900/tools/clang/lib/CodeGen/CGCall.cpp"
, 4494)
;
4495 }
4496
4497 Address DestPtr = ReturnValue.getValue();
4498 bool DestIsVolatile = ReturnValue.isVolatile();
4499
4500 if (!DestPtr.isValid()) {
4501 DestPtr = CreateMemTemp(RetTy, "coerce");
4502 DestIsVolatile = false;
4503 }
4504
4505 // If the value is offset in memory, apply the offset now.
4506 Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4507 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4508
4509 return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4510 }
4511
4512 case ABIArgInfo::Expand:
4513 llvm_unreachable("Invalid ABI kind for return argument")::llvm::llvm_unreachable_internal("Invalid ABI kind for return argument"
, "/build/llvm-toolchain-snapshot-8~svn348900/tools/clang/lib/CodeGen/CGCall.cpp"
, 4513)
;
4514 }
4515
4516 llvm_unreachable("Unhandled ABIArgInfo::Kind")::llvm::llvm_unreachable_internal("Unhandled ABIArgInfo::Kind"
, "/build/llvm-toolchain-snapshot-8~svn348900/tools/clang/lib/CodeGen/CGCall.cpp"
, 4516)
;
4517 } ();
4518
4519 // Emit the assume_aligned check on the return value.
4520 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4521 if (Ret.isScalar() && TargetDecl) {
4522 if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4523 llvm::Value *OffsetValue = nullptr;
4524 if (const auto *Offset = AA->getOffset())
4525 OffsetValue = EmitScalarExpr(Offset);
4526
4527 llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4528 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4529 EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
4530 OffsetValue);
4531 } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4532 llvm::Value *ParamVal =
4533 CallArgs[AA->getParamIndex().getLLVMIndex()].getRValue(
4534 *this).getScalarVal();
4535 EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal);
4536 }
4537 }
4538
4539 return Ret;
4540}
4541
4542CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
4543 if (isVirtual()) {
4544 const CallExpr *CE = getVirtualCallExpr();
4545 return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
4546 CGF, getVirtualMethodDecl(), getThisAddress(), getFunctionType(),
4547 CE ? CE->getBeginLoc() : SourceLocation());
4548 }
4549
4550 return *this;
4551}
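A hypothetical caller that exercises the isVirtual() path above; the concrete function pointer is loaded from the vtable only when the callee is prepared:

struct Base {
  virtual int f() { return 0; }
};

int callThroughVTable(Base *b) {
  // The CGCallee for this call is "virtual"; prepareConcreteCallee asks
  // the C++ ABI for the function pointer out of b's vtable.
  return b->f();
}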
4552
4553/* VarArg handling */
4554
4555Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4556 VAListAddr = VE->isMicrosoftABI()
4557 ? EmitMSVAListRef(VE->getSubExpr())
4558 : EmitVAListRef(VE->getSubExpr());
4559 QualType Ty = VE->getType();
4560 if (VE->isMicrosoftABI())
4561 return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4562 return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4563}
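For reference, a small self-contained function whose lowering exercises EmitVAArg; sum and its body are hypothetical example code, not part of CGCall.cpp:

#include <cstdarg>

int sum(int n, ...) {
  va_list ap;
  va_start(ap, n);
  int total = 0;
  for (int i = 0; i < n; ++i)
    total += va_arg(ap, int); // each va_arg expression reaches EmitVAArg
  va_end(ap);
  return total;
}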