Bug Summary

File: clang/lib/CodeGen/CGCall.cpp
Warning: line 3044, column 26
Called C++ object pointer is null
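
For reference, this checker fires when a non-static member function is called through a pointer that is null on at least one feasible path. A minimal, hypothetical reduction of the pattern (illustrative only, not the actual code at line 3044):

struct Widget { void frob(); };

void demo(Widget *W, bool HaveWidget) {
  if (!HaveWidget)
    W = nullptr;  // W is null on this path
  W->frob();      // warning: Called C++ object pointer is null
}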

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGCall.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -relaxed-aliasing -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D CLANG_VENDOR="Debian " -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/clang/include -I /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/build-llvm/tools/clang/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-09-26-161721-17566-1 -x c++ /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/clang/lib/CodeGen/CGCall.cpp
1//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// These classes wrap the information about a call or function
10// definition used to handle ABI compliance.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CGCall.h"
15#include "ABIInfo.h"
16#include "CGBlocks.h"
17#include "CGCXXABI.h"
18#include "CGCleanup.h"
19#include "CGRecordLayout.h"
20#include "CodeGenFunction.h"
21#include "CodeGenModule.h"
22#include "TargetInfo.h"
23#include "clang/AST/Attr.h"
24#include "clang/AST/Decl.h"
25#include "clang/AST/DeclCXX.h"
26#include "clang/AST/DeclObjC.h"
27#include "clang/Basic/CodeGenOptions.h"
28#include "clang/Basic/TargetBuiltins.h"
29#include "clang/Basic/TargetInfo.h"
30#include "clang/CodeGen/CGFunctionInfo.h"
31#include "clang/CodeGen/SwiftCallingConv.h"
32#include "llvm/ADT/StringExtras.h"
33#include "llvm/Analysis/ValueTracking.h"
34#include "llvm/IR/Attributes.h"
35#include "llvm/IR/CallingConv.h"
36#include "llvm/IR/DataLayout.h"
37#include "llvm/IR/InlineAsm.h"
38#include "llvm/IR/IntrinsicInst.h"
39#include "llvm/IR/Intrinsics.h"
40#include "llvm/Transforms/Utils/Local.h"
41using namespace clang;
42using namespace CodeGen;
43
44/***/
45
46unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
47 switch (CC) {
48 default: return llvm::CallingConv::C;
49 case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
50 case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
51 case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
52 case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
53 case CC_Win64: return llvm::CallingConv::Win64;
54 case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
55 case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
56 case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
57 case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
58 // TODO: Add support for __pascal to LLVM.
59 case CC_X86Pascal: return llvm::CallingConv::C;
60 // TODO: Add support for __vectorcall to LLVM.
61 case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
62 case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
63 case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
64 case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
65 case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
66 case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
67 case CC_Swift: return llvm::CallingConv::Swift;
68 }
69}
70
71/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
72/// qualification. Either or both of RD and MD may be null. A null RD indicates
73/// that there is no meaningful 'this' type, and a null MD can occur when
74/// calling a method pointer.
75CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
76 const CXXMethodDecl *MD) {
77 QualType RecTy;
78 if (RD)
79 RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
80 else
81 RecTy = Context.VoidTy;
82
83 if (MD)
84 RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace());
85 return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
86}
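// Illustration (hypothetical example, assumed semantics; not part of this
// file): for
//   struct S { void f() const; };
// DeriveThisType(RD = S, MD = S::f) yields 'S *' for codegen purposes: the
// 'const' method qualifier on 'this' is dropped, and only the method's
// address space qualifier, if any, is applied to the pointee.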
87
88/// Returns the canonical formal type of the given C++ method.
89static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
90 return MD->getType()->getCanonicalTypeUnqualified()
91 .getAs<FunctionProtoType>();
92}
93
94/// Returns the "extra-canonicalized" return type, which discards
95/// qualifiers on the return type. Codegen doesn't care about them,
96/// and it makes ABI code a little easier to be able to assume that
97/// all parameter and return types are top-level unqualified.
98static CanQualType GetReturnType(QualType RetTy) {
99 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
100}
101
102/// Arrange the argument and result information for a value of the given
103/// unprototyped freestanding function type.
104const CGFunctionInfo &
105CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
106 // When translating an unprototyped function type, always use a
107 // variadic type.
108 return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
109 /*instanceMethod=*/false,
110 /*chainCall=*/false, None,
111 FTNP->getExtInfo(), {}, RequiredArgs(0));
112}
113
114static void addExtParameterInfosForCall(
115 llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
116 const FunctionProtoType *proto,
117 unsigned prefixArgs,
118 unsigned totalArgs) {
119 assert(proto->hasExtParameterInfos());
120 assert(paramInfos.size() <= prefixArgs);
121 assert(proto->getNumParams() + prefixArgs <= totalArgs);
122
123 paramInfos.reserve(totalArgs);
124
125 // Add default infos for any prefix args that don't already have infos.
126 paramInfos.resize(prefixArgs);
127
128 // Add infos for the prototype.
129 for (const auto &ParamInfo : proto->getExtParameterInfos()) {
130 paramInfos.push_back(ParamInfo);
131 // pass_object_size params have no parameter info.
132 if (ParamInfo.hasPassObjectSize())
133 paramInfos.emplace_back();
134 }
135
136 assert(paramInfos.size() <= totalArgs &&
137 "Did we forget to insert pass_object_size args?");
138 // Add default infos for the variadic and/or suffix arguments.
139 paramInfos.resize(totalArgs);
140}
141
142/// Adds the formal parameters in FPT to the given prefix. If any parameter in
143/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
144static void appendParameterTypes(const CodeGenTypes &CGT,
145 SmallVectorImpl<CanQualType> &prefix,
146 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
147 CanQual<FunctionProtoType> FPT) {
148 // Fast path: don't touch param info if we don't need to.
149 if (!FPT->hasExtParameterInfos()) {
150 assert(paramInfos.empty() &&
151 "We have paramInfos, but the prototype doesn't?");
152 prefix.append(FPT->param_type_begin(), FPT->param_type_end());
153 return;
154 }
155
156 unsigned PrefixSize = prefix.size();
157 // In the vast majority of cases, we'll have precisely FPT->getNumParams()
158 // parameters; the only thing that can change this is the presence of
159 // pass_object_size. So, we preallocate for the common case.
160 prefix.reserve(prefix.size() + FPT->getNumParams());
161
162 auto ExtInfos = FPT->getExtParameterInfos();
163 assert(ExtInfos.size() == FPT->getNumParams());
164 for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
165 prefix.push_back(FPT->getParamType(I));
166 if (ExtInfos[I].hasPassObjectSize())
167 prefix.push_back(CGT.getContext().getSizeType());
168 }
169
170 addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
171 prefix.size());
172}
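// Sketch (hypothetical declaration, for illustration only): given
//   void fill(char *buf __attribute__((pass_object_size(0))), int n);
// appendParameterTypes emits three ABI parameters rather than two:
//   char *buf,  size_t /*implicit object size*/,  int n
// so addExtParameterInfosForCall above must re-pad the param infos: each
// pass_object_size parameter occupies two ABI slots but one source slot.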
173
174/// Arrange the LLVM function layout for a value of the given function
175/// type, on top of any implicit parameters already stored.
176static const CGFunctionInfo &
177arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
178 SmallVectorImpl<CanQualType> &prefix,
179 CanQual<FunctionProtoType> FTP) {
180 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
181 RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
182 // FIXME: Kill copy.
183 appendParameterTypes(CGT, prefix, paramInfos, FTP);
184 CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
185
186 return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
187 /*chainCall=*/false, prefix,
188 FTP->getExtInfo(), paramInfos,
189 Required);
190}
191
192/// Arrange the argument and result information for a value of the
193/// given freestanding function type.
194const CGFunctionInfo &
195CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
196 SmallVector<CanQualType, 16> argTypes;
197 return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
198 FTP);
199}
200
201static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
202 // Set the appropriate calling convention for the Function.
203 if (D->hasAttr<StdCallAttr>())
204 return CC_X86StdCall;
205
206 if (D->hasAttr<FastCallAttr>())
207 return CC_X86FastCall;
208
209 if (D->hasAttr<RegCallAttr>())
210 return CC_X86RegCall;
211
212 if (D->hasAttr<ThisCallAttr>())
213 return CC_X86ThisCall;
214
215 if (D->hasAttr<VectorCallAttr>())
216 return CC_X86VectorCall;
217
218 if (D->hasAttr<PascalAttr>())
219 return CC_X86Pascal;
220
221 if (PcsAttr *PCS = D->getAttr<PcsAttr>())
222 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
223
224 if (D->hasAttr<AArch64VectorPcsAttr>())
225 return CC_AArch64VectorCall;
226
227 if (D->hasAttr<IntelOclBiccAttr>())
228 return CC_IntelOclBicc;
229
230 if (D->hasAttr<MSABIAttr>())
231 return IsWindows ? CC_C : CC_Win64;
232
233 if (D->hasAttr<SysVABIAttr>())
234 return IsWindows ? CC_X86_64SysV : CC_C;
235
236 if (D->hasAttr<PreserveMostAttr>())
237 return CC_PreserveMost;
238
239 if (D->hasAttr<PreserveAllAttr>())
240 return CC_PreserveAll;
241
242 return CC_C;
243}
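// Example (assumed, for illustration): a declaration such as
//   void f() __attribute__((preserve_most));
// carries a PreserveMostAttr, so this function returns CC_PreserveMost,
// which ClangCallConvToLLVMCallConv above lowers to
// llvm::CallingConv::PreserveMost.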
244
245/// Arrange the argument and result information for a call to an
246/// unknown C++ non-static member function of the given abstract type.
247/// (A null RD means we don't have any meaningful "this" argument type,
248/// so fall back to a generic pointer type).
249/// The member function must be an ordinary function, i.e. not a
250/// constructor or destructor.
251const CGFunctionInfo &
252CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
253 const FunctionProtoType *FTP,
254 const CXXMethodDecl *MD) {
255 SmallVector<CanQualType, 16> argTypes;
256
257 // Add the 'this' pointer.
258 argTypes.push_back(DeriveThisType(RD, MD));
259
260 return ::arrangeLLVMFunctionInfo(
261 *this, true, argTypes,
262 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
263}
264
265/// Set calling convention for CUDA/HIP kernel.
266static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
267 const FunctionDecl *FD) {
268 if (FD->hasAttr<CUDAGlobalAttr>()) {
269 const FunctionType *FT = FTy->getAs<FunctionType>();
270 CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
271 FTy = FT->getCanonicalTypeUnqualified();
272 }
273}
274
275/// Arrange the argument and result information for a declaration or
276/// definition of the given C++ non-static member function. The
277/// member function must be an ordinary function, i.e. not a
278/// constructor or destructor.
279const CGFunctionInfo &
280CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
281 assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
282 assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
283
284 CanQualType FT = GetFormalType(MD).getAs<Type>();
285 setCUDAKernelCallingConvention(FT, CGM, MD);
286 auto prototype = FT.getAs<FunctionProtoType>();
287
288 if (MD->isInstance()) {
289 // The abstract case is perfectly fine.
290 const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
291 return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
292 }
293
294 return arrangeFreeFunctionType(prototype);
295}
296
297bool CodeGenTypes::inheritingCtorHasParams(
298 const InheritedConstructor &Inherited, CXXCtorType Type) {
299 // Parameters are unnecessary if we're constructing a base class subobject
300 // and the inherited constructor lives in a virtual base.
301 return Type == Ctor_Complete ||
302 !Inherited.getShadowDecl()->constructsVirtualBase() ||
303 !Target.getCXXABI().hasConstructorVariants();
304}
305
306const CGFunctionInfo &
307CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
308 auto *MD = cast<CXXMethodDecl>(GD.getDecl());
309
310 SmallVector<CanQualType, 16> argTypes;
311 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
312 argTypes.push_back(DeriveThisType(MD->getParent(), MD));
313
314 bool PassParams = true;
315
316 if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
317 // A base class inheriting constructor doesn't get forwarded arguments
318 // needed to construct a virtual base (or base class thereof).
319 if (auto Inherited = CD->getInheritedConstructor())
320 PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
321 }
322
323 CanQual<FunctionProtoType> FTP = GetFormalType(MD);
324
325 // Add the formal parameters.
326 if (PassParams)
327 appendParameterTypes(*this, argTypes, paramInfos, FTP);
328
329 CGCXXABI::AddedStructorArgCounts AddedArgs =
330 TheCXXABI.buildStructorSignature(GD, argTypes);
331 if (!paramInfos.empty()) {
332 // Note: prefix implies after the first param.
333 if (AddedArgs.Prefix)
334 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
335 FunctionProtoType::ExtParameterInfo{});
336 if (AddedArgs.Suffix)
337 paramInfos.append(AddedArgs.Suffix,
338 FunctionProtoType::ExtParameterInfo{});
339 }
340
341 RequiredArgs required =
342 (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
343 : RequiredArgs::All);
344
345 FunctionType::ExtInfo extInfo = FTP->getExtInfo();
346 CanQualType resultType = TheCXXABI.HasThisReturn(GD)
347 ? argTypes.front()
348 : TheCXXABI.hasMostDerivedReturn(GD)
349 ? CGM.getContext().VoidPtrTy
350 : Context.VoidTy;
351 return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
352 /*chainCall=*/false, argTypes, extInfo,
353 paramInfos, required);
354}
355
356static SmallVector<CanQualType, 16>
357getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
358 SmallVector<CanQualType, 16> argTypes;
359 for (auto &arg : args)
360 argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
361 return argTypes;
362}
363
364static SmallVector<CanQualType, 16>
365getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
366 SmallVector<CanQualType, 16> argTypes;
367 for (auto &arg : args)
368 argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
369 return argTypes;
370}
371
372static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
373getExtParameterInfosForCall(const FunctionProtoType *proto,
374 unsigned prefixArgs, unsigned totalArgs) {
375 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
376 if (proto->hasExtParameterInfos()) {
377 addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
378 }
379 return result;
380}
381
382/// Arrange a call to a C++ method, passing the given arguments.
383///
384/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
385/// parameter.
386/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
387/// args.
388/// PassProtoArgs indicates whether `args` has args for the parameters in the
389/// given CXXConstructorDecl.
390const CGFunctionInfo &
391CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
392 const CXXConstructorDecl *D,
393 CXXCtorType CtorKind,
394 unsigned ExtraPrefixArgs,
395 unsigned ExtraSuffixArgs,
396 bool PassProtoArgs) {
397 // FIXME: Kill copy.
398 SmallVector<CanQualType, 16> ArgTypes;
399 for (const auto &Arg : args)
400 ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
401
402 // +1 for implicit this, which should always be args[0].
403 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
404
405 CanQual<FunctionProtoType> FPT = GetFormalType(D);
406 RequiredArgs Required = PassProtoArgs
407 ? RequiredArgs::forPrototypePlus(
408 FPT, TotalPrefixArgs + ExtraSuffixArgs)
409 : RequiredArgs::All;
410
411 GlobalDecl GD(D, CtorKind);
412 CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
413 ? ArgTypes.front()
414 : TheCXXABI.hasMostDerivedReturn(GD)
415 ? CGM.getContext().VoidPtrTy
416 : Context.VoidTy;
417
418 FunctionType::ExtInfo Info = FPT->getExtInfo();
419 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
420 // If the prototype args are elided, we should only have ABI-specific args,
421 // which never have param info.
422 if (PassProtoArgs && FPT->hasExtParameterInfos()) {
423 // ABI-specific suffix arguments are treated the same as variadic arguments.
424 addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
425 ArgTypes.size());
426 }
427 return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
428 /*chainCall=*/false, ArgTypes, Info,
429 ParamInfos, Required);
430}
431
432/// Arrange the argument and result information for the declaration or
433/// definition of the given function.
434const CGFunctionInfo &
435CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
436 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
437 if (MD->isInstance())
438 return arrangeCXXMethodDeclaration(MD);
439
440 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
441
442 assert(isa<FunctionType>(FTy));
443 setCUDAKernelCallingConvention(FTy, CGM, FD);
444
445 // When declaring a function without a prototype, always use a
446 // non-variadic type.
447 if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
448 return arrangeLLVMFunctionInfo(
449 noProto->getReturnType(), /*instanceMethod=*/false,
450 /*chainCall=*/false, None, noProto->getExtInfo(), {}, RequiredArgs::All);
451 }
452
453 return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
454}
455
456/// Arrange the argument and result information for the declaration or
457/// definition of an Objective-C method.
458const CGFunctionInfo &
459CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
460 // It happens that this is the same as a call with no optional
461 // arguments, except also using the formal 'self' type.
462 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
463}
464
465/// Arrange the argument and result information for the function type
466/// through which to perform a send to the given Objective-C method,
467/// using the given receiver type. The receiver type is not always
468/// the 'self' type of the method or even an Objective-C pointer type.
469/// This is *not* the right method for actually performing such a
470/// message send, due to the possibility of optional arguments.
471const CGFunctionInfo &
472CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
473 QualType receiverType) {
474 SmallVector<CanQualType, 16> argTys;
475 SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
476 argTys.push_back(Context.getCanonicalParamType(receiverType));
477 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
478 // FIXME: Kill copy?
479 for (const auto *I : MD->parameters()) {
480 argTys.push_back(Context.getCanonicalParamType(I->getType()));
481 auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
482 I->hasAttr<NoEscapeAttr>());
483 extParamInfos.push_back(extParamInfo);
484 }
485
486 FunctionType::ExtInfo einfo;
487 bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
488 einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
489
490 if (getContext().getLangOpts().ObjCAutoRefCount &&
491 MD->hasAttr<NSReturnsRetainedAttr>())
492 einfo = einfo.withProducesResult(true);
493
494 RequiredArgs required =
495 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
496
497 return arrangeLLVMFunctionInfo(
498 GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
499 /*chainCall=*/false, argTys, einfo, extParamInfos, required);
500}
501
502const CGFunctionInfo &
503CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
504 const CallArgList &args) {
505 auto argTypes = getArgTypesForCall(Context, args);
506 FunctionType::ExtInfo einfo;
507
508 return arrangeLLVMFunctionInfo(
509 GetReturnType(returnType), /*instanceMethod=*/false,
510 /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
511}
512
513const CGFunctionInfo &
514CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
515 // FIXME: Do we need to handle ObjCMethodDecl?
516 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
517
518 if (isa<CXXConstructorDecl>(GD.getDecl()) ||
519 isa<CXXDestructorDecl>(GD.getDecl()))
520 return arrangeCXXStructorDeclaration(GD);
521
522 return arrangeFunctionDeclaration(FD);
523}
524
525/// Arrange a thunk that takes 'this' as the first parameter followed by
526/// varargs. Return a void pointer, regardless of the actual return type.
527/// The body of the thunk will end in a musttail call to a function of the
528/// correct type, and the caller will bitcast the function to the correct
529/// prototype.
530const CGFunctionInfo &
531CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
532 assert(MD->isVirtual() && "only methods have thunks");
533 CanQual<FunctionProtoType> FTP = GetFormalType(MD);
534 CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
535 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
536 /*chainCall=*/false, ArgTys,
537 FTP->getExtInfo(), {}, RequiredArgs(1));
538}
539
540const CGFunctionInfo &
541CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
542 CXXCtorType CT) {
543 assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
544
545 CanQual<FunctionProtoType> FTP = GetFormalType(CD);
546 SmallVector<CanQualType, 2> ArgTys;
547 const CXXRecordDecl *RD = CD->getParent();
548 ArgTys.push_back(DeriveThisType(RD, CD));
549 if (CT == Ctor_CopyingClosure)
550 ArgTys.push_back(*FTP->param_type_begin());
551 if (RD->getNumVBases() > 0)
552 ArgTys.push_back(Context.IntTy);
553 CallingConv CC = Context.getDefaultCallingConvention(
554 /*IsVariadic=*/false, /*IsCXXMethod=*/true);
555 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
556 /*chainCall=*/false, ArgTys,
557 FunctionType::ExtInfo(CC), {},
558 RequiredArgs::All);
559}
560
561/// Arrange a call as unto a free function, except possibly with an
562/// additional number of formal parameters considered required.
563static const CGFunctionInfo &
564arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
565 CodeGenModule &CGM,
566 const CallArgList &args,
567 const FunctionType *fnType,
568 unsigned numExtraRequiredArgs,
569 bool chainCall) {
570 assert(args.size() >= numExtraRequiredArgs);
571
572 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
573
574 // In most cases, there are no optional arguments.
575 RequiredArgs required = RequiredArgs::All;
576
577 // If we have a variadic prototype, the required arguments are the
578 // extra prefix plus the arguments in the prototype.
579 if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
580 if (proto->isVariadic())
581 required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);
582
583 if (proto->hasExtParameterInfos())
584 addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
585 args.size());
586
587 // If we don't have a prototype at all, but we're supposed to
588 // explicitly use the variadic convention for unprototyped calls,
589 // treat all of the arguments as required but preserve the nominal
590 // possibility of variadics.
591 } else if (CGM.getTargetCodeGenInfo()
592 .isNoProtoCallVariadic(args,
593 cast<FunctionNoProtoType>(fnType))) {
594 required = RequiredArgs(args.size());
595 }
596
597 // FIXME: Kill copy.
598 SmallVector<CanQualType, 16> argTypes;
599 for (const auto &arg : args)
600 argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
601 return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
602 /*instanceMethod=*/false, chainCall,
603 argTypes, fnType->getExtInfo(), paramInfos,
604 required);
605}
606
607/// Figure out the rules for calling a function with the given formal
608/// type using the given arguments. The arguments are necessary
609/// because the function might be unprototyped, in which case it's
610/// target-dependent in crazy ways.
611const CGFunctionInfo &
612CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
613 const FunctionType *fnType,
614 bool chainCall) {
615 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
616 chainCall ? 1 : 0, chainCall);
617}
618
619/// A block function is essentially a free function with an
620/// extra implicit argument.
621const CGFunctionInfo &
622CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
623 const FunctionType *fnType) {
624 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
625 /*chainCall=*/false);
626}
627
628const CGFunctionInfo &
629CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
630 const FunctionArgList &params) {
631 auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
632 auto argTypes = getArgTypesForDeclaration(Context, params);
633
634 return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
635 /*instanceMethod*/ false, /*chainCall*/ false,
636 argTypes, proto->getExtInfo(), paramInfos,
637 RequiredArgs::forPrototypePlus(proto, 1));
638}
639
640const CGFunctionInfo &
641CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
642 const CallArgList &args) {
643 // FIXME: Kill copy.
644 SmallVector<CanQualType, 16> argTypes;
645 for (const auto &Arg : args)
646 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
647 return arrangeLLVMFunctionInfo(
648 GetReturnType(resultType), /*instanceMethod=*/false,
649 /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
650 /*paramInfos=*/ {}, RequiredArgs::All);
651}
652
653const CGFunctionInfo &
654CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
655 const FunctionArgList &args) {
656 auto argTypes = getArgTypesForDeclaration(Context, args);
657
658 return arrangeLLVMFunctionInfo(
659 GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
660 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
661}
662
663const CGFunctionInfo &
664CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
665 ArrayRef<CanQualType> argTypes) {
666 return arrangeLLVMFunctionInfo(
667 resultType, /*instanceMethod=*/false, /*chainCall=*/false,
668 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
669}
670
671/// Arrange a call to a C++ method, passing the given arguments.
672///
673/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
674/// does not count `this`.
675const CGFunctionInfo &
676CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
677 const FunctionProtoType *proto,
678 RequiredArgs required,
679 unsigned numPrefixArgs) {
680 assert(numPrefixArgs + 1 <= args.size() &&
681 "Emitting a call with less args than the required prefix?");
682 // Add one to account for `this`. It's a bit awkward here, but we don't count
683 // `this` in similar places elsewhere.
684 auto paramInfos =
685 getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
686
687 // FIXME: Kill copy.
688 auto argTypes = getArgTypesForCall(Context, args);
689
690 FunctionType::ExtInfo info = proto->getExtInfo();
691 return arrangeLLVMFunctionInfo(
692 GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
693 /*chainCall=*/false, argTypes, info, paramInfos, required);
694}
695
696const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
697 return arrangeLLVMFunctionInfo(
698 getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
699 None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
700}
701
702const CGFunctionInfo &
703CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
704 const CallArgList &args) {
705 assert(signature.arg_size() <= args.size());
706 if (signature.arg_size() == args.size())
707 return signature;
708
709 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
710 auto sigParamInfos = signature.getExtParameterInfos();
711 if (!sigParamInfos.empty()) {
712 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
713 paramInfos.resize(args.size());
714 }
715
716 auto argTypes = getArgTypesForCall(Context, args);
717
718 assert(signature.getRequiredArgs().allowsOptionalArgs());
719 return arrangeLLVMFunctionInfo(signature.getReturnType(),
720 signature.isInstanceMethod(),
721 signature.isChainCall(),
722 argTypes,
723 signature.getExtInfo(),
724 paramInfos,
725 signature.getRequiredArgs());
726}
727
728namespace clang {
729namespace CodeGen {
730void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
731}
732}
733
734/// Arrange the argument and result information for an abstract value
735/// of a given function type. This is the method which all of the
736/// above functions ultimately defer to.
737const CGFunctionInfo &
738CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
739 bool instanceMethod,
740 bool chainCall,
741 ArrayRef<CanQualType> argTypes,
742 FunctionType::ExtInfo info,
743 ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
744 RequiredArgs required) {
745 assert(llvm::all_of(argTypes,
746 [](CanQualType T) { return T.isCanonicalAsParam(); }));
747
748 // Lookup or create unique function info.
749 llvm::FoldingSetNodeID ID;
750 CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
751 required, resultType, argTypes);
752
753 void *insertPos = nullptr;
754 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
755 if (FI)
756 return *FI;
757
758 unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
759
760 // Construct the function info. We co-allocate the ArgInfos.
761 FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
762 paramInfos, resultType, argTypes, required);
763 FunctionInfos.InsertNode(FI, insertPos);
764
765 bool inserted = FunctionsBeingProcessed.insert(FI).second;
766 (void)inserted;
767 assert(inserted && "Recursively being processed?");
768
769 // Compute ABI information.
770 if (CC == llvm::CallingConv::SPIR_KERNEL) {
771 // Force target independent argument handling for the host visible
772 // kernel functions.
773 computeSPIRKernelABIInfo(CGM, *FI);
774 } else if (info.getCC() == CC_Swift) {
775 swiftcall::computeABIInfo(CGM, *FI);
776 } else {
777 getABIInfo().computeInfo(*FI);
778 }
779
780 // Loop over all of the computed argument and return value info. If any of
781 // them are direct or extend without a specified coerce type, specify the
782 // default now.
783 ABIArgInfo &retInfo = FI->getReturnInfo();
784 if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
785 retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
786
787 for (auto &I : FI->arguments())
788 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
789 I.info.setCoerceToType(ConvertType(I.type));
790
791 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
792 assert(erased && "Not in set?");
793
794 return *FI;
795}
796
797CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
798 bool instanceMethod,
799 bool chainCall,
800 const FunctionType::ExtInfo &info,
801 ArrayRef<ExtParameterInfo> paramInfos,
802 CanQualType resultType,
803 ArrayRef<CanQualType> argTypes,
804 RequiredArgs required) {
805 assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
806 assert(!required.allowsOptionalArgs() ||
807 required.getNumRequiredArgs() <= argTypes.size());
808
809 void *buffer =
810 operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
811 argTypes.size() + 1, paramInfos.size()));
812
813 CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
814 FI->CallingConvention = llvmCC;
815 FI->EffectiveCallingConvention = llvmCC;
816 FI->ASTCallingConvention = info.getCC();
817 FI->InstanceMethod = instanceMethod;
818 FI->ChainCall = chainCall;
819 FI->CmseNSCall = info.getCmseNSCall();
820 FI->NoReturn = info.getNoReturn();
821 FI->ReturnsRetained = info.getProducesResult();
822 FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
823 FI->NoCfCheck = info.getNoCfCheck();
824 FI->Required = required;
825 FI->HasRegParm = info.getHasRegParm();
826 FI->RegParm = info.getRegParm();
827 FI->ArgStruct = nullptr;
828 FI->ArgStructAlign = 0;
829 FI->NumArgs = argTypes.size();
830 FI->HasExtParameterInfos = !paramInfos.empty();
831 FI->getArgsBuffer()[0].type = resultType;
832 for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
833 FI->getArgsBuffer()[i + 1].type = argTypes[i];
834 for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
835 FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
836 return FI;
837}
838
839/***/
840
841namespace {
842// ABIArgInfo::Expand implementation.
843
844// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
845struct TypeExpansion {
846 enum TypeExpansionKind {
847 // Elements of constant arrays are expanded recursively.
848 TEK_ConstantArray,
849 // Record fields are expanded recursively (but if record is a union, only
850 // the field with the largest size is expanded).
851 TEK_Record,
852 // For complex types, real and imaginary parts are expanded recursively.
853 TEK_Complex,
854 // All other types are not expandable.
855 TEK_None
856 };
857
858 const TypeExpansionKind Kind;
859
860 TypeExpansion(TypeExpansionKind K) : Kind(K) {}
861 virtual ~TypeExpansion() {}
862};
863
864struct ConstantArrayExpansion : TypeExpansion {
865 QualType EltTy;
866 uint64_t NumElts;
867
868 ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
869 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
870 static bool classof(const TypeExpansion *TE) {
871 return TE->Kind == TEK_ConstantArray;
872 }
873};
874
875struct RecordExpansion : TypeExpansion {
876 SmallVector<const CXXBaseSpecifier *, 1> Bases;
877
878 SmallVector<const FieldDecl *, 1> Fields;
879
880 RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
881 SmallVector<const FieldDecl *, 1> &&Fields)
882 : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
883 Fields(std::move(Fields)) {}
884 static bool classof(const TypeExpansion *TE) {
885 return TE->Kind == TEK_Record;
886 }
887};
888
889struct ComplexExpansion : TypeExpansion {
890 QualType EltTy;
891
892 ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
893 static bool classof(const TypeExpansion *TE) {
894 return TE->Kind == TEK_Complex;
895 }
896};
897
898struct NoExpansion : TypeExpansion {
899 NoExpansion() : TypeExpansion(TEK_None) {}
900 static bool classof(const TypeExpansion *TE) {
901 return TE->Kind == TEK_None;
902 }
903};
904} // namespace
905
906static std::unique_ptr<TypeExpansion>
907getTypeExpansion(QualType Ty, const ASTContext &Context) {
908 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
909 return std::make_unique<ConstantArrayExpansion>(
910 AT->getElementType(), AT->getSize().getZExtValue());
911 }
912 if (const RecordType *RT = Ty->getAs<RecordType>()) {
913 SmallVector<const CXXBaseSpecifier *, 1> Bases;
914 SmallVector<const FieldDecl *, 1> Fields;
915 const RecordDecl *RD = RT->getDecl();
916 assert(!RD->hasFlexibleArrayMember() &&
917 "Cannot expand structure with flexible array.");
918 if (RD->isUnion()) {
919 // Unions can be here only in degenerate cases: all the fields are the
920 // same after flattening. Thus we have to use the "largest" field.
921 const FieldDecl *LargestFD = nullptr;
922 CharUnits UnionSize = CharUnits::Zero();
923
924 for (const auto *FD : RD->fields()) {
925 if (FD->isZeroLengthBitField(Context))
926 continue;
927 assert(!FD->isBitField() &&
928 "Cannot expand structure with bit-field members.");
929 CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
930 if (UnionSize < FieldSize) {
931 UnionSize = FieldSize;
932 LargestFD = FD;
933 }
934 }
935 if (LargestFD)
936 Fields.push_back(LargestFD);
937 } else {
938 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
939 assert(!CXXRD->isDynamicClass() &&
940 "cannot expand vtable pointers in dynamic classes");
941 for (const CXXBaseSpecifier &BS : CXXRD->bases())
942 Bases.push_back(&BS);
943 }
944
945 for (const auto *FD : RD->fields()) {
946 if (FD->isZeroLengthBitField(Context))
947 continue;
948 assert(!FD->isBitField() &&
949 "Cannot expand structure with bit-field members.");
950 Fields.push_back(FD);
951 }
952 }
953 return std::make_unique<RecordExpansion>(std::move(Bases),
954 std::move(Fields));
955 }
956 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
957 return std::make_unique<ComplexExpansion>(CT->getElementType());
958 }
959 return std::make_unique<NoExpansion>();
960}
961
962static int getExpansionSize(QualType Ty, const ASTContext &Context) {
963 auto Exp = getTypeExpansion(Ty, Context);
964 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
965 return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
966 }
967 if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
968 int Res = 0;
969 for (auto BS : RExp->Bases)
970 Res += getExpansionSize(BS->getType(), Context);
971 for (auto FD : RExp->Fields)
972 Res += getExpansionSize(FD->getType(), Context);
973 return Res;
974 }
975 if (isa<ComplexExpansion>(Exp.get()))
976 return 2;
977 assert(isa<NoExpansion>(Exp.get()));
978 return 1;
979}
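// Worked example (hypothetical type): for
//   struct P { int xy[2]; _Complex float c; };
// getExpansionSize returns 2 (two array elements) + 2 (real + imaginary
// parts) = 4, i.e. P expands to four scalar IR arguments.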
980
981void
982CodeGenTypes::getExpandedTypes(QualType Ty,
983 SmallVectorImpl<llvm::Type *>::iterator &TI) {
984 auto Exp = getTypeExpansion(Ty, Context);
985 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
986 for (int i = 0, n = CAExp->NumElts; i < n; i++) {
987 getExpandedTypes(CAExp->EltTy, TI);
988 }
989 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
990 for (auto BS : RExp->Bases)
991 getExpandedTypes(BS->getType(), TI);
992 for (auto FD : RExp->Fields)
993 getExpandedTypes(FD->getType(), TI);
994 } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
995 llvm::Type *EltTy = ConvertType(CExp->EltTy);
996 *TI++ = EltTy;
997 *TI++ = EltTy;
998 } else {
999 assert(isa<NoExpansion>(Exp.get()));
1000 *TI++ = ConvertType(Ty);
1001 }
1002}
1003
1004static void forConstantArrayExpansion(CodeGenFunction &CGF,
1005 ConstantArrayExpansion *CAE,
1006 Address BaseAddr,
1007 llvm::function_ref<void(Address)> Fn) {
1008 CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
1009 CharUnits EltAlign =
1010 BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
1011
1012 for (int i = 0, n = CAE->NumElts; i < n; i++) {
1013 llvm::Value *EltAddr =
1014 CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
1015 Fn(Address(EltAddr, EltAlign));
1016 }
1017}
1018
1019void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
1020 llvm::Function::arg_iterator &AI) {
1021 assert(LV.isSimple() &&
1022 "Unexpected non-simple lvalue during struct expansion.");
1023
1024 auto Exp = getTypeExpansion(Ty, getContext());
1025 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1026 forConstantArrayExpansion(
1027 *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
1028 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1029 ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1030 });
1031 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1032 Address This = LV.getAddress(*this);
1033 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1034 // Perform a single step derived-to-base conversion.
1035 Address Base =
1036 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1037 /*NullCheckValue=*/false, SourceLocation());
1038 LValue SubLV = MakeAddrLValue(Base, BS->getType());
1039
1040 // Recurse onto bases.
1041 ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1042 }
1043 for (auto FD : RExp->Fields) {
1044 // FIXME: What are the right qualifiers here?
1045 LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1046 ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1047 }
1048 } else if (isa<ComplexExpansion>(Exp.get())) {
1049 auto realValue = &*AI++;
1050 auto imagValue = &*AI++;
1051 EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1052 } else {
1053 // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
1054 // primitive store.
1055 assert(isa<NoExpansion>(Exp.get()));
1056 if (LV.isBitField())
1057 EmitStoreThroughLValue(RValue::get(&*AI++), LV);
1058 else
1059 EmitStoreOfScalar(&*AI++, LV);
1060 }
1061}
1062
1063void CodeGenFunction::ExpandTypeToArgs(
1064 QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1065 SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1066 auto Exp = getTypeExpansion(Ty, getContext());
1067 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1068 Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
1069 : Arg.getKnownRValue().getAggregateAddress();
1070 forConstantArrayExpansion(
1071 *this, CAExp, Addr, [&](Address EltAddr) {
1072 CallArg EltArg = CallArg(
1073 convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1074 CAExp->EltTy);
1075 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1076 IRCallArgPos);
1077 });
1078 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1079 Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
1080 : Arg.getKnownRValue().getAggregateAddress();
1081 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1082 // Perform a single step derived-to-base conversion.
1083 Address Base =
1084 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1085 /*NullCheckValue=*/false, SourceLocation());
1086 CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1087
1088 // Recurse onto bases.
1089 ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1090 IRCallArgPos);
1091 }
1092
1093 LValue LV = MakeAddrLValue(This, Ty);
1094 for (auto FD : RExp->Fields) {
1095 CallArg FldArg =
1096 CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1097 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1098 IRCallArgPos);
1099 }
1100 } else if (isa<ComplexExpansion>(Exp.get())) {
1101 ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
1102 IRCallArgs[IRCallArgPos++] = CV.first;
1103 IRCallArgs[IRCallArgPos++] = CV.second;
1104 } else {
1105 assert(isa<NoExpansion>(Exp.get()));
1106 auto RV = Arg.getKnownRValue();
1107    assert(RV.isScalar() &&
1108           "Unexpected non-scalar rvalue during struct expansion.");
1109
1110 // Insert a bitcast as needed.
1111 llvm::Value *V = RV.getScalarVal();
1112 if (IRCallArgPos < IRFuncTy->getNumParams() &&
1113 V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1114 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1115
1116 IRCallArgs[IRCallArgPos++] = V;
1117 }
1118}
1119
1120/// Create a temporary allocation for the purposes of coercion.
1121static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1122 CharUnits MinAlign,
1123 const Twine &Name = "tmp") {
1124 // Don't use an alignment that's worse than what LLVM would prefer.
1125 auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1126 CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1127
1128 return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
1129}
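// Illustrative sketch (hypothetical values): on a target whose data layout
// prefers 8-byte alignment for i64, calling this with MinAlign = 4 still
// yields roughly
//
//   %tmp.coerce = alloca i64, align 8
//
// because the preferred alignment wins over the smaller minimum.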
1130
1131/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1132/// accessing some number of bytes out of it, try to gep into the struct to get
1133/// at its inner goodness. Dive as deep as possible without entering an element
1134/// with an in-memory size smaller than DstSize.
1135static Address
1136EnterStructPointerForCoercedAccess(Address SrcPtr,
1137 llvm::StructType *SrcSTy,
1138 uint64_t DstSize, CodeGenFunction &CGF) {
1139 // We can't dive into a zero-element struct.
1140 if (SrcSTy->getNumElements() == 0) return SrcPtr;
1141
1142 llvm::Type *FirstElt = SrcSTy->getElementType(0);
1143
1144 // If the first elt is at least as large as what we're looking for, or if the
1145 // first element is the same size as the whole struct, we can enter it. The
1146 // comparison must be made on the store size and not the alloca size. Using
1147 // the alloca size may overstate the size of the load.
1148 uint64_t FirstEltSize =
1149 CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1150 if (FirstEltSize < DstSize &&
1151 FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1152 return SrcPtr;
1153
1154 // GEP into the first element.
1155 SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");
1156
1157 // If the first element is a struct, recurse.
1158 llvm::Type *SrcTy = SrcPtr.getElementType();
1159 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1160 return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1161
1162 return SrcPtr;
1163}
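// For illustration (hypothetical types): with SrcSTy = { { i32, i32 }, i64 }
// and DstSize = 8, the first element { i32, i32 } has store size 8 >= DstSize,
// so we dive into it; its own first element i32 has store size 4 < 8, so the
// recursion stops and the address of the inner { i32, i32 } is returned.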
1164
1165/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1166/// are either integers or pointers. This does a truncation of the value if it
1167/// is too large or a zero extension if it is too small.
1168///
1169/// This behaves as if the value were coerced through memory, so on big-endian
1170/// targets the high bits are preserved in a truncation, while little-endian
1171/// targets preserve the low bits.
1172static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1173 llvm::Type *Ty,
1174 CodeGenFunction &CGF) {
1175 if (Val->getType() == Ty)
1176 return Val;
1177
1178 if (isa<llvm::PointerType>(Val->getType())) {
1179 // If this is Pointer->Pointer avoid conversion to and from int.
1180 if (isa<llvm::PointerType>(Ty))
1181 return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1182
1183 // Convert the pointer to an integer so we can play with its width.
1184 Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1185 }
1186
1187 llvm::Type *DestIntTy = Ty;
1188 if (isa<llvm::PointerType>(DestIntTy))
1189 DestIntTy = CGF.IntPtrTy;
1190
1191 if (Val->getType() != DestIntTy) {
1192 const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1193 if (DL.isBigEndian()) {
1194 // Preserve the high bits on big-endian targets.
1195 // That is what memory coercion does.
1196 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1197 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1198
1199 if (SrcSize > DstSize) {
1200 Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1201 Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1202 } else {
1203 Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1204 Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1205 }
1206 } else {
1207 // Little-endian targets preserve the low bits. No shifts required.
1208 Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1209 }
1210 }
1211
1212 if (isa<llvm::PointerType>(Ty))
1213 Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1214 return Val;
1215}
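// A sketch of the big-endian path (hypothetical widths): coercing an i64
// value down to i32 emits approximately
//
//   %coerce.highbits = lshr i64 %val, 32
//   %coerce.val.ii   = trunc i64 %coerce.highbits to i32
//
// preserving the high bits, exactly as a store/load through memory would on
// a big-endian target.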
1216
1217
1218
1219/// CreateCoercedLoad - Create a load from \arg Src interpreted as
1220/// a pointer to an object of type \arg Ty, using the alignment carried
1221/// by the source address.
1222///
1223/// This safely handles the case when the src type is smaller than the
1224/// destination type; in this situation the values of bits which are not
1225/// present in the src are undefined.
1226static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1227 CodeGenFunction &CGF) {
1228 llvm::Type *SrcTy = Src.getElementType();
1229
1230 // If SrcTy and Ty are the same, just do a load.
1231 if (SrcTy == Ty)
1232 return CGF.Builder.CreateLoad(Src);
1233
1234 llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1235
1236 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1237 Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
1238 DstSize.getFixedSize(), CGF);
1239 SrcTy = Src.getElementType();
1240 }
1241
1242 llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1243
1244 // If the source and destination are integer or pointer types, just do an
1245 // extension or truncation to the desired type.
1246 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1247 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1248 llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1249 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1250 }
1251
1252 // If load is legal, just bitcast the src pointer.
1253 if (!SrcSize.isScalable() && !DstSize.isScalable() &&
1254 SrcSize.getFixedSize() >= DstSize.getFixedSize()) {
1255 // Generally SrcSize is never greater than DstSize, since this means we are
1256 // losing bits. However, this can happen in cases where the structure has
1257 // additional padding, for example due to a user specified alignment.
1258 //
1259    // FIXME: Assert that we aren't truncating non-padding bits when we have
1260    // access to that information.
1261 Src = CGF.Builder.CreateBitCast(Src,
1262 Ty->getPointerTo(Src.getAddressSpace()));
1263 return CGF.Builder.CreateLoad(Src);
1264 }
1265
1266 // Otherwise do coercion through memory. This is stupid, but simple.
1267 Address Tmp =
1268 CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
1269 CGF.Builder.CreateMemCpy(
1270 Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
1271 Src.getAlignment().getAsAlign(),
1272 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize()));
1273 return CGF.Builder.CreateLoad(Tmp);
1274}
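// Illustrative fallback (hypothetical types): loading { i64, i64 } (16 bytes)
// from a source of type { i32, i32 } (8 bytes) reaches the final memcpy path:
// only the 8 known bytes are copied into the temporary before the load, so
// the remaining bits of the result are undefined, as documented above.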
1275
1276// Function to store a first-class aggregate into memory. We prefer to
1277// store the elements rather than the aggregate to be more friendly to
1278// fast-isel.
1279// FIXME: Do we need to recurse here?
1280void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
1281 bool DestIsVolatile) {
1282 // Prefer scalar stores to first-class aggregate stores.
1283 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
1284 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1285 Address EltPtr = Builder.CreateStructGEP(Dest, i);
1286 llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
1287 Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1288 }
1289 } else {
1290 Builder.CreateStore(Val, Dest, DestIsVolatile);
1291 }
1292}
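// For a value of type { i32, i64 } (hypothetical), the loop above expands to
// roughly
//
//   %elt0 = extractvalue { i32, i64 } %val, 0
//   store i32 %elt0, i32* %dest.elt0
//   %elt1 = extractvalue { i32, i64 } %val, 1
//   store i64 %elt1, i64* %dest.elt1
//
// rather than a single first-class aggregate store.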
1293
1294/// CreateCoercedStore - Create a store to \arg Dst from \arg Src,
1295/// where the source and destination may have different types, using the
1296/// alignment carried by the destination address.
1297///
1298/// This safely handles the case when the src type is larger than the
1299/// destination type; the upper bits of the src will be lost.
1300static void CreateCoercedStore(llvm::Value *Src,
1301 Address Dst,
1302 bool DstIsVolatile,
1303 CodeGenFunction &CGF) {
1304 llvm::Type *SrcTy = Src->getType();
1305 llvm::Type *DstTy = Dst.getElementType();
1306 if (SrcTy == DstTy) {
1307 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1308 return;
1309 }
1310
1311 llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1312
1313 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1314 Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
1315 SrcSize.getFixedSize(), CGF);
1316 DstTy = Dst.getElementType();
1317 }
1318
1319 llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
1320 llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
1321 if (SrcPtrTy && DstPtrTy &&
1322 SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
1323 Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
1324 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1325 return;
1326 }
1327
1328 // If the source and destination are integer or pointer types, just do an
1329 // extension or truncation to the desired type.
1330 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1331 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1332 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1333 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1334 return;
1335 }
1336
1337 llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1338
1339 // If store is legal, just bitcast the src pointer.
1340 if (isa<llvm::ScalableVectorType>(SrcTy) ||
1341 isa<llvm::ScalableVectorType>(DstTy) ||
1342 SrcSize.getFixedSize() <= DstSize.getFixedSize()) {
1343 Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1344 CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
1345 } else {
1346 // Otherwise do coercion through memory. This is stupid, but
1347 // simple.
1348
1349 // Generally SrcSize is never greater than DstSize, since this means we are
1350 // losing bits. However, this can happen in cases where the structure has
1351 // additional padding, for example due to a user specified alignment.
1352 //
1353    // FIXME: Assert that we aren't truncating non-padding bits when we have
1354    // access to that information.
1355 Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1356 CGF.Builder.CreateStore(Src, Tmp);
1357 CGF.Builder.CreateMemCpy(
1358 Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
1359 Tmp.getAlignment().getAsAlign(),
1360 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize()));
1361 }
1362}
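// Mirror of the coerced load (hypothetical types): storing an i64 into a
// 4-byte float destination takes the final branch, spilling the i64 to a
// temporary alloca and memcpy'ing only DstSize = 4 bytes, dropping the upper
// bits as the contract above permits.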
1363
1364static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1365 const ABIArgInfo &info) {
1366 if (unsigned offset = info.getDirectOffset()) {
1367 addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1368 addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1369 CharUnits::fromQuantity(offset));
1370 addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1371 }
1372 return addr;
1373}
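// Sketch (hypothetical offset): with getDirectOffset() == 8, the address is
// recast to i8*, advanced 8 bytes with an inbounds GEP, and then recast to
// the coerced type, so the ABI reads the argument starting 8 bytes into the
// object.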
1374
1375namespace {
1376
1377/// Encapsulates information about the way function arguments from
1378/// CGFunctionInfo should be passed to the actual LLVM IR function.
1379class ClangToLLVMArgMapping {
1380 static const unsigned InvalidIndex = ~0U;
1381 unsigned InallocaArgNo;
1382 unsigned SRetArgNo;
1383 unsigned TotalIRArgs;
1384
1385  /// Arguments of the LLVM IR function corresponding to a single Clang
1386  /// argument.
1386 struct IRArgs {
1387 unsigned PaddingArgIndex;
1388 // Argument is expanded to IR arguments at positions
1389 // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1390 unsigned FirstArgIndex;
1391 unsigned NumberOfArgs;
1392
1393 IRArgs()
1394 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1395 NumberOfArgs(0) {}
1396 };
1397
1398 SmallVector<IRArgs, 8> ArgInfo;
1399
1400public:
1401 ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1402 bool OnlyRequiredArgs = false)
1403 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1404 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1405 construct(Context, FI, OnlyRequiredArgs);
1406 }
1407
1408 bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1409 unsigned getInallocaArgNo() const {
1410    assert(hasInallocaArg());
1411 return InallocaArgNo;
1412 }
1413
1414 bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1415 unsigned getSRetArgNo() const {
1416    assert(hasSRetArg());
1417 return SRetArgNo;
1418 }
1419
1420 unsigned totalIRArgs() const { return TotalIRArgs; }
1421
1422 bool hasPaddingArg(unsigned ArgNo) const {
1423    assert(ArgNo < ArgInfo.size());
1424 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1425 }
1426 unsigned getPaddingArgNo(unsigned ArgNo) const {
1427    assert(hasPaddingArg(ArgNo));
1428 return ArgInfo[ArgNo].PaddingArgIndex;
1429 }
1430
1431  /// Returns the index of the first IR argument corresponding to ArgNo, and
1432  /// the number of IR arguments it expands to.
1433 std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1434    assert(ArgNo < ArgInfo.size());
1435 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1436 ArgInfo[ArgNo].NumberOfArgs);
1437 }
1438
1439private:
1440 void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1441 bool OnlyRequiredArgs);
1442};
1443
1444void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1445 const CGFunctionInfo &FI,
1446 bool OnlyRequiredArgs) {
1447 unsigned IRArgNo = 0;
1448 bool SwapThisWithSRet = false;
1449 const ABIArgInfo &RetAI = FI.getReturnInfo();
1450
1451 if (RetAI.getKind() == ABIArgInfo::Indirect) {
1452 SwapThisWithSRet = RetAI.isSRetAfterThis();
1453 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1454 }
1455
1456 unsigned ArgNo = 0;
1457 unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1458 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1459 ++I, ++ArgNo) {
1460    assert(I != FI.arg_end());
1461 QualType ArgType = I->type;
1462 const ABIArgInfo &AI = I->info;
1463 // Collect data about IR arguments corresponding to Clang argument ArgNo.
1464 auto &IRArgs = ArgInfo[ArgNo];
1465
1466 if (AI.getPaddingType())
1467 IRArgs.PaddingArgIndex = IRArgNo++;
1468
1469 switch (AI.getKind()) {
1470 case ABIArgInfo::Extend:
1471 case ABIArgInfo::Direct: {
1472 // FIXME: handle sseregparm someday...
1473 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1474 if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1475 IRArgs.NumberOfArgs = STy->getNumElements();
1476 } else {
1477 IRArgs.NumberOfArgs = 1;
1478 }
1479 break;
1480 }
1481 case ABIArgInfo::Indirect:
1482 case ABIArgInfo::IndirectAliased:
1483 IRArgs.NumberOfArgs = 1;
1484 break;
1485 case ABIArgInfo::Ignore:
1486 case ABIArgInfo::InAlloca:
1487      // ignore and inalloca don't have matching LLVM parameters.
1488 IRArgs.NumberOfArgs = 0;
1489 break;
1490 case ABIArgInfo::CoerceAndExpand:
1491 IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1492 break;
1493 case ABIArgInfo::Expand:
1494 IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1495 break;
1496 }
1497
1498 if (IRArgs.NumberOfArgs > 0) {
1499 IRArgs.FirstArgIndex = IRArgNo;
1500 IRArgNo += IRArgs.NumberOfArgs;
1501 }
1502
1503 // Skip over the sret parameter when it comes second. We already handled it
1504 // above.
1505 if (IRArgNo == 1 && SwapThisWithSRet)
1506 IRArgNo++;
1507 }
1508  assert(ArgNo == ArgInfo.size());
1509
1510 if (FI.usesInAlloca())
1511 InallocaArgNo = IRArgNo++;
1512
1513 TotalIRArgs = IRArgNo;
1514}
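// Worked example (hypothetical signature): for an instance method on win32
// that returns a large struct, isSRetAfterThis() is true, so the mapping
// comes out as IR arg 0 = 'this', IR arg 1 = sret, with the remaining Clang
// arguments starting at IR arg 2; that is what the SwapThisWithSRet
// bookkeeping above arranges.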
1515} // namespace
1516
1517/***/
1518
1519bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1520 const auto &RI = FI.getReturnInfo();
1521 return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1522}
1523
1524bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1525 return ReturnTypeUsesSRet(FI) &&
1526 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1527}
1528
1529bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1530 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1531 switch (BT->getKind()) {
1532 default:
1533 return false;
1534 case BuiltinType::Float:
1535 return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1536 case BuiltinType::Double:
1537 return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1538 case BuiltinType::LongDouble:
1539 return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1540 }
1541 }
1542
1543 return false;
1544}
1545
1546bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1547 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1548 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1549 if (BT->getKind() == BuiltinType::LongDouble)
1550 return getTarget().useObjCFP2RetForComplexLongDouble();
1551 }
1552 }
1553
1554 return false;
1555}
1556
1557llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1558 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1559 return GetFunctionType(FI);
1560}
1561
1562llvm::FunctionType *
1563CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1564
1565 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1566 (void)Inserted;
1567  assert(Inserted && "Recursively being processed?");
1568
1569 llvm::Type *resultType = nullptr;
1570 const ABIArgInfo &retAI = FI.getReturnInfo();
1571 switch (retAI.getKind()) {
1572 case ABIArgInfo::Expand:
1573 case ABIArgInfo::IndirectAliased:
1574    llvm_unreachable("Invalid ABI kind for return argument");
1575
1576 case ABIArgInfo::Extend:
1577 case ABIArgInfo::Direct:
1578 resultType = retAI.getCoerceToType();
1579 break;
1580
1581 case ABIArgInfo::InAlloca:
1582 if (retAI.getInAllocaSRet()) {
1583 // sret things on win32 aren't void, they return the sret pointer.
1584 QualType ret = FI.getReturnType();
1585 llvm::Type *ty = ConvertType(ret);
1586 unsigned addressSpace = Context.getTargetAddressSpace(ret);
1587 resultType = llvm::PointerType::get(ty, addressSpace);
1588 } else {
1589 resultType = llvm::Type::getVoidTy(getLLVMContext());
1590 }
1591 break;
1592
1593 case ABIArgInfo::Indirect:
1594 case ABIArgInfo::Ignore:
1595 resultType = llvm::Type::getVoidTy(getLLVMContext());
1596 break;
1597
1598 case ABIArgInfo::CoerceAndExpand:
1599 resultType = retAI.getUnpaddedCoerceAndExpandType();
1600 break;
1601 }
1602
1603 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1604 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1605
1606 // Add type for sret argument.
1607 if (IRFunctionArgs.hasSRetArg()) {
1608 QualType Ret = FI.getReturnType();
1609 llvm::Type *Ty = ConvertType(Ret);
1610 unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1611 ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1612 llvm::PointerType::get(Ty, AddressSpace);
1613 }
1614
1615 // Add type for inalloca argument.
1616 if (IRFunctionArgs.hasInallocaArg()) {
1617 auto ArgStruct = FI.getArgStruct();
1618    assert(ArgStruct);
1619 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1620 }
1621
1622 // Add in all of the required arguments.
1623 unsigned ArgNo = 0;
1624 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1625 ie = it + FI.getNumRequiredArgs();
1626 for (; it != ie; ++it, ++ArgNo) {
1627 const ABIArgInfo &ArgInfo = it->info;
1628
1629 // Insert a padding type to ensure proper alignment.
1630 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1631 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1632 ArgInfo.getPaddingType();
1633
1634 unsigned FirstIRArg, NumIRArgs;
1635 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1636
1637 switch (ArgInfo.getKind()) {
1638 case ABIArgInfo::Ignore:
1639 case ABIArgInfo::InAlloca:
1640      assert(NumIRArgs == 0);
1641 break;
1642
1643 case ABIArgInfo::Indirect: {
1644      assert(NumIRArgs == 1);
1645 // indirect arguments are always on the stack, which is alloca addr space.
1646 llvm::Type *LTy = ConvertTypeForMem(it->type);
1647 ArgTypes[FirstIRArg] = LTy->getPointerTo(
1648 CGM.getDataLayout().getAllocaAddrSpace());
1649 break;
1650 }
1651 case ABIArgInfo::IndirectAliased: {
1652      assert(NumIRArgs == 1);
1653 llvm::Type *LTy = ConvertTypeForMem(it->type);
1654 ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace());
1655 break;
1656 }
1657 case ABIArgInfo::Extend:
1658 case ABIArgInfo::Direct: {
1659 // Fast-isel and the optimizer generally like scalar values better than
1660 // FCAs, so we flatten them if this is safe to do for this argument.
1661 llvm::Type *argType = ArgInfo.getCoerceToType();
1662 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1663 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1664        assert(NumIRArgs == st->getNumElements());
1665 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1666 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1667 } else {
1668        assert(NumIRArgs == 1);
1669 ArgTypes[FirstIRArg] = argType;
1670 }
1671 break;
1672 }
1673
1674 case ABIArgInfo::CoerceAndExpand: {
1675 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1676 for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1677 *ArgTypesIter++ = EltTy;
1678 }
1679      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1680 break;
1681 }
1682
1683 case ABIArgInfo::Expand:
1684 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1685 getExpandedTypes(it->type, ArgTypesIter);
1686      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1687 break;
1688 }
1689 }
1690
1691 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1692  assert(Erased && "Not in set?");
1693
1694 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1695}
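// End-to-end sketch (hypothetical source and target): given
//
//   struct S { int a, b; };
//   S f(S s);
//
// on a target whose ABI coerces S to i64, the Direct cases above produce an
// IR signature along the lines of
//
//   i64 @f(i64)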
1696
1697llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1698 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1699 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1700
1701 if (!isFuncTypeConvertible(FPT))
1702 return llvm::StructType::get(getLLVMContext());
1703
1704 return GetFunctionType(GD);
1705}
1706
1707static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1708 llvm::AttrBuilder &FuncAttrs,
1709 const FunctionProtoType *FPT) {
1710 if (!FPT)
1711 return;
1712
1713 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1714 FPT->isNothrow())
1715 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1716}
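// E.g. (hypothetical prototype): a 'void f() noexcept' prototype with a
// resolved exception specification picks up the IR-level nounwind attribute
// here.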
1717
1718void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
1719 bool HasOptnone,
1720 bool AttrOnCallSite,
1721 llvm::AttrBuilder &FuncAttrs) {
1722 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1723 if (!HasOptnone) {
1724 if (CodeGenOpts.OptimizeSize)
1725 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1726 if (CodeGenOpts.OptimizeSize == 2)
1727 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1728 }
1729
1730 if (CodeGenOpts.DisableRedZone)
1731 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1732 if (CodeGenOpts.IndirectTlsSegRefs)
1733 FuncAttrs.addAttribute("indirect-tls-seg-refs");
1734 if (CodeGenOpts.NoImplicitFloat)
1735 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1736
1737 if (AttrOnCallSite) {
1738 // Attributes that should go on the call site only.
1739 if (!CodeGenOpts.SimplifyLibCalls ||
1740 CodeGenOpts.isNoBuiltinFunc(Name.data()))
1741 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1742 if (!CodeGenOpts.TrapFuncName.empty())
1743 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1744 } else {
1745 StringRef FpKind;
1746 switch (CodeGenOpts.getFramePointer()) {
1747 case CodeGenOptions::FramePointerKind::None:
1748 FpKind = "none";
1749 break;
1750 case CodeGenOptions::FramePointerKind::NonLeaf:
1751 FpKind = "non-leaf";
1752 break;
1753 case CodeGenOptions::FramePointerKind::All:
1754 FpKind = "all";
1755 break;
1756 }
1757 FuncAttrs.addAttribute("frame-pointer", FpKind);
1758
1759 FuncAttrs.addAttribute("less-precise-fpmad",
1760 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1761
1762 if (CodeGenOpts.NullPointerIsValid)
1763 FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
1764
1765 if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE())
1766 FuncAttrs.addAttribute("denormal-fp-math",
1767 CodeGenOpts.FPDenormalMode.str());
1768 if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) {
1769 FuncAttrs.addAttribute(
1770 "denormal-fp-math-f32",
1771 CodeGenOpts.FP32DenormalMode.str());
1772 }
1773
1774 FuncAttrs.addAttribute("no-trapping-math",
1775 llvm::toStringRef(LangOpts.getFPExceptionMode() ==
1776 LangOptions::FPE_Ignore));
1777
1778 // Strict (compliant) code is the default, so only add this attribute to
1779 // indicate that we are trying to workaround a problem case.
1780 if (!CodeGenOpts.StrictFloatCastOverflow)
1781 FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1782
1783 // TODO: Are these all needed?
1784 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1785 FuncAttrs.addAttribute("no-infs-fp-math",
1786 llvm::toStringRef(LangOpts.NoHonorInfs));
1787 FuncAttrs.addAttribute("no-nans-fp-math",
1788 llvm::toStringRef(LangOpts.NoHonorNaNs));
1789 FuncAttrs.addAttribute("unsafe-fp-math",
1790 llvm::toStringRef(LangOpts.UnsafeFPMath));
1791 FuncAttrs.addAttribute("use-soft-float",
1792 llvm::toStringRef(CodeGenOpts.SoftFloat));
1793 FuncAttrs.addAttribute("stack-protector-buffer-size",
1794 llvm::utostr(CodeGenOpts.SSPBufferSize));
1795 FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1796 llvm::toStringRef(LangOpts.NoSignedZero));
1797 FuncAttrs.addAttribute(
1798 "correctly-rounded-divide-sqrt-fp-math",
1799 llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1800
1801 // TODO: Reciprocal estimate codegen options should apply to instructions?
1802 const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1803 if (!Recips.empty())
1804 FuncAttrs.addAttribute("reciprocal-estimates",
1805 llvm::join(Recips, ","));
1806
1807 if (!CodeGenOpts.PreferVectorWidth.empty() &&
1808 CodeGenOpts.PreferVectorWidth != "none")
1809 FuncAttrs.addAttribute("prefer-vector-width",
1810 CodeGenOpts.PreferVectorWidth);
1811
1812 if (CodeGenOpts.StackRealignment)
1813 FuncAttrs.addAttribute("stackrealign");
1814 if (CodeGenOpts.Backchain)
1815 FuncAttrs.addAttribute("backchain");
1816 if (CodeGenOpts.EnableSegmentedStacks)
1817 FuncAttrs.addAttribute("split-stack");
1818
1819 if (CodeGenOpts.SpeculativeLoadHardening)
1820 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1821 }
1822
1823 if (getLangOpts().assumeFunctionsAreConvergent()) {
1824 // Conservatively, mark all functions and calls in CUDA and OpenCL as
1825 // convergent (meaning, they may call an intrinsically convergent op, such
1826 // as __syncthreads() / barrier(), and so can't have certain optimizations
1827 // applied around them). LLVM will remove this attribute where it safely
1828 // can.
1829 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1830 }
1831
1832 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1833 // Exceptions aren't supported in CUDA device code.
1834 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1835 }
1836
1837 for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1838 StringRef Var, Value;
1839 std::tie(Var, Value) = Attr.split('=');
1840 FuncAttrs.addAttribute(Var, Value);
1841 }
1842}
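// Rough sketch of the non-call-site output (hypothetical flags): compiling
// with -fno-omit-frame-pointer and -ffast-math, a definition picks up string
// attributes along the lines of
//
//   "frame-pointer"="all" "no-infs-fp-math"="true"
//   "no-nans-fp-math"="true" "unsafe-fp-math"="true"
//
// while a call site instead gets the nobuiltin / "trap-func-name" handling.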
1843
1844void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
1845 llvm::AttrBuilder FuncAttrs;
1846 getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
1847 /* AttrOnCallSite = */ false, FuncAttrs);
1848 // TODO: call GetCPUAndFeaturesAttributes?
1849 F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1850}
1851
1852void CodeGenModule::addDefaultFunctionDefinitionAttributes(
1853 llvm::AttrBuilder &attrs) {
1854 getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
1855 /*for call*/ false, attrs);
1856 GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
1857}
1858
1859static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
1860 const LangOptions &LangOpts,
1861 const NoBuiltinAttr *NBA = nullptr) {
1862 auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
1863 SmallString<32> AttributeName;
1864 AttributeName += "no-builtin-";
1865 AttributeName += BuiltinName;
1866 FuncAttrs.addAttribute(AttributeName);
1867 };
1868
1869 // First, handle the language options passed through -fno-builtin.
1870 if (LangOpts.NoBuiltin) {
1871 // -fno-builtin disables them all.
1872 FuncAttrs.addAttribute("no-builtins");
1873 return;
1874 }
1875
1876 // Then, add attributes for builtins specified through -fno-builtin-<name>.
1877 llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);
1878
1879  // Now, let's check the __attribute__((no_builtin("..."))) attribute added to
1880 // the source.
1881 if (!NBA)
1882 return;
1883
1884 // If there is a wildcard in the builtin names specified through the
1885 // attribute, disable them all.
1886 if (llvm::is_contained(NBA->builtinNames(), "*")) {
1887 FuncAttrs.addAttribute("no-builtins");
1888 return;
1889 }
1890
1891 // And last, add the rest of the builtin names.
1892 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
1893}
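// Example attribute strings (hypothetical flags): -fno-builtin-memcpy plus
// __attribute__((no_builtin("memset"))) yields the two attributes
// "no-builtin-memcpy" and "no-builtin-memset", while plain -fno-builtin (or
// a "*" wildcard in the attribute) collapses everything to "no-builtins".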
1894
1895/// Construct the IR attribute list of a function or call.
1896///
1897/// When adding an attribute, please consider where it should be handled:
1898///
1899/// - getDefaultFunctionAttributes is for attributes that are essentially
1900/// part of the global target configuration (but perhaps can be
1901/// overridden on a per-function basis). Adding attributes there
1902/// will cause them to also be set in frontends that build on Clang's
1903/// target-configuration logic, as well as for code defined in library
1904/// modules such as CUDA's libdevice.
1905///
1906/// - ConstructAttributeList builds on top of getDefaultFunctionAttributes
1907/// and adds declaration-specific, convention-specific, and
1908/// frontend-specific logic. The last is of particular importance:
1909/// attributes that restrict how the frontend generates code must be
1910/// added here rather than getDefaultFunctionAttributes.
1911///
1912void CodeGenModule::ConstructAttributeList(
1913 StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1914 llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1915 llvm::AttrBuilder FuncAttrs;
1916 llvm::AttrBuilder RetAttrs;
1917
1918 // Collect function IR attributes from the CC lowering.
1919  // We'll collect the parameter and result attributes later.
1920 CallingConv = FI.getEffectiveCallingConvention();
1921 if (FI.isNoReturn())
1922 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1923 if (FI.isCmseNSCall())
1924 FuncAttrs.addAttribute("cmse_nonsecure_call");
1925
1926 // Collect function IR attributes from the callee prototype if we have one.
1927 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1928 CalleeInfo.getCalleeFunctionProtoType());
1929
1930 const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
1931
1932 bool HasOptnone = false;
1933 // The NoBuiltinAttr attached to the target FunctionDecl.
1934 const NoBuiltinAttr *NBA = nullptr;
1935
1936 // Collect function IR attributes based on declaration-specific
1937 // information.
1938 // FIXME: handle sseregparm someday...
1939 if (TargetDecl) {
1940 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1941 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1942 if (TargetDecl->hasAttr<NoThrowAttr>())
1943 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1944 if (TargetDecl->hasAttr<NoReturnAttr>())
1945 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1946 if (TargetDecl->hasAttr<ColdAttr>())
1947 FuncAttrs.addAttribute(llvm::Attribute::Cold);
1948 if (TargetDecl->hasAttr<NoDuplicateAttr>())
1949 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1950 if (TargetDecl->hasAttr<ConvergentAttr>())
1951 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1952
1953 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1954 AddAttributesFromFunctionProtoType(
1955 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1956 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
1957 // A sane operator new returns a non-aliasing pointer.
1958 auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
1959 if (getCodeGenOpts().AssumeSaneOperatorNew &&
1960 (Kind == OO_New || Kind == OO_Array_New))
1961 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1962 }
1963 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1964 const bool IsVirtualCall = MD && MD->isVirtual();
1965 // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
1966      // virtual function. These attributes are not inherited by overriders.
1967 if (!(AttrOnCallSite && IsVirtualCall)) {
1968 if (Fn->isNoReturn())
1969 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1970 NBA = Fn->getAttr<NoBuiltinAttr>();
1971 }
1972 }
1973
1974 // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1975 if (TargetDecl->hasAttr<ConstAttr>()) {
1976 FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1977 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1978 } else if (TargetDecl->hasAttr<PureAttr>()) {
1979 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1980 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1981 } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1982 FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1983 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1984 }
1985 if (TargetDecl->hasAttr<RestrictAttr>())
1986 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1987 if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
1988 !CodeGenOpts.NullPointerIsValid)
1989 RetAttrs.addAttribute(llvm::Attribute::NonNull);
1990 if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1991 FuncAttrs.addAttribute("no_caller_saved_registers");
1992 if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1993 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1994
1995 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1996 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1997 Optional<unsigned> NumElemsParam;
1998 if (AllocSize->getNumElemsParam().isValid())
1999 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2000 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2001 NumElemsParam);
2002 }
2003
2004 if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
2005 if (getLangOpts().OpenCLVersion <= 120) {
2006        // In OpenCL v1.2, work groups are always uniform.
2007 FuncAttrs.addAttribute("uniform-work-group-size", "true");
2008 } else {
2009        // In OpenCL v2.0, work groups may or may not be uniform. The
2010        // '-cl-uniform-work-group-size' compile option hints to the
2011        // compiler that the global work size is a multiple of the
2012        // work-group size specified to clEnqueueNDRangeKernel
2013        // (i.e. work groups are uniform).
2014 FuncAttrs.addAttribute("uniform-work-group-size",
2015 llvm::toStringRef(CodeGenOpts.UniformWGSize));
2016 }
2017 }
2018 }
2019
2020 // Attach "no-builtins" attributes to:
2021 // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
2022 // * definitions: "no-builtins" or "no-builtin-<name>" only.
2023 // The attributes can come from:
2024 // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
2025 // * FunctionDecl attributes: __attribute__((no_builtin(...)))
2026 addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);
2027
2028  // Collect function IR attributes based on global settings.
2029 getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2030
2031 // Override some default IR attributes based on declaration-specific
2032 // information.
2033 if (TargetDecl) {
2034 if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
2035 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2036 if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
2037 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2038 if (TargetDecl->hasAttr<NoSplitStackAttr>())
2039 FuncAttrs.removeAttribute("split-stack");
2040
2041 // Add NonLazyBind attribute to function declarations when -fno-plt
2042 // is used.
2043 // FIXME: what if we just haven't processed the function definition
2044 // yet, or if it's an external definition like C99 inline?
2045 if (CodeGenOpts.NoPLT) {
2046 if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2047 if (!Fn->isDefined() && !AttrOnCallSite) {
2048 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2049 }
2050 }
2051 }
2052 }
2053
2054 // Collect non-call-site function IR attributes from declaration-specific
2055 // information.
2056 if (!AttrOnCallSite) {
2057 if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
2058 FuncAttrs.addAttribute("cmse_nonsecure_entry");
2059
2060    // Decide whether tail calls should be disabled.
2061 auto shouldDisableTailCalls = [&] {
2062 // Should this be honored in getDefaultFunctionAttributes?
2063 if (CodeGenOpts.DisableTailCalls)
2064 return true;
2065
2066 if (!TargetDecl)
2067 return false;
2068
2069 if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
2070 TargetDecl->hasAttr<AnyX86InterruptAttr>())
2071 return true;
2072
2073 if (CodeGenOpts.NoEscapingBlockTailCalls) {
2074 if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2075 if (!BD->doesNotEscape())
2076 return true;
2077 }
2078
2079 return false;
2080 };
2081 FuncAttrs.addAttribute("disable-tail-calls",
2082 llvm::toStringRef(shouldDisableTailCalls()));
2083
2084 // CPU/feature overrides. addDefaultFunctionDefinitionAttributes
2085 // handles these separately to set them based on the global defaults.
2086 GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
2087 }
2088
2089 // Collect attributes from arguments and return values.
2090 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
2091
2092 QualType RetTy = FI.getReturnType();
2093 const ABIArgInfo &RetAI = FI.getReturnInfo();
2094 switch (RetAI.getKind()) {
2095 case ABIArgInfo::Extend:
2096 if (RetAI.isSignExt())
2097 RetAttrs.addAttribute(llvm::Attribute::SExt);
2098 else
2099 RetAttrs.addAttribute(llvm::Attribute::ZExt);
2100    LLVM_FALLTHROUGH;
2101 case ABIArgInfo::Direct:
2102 if (RetAI.getInReg())
2103 RetAttrs.addAttribute(llvm::Attribute::InReg);
2104 break;
2105 case ABIArgInfo::Ignore:
2106 break;
2107
2108 case ABIArgInfo::InAlloca:
2109 case ABIArgInfo::Indirect: {
2110 // inalloca and sret disable readnone and readonly
2111 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2112 .removeAttribute(llvm::Attribute::ReadNone);
2113 break;
2114 }
2115
2116 case ABIArgInfo::CoerceAndExpand:
2117 break;
2118
2119 case ABIArgInfo::Expand:
2120 case ABIArgInfo::IndirectAliased:
2121    llvm_unreachable("Invalid ABI kind for return argument");
2122 }
2123
2124 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2125 QualType PTy = RefTy->getPointeeType();
2126 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2127 RetAttrs.addDereferenceableAttr(
2128 getMinimumObjectSize(PTy).getQuantity());
2129 if (getContext().getTargetAddressSpace(PTy) == 0 &&
2130 !CodeGenOpts.NullPointerIsValid)
2131 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2132 if (PTy->isObjectType()) {
2133 llvm::Align Alignment =
2134 getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
2135 RetAttrs.addAlignmentAttr(Alignment);
2136 }
2137 }
2138
2139 bool hasUsedSRet = false;
2140 SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2141
2142 // Attach attributes to sret.
2143 if (IRFunctionArgs.hasSRetArg()) {
2144 llvm::AttrBuilder SRETAttrs;
2145 SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
2146 hasUsedSRet = true;
2147 if (RetAI.getInReg())
2148 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2149 SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
2150 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2151 llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2152 }
2153
2154 // Attach attributes to inalloca argument.
2155 if (IRFunctionArgs.hasInallocaArg()) {
2156 llvm::AttrBuilder Attrs;
2157 Attrs.addAttribute(llvm::Attribute::InAlloca);
2158 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2159 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2160 }
2161
2162 unsigned ArgNo = 0;
2163 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2164 E = FI.arg_end();
2165 I != E; ++I, ++ArgNo) {
2166 QualType ParamType = I->type;
2167 const ABIArgInfo &AI = I->info;
2168 llvm::AttrBuilder Attrs;
2169
2170 // Add attribute for padding argument, if necessary.
2171 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2172 if (AI.getPaddingInReg()) {
2173 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2174 llvm::AttributeSet::get(
2175 getLLVMContext(),
2176 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2177 }
2178 }
2179
2180 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2181 // have the corresponding parameter variable. It doesn't make
2182 // sense to do it here because parameters are so messed up.
2183 switch (AI.getKind()) {
2184 case ABIArgInfo::Extend:
2185 if (AI.isSignExt())
2186 Attrs.addAttribute(llvm::Attribute::SExt);
2187 else
2188 Attrs.addAttribute(llvm::Attribute::ZExt);
2189      LLVM_FALLTHROUGH;
2190 case ABIArgInfo::Direct:
2191 if (ArgNo == 0 && FI.isChainCall())
2192 Attrs.addAttribute(llvm::Attribute::Nest);
2193 else if (AI.getInReg())
2194 Attrs.addAttribute(llvm::Attribute::InReg);
2195 break;
2196
2197 case ABIArgInfo::Indirect: {
2198 if (AI.getInReg())
2199 Attrs.addAttribute(llvm::Attribute::InReg);
2200
2201 if (AI.getIndirectByVal())
2202 Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
2203
2204 auto *Decl = ParamType->getAsRecordDecl();
2205 if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
2206 Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs)
2207 // When calling the function, the pointer passed in will be the only
2208 // reference to the underlying object. Mark it accordingly.
2209 Attrs.addAttribute(llvm::Attribute::NoAlias);
2210
2211 // TODO: We could add the byref attribute if not byval, but it would
2212 // require updating many testcases.
2213
2214 CharUnits Align = AI.getIndirectAlign();
2215
2216 // In a byval argument, it is important that the required
2217 // alignment of the type is honored, as LLVM might be creating a
2218 // *new* stack object, and needs to know what alignment to give
2219 // it. (Sometimes it can deduce a sensible alignment on its own,
2220 // but not if clang decides it must emit a packed struct, or the
2221 // user specifies increased alignment requirements.)
2222 //
2223 // This is different from indirect *not* byval, where the object
2224 // exists already, and the align attribute is purely
2225 // informative.
2226      assert(!Align.isZero());
2227
2228 // For now, only add this when we have a byval argument.
2229 // TODO: be less lazy about updating test cases.
2230 if (AI.getIndirectByVal())
2231 Attrs.addAlignmentAttr(Align.getQuantity());
2232
2233 // byval disables readnone and readonly.
2234 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2235 .removeAttribute(llvm::Attribute::ReadNone);
2236
2237 break;
2238 }
2239 case ABIArgInfo::IndirectAliased: {
2240 CharUnits Align = AI.getIndirectAlign();
2241 Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));
2242 Attrs.addAlignmentAttr(Align.getQuantity());
2243 break;
2244 }
2245 case ABIArgInfo::Ignore:
2246 case ABIArgInfo::Expand:
2247 case ABIArgInfo::CoerceAndExpand:
2248 break;
2249
2250 case ABIArgInfo::InAlloca:
2251 // inalloca disables readnone and readonly.
2252 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2253 .removeAttribute(llvm::Attribute::ReadNone);
2254 continue;
2255 }
2256
2257 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2258 QualType PTy = RefTy->getPointeeType();
2259 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2260 Attrs.addDereferenceableAttr(
2261 getMinimumObjectSize(PTy).getQuantity());
2262 if (getContext().getTargetAddressSpace(PTy) == 0 &&
2263 !CodeGenOpts.NullPointerIsValid)
2264 Attrs.addAttribute(llvm::Attribute::NonNull);
2265 if (PTy->isObjectType()) {
2266 llvm::Align Alignment =
2267 getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
2268 Attrs.addAlignmentAttr(Alignment);
2269 }
2270 }
2271
2272 switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2273 case ParameterABI::Ordinary:
2274 break;
2275
2276 case ParameterABI::SwiftIndirectResult: {
2277 // Add 'sret' if we haven't already used it for something, but
2278 // only if the result is void.
2279 if (!hasUsedSRet && RetTy->isVoidType()) {
2280 Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));
2281 hasUsedSRet = true;
2282 }
2283
2284 // Add 'noalias' in either case.
2285 Attrs.addAttribute(llvm::Attribute::NoAlias);
2286
2287 // Add 'dereferenceable' and 'alignment'.
2288 auto PTy = ParamType->getPointeeType();
2289 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2290 auto info = getContext().getTypeInfoInChars(PTy);
2291 Attrs.addDereferenceableAttr(info.first.getQuantity());
2292 Attrs.addAlignmentAttr(info.second.getAsAlign());
2293 }
2294 break;
2295 }
2296
2297 case ParameterABI::SwiftErrorResult:
2298 Attrs.addAttribute(llvm::Attribute::SwiftError);
2299 break;
2300
2301 case ParameterABI::SwiftContext:
2302 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2303 break;
2304 }
2305
2306 if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2307 Attrs.addAttribute(llvm::Attribute::NoCapture);
2308
2309 if (Attrs.hasAttributes()) {
2310 unsigned FirstIRArg, NumIRArgs;
2311 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2312 for (unsigned i = 0; i < NumIRArgs; i++)
2313 ArgAttrs[FirstIRArg + i] =
2314 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2315 }
2316 }
2317  assert(ArgNo == FI.arg_size());
2318
2319 AttrList = llvm::AttributeList::get(
2320 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2321 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2322}
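// Representative result (hypothetical declaration): for
//
//   __attribute__((const)) int id(int x);
//
// the function-level set ends up with readnone and nounwind, and on targets
// whose ABI widens small integers the return and parameter sets carry the
// matching signext/zeroext attributes from the Extend cases above.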
2323
2324/// An argument came in as a promoted argument; demote it back to its
2325/// declared type.
2326static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2327 const VarDecl *var,
2328 llvm::Value *value) {
2329 llvm::Type *varType = CGF.ConvertType(var->getType());
2330
2331 // This can happen with promotions that actually don't change the
2332 // underlying type, like the enum promotions.
2333 if (value->getType() == varType) return value;
2334
2335    assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2336           && "unexpected promotion type");
2337
2338 if (isa<llvm::IntegerType>(varType))
2339 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2340
2341 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2342}
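// Sketch (hypothetical K&R declaration): for
//
//   void f(c) char c; { ... }
//
// the caller passes the promoted int, and this helper emits a trunc from
// i32 back to i8 so the body sees the declared char type.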
2343
2344/// Returns the attribute (either parameter attribute, or function
2345/// attribute), which declares argument ArgNo to be non-null.
2346static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2347 QualType ArgType, unsigned ArgNo) {
2348 // FIXME: __attribute__((nonnull)) can also be applied to:
2349 // - references to pointers, where the pointee is known to be
2350 // nonnull (apparently a Clang extension)
2351 // - transparent unions containing pointers
2352 // In the former case, LLVM IR cannot represent the constraint. In
2353 // the latter case, we have no guarantee that the transparent union
2354 // is in fact passed as a pointer.
2355 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2356 return nullptr;
2357 // First, check attribute on parameter itself.
2358 if (PVD) {
2359 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2360 return ParmNNAttr;
2361 }
2362 // Check function attributes.
2363 if (!FD)
2364 return nullptr;
2365 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2366 if (NNAttr->isNonNull(ArgNo))
2367 return NNAttr;
2368 }
2369 return nullptr;
2370}
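// Both spellings reach this helper (hypothetical declarations):
//
//   void g(int *p) __attribute__((nonnull(1)));     // function attribute
//   void h(int *p __attribute__((nonnull)));        // parameter attribute
//
// The parameter-level attribute is checked first, then the function's list.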
2371
2372namespace {
2373 struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2374 Address Temp;
2375 Address Arg;
2376 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2377 void Emit(CodeGenFunction &CGF, Flags flags) override {
2378 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2379 CGF.Builder.CreateStore(errorValue, Arg);
2380 }
2381 };
2382}
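// Usage sketch: when a swifterror parameter is copied into a local temporary
// during the function prolog, a cleanup like this is pushed so the
// possibly-updated error value is written back to the caller's slot on every
// exit path from the function.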
2383
2384void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2385 llvm::Function *Fn,
2386 const FunctionArgList &Args) {
2387 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2388 // Naked functions don't have prologues.
2389 return;
2390
2391 // If this is an implicit-return-zero function, go ahead and
2392 // initialize the return value. TODO: it might be nice to have
2393 // a more general mechanism for this that didn't require synthesized
2394 // return statements.
2395 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2396 if (FD->hasImplicitReturnZero()) {
2397 QualType RetTy = FD->getReturnType().getUnqualifiedType();
2398 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2399 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2400 Builder.CreateStore(Zero, ReturnValue);
2401 }
2402 }
2403
2404 // FIXME: We no longer need the types from FunctionArgList; lift up and
2405 // simplify.
2406
2407 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2408  assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
2409
2410 // If we're using inalloca, all the memory arguments are GEPs off of the last
2411 // parameter, which is a pointer to the complete memory area.
2412 Address ArgStruct = Address::invalid();
2413 if (IRFunctionArgs.hasInallocaArg()) {
2414 ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
2415 FI.getArgStructAlignment());
2416
2417    assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2418 }
2419
2420 // Name the struct return parameter.
2421 if (IRFunctionArgs.hasSRetArg()) {
2422 auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
2423 AI->setName("agg.result");
2424 AI->addAttr(llvm::Attribute::NoAlias);
2425 }
2426
2427 // Track if we received the parameter as a pointer (indirect, byval, or
2428 // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
2429 // into a local alloca for us.
2430 SmallVector<ParamValue, 16> ArgVals;
2431 ArgVals.reserve(Args.size());
2432
2433 // Create a pointer value for every parameter declaration. This usually
2434 // entails copying one or more LLVM IR arguments into an alloca. Don't push
2435 // any cleanups or do anything that might unwind. We do that separately, so
2436 // we can push the cleanups in the correct order for the ABI.
2437 assert(FI.arg_size() == Args.size() &&
2438 "Mismatch between function signature & arguments.");
2439 unsigned ArgNo = 0;
2440 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2441 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2442 i != e; ++i, ++info_it, ++ArgNo) {
2443 const VarDecl *Arg = *i;
2444 const ABIArgInfo &ArgI = info_it->info;
2445
2446 bool isPromoted =
2447 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2448 // We convert directly from the ABIArgInfo type to the VarDecl type,
2449 // unless the parameter was promoted; in that case we use the
2450 // CGFunctionInfo::ArgInfo type and demote the argument afterwards.
2451 QualType Ty = isPromoted ? info_it->type : Arg->getType();
2452 assert(hasScalarEvaluationKind(Ty) ==
2453 hasScalarEvaluationKind(Arg->getType()));
2454
2455 unsigned FirstIRArg, NumIRArgs;
2456 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2457
2458 switch (ArgI.getKind()) {
2459 case ABIArgInfo::InAlloca: {
2460 assert(NumIRArgs == 0);
2461 auto FieldIndex = ArgI.getInAllocaFieldIndex();
2462 Address V =
2463 Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2464 if (ArgI.getInAllocaIndirect())
2465 V = Address(Builder.CreateLoad(V),
2466 getContext().getTypeAlignInChars(Ty));
2467 ArgVals.push_back(ParamValue::forIndirect(V));
2468 break;
2469 }
2470
2471 case ABIArgInfo::Indirect:
2472 case ABIArgInfo::IndirectAliased: {
2473 assert(NumIRArgs == 1);
2474 Address ParamAddr =
2475 Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign());
2476
2477 if (!hasScalarEvaluationKind(Ty)) {
2478 // Aggregates and complex variables are accessed by reference. All we
2479 // need to do is realign the value, if requested. Also, if the address
2480 // may be aliased, copy it to ensure that the parameter variable is
2481 // mutable and has a unique address, as C requires.
2482 Address V = ParamAddr;
2483 if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
2484 Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2485
2486 // Copy from the incoming argument pointer to the temporary with the
2487 // appropriate alignment.
2488 //
2489 // FIXME: We should have a common utility for generating an aggregate
2490 // copy.
2491 CharUnits Size = getContext().getTypeSizeInChars(Ty);
2492 Builder.CreateMemCpy(
2493 AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
2494 ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
2495 llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
2496 V = AlignedTemp;
2497 }
2498 ArgVals.push_back(ParamValue::forIndirect(V));
2499 } else {
2500 // Load scalar value from indirect argument.
2501 llvm::Value *V =
2502 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2503
2504 if (isPromoted)
2505 V = emitArgumentDemotion(*this, Arg, V);
2506 ArgVals.push_back(ParamValue::forDirect(V));
2507 }
2508 break;
2509 }
2510
2511 case ABIArgInfo::Extend:
2512 case ABIArgInfo::Direct: {
2513 auto AI = Fn->getArg(FirstIRArg);
2514 llvm::Type *LTy = ConvertType(Arg->getType());
2515
2516 // Prepare parameter attributes. So far, only attributes for pointer
2517 // parameters are prepared. See
2518 // http://llvm.org/docs/LangRef.html#paramattrs.
2519 if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
2520 ArgI.getCoerceToType()->isPointerTy()) {
2521 assert(NumIRArgs == 1);
2522
2523 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2524 // Set `nonnull` attribute if any.
2525 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2526 PVD->getFunctionScopeIndex()) &&
2527 !CGM.getCodeGenOpts().NullPointerIsValid)
2528 AI->addAttr(llvm::Attribute::NonNull);
2529
2530 QualType OTy = PVD->getOriginalType();
2531 if (const auto *ArrTy =
2532 getContext().getAsConstantArrayType(OTy)) {
2533 // A C99 array parameter declaration with the static keyword also
2534 // indicates dereferenceability, and if the size is constant we can
2535 // use the dereferenceable attribute (which requires the size in
2536 // bytes).
2537 if (ArrTy->getSizeModifier() == ArrayType::Static) {
2538 QualType ETy = ArrTy->getElementType();
2539 llvm::Align Alignment =
2540 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
2541 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2542 uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2543 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2544 ArrSize) {
2545 llvm::AttrBuilder Attrs;
2546 Attrs.addDereferenceableAttr(
2547 getContext().getTypeSizeInChars(ETy).getQuantity() *
2548 ArrSize);
2549 AI->addAttrs(Attrs);
2550 } else if (getContext().getTargetInfo().getNullPointerValue(
2551 ETy.getAddressSpace()) == 0 &&
2552 !CGM.getCodeGenOpts().NullPointerIsValid) {
2553 AI->addAttr(llvm::Attribute::NonNull);
2554 }
2555 }
2556 } else if (const auto *ArrTy =
2557 getContext().getAsVariableArrayType(OTy)) {
2558 // For C99 VLAs with the static keyword, we don't know the size so
2559 // we can't use the dereferenceable attribute, but in addrspace(0)
2560 // we know that it must be nonnull.
2561 if (ArrTy->getSizeModifier() == VariableArrayType::Static) {
2562 QualType ETy = ArrTy->getElementType();
2563 llvm::Align Alignment =
2564 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
2565 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2566 if (!getContext().getTargetAddressSpace(ETy) &&
2567 !CGM.getCodeGenOpts().NullPointerIsValid)
2568 AI->addAttr(llvm::Attribute::NonNull);
2569 }
2570 }
2571
2572 // Set `align` attribute if any.
2573 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2574 if (!AVAttr)
2575 if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2576 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2577 if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
2578 // If alignment-assumption sanitizer is enabled, we do *not* add
2579 // alignment attribute here, but emit normal alignment assumption,
2580 // so the UBSAN check could function.
2581 llvm::ConstantInt *AlignmentCI =
2582 cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment()));
2583 unsigned AlignmentInt =
2584 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
2585 if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
2586 AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
2587 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(
2588 llvm::Align(AlignmentInt)));
2589 }
2590 }
2591 }
2592
2593 // Set 'noalias' if an argument type has the `restrict` qualifier.
2594 if (Arg->getType().isRestrictQualified())
2595 AI->addAttr(llvm::Attribute::NoAlias);
2596 }
2597
2598 // Prepare the argument value. If we have the trivial case, handle it
2599 // with no muss and fuss.
2600 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2601 ArgI.getCoerceToType() == ConvertType(Ty) &&
2602 ArgI.getDirectOffset() == 0) {
2603 assert(NumIRArgs == 1);
2604
2605 // LLVM expects swifterror parameters to be used in very restricted
2606 // ways. Copy the value into a less-restricted temporary.
2607 llvm::Value *V = AI;
2608 if (FI.getExtParameterInfo(ArgNo).getABI()
2609 == ParameterABI::SwiftErrorResult) {
2610 QualType pointeeTy = Ty->getPointeeType();
2611 assert(pointeeTy->isPointerType());
2612 Address temp =
2613 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2614 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2615 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2616 Builder.CreateStore(incomingErrorValue, temp);
2617 V = temp.getPointer();
2618
2619 // Push a cleanup to copy the value back at the end of the function.
2620 // The convention does not guarantee that the value will be written
2621 // back if the function exits with an unwind exception.
2622 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2623 }
2624
2625 // Ensure the argument is the correct type.
2626 if (V->getType() != ArgI.getCoerceToType())
2627 V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2628
2629 if (isPromoted)
2630 V = emitArgumentDemotion(*this, Arg, V);
2631
2632 // Because of merging of function types from multiple decls it is
2633 // possible for the type of an argument to not match the corresponding
2634 // type in the function type. Since we are codegening the callee
2635 // in here, add a cast to the argument type.
2636 llvm::Type *LTy = ConvertType(Arg->getType());
2637 if (V->getType() != LTy)
2638 V = Builder.CreateBitCast(V, LTy);
2639
2640 ArgVals.push_back(ParamValue::forDirect(V));
2641 break;
2642 }
2643
2644 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2645 Arg->getName());
2646
2647 // Pointer to store into.
2648 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2649
2650 // Fast-isel and the optimizer generally like scalar values better than
2651 // FCAs, so we flatten them if this is safe to do for this argument.
2652 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2653 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2654 STy->getNumElements() > 1) {
2655 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2656 llvm::Type *DstTy = Ptr.getElementType();
2657 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2658
2659 Address AddrToStoreInto = Address::invalid();
2660 if (SrcSize <= DstSize) {
2661 AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2662 } else {
2663 AddrToStoreInto =
2664 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2665 }
2666
2667 assert(STy->getNumElements() == NumIRArgs);
2668 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2669 auto AI = Fn->getArg(FirstIRArg + i);
2670 AI->setName(Arg->getName() + ".coerce" + Twine(i));
2671 Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
2672 Builder.CreateStore(AI, EltPtr);
2673 }
2674
2675 if (SrcSize > DstSize) {
2676 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2677 }
2678
2679 } else {
2680 // Simple case, just do a coerced store of the argument into the alloca.
2681 assert(NumIRArgs == 1);
2682 auto AI = Fn->getArg(FirstIRArg);
2683 AI->setName(Arg->getName() + ".coerce");
2684 CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
2685 }
2686
2687 // Match to what EmitParmDecl is expecting for this type.
2688 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2689 llvm::Value *V =
2690 EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2691 if (isPromoted)
2692 V = emitArgumentDemotion(*this, Arg, V);
2693 ArgVals.push_back(ParamValue::forDirect(V));
2694 } else {
2695 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2696 }
2697 break;
2698 }
2699
2700 case ABIArgInfo::CoerceAndExpand: {
2701 // Reconstruct into a temporary.
2702 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2703 ArgVals.push_back(ParamValue::forIndirect(alloca));
2704
2705 auto coercionType = ArgI.getCoerceAndExpandType();
2706 alloca = Builder.CreateElementBitCast(alloca, coercionType);
2707
2708 unsigned argIndex = FirstIRArg;
2709 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2710 llvm::Type *eltType = coercionType->getElementType(i);
2711 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2712 continue;
2713
2714 auto eltAddr = Builder.CreateStructGEP(alloca, i);
2715 auto elt = Fn->getArg(argIndex++);
2716 Builder.CreateStore(elt, eltAddr);
2717 }
2718 assert(argIndex == FirstIRArg + NumIRArgs);
2719 break;
2720 }
2721
2722 case ABIArgInfo::Expand: {
2723 // If this structure was expanded into multiple arguments then
2724 // we need to create a temporary and reconstruct it from the
2725 // arguments.
2726 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2727 LValue LV = MakeAddrLValue(Alloca, Ty);
2728 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2729
2730 auto FnArgIter = Fn->arg_begin() + FirstIRArg;
2731 ExpandTypeFromArgs(Ty, LV, FnArgIter);
2732 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
2733 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2734 auto AI = Fn->getArg(FirstIRArg + i);
2735 AI->setName(Arg->getName() + "." + Twine(i));
2736 }
2737 break;
2738 }
2739
2740 case ABIArgInfo::Ignore:
2741 assert(NumIRArgs == 0);
2742 // Initialize the local variable appropriately.
2743 if (!hasScalarEvaluationKind(Ty)) {
2744 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2745 } else {
2746 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2747 ArgVals.push_back(ParamValue::forDirect(U));
2748 }
2749 break;
2750 }
2751 }
2752
2753 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2754 for (int I = Args.size() - 1; I >= 0; --I)
2755 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2756 } else {
2757 for (unsigned I = 0, E = Args.size(); I != E; ++I)
2758 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2759 }
2760}
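
A hedged illustration of the C99 array-parameter handling in the prologue above (assuming address space 0, sizeof(int) == 4, and null pointers not declared valid; the declarations are illustrative, not from this file):

    void sum4(int a[static 4]);        // constant bound: align(4) plus dereferenceable(16)
    void sumN(int n, int a[static n]); // VLA bound: size unknown, but align(4) plus nonnull

With a constant bound, the element size times the bound gives the `dereferenceable` byte count; with a VLA bound only `nonnull` and the element alignment can be justified.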
2761
2762static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2763 while (insn->use_empty()) {
2764 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2765 if (!bitcast) return;
2766
2767 // This is "safe" because we would have used a ConstantExpr otherwise.
2768 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2769 bitcast->eraseFromParent();
2770 }
2771}
2772
2773/// Try to emit a fused autorelease of a return result.
2774static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2775 llvm::Value *result) {
2776 // We must be positioned immediately after the cast.
2777 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2778 if (BB->empty()) return nullptr;
2779 if (&BB->back() != result) return nullptr;
2780
2781 llvm::Type *resultType = result->getType();
2782
2783 // result is in a BasicBlock and is therefore an Instruction.
2784 llvm::Instruction *generator = cast<llvm::Instruction>(result);
2785
2786 SmallVector<llvm::Instruction *, 4> InstsToKill;
2787
2788 // Look for:
2789 // %generator = bitcast %type1* %generator2 to %type2*
2790 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2791 // We would have emitted this as a constant if the operand weren't
2792 // an Instruction.
2793 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2794
2795 // Require the generator to be immediately followed by the cast.
2796 if (generator->getNextNode() != bitcast)
2797 return nullptr;
2798
2799 InstsToKill.push_back(bitcast);
2800 }
2801
2802 // Look for:
2803 // %generator = call i8* @objc_retain(i8* %originalResult)
2804 // or
2805 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2806 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2807 if (!call) return nullptr;
2808
2809 bool doRetainAutorelease;
2810
2811 if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2812 doRetainAutorelease = true;
2813 } else if (call->getCalledOperand() ==
2814 CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
2815 doRetainAutorelease = false;
2816
2817 // If we emitted an assembly marker for this call (and the
2818 // ARCEntrypoints field should have been set if so), go looking
2819 // for that call. If we can't find it, we can't do this
2820 // optimization. But it should always be the immediately previous
2821 // instruction, unless we needed bitcasts around the call.
2822 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2823 llvm::Instruction *prev = call->getPrevNode();
2824 assert(prev);
2825 if (isa<llvm::BitCastInst>(prev)) {
2826 prev = prev->getPrevNode();
2827 assert(prev);
2828 }
2829 assert(isa<llvm::CallInst>(prev));
2830 assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
2831 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2832 InstsToKill.push_back(prev);
2833 }
2834 } else {
2835 return nullptr;
2836 }
2837
2838 result = call->getArgOperand(0);
2839 InstsToKill.push_back(call);
2840
2841 // Keep killing bitcasts, for sanity. Note that we no longer care
2842 // about precise ordering as long as there's exactly one use.
2843 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2844 if (!bitcast->hasOneUse()) break;
2845 InstsToKill.push_back(bitcast);
2846 result = bitcast->getOperand(0);
2847 }
2848
2849 // Delete all the unnecessary instructions, from latest to earliest.
2850 for (auto *I : InstsToKill)
2851 I->eraseFromParent();
2852
2853 // Do the fused retain/autorelease if we were asked to.
2854 if (doRetainAutorelease)
2855 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2856
2857 // Cast back to the result type.
2858 return CGF.Builder.CreateBitCast(result, resultType);
2859}
2860
2861/// If this is a +1 of the value of an immutable 'self', remove it.
2862static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2863 llvm::Value *result) {
2864 // This is only applicable to a method with an immutable 'self'.
2865 const ObjCMethodDecl *method =
2866 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2867 if (!method) return nullptr;
2868 const VarDecl *self = method->getSelfDecl();
2869 if (!self->getType().isConstQualified()) return nullptr;
2870
2871 // Look for a retain call.
2872 llvm::CallInst *retainCall =
2873 dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2874 if (!retainCall || retainCall->getCalledOperand() !=
2875 CGF.CGM.getObjCEntrypoints().objc_retain)
2876 return nullptr;
2877
2878 // Look for an ordinary load of 'self'.
2879 llvm::Value *retainedValue = retainCall->getArgOperand(0);
2880 llvm::LoadInst *load =
2881 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2882 if (!load || load->isAtomic() || load->isVolatile() ||
2883 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2884 return nullptr;
2885
2886 // Okay! Burn it all down. This relies for correctness on the
2887 // assumption that the retain is emitted as part of the return and
2888 // that thereafter everything is used "linearly".
2889 llvm::Type *resultType = result->getType();
2890 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2891 assert(retainCall->use_empty());
2892 retainCall->eraseFromParent();
2893 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2894
2895 return CGF.Builder.CreateBitCast(load, resultType);
2896}
2897
2898/// Emit an ARC autorelease of the result of a function.
2899///
2900/// \return the value to actually return from the function
2901static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2902 llvm::Value *result) {
2903 // If we're returning 'self', kill the initial retain. This is a
2904 // heuristic attempt to "encourage correctness" in the really unfortunate
2905 // case where we have a return of self during a dealloc and we desperately
2906 // need to avoid the possible autorelease.
2907 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2908 return self;
2909
2910 // At -O0, try to emit a fused retain/autorelease.
2911 if (CGF.shouldUseFusedARCCalls())
2912 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2913 return fused;
2914
2915 return CGF.EmitARCAutoreleaseReturnValue(result);
2916}
2917
2918/// Heuristically search for a dominating store to the return-value slot.
2919static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2920 // Check if a User is a store whose pointer operand is the ReturnValue.
2921 // We are looking for stores to the ReturnValue, not for stores of the
2922 // ReturnValue to some other location.
2923 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2924 auto *SI = dyn_cast<llvm::StoreInst>(U);
2925 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2926 return nullptr;
2927 // These aren't actually possible for non-coerced returns, and we
2928 // only care about non-coerced returns on this code path.
2929 assert(!SI->isAtomic() && !SI->isVolatile());
2930 return SI;
2931 };
2932 // If there are multiple uses of the return-value slot, just check
2933 // for something immediately preceding the IP. Sometimes this can
2934 // happen with how we generate implicit-returns; it can also happen
2935 // with noreturn cleanups.
2936 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2937 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2938 if (IP->empty()) return nullptr;
2939 llvm::Instruction *I = &IP->back();
2940
2941 // Skip lifetime markers
2942 for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2943 IE = IP->rend();
2944 II != IE; ++II) {
2945 if (llvm::IntrinsicInst *Intrinsic =
2946 dyn_cast<llvm::IntrinsicInst>(&*II)) {
2947 if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2948 const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2949 ++II;
2950 if (II == IE)
2951 break;
2952 if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2953 continue;
2954 }
2955 }
2956 I = &*II;
2957 break;
2958 }
2959
2960 return GetStoreIfValid(I);
2961 }
2962
2963 llvm::StoreInst *store =
2964 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2965 if (!store) return nullptr;
2966
2967 // Now do a quick-and-dirty dominance check: just walk up the
2968 // single-predecessors chain from the current insertion point.
2969 llvm::BasicBlock *StoreBB = store->getParent();
2970 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2971 while (IP != StoreBB) {
2972 if (!(IP = IP->getSinglePredecessor()))
2973 return nullptr;
2974 }
2975
2976 // Okay, the store's basic block dominates the insertion point; we
2977 // can do our thing.
2978 return store;
2979}
2980
2981// Helper functions for EmitCMSEClearRecord
2982
2983// Set the bits corresponding to a field having width `BitWidth` and located at
2984// offset `BitOffset` (from the least significant bit) within a storage unit of
2985// `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
2986 // Use little-endian layout, i.e. `Bits[0]` is the least-significant byte.
2987static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset,
2988 int BitWidth, int CharWidth) {
2989 assert(CharWidth <= 64);
2990 assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
2991
2992 int Pos = 0;
2993 if (BitOffset >= CharWidth) {
2994 Pos += BitOffset / CharWidth;
2995 BitOffset = BitOffset % CharWidth;
2996 }
2997
2998 const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
2999 if (BitOffset + BitWidth >= CharWidth) {
3000 Bits[Pos++] |= (Used << BitOffset) & Used;
3001 BitWidth -= CharWidth - BitOffset;
3002 BitOffset = 0;
3003 }
3004
3005 while (BitWidth >= CharWidth) {
3006 Bits[Pos++] = Used;
3007 BitWidth -= CharWidth;
3008 }
3009
3010 if (BitWidth > 0)
3011 Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
3012}
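
A quick worked example for the routine above, assuming CharWidth == 8: for BitOffset == 3 and BitWidth == 10, the first step ORs 0xF8 into Bits[0] (the top five bits of byte 0), the whole-byte loop does not execute, and the tail ORs 0x1F into Bits[1] (the low five bits of byte 1), marking all ten bits of the field.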
3013
3014// Set the bits corresponding to a field having width `BitWidth` and located at
3015// offset `BitOffset` (from the least significant bit) within a storage unit of
3016// `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of
3017// `Bits` corresponds to one target byte. Use target endian layout.
3018static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset,
3019 int StorageSize, int BitOffset, int BitWidth,
3020 int CharWidth, bool BigEndian) {
3021
3022 SmallVector<uint64_t, 8> TmpBits(StorageSize);
3023 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
3024
3025 if (BigEndian)
3026 std::reverse(TmpBits.begin(), TmpBits.end());
3027
3028 for (uint64_t V : TmpBits)
3029 Bits[StorageOffset++] |= V;
3030}
3031
3032static void setUsedBits(CodeGenModule &, QualType, int,
3033 SmallVectorImpl<uint64_t> &);
3034
3035// Set the bits in `Bits`, which correspond to the value representations of
3036// the actual members of the record type `RTy`. Note that this function does
3037 // not handle base classes, virtual tables, etc., since they cannot happen in
3038 // CMSE function arguments or return values. The bit mask corresponds to the target
3039// memory layout, i.e. it's endian dependent.
3040static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
3041 SmallVectorImpl<uint64_t> &Bits) {
3042 ASTContext &Context = CGM.getContext();
3043 int CharWidth = Context.getCharWidth();
3044 const RecordDecl *RD = RTy->getDecl()->getDefinition();
4. Called C++ object pointer is null
3045 const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
3046 const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);
3047
3048 int Idx = 0;
3049 for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
3050 const FieldDecl *F = *I;
3051
3052 if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) ||
3053 F->getType()->isIncompleteArrayType())
3054 continue;
3055
3056 if (F->isBitField()) {
3057 const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F);
3058 setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(),
3059 BFI.StorageSize / CharWidth, BFI.Offset,
3060 BFI.Size, CharWidth,
3061 CGM.getDataLayout().isBigEndian());
3062 continue;
3063 }
3064
3065 setUsedBits(CGM, F->getType(),
3066 Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits);
3067 }
3068}
3069
3070// Set the bits in `Bits`, which correspond to the value representations of
3071// the elements of an array type `ATy`.
3072static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
3073 int Offset, SmallVectorImpl<uint64_t> &Bits) {
3074 const ASTContext &Context = CGM.getContext();
3075
3076 QualType ETy = Context.getBaseElementType(ATy);
3077 int Size = Context.getTypeSizeInChars(ETy).getQuantity();
3078 SmallVector<uint64_t, 4> TmpBits(Size);
3079 setUsedBits(CGM, ETy, 0, TmpBits);
3080
3081 for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
3082 auto Src = TmpBits.begin();
3083 auto Dst = Bits.begin() + Offset + I * Size;
3084 for (int J = 0; J < Size; ++J)
3085 *Dst++ |= *Src++;
3086 }
3087}
3088
3089// Set the bits in `Bits`, which correspond to the value representations of
3090// the type `QTy`.
3091static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
3092 SmallVectorImpl<uint64_t> &Bits) {
3093 if (const auto *RTy = QTy->getAs<RecordType>())
3094 return setUsedBits(CGM, RTy, Offset, Bits);
3095
3096 ASTContext &Context = CGM.getContext();
3097 if (const auto *ATy = Context.getAsConstantArrayType(QTy))
3098 return setUsedBits(CGM, ATy, Offset, Bits);
3099
3100 int Size = Context.getTypeSizeInChars(QTy).getQuantity();
3101 if (Size <= 0)
3102 return;
3103
3104 std::fill_n(Bits.begin() + Offset, Size,
3105 (uint64_t(1) << Context.getCharWidth()) - 1);
3106}
3107
3108static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
3109 int Pos, int Size, int CharWidth,
3110 bool BigEndian) {
3111 assert(Size > 0);
3112 uint64_t Mask = 0;
3113 if (BigEndian) {
3114 for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
3115 ++P)
3116 Mask = (Mask << CharWidth) | *P;
3117 } else {
3118 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
3119 do
3120 Mask = (Mask << CharWidth) | *--P;
3121 while (P != End);
3122 }
3123 return Mask;
3124}
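
Continuing that worked example with Bits == {0xF8, 0x1F}, Pos == 0, Size == 2, and CharWidth == 8: buildMultiCharMask returns 0x1FF8 on a little-endian target and 0xF81F on a big-endian one, i.e. the same ten field bits assembled in the target's byte order.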
3125
3126 // Emit code to clear the bits in a record that aren't part of any
3127 // user-declared member, when the record is a function return value.
3128llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3129 llvm::IntegerType *ITy,
3130 QualType QTy) {
3131 assert(Src->getType() == ITy);
3132 assert(ITy->getScalarSizeInBits() <= 64);
3133
3134 const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3135 int Size = DataLayout.getTypeStoreSize(ITy);
3136 SmallVector<uint64_t, 4> Bits(Size);
3137 setUsedBits(CGM, QTy->getAs<RecordType>(), 0, Bits);
3138
3139 int CharWidth = CGM.getContext().getCharWidth();
3140 uint64_t Mask =
3141 buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());
3142
3143 return Builder.CreateAnd(Src, Mask, "cmse.clear");
3144}
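
A hedged usage sketch for the routine above (Armv8-M CMSE input compiled with -mcmse; the struct and function are illustrative, not from this file, and assume a record small enough to be returned in a core register):

    struct Flags { unsigned char a; unsigned b : 4; };  // leaves padding bits
    __attribute__((cmse_nonsecure_entry)) struct Flags get_flags(void);

When such a record is returned in a register, the epilogue ANDs the value with the computed Mask (the "cmse.clear" instruction), so padding bits never leak across the security-state boundary.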
3145
3146 // Emit code to clear the bits in a record that aren't part of any
3147 // user-declared member, when the record is a function argument.
3148llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3149 llvm::ArrayType *ATy,
3150 QualType QTy) {
3151 const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3152 int Size = DataLayout.getTypeStoreSize(ATy);
3153 SmallVector<uint64_t, 16> Bits(Size);
3154 setUsedBits(CGM, QTy->getAs<RecordType>(), 0, Bits);
1. Assuming the object is not a 'RecordType'
2. Passing null pointer value via 2nd parameter 'RTy'
3. Calling 'setUsedBits'
3155
3156 // Clear each element of the LLVM array.
3157 int CharWidth = CGM.getContext().getCharWidth();
3158 int CharsPerElt =
3159 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3160 int MaskIndex = 0;
3161 llvm::Value *R = llvm::UndefValue::get(ATy);
3162 for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3163 uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
3164 DataLayout.isBigEndian());
3165 MaskIndex += CharsPerElt;
3166 llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
3167 llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
3168 R = Builder.CreateInsertValue(R, T1, I);
3169 }
3170
3171 return R;
3172}
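
The numbered analyzer steps above pin down the defect: when QTy is not a record type, QTy->getAs<RecordType>() yields null, which is then dereferenced as RTy->getDecl() at line 3044. A minimal defensive sketch (an illustration of one possible guard, not necessarily the upstream fix):

    // Only recurse into the record overload when the cast succeeds.
    if (const auto *RTy = QTy->getAs<RecordType>())
      setUsedBits(CGM, RTy, 0, Bits);

Alternatively, asserting that QTy is a record type at both call sites would at least make the precondition explicit.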
3173
3174void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
3175 bool EmitRetDbgLoc,
3176 SourceLocation EndLoc) {
3177 if (FI.isNoReturn()) {
3178 // Noreturn functions don't return.
3179 EmitUnreachable(EndLoc);
3180 return;
3181 }
3182
3183 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
3184 // Naked functions don't have epilogues.
3185 Builder.CreateUnreachable();
3186 return;
3187 }
3188
3189 // Functions with no result always return void.
3190 if (!ReturnValue.isValid()) {
3191 Builder.CreateRetVoid();
3192 return;
3193 }
3194
3195 llvm::DebugLoc RetDbgLoc;
3196 llvm::Value *RV = nullptr;
3197 QualType RetTy = FI.getReturnType();
3198 const ABIArgInfo &RetAI = FI.getReturnInfo();
3199
3200 switch (RetAI.getKind()) {
3201 case ABIArgInfo::InAlloca:
3202 // Aggregates get evaluated directly into the destination. Sometimes we
3203 // need to return the sret value in a register, though.
3204 assert(hasAggregateEvaluationKind(RetTy));
3205 if (RetAI.getInAllocaSRet()) {
3206 llvm::Function::arg_iterator EI = CurFn->arg_end();
3207 --EI;
3208 llvm::Value *ArgStruct = &*EI;
3209 llvm::Value *SRet = Builder.CreateStructGEP(
3210 nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
3211 RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
3212 }
3213 break;
3214
3215 case ABIArgInfo::Indirect: {
3216 auto AI = CurFn->arg_begin();
3217 if (RetAI.isSRetAfterThis())
3218 ++AI;
3219 switch (getEvaluationKind(RetTy)) {
3220 case TEK_Complex: {
3221 ComplexPairTy RT =
3222 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
3223 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
3224 /*isInit*/ true);
3225 break;
3226 }
3227 case TEK_Aggregate:
3228 // Do nothing; aggregates get evaluated directly into the destination.
3229 break;
3230 case TEK_Scalar:
3231 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
3232 MakeNaturalAlignAddrLValue(&*AI, RetTy),
3233 /*isInit*/ true);
3234 break;
3235 }
3236 break;
3237 }
3238
3239 case ABIArgInfo::Extend:
3240 case ABIArgInfo::Direct:
3241 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
3242 RetAI.getDirectOffset() == 0) {
3243 // The internal return value temp always will have pointer-to-return-type
3244 // type, just do a load.
3245
3246 // If there is a dominating store to ReturnValue, we can elide
3247 // the load, zap the store, and usually zap the alloca.
3248 if (llvm::StoreInst *SI =
3249 findDominatingStoreToReturnValue(*this)) {
3250 // Reuse the debug location from the store unless there is
3251 // cleanup code to be emitted between the store and return
3252 // instruction.
3253 if (EmitRetDbgLoc && !AutoreleaseResult)
3254 RetDbgLoc = SI->getDebugLoc();
3255 // Get the stored value and nuke the now-dead store.
3256 RV = SI->getValueOperand();
3257 SI->eraseFromParent();
3258
3259 // Otherwise, we have to do a simple load.
3260 } else {
3261 RV = Builder.CreateLoad(ReturnValue);
3262 }
3263 } else {
3264 // If the value is offset in memory, apply the offset now.
3265 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
3266
3267 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
3268 }
3269
3270 // In ARC, end functions that return a retainable type with a call
3271 // to objc_autoreleaseReturnValue.
3272 if (AutoreleaseResult) {
3273#ifndef NDEBUG
3274 // Type::isObjCRetainabletype has to be called on a QualType that hasn't
3275 // been stripped of the typedefs, so we cannot use RetTy here. Get the
3276 // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
3277 // CurCodeDecl or BlockInfo.
3278 QualType RT;
3279
3280 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
3281 RT = FD->getReturnType();
3282 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
3283 RT = MD->getReturnType();
3284 else if (isa<BlockDecl>(CurCodeDecl))
3285 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
3286 else
3287 llvm_unreachable("Unexpected function/method type");
3288
3289 assert(getLangOpts().ObjCAutoRefCount &&
3290 !FI.isReturnsRetained() &&
3291 RT->isObjCRetainableType());
3292#endif
3293 RV = emitAutoreleaseOfResult(*this, RV);
3294 }
3295
3296 break;
3297
3298 case ABIArgInfo::Ignore:
3299 break;
3300
3301 case ABIArgInfo::CoerceAndExpand: {
3302 auto coercionType = RetAI.getCoerceAndExpandType();
3303
3304 // Load all of the coerced elements out into results.
3305 llvm::SmallVector<llvm::Value*, 4> results;
3306 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
3307 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3308 auto coercedEltType = coercionType->getElementType(i);
3309 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
3310 continue;
3311
3312 auto eltAddr = Builder.CreateStructGEP(addr, i);
3313 auto elt = Builder.CreateLoad(eltAddr);
3314 results.push_back(elt);
3315 }
3316
3317 // If we have one result, it's the single direct result type.
3318 if (results.size() == 1) {
3319 RV = results[0];
3320
3321 // Otherwise, we need to make a first-class aggregate.
3322 } else {
3323 // Construct a return type that lacks padding elements.
3324 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
3325
3326 RV = llvm::UndefValue::get(returnType);
3327 for (unsigned i = 0, e = results.size(); i != e; ++i) {
3328 RV = Builder.CreateInsertValue(RV, results[i], i);
3329 }
3330 }
3331 break;
3332 }
3333 case ABIArgInfo::Expand:
3334 case ABIArgInfo::IndirectAliased:
3335 llvm_unreachable("Invalid ABI kind for return argument");
3336 }
3337
3338 llvm::Instruction *Ret;
3339 if (RV) {
3340 if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
3341 // For certain return types, clear padding bits, as they may reveal
3342 // sensitive information.
3343 // Small struct/union types are passed as integers.
3344 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
3345 if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType()))
3346 RV = EmitCMSEClearRecord(RV, ITy, RetTy);
3347 }
3348 EmitReturnValueCheck(RV);
3349 Ret = Builder.CreateRet(RV);
3350 } else {
3351 Ret = Builder.CreateRetVoid();
3352 }
3353
3354 if (RetDbgLoc)
3355 Ret->setDebugLoc(std::move(RetDbgLoc));
3356}
3357
3358void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
3359 // A current decl may not be available when emitting vtable thunks.
3360 if (!CurCodeDecl)
3361 return;
3362
3363 // If the return block isn't reachable, neither is this check, so don't emit
3364 // it.
3365 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty())
3366 return;
3367
3368 ReturnsNonNullAttr *RetNNAttr = nullptr;
3369 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
3370 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
3371
3372 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
3373 return;
3374
3375 // Prefer the returns_nonnull attribute if it's present.
3376 SourceLocation AttrLoc;
3377 SanitizerMask CheckKind;
3378 SanitizerHandler Handler;
3379 if (RetNNAttr) {
3380 assert(!requiresReturnValueNullabilityCheck() &&
3381 "Cannot check nullability and the nonnull attribute");
3382 AttrLoc = RetNNAttr->getLocation();
3383 CheckKind = SanitizerKind::ReturnsNonnullAttribute;
3384 Handler = SanitizerHandler::NonnullReturn;
3385 } else {
3386 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3387 if (auto *TSI = DD->getTypeSourceInfo())
3388 if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>())
3389 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3390 CheckKind = SanitizerKind::NullabilityReturn;
3391 Handler = SanitizerHandler::NullabilityReturn;
3392 }
3393
3394 SanitizerScope SanScope(this);
3395
3396 // Make sure the "return" source location is valid. If we're checking a
3397 // nullability annotation, make sure the preconditions for the check are met.
3398 llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3399 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3400 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3401 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3402 if (requiresReturnValueNullabilityCheck())
3403 CanNullCheck =
3404 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3405 Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3406 EmitBlock(Check);
3407
3408 // Now do the null check.
3409 llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3410 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3411 llvm::Value *DynamicData[] = {SLocPtr};
3412 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3413
3414 EmitBlock(NoCheck);
3415
3416#ifndef NDEBUG
3417 // The return location should not be used after the check has been emitted.
3418 ReturnLocation = Address::invalid();
3419#endif
3420}
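
A hedged illustration of what this check guards (C input under -fsanitize=returns-nonnull-attribute; the declaration is illustrative, not from this file):

    __attribute__((returns_nonnull)) char *must_return(char *s);

Returning NULL from must_return takes the "nullcheck" branch emitted above and reports through the NonnullReturn sanitizer handler; the nullability-annotation path works the same way via NullabilityReturn.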
3421
3422static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3423 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3424 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3425}
3426
3427static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3428 QualType Ty) {
3429 // FIXME: Generate IR in one pass, rather than going back and fixing up these
3430 // placeholders.
3431 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3432 llvm::Type *IRPtrTy = IRTy->getPointerTo();
3433 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3434
3435 // FIXME: When we generate this IR in one pass, we shouldn't need
3436 // this win32-specific alignment hack.
3437 CharUnits Align = CharUnits::fromQuantity(4);
3438 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3439
3440 return AggValueSlot::forAddr(Address(Placeholder, Align),
3441 Ty.getQualifiers(),
3442 AggValueSlot::IsNotDestructed,
3443 AggValueSlot::DoesNotNeedGCBarriers,
3444 AggValueSlot::IsNotAliased,
3445 AggValueSlot::DoesNotOverlap);
3446}
3447
3448void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3449 const VarDecl *param,
3450 SourceLocation loc) {
3451 // StartFunction converted the ABI-lowered parameter(s) into a
3452 // local alloca. We need to turn that into an r-value suitable
3453 // for EmitCall.
3454 Address local = GetAddrOfLocalVar(param);
3455
3456 QualType type = param->getType();
3457
3458 if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3459 CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3460 }
3461
3462 // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3463 // but the argument needs to be the original pointer.
3464 if (type->isReferenceType()) {
3465 args.add(RValue::get(Builder.CreateLoad(local)), type);
3466
3467 // In ARC, move out of consumed arguments so that the release cleanup
3468 // entered by StartFunction doesn't cause an over-release. This isn't
3469 // optimal -O0 code generation, but it should get cleaned up when
3470 // optimization is enabled. This also assumes that delegate calls are
3471 // performed exactly once for a set of arguments, but that should be safe.
3472 } else if (getLangOpts().ObjCAutoRefCount &&
3473 param->hasAttr<NSConsumedAttr>() &&
3474 type->isObjCRetainableType()) {
3475 llvm::Value *ptr = Builder.CreateLoad(local);
3476 auto null =
3477 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3478 Builder.CreateStore(null, local);
3479 args.add(RValue::get(ptr), type);
3480
3481 // For the most part, we just need to load the alloca, except that
3482 // aggregate r-values are actually pointers to temporaries.
3483 } else {
3484 args.add(convertTempToRValue(local, type, loc), type);
3485 }
3486
3487 // Deactivate the cleanup for the callee-destructed param that was pushed.
3488 if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3489 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3490 param->needsDestruction(getContext())) {
3491 EHScopeStack::stable_iterator cleanup =
3492 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3493 assert(cleanup.isValid() &&
3494 "cleanup for callee-destructed param not recorded");
3495 // This unreachable is a temporary marker which will be removed later.
3496 llvm::Instruction *isActive = Builder.CreateUnreachable();
3497 args.addArgCleanupDeactivation(cleanup, isActive);
3498 }
3499}
3500
3501static bool isProvablyNull(llvm::Value *addr) {
3502 return isa<llvm::ConstantPointerNull>(addr);
3503}
3504
3505/// Emit the actual writing-back of a writeback.
3506static void emitWriteback(CodeGenFunction &CGF,
3507 const CallArgList::Writeback &writeback) {
3508 const LValue &srcLV = writeback.Source;
3509 Address srcAddr = srcLV.getAddress(CGF);
3510 assert(!isProvablyNull(srcAddr.getPointer()) &&
3511 "shouldn't have writeback for provably null argument");
3512
3513 llvm::BasicBlock *contBB = nullptr;
3514
3515 // If the argument wasn't provably non-null, we need to null check
3516 // before doing the store.
3517 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3518 CGF.CGM.getDataLayout());
3519 if (!provablyNonNull) {
3520 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3521 contBB = CGF.createBasicBlock("icr.done");
3522
3523 llvm::Value *isNull =
3524 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3525 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3526 CGF.EmitBlock(writebackBB);
3527 }
3528
3529 // Load the value to writeback.
3530 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3531
3532 // Cast it back, in case we're writing an id to a Foo* or something.
3533 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3534 "icr.writeback-cast");
3535
3536 // Perform the writeback.
3537
3538 // If we have a "to use" value, it's something we need to emit a use
3539 // of. This has to be carefully threaded in: if it's done after the
3540 // release it's potentially undefined behavior (and the optimizer
3541 // will ignore it), and if it happens before the retain then the
3542 // optimizer could move the release there.
3543 if (writeback.ToUse) {
3544 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3545
3546 // Retain the new value. No need to block-copy here: the block's
3547 // being passed up the stack.
3548 value = CGF.EmitARCRetainNonBlock(value);
3549
3550 // Emit the intrinsic use here.
3551 CGF.EmitARCIntrinsicUse(writeback.ToUse);
3552
3553 // Load the old value (primitively).
3554 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3555
3556 // Put the new value in place (primitively).
3557 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3558
3559 // Release the old value.
3560 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3561
3562 // Otherwise, we can just do a normal lvalue store.
3563 } else {
3564 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3565 }
3566
3567 // Jump to the continuation block.
3568 if (!provablyNonNull)
3569 CGF.EmitBlock(contBB);
3570}
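
For orientation, the canonical source pattern behind a writeback (a hedged Objective-C ARC example, not from this file) is an out-parameter call such as passing &err, where err is a __strong NSError *, to a parameter of type NSError * __autoreleasing *: the call actually receives the address of a temporary, and the routine above copies the temporary's final value back into err afterwards, guarded by a null check when the destination isn't provably non-null.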
3571
3572static void emitWritebacks(CodeGenFunction &CGF,
3573 const CallArgList &args) {
3574 for (const auto &I : args.writebacks())
3575 emitWriteback(CGF, I);
3576}
3577
3578static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3579 const CallArgList &CallArgs) {
3580 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3581 CallArgs.getCleanupsToDeactivate();
3582 // Iterate in reverse to increase the likelihood of popping the cleanup.
3583 for (const auto &I : llvm::reverse(Cleanups)) {
3584 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3585 I.IsActiveIP->eraseFromParent();
3586 }
3587}
3588
3589static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3590 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3591 if (uop->getOpcode() == UO_AddrOf)
3592 return uop->getSubExpr();
3593 return nullptr;
3594}
3595
3596/// Emit an argument that's being passed call-by-writeback. That is,
3597/// we are passing the address of an __autoreleased temporary; it
3598/// might be copy-initialized with the current value of the given
3599/// address, but it will definitely be copied out of after the call.
3600static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3601 const ObjCIndirectCopyRestoreExpr *CRE) {
3602 LValue srcLV;
3603
3604 // Make an optimistic effort to emit the address as an l-value.
3605 // This can fail if the argument expression is more complicated.
3606 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3607 srcLV = CGF.EmitLValue(lvExpr);
3608
3609 // Otherwise, just emit it as a scalar.
3610 } else {
3611 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3612
3613 QualType srcAddrType =
3614 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3615 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3616 }
3617 Address srcAddr = srcLV.getAddress(CGF);
3618
3619 // The dest and src types don't necessarily match in LLVM terms
3620 // because of the crazy ObjC compatibility rules.
3621
3622 llvm::PointerType *destType =
3623 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3624
3625 // If the address is a constant null, just pass the appropriate null.
3626 if (isProvablyNull(srcAddr.getPointer())) {
3627 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3628 CRE->getType());
3629 return;
3630 }
3631
3632 // Create the temporary.
3633 Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3634 CGF.getPointerAlign(),
3635 "icr.temp");
3636 // Loading an l-value can introduce a cleanup if the l-value is __weak,
3637 // and that cleanup will be conditional if we can't prove that the l-value
3638 // isn't null, so we need to register a dominating point so that the cleanups
3639 // system will make valid IR.
3640 CodeGenFunction::ConditionalEvaluation condEval(CGF);
3641
3642 // Zero-initialize it if we're not doing a copy-initialization.
3643 bool shouldCopy = CRE->shouldCopy();
3644 if (!shouldCopy) {
3645 llvm::Value *null =
3646 llvm::ConstantPointerNull::get(
3647 cast<llvm::PointerType>(destType->getElementType()));
3648 CGF.Builder.CreateStore(null, temp);
3649 }
3650
3651 llvm::BasicBlock *contBB = nullptr;
3652 llvm::BasicBlock *originBB = nullptr;
3653
3654 // If the address is *not* known to be non-null, we need to switch.
3655 llvm::Value *finalArgument;
3656
3657 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3658 CGF.CGM.getDataLayout());
3659 if (provablyNonNull) {
3660 finalArgument = temp.getPointer();
3661 } else {
3662 llvm::Value *isNull =
3663 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3664
3665 finalArgument = CGF.Builder.CreateSelect(isNull,
3666 llvm::ConstantPointerNull::get(destType),
3667 temp.getPointer(), "icr.argument");
3668
3669 // If we need to copy, then the load has to be conditional, which
3670 // means we need control flow.
3671 if (shouldCopy) {
3672 originBB = CGF.Builder.GetInsertBlock();
3673 contBB = CGF.createBasicBlock("icr.cont");
3674 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3675 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3676 CGF.EmitBlock(copyBB);
3677 condEval.begin(CGF);
3678 }
3679 }
3680
3681 llvm::Value *valueToUse = nullptr;
3682
3683 // Perform a copy if necessary.
3684 if (shouldCopy) {
3685 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3686    assert(srcRV.isScalar());
3687
3688 llvm::Value *src = srcRV.getScalarVal();
3689 src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3690 "icr.cast");
3691
3692 // Use an ordinary store, not a store-to-lvalue.
3693 CGF.Builder.CreateStore(src, temp);
3694
3695 // If optimization is enabled, and the value was held in a
3696 // __strong variable, we need to tell the optimizer that this
3697 // value has to stay alive until we're doing the store back.
3698 // This is because the temporary is effectively unretained,
3699 // and so otherwise we can violate the high-level semantics.
3700 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3701 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3702 valueToUse = src;
3703 }
3704 }
3705
3706 // Finish the control flow if we needed it.
3707 if (shouldCopy && !provablyNonNull) {
3708 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3709 CGF.EmitBlock(contBB);
3710
3711 // Make a phi for the value to intrinsically use.
3712 if (valueToUse) {
3713 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3714 "icr.to-use");
3715 phiToUse->addIncoming(valueToUse, copyBB);
3716 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3717 originBB);
3718 valueToUse = phiToUse;
3719 }
3720
3721 condEval.end(CGF);
3722 }
3723
3724 args.addWriteback(srcLV, temp, valueToUse);
3725 args.add(RValue::get(finalArgument), CRE->getType());
3726}
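// Roughly, when the source address is not provably non-null and a copy is
// needed, the IR built above has this shape (names taken from the code; a
// sketch, not verbatim compiler output, with pointer types loosened):
//
//   %icr.isnull   = icmp eq i8** %src, null
//   %icr.argument = select i1 %icr.isnull, i8** null, i8** %icr.temp
//   br i1 %icr.isnull, label %icr.cont, label %icr.copy
//  icr.copy:                              ; conditional copy-in
//   ...store the loaded *%src into %icr.temp...
//   br label %icr.cont
//  icr.cont:
//   ...the call receives %icr.argument; the writeback runs afterwards...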
3727
3728void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3729  assert(!StackBase);
3730
3731 // Save the stack.
3732 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3733 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3734}
3735
3736void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3737 if (StackBase) {
3738 // Restore the stack after the call.
3739 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3740 CGF.Builder.CreateCall(F, StackBase);
3741 }
3742}
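// Together, these two helpers bracket the inalloca argument packet; the
// emitted IR is roughly (a sketch, packet type elided):
//
//   %inalloca.save = call i8* @llvm.stacksave()
//   %argmem = alloca inalloca <{ ... }>        ; argument memory
//   call void @f(<{ ... }>* inalloca %argmem)
//   call void @llvm.stackrestore(i8* %inalloca.save)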
3743
3744void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3745 SourceLocation ArgLoc,
3746 AbstractCallee AC,
3747 unsigned ParmNum) {
3748 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3749 SanOpts.has(SanitizerKind::NullabilityArg)))
3750 return;
3751
3752 // The param decl may be missing in a variadic function.
3753 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3754 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3755
3756 // Prefer the nonnull attribute if it's present.
3757 const NonNullAttr *NNAttr = nullptr;
3758 if (SanOpts.has(SanitizerKind::NonnullAttribute))
3759 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3760
3761 bool CanCheckNullability = false;
3762 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3763 auto Nullability = PVD->getType()->getNullability(getContext());
3764 CanCheckNullability = Nullability &&
3765 *Nullability == NullabilityKind::NonNull &&
3766 PVD->getTypeSourceInfo();
3767 }
3768
3769 if (!NNAttr && !CanCheckNullability)
3770 return;
3771
3772 SourceLocation AttrLoc;
3773 SanitizerMask CheckKind;
3774 SanitizerHandler Handler;
3775 if (NNAttr) {
3776 AttrLoc = NNAttr->getLocation();
3777 CheckKind = SanitizerKind::NonnullAttribute;
3778 Handler = SanitizerHandler::NonnullArg;
3779 } else {
3780 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3781 CheckKind = SanitizerKind::NullabilityArg;
3782 Handler = SanitizerHandler::NullabilityArg;
3783 }
3784
3785 SanitizerScope SanScope(this);
3786  assert(RV.isScalar());
3787 llvm::Value *V = RV.getScalarVal();
3788 llvm::Value *Cond =
3789 Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3790 llvm::Constant *StaticData[] = {
3791 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3792 llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3793 };
3794 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3795}
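// A hypothetical call this would instrument, assuming
// -fsanitize=nonnull-attribute:
//
//   __attribute__((nonnull(1))) void use(int *p);
//   use(q);   // emits: icmp ne q, null; on failure, the NonnullArg handler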
3796
3797void CodeGenFunction::EmitCallArgs(
3798 CallArgList &Args, ArrayRef<QualType> ArgTypes,
3799 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3800 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3801  assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3802
3803 // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3804 // because arguments are destroyed left to right in the callee. As a special
3805 // case, there are certain language constructs that require left-to-right
3806 // evaluation, and in those cases we consider the evaluation order requirement
3807 // to trump the "destruction order is reverse construction order" guarantee.
3808 bool LeftToRight =
3809 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3810 ? Order == EvaluationOrder::ForceLeftToRight
3811 : Order != EvaluationOrder::ForceRightToLeft;
3812
3813 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3814 RValue EmittedArg) {
3815 if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3816 return;
3817 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3818 if (PS == nullptr)
3819 return;
3820
3821 const auto &Context = getContext();
3822 auto SizeTy = Context.getSizeType();
3823 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3824    assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3825 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3826 EmittedArg.getScalarVal(),
3827 PS->isDynamic());
3828 Args.add(RValue::get(V), SizeTy);
3829 // If we're emitting args in reverse, be sure to do so with
3830 // pass_object_size, as well.
3831 if (!LeftToRight)
3832 std::swap(Args.back(), *(&Args.back() - 1));
3833 };
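  // What the lambda above appends, for a hypothetical declaration:
  //
  //   void fill(char *buf __attribute__((pass_object_size(0))));
  //   fill(p);   // emitted roughly as fill(p, __builtin_object_size(p, 0))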
3834
3835 // Insert a stack save if we're going to need any inalloca args.
3836 bool HasInAllocaArgs = false;
3837 if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3838 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3839 I != E && !HasInAllocaArgs; ++I)
3840 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3841 if (HasInAllocaArgs) {
3842      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3843 Args.allocateArgumentMemory(*this);
3844 }
3845 }
3846
3847 // Evaluate each argument in the appropriate order.
3848 size_t CallArgsStart = Args.size();
3849 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3850 unsigned Idx = LeftToRight ? I : E - I - 1;
3851 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3852 unsigned InitialArgSize = Args.size();
3853 // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3854 // the argument and parameter match or the objc method is parameterized.
3855    assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3856            getContext().hasSameUnqualifiedType((*Arg)->getType(),
3857                                                ArgTypes[Idx]) ||
3858            (isa<ObjCMethodDecl>(AC.getDecl()) &&
3859             isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3860           "Argument and parameter types don't match");
3861 EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3862 // In particular, we depend on it being the last arg in Args, and the
3863 // objectsize bits depend on there only being one arg if !LeftToRight.
3864    assert(InitialArgSize + 1 == Args.size() &&
3865           "The code below depends on only adding one arg per EmitCallArg");
3866 (void)InitialArgSize;
3867    // Since pointer arguments are never emitted as LValue, it is safe to emit
3868    // the non-null argument check for r-values only.
3869 if (!Args.back().hasLValue()) {
3870 RValue RVArg = Args.back().getKnownRValue();
3871 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3872 ParamsToSkip + Idx);
3873 // @llvm.objectsize should never have side-effects and shouldn't need
3874 // destruction/cleanups, so we can safely "emit" it after its arg,
3875      // regardless of right-to-leftness.
3876 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3877 }
3878 }
3879
3880 if (!LeftToRight) {
3881 // Un-reverse the arguments we just evaluated so they match up with the LLVM
3882 // IR function.
3883 std::reverse(Args.begin() + CallArgsStart, Args.end());
3884 }
3885}
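// Concretely: on a target whose C++ ABI destroys arguments left to right in
// the callee (the Microsoft ABI), a hypothetical call g(make_a(), make_b())
// evaluates make_b() before make_a() here, unless a language rule forces
// left-to-right order.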
3886
3887namespace {
3888
3889struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3890 DestroyUnpassedArg(Address Addr, QualType Ty)
3891 : Addr(Addr), Ty(Ty) {}
3892
3893 Address Addr;
3894 QualType Ty;
3895
3896 void Emit(CodeGenFunction &CGF, Flags flags) override {
3897 QualType::DestructionKind DtorKind = Ty.isDestructedType();
3898 if (DtorKind == QualType::DK_cxx_destructor) {
3899 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3900      assert(!Dtor->isTrivial());
3901 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3902 /*Delegating=*/false, Addr, Ty);
3903 } else {
3904 CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3905 }
3906 }
3907};
3908
3909struct DisableDebugLocationUpdates {
3910 CodeGenFunction &CGF;
3911 bool disabledDebugInfo;
3912 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3913 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3914 CGF.disableDebugInfo();
3915 }
3916 ~DisableDebugLocationUpdates() {
3917 if (disabledDebugInfo)
3918 CGF.enableDebugInfo();
3919 }
3920};
3921
3922} // end anonymous namespace
3923
3924RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3925 if (!HasLV)
3926 return RV;
3927 LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3928 CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3929 LV.isVolatile());
3930 IsUsed = true;
3931 return RValue::getAggregate(Copy.getAddress(CGF));
3932}
3933
3934void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3935 LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3936 if (!HasLV && RV.isScalar())
3937 CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
3938 else if (!HasLV && RV.isComplex())
3939 CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3940 else {
3941 auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
3942 LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3943 // We assume that call args are never copied into subobjects.
3944 CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3945 HasLV ? LV.isVolatileQualified()
3946 : RV.isVolatileQualified());
3947 }
3948 IsUsed = true;
3949}
3950
3951void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3952 QualType type) {
3953 DisableDebugLocationUpdates Dis(*this, E);
3954 if (const ObjCIndirectCopyRestoreExpr *CRE
3955 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3956    assert(getLangOpts().ObjCAutoRefCount);
3957 return emitWritebackArg(*this, args, CRE);
3958 }
3959
3960  assert(type->isReferenceType() == E->isGLValue() &&
3961         "reference binding to unmaterialized r-value!");
3962
3963 if (E->isGLValue()) {
3964    assert(E->getObjectKind() == OK_Ordinary);
3965 return args.add(EmitReferenceBindingToExpr(E), type);
3966 }
3967
3968 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3969
3970 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3971 // However, we still have to push an EH-only cleanup in case we unwind before
3972 // we make it to the call.
3973 if (HasAggregateEvalKind &&
3974 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
3975 // If we're using inalloca, use the argument memory. Otherwise, use a
3976 // temporary.
3977 AggValueSlot Slot;
3978 if (args.isUsingInAlloca())
3979 Slot = createPlaceholderSlot(*this, type);
3980 else
3981 Slot = CreateAggTemp(type, "agg.tmp");
3982
3983 bool DestroyedInCallee = true, NeedsEHCleanup = true;
3984 if (const auto *RD = type->getAsCXXRecordDecl())
3985 DestroyedInCallee = RD->hasNonTrivialDestructor();
3986 else
3987 NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3988
3989 if (DestroyedInCallee)
3990 Slot.setExternallyDestructed();
3991
3992 EmitAggExpr(E, Slot);
3993 RValue RV = Slot.asRValue();
3994 args.add(RV, type);
3995
3996 if (DestroyedInCallee && NeedsEHCleanup) {
3997 // Create a no-op GEP between the placeholder and the cleanup so we can
3998 // RAUW it successfully. It also serves as a marker of the first
3999 // instruction where the cleanup is active.
4000 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
4001 type);
4002 // This unreachable is a temporary marker which will be removed later.
4003 llvm::Instruction *IsActive = Builder.CreateUnreachable();
4004 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
4005 }
4006 return;
4007 }
4008
4009 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
4010 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
4011 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
4012    assert(L.isSimple());
4013 args.addUncopiedAggregate(L, type);
4014 return;
4015 }
4016
4017 args.add(EmitAnyExprToTemp(E), type);
4018}
4019
4020QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
4021 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
4022 // implicitly widens null pointer constants that are arguments to varargs
4023 // functions to pointer-sized ints.
4024 if (!getTarget().getTriple().isOSWindows())
4025 return Arg->getType();
4026
4027 if (Arg->getType()->isIntegerType() &&
4028 getContext().getTypeSize(Arg->getType()) <
4029 getContext().getTargetInfo().getPointerWidth(0) &&
4030 Arg->isNullPointerConstant(getContext(),
4031 Expr::NPC_ValueDependentIsNotNull)) {
4032 return getContext().getIntPtrType();
4033 }
4034
4035 return Arg->getType();
4036}
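// Hypothetical example on Win64, where NULL is plain 0:
//
//   printf("%p", NULL);   // the int 0 is widened to the pointer-sized
//                         // intptr type before being passed through varargs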
4037
4038// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4039// optimizer it can aggressively ignore unwind edges.
4040void
4041CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
4042 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
4043 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
4044 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
4045 CGM.getNoObjCARCExceptionsMetadata());
4046}
4047
4048/// Emits a call to the given no-arguments nounwind runtime function.
4049llvm::CallInst *
4050CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4051 const llvm::Twine &name) {
4052 return EmitNounwindRuntimeCall(callee, None, name);
4053}
4054
4055/// Emits a call to the given nounwind runtime function.
4056llvm::CallInst *
4057CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4058 ArrayRef<llvm::Value *> args,
4059 const llvm::Twine &name) {
4060 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
4061 call->setDoesNotThrow();
4062 return call;
4063}
4064
4065/// Emits a simple call (never an invoke) to the given no-arguments
4066/// runtime function.
4067llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4068 const llvm::Twine &name) {
4069 return EmitRuntimeCall(callee, None, name);
4070}
4071
4072// Calls which may throw must have operand bundles indicating which funclet
4073// they are nested within.
4074SmallVector<llvm::OperandBundleDef, 1>
4075CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
4076 SmallVector<llvm::OperandBundleDef, 1> BundleList;
4077 // There is no need for a funclet operand bundle if we aren't inside a
4078 // funclet.
4079 if (!CurrentFuncletPad)
4080 return BundleList;
4081
4082 // Skip intrinsics which cannot throw.
4083 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
4084 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
4085 return BundleList;
4086
4087 BundleList.emplace_back("funclet", CurrentFuncletPad);
4088 return BundleList;
4089}
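// Roughly, a call emitted inside a funclet carries the bundle like so
// (a sketch):
//
//   %pad = cleanuppad within none []
//   call void @g() [ "funclet"(token %pad) ]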
4090
4091/// Emits a simple call (never an invoke) to the given runtime function.
4092llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4093 ArrayRef<llvm::Value *> args,
4094 const llvm::Twine &name) {
4095 llvm::CallInst *call = Builder.CreateCall(
4096 callee, args, getBundlesForFunclet(callee.getCallee()), name);
4097 call->setCallingConv(getRuntimeCC());
4098 return call;
4099}
4100
4101/// Emits a call or invoke to the given noreturn runtime function.
4102void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
4103 llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
4104 SmallVector<llvm::OperandBundleDef, 1> BundleList =
4105 getBundlesForFunclet(callee.getCallee());
4106
4107 if (getInvokeDest()) {
4108 llvm::InvokeInst *invoke =
4109 Builder.CreateInvoke(callee,
4110 getUnreachableBlock(),
4111 getInvokeDest(),
4112 args,
4113 BundleList);
4114 invoke->setDoesNotReturn();
4115 invoke->setCallingConv(getRuntimeCC());
4116 } else {
4117 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
4118 call->setDoesNotReturn();
4119 call->setCallingConv(getRuntimeCC());
4120 Builder.CreateUnreachable();
4121 }
4122}
4123
4124/// Emits a call or invoke instruction to the given nullary runtime function.
4125llvm::CallBase *
4126CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4127 const Twine &name) {
4128 return EmitRuntimeCallOrInvoke(callee, None, name);
4129}
4130
4131/// Emits a call or invoke instruction to the given runtime function.
4132llvm::CallBase *
4133CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4134 ArrayRef<llvm::Value *> args,
4135 const Twine &name) {
4136 llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
4137 call->setCallingConv(getRuntimeCC());
4138 return call;
4139}
4140
4141/// Emits a call or invoke instruction to the given function, depending
4142/// on the current state of the EH stack.
4143llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
4144 ArrayRef<llvm::Value *> Args,
4145 const Twine &Name) {
4146 llvm::BasicBlock *InvokeDest = getInvokeDest();
4147 SmallVector<llvm::OperandBundleDef, 1> BundleList =
4148 getBundlesForFunclet(Callee.getCallee());
4149
4150 llvm::CallBase *Inst;
4151 if (!InvokeDest)
4152 Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
4153 else {
4154 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
4155 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
4156 Name);
4157 EmitBlock(ContBB);
4158 }
4159
4160 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4161 // optimizer it can aggressively ignore unwind edges.
4162 if (CGM.getLangOpts().ObjCAutoRefCount)
4163 AddObjCARCExceptionMetadata(Inst);
4164
4165 return Inst;
4166}
4167
4168void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
4169 llvm::Value *New) {
4170 DeferredReplacements.push_back(std::make_pair(Old, New));
4171}
4172
4173namespace {
4174
4175/// Specify given \p NewAlign as the alignment of return value attribute. If
4176/// such attribute already exists, re-set it to the maximal one of two options.
4177 LLVM_NODISCARD llvm::AttributeList
4178maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
4179 const llvm::AttributeList &Attrs,
4180 llvm::Align NewAlign) {
4181 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
4182 if (CurAlign >= NewAlign)
4183 return Attrs;
4184 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
4185 return Attrs
4186 .removeAttribute(Ctx, llvm::AttributeList::ReturnIndex,
4187 llvm::Attribute::AttrKind::Alignment)
4188 .addAttribute(Ctx, llvm::AttributeList::ReturnIndex, AlignAttr);
4189}
4190
4191template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
4192protected:
4193 CodeGenFunction &CGF;
4194
4195 /// We do nothing if this is, or becomes, nullptr.
4196 const AlignedAttrTy *AA = nullptr;
4197
4198 llvm::Value *Alignment = nullptr; // May or may not be a constant.
4199 llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.
4200
4201 AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4202 : CGF(CGF_) {
4203 if (!FuncDecl)
4204 return;
4205 AA = FuncDecl->getAttr<AlignedAttrTy>();
4206 }
4207
4208public:
4209 /// If we can, materialize the alignment as an attribute on return value.
4210  LLVM_NODISCARD llvm::AttributeList
4211 TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
4212 if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
4213 return Attrs;
4214 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
4215 if (!AlignmentCI)
4216 return Attrs;
4217 // We may legitimately have non-power-of-2 alignment here.
4218 // If so, this is UB land, emit it via `@llvm.assume` instead.
4219 if (!AlignmentCI->getValue().isPowerOf2())
4220 return Attrs;
4221 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
4222 CGF.getLLVMContext(), Attrs,
4223 llvm::Align(
4224 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
4225 AA = nullptr; // We're done. Disallow doing anything else.
4226 return NewAttrs;
4227 }
4228
4229 /// Emit alignment assumption.
4230 /// This is a general fallback that we take if either there is an offset,
4231 /// or the alignment is variable or we are sanitizing for alignment.
4232 void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
4233 if (!AA)
4234 return;
4235 CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
4236 AA->getLocation(), Alignment, OffsetCI);
4237 AA = nullptr; // We're done. Disallow doing anything else.
4238 }
4239};
4240
4241/// Helper data structure to emit `AssumeAlignedAttr`.
4242class AssumeAlignedAttrEmitter final
4243 : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
4244public:
4245 AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4246 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4247 if (!AA)
4248 return;
4249 // It is guaranteed that the alignment/offset are constants.
4250 Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
4251 if (Expr *Offset = AA->getOffset()) {
4252 OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
4253 if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
4254 OffsetCI = nullptr;
4255 }
4256 }
4257};
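// Hypothetical use this emitter handles:
//
//   __attribute__((assume_aligned(64))) void *get_buffer(void);
//
// With a constant power-of-2 alignment, no offset, and no
// -fsanitize=alignment, this becomes an `align 64` return attribute on the
// call site; otherwise it falls back to an llvm.assume-style assumption.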
4258
4259/// Helper data structure to emit `AllocAlignAttr`.
4260class AllocAlignAttrEmitter final
4261 : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
4262public:
4263 AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
4264 const CallArgList &CallArgs)
4265 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4266 if (!AA)
4267 return;
4268 // Alignment may or may not be a constant, and that is okay.
4269 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
4270 .getRValue(CGF)
4271 .getScalarVal();
4272 }
4273};
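// Hypothetical use:
//
//   __attribute__((alloc_align(2))) void *my_alloc(size_t n, size_t align);
//
// The alignment is the value of the second argument, so it is materialized
// as a call-site attribute only when that argument folds to a power-of-2
// constant; otherwise it is emitted as an assumption.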
4274
4275} // namespace
4276
4277RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
4278 const CGCallee &Callee,
4279 ReturnValueSlot ReturnValue,
4280 const CallArgList &CallArgs,
4281 llvm::CallBase **callOrInvoke,
4282 SourceLocation Loc) {
4283 // FIXME: We no longer need the types from CallArgs; lift up and simplify.
4284
4285  assert(Callee.isOrdinary() || Callee.isVirtual());
4286
4287 // Handle struct-return functions by passing a pointer to the
4288 // location that we would like to return into.
4289 QualType RetTy = CallInfo.getReturnType();
4290 const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
4291
4292 llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
4293
4294 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4295 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
4296 // We can only guarantee that a function is called from the correct
4297 // context/function based on the appropriate target attributes,
4298 // so only check in the case where we have both always_inline and target
4299 // since otherwise we could be making a conditional call after a check for
4300 // the proper cpu features (and it won't cause code generation issues due to
4301 // function based code generation).
4302 if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
4303 TargetDecl->hasAttr<TargetAttr>())
4304 checkTargetFeatures(Loc, FD);
4305
4306 // Some architectures (such as x86-64) have the ABI changed based on
4307 // attribute-target/features. Give them a chance to diagnose.
4308 CGM.getTargetCodeGenInfo().checkFunctionCallABI(
4309 CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
4310 }
4311
4312#ifndef NDEBUG
4313 if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
4314 // For an inalloca varargs function, we don't expect CallInfo to match the
4315  // function pointer's type, because the inalloca struct will have extra
4316 // fields in it for the varargs parameters. Code later in this function
4317 // bitcasts the function pointer to the type derived from CallInfo.
4318 //
4319 // In other cases, we assert that the types match up (until pointers stop
4320 // having pointee types).
4321 llvm::Type *TypeFromVal;
4322 if (Callee.isVirtual())
4323 TypeFromVal = Callee.getVirtualFunctionType();
4324 else
4325 TypeFromVal =
4326 Callee.getFunctionPointer()->getType()->getPointerElementType();
4327    assert(IRFuncTy == TypeFromVal);
4328 }
4329#endif
4330
4331 // 1. Set up the arguments.
4332
4333 // If we're using inalloca, insert the allocation after the stack save.
4334 // FIXME: Do this earlier rather than hacking it in here!
4335 Address ArgMemory = Address::invalid();
4336 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
4337 const llvm::DataLayout &DL = CGM.getDataLayout();
4338 llvm::Instruction *IP = CallArgs.getStackBase();
4339 llvm::AllocaInst *AI;
4340 if (IP) {
4341 IP = IP->getNextNode();
4342 AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
4343 "argmem", IP);
4344 } else {
4345 AI = CreateTempAlloca(ArgStruct, "argmem");
4346 }
4347 auto Align = CallInfo.getArgStructAlignment();
4348 AI->setAlignment(Align.getAsAlign());
4349 AI->setUsedWithInAlloca(true);
4350    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
4351 ArgMemory = Address(AI, Align);
4352 }
4353
4354 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
4355 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
4356
4357 // If the call returns a temporary with struct return, create a temporary
4358 // alloca to hold the result, unless one is given to us.
4359 Address SRetPtr = Address::invalid();
4360 Address SRetAlloca = Address::invalid();
4361 llvm::Value *UnusedReturnSizePtr = nullptr;
4362 if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
4363 if (!ReturnValue.isNull()) {
4364 SRetPtr = ReturnValue.getValue();
4365 } else {
4366 SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
4367 if (HaveInsertPoint() && ReturnValue.isUnused()) {
4368 uint64_t size =
4369 CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
4370 UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
4371 }
4372 }
4373 if (IRFunctionArgs.hasSRetArg()) {
4374 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
4375 } else if (RetAI.isInAlloca()) {
4376 Address Addr =
4377 Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
4378 Builder.CreateStore(SRetPtr.getPointer(), Addr);
4379 }
4380 }
4381
4382 Address swiftErrorTemp = Address::invalid();
4383 Address swiftErrorArg = Address::invalid();
4384
4385 // When passing arguments using temporary allocas, we need to add the
4386 // appropriate lifetime markers. This vector keeps track of all the lifetime
4387 // markers that need to be ended right after the call.
4388 SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;
4389
4390 // Translate all of the arguments as necessary to match the IR lowering.
4391  assert(CallInfo.arg_size() == CallArgs.size() &&
4392         "Mismatch between function signature & arguments.");
4393 unsigned ArgNo = 0;
4394 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
4395 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
4396 I != E; ++I, ++info_it, ++ArgNo) {
4397 const ABIArgInfo &ArgInfo = info_it->info;
4398
4399 // Insert a padding argument to ensure proper alignment.
4400 if (IRFunctionArgs.hasPaddingArg(ArgNo))
4401 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
4402 llvm::UndefValue::get(ArgInfo.getPaddingType());
4403
4404 unsigned FirstIRArg, NumIRArgs;
4405 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
4406
4407 switch (ArgInfo.getKind()) {
4408 case ABIArgInfo::InAlloca: {
4409      assert(NumIRArgs == 0);
4410      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
4411 if (I->isAggregate()) {
4412 Address Addr = I->hasLValue()
4413 ? I->getKnownLValue().getAddress(*this)
4414 : I->getKnownRValue().getAggregateAddress();
4415 llvm::Instruction *Placeholder =
4416 cast<llvm::Instruction>(Addr.getPointer());
4417
4418 if (!ArgInfo.getInAllocaIndirect()) {
4419 // Replace the placeholder with the appropriate argument slot GEP.
4420 CGBuilderTy::InsertPoint IP = Builder.saveIP();
4421 Builder.SetInsertPoint(Placeholder);
4422 Addr = Builder.CreateStructGEP(ArgMemory,
4423 ArgInfo.getInAllocaFieldIndex());
4424 Builder.restoreIP(IP);
4425 } else {
4426 // For indirect things such as overaligned structs, replace the
4427 // placeholder with a regular aggregate temporary alloca. Store the
4428 // address of this alloca into the struct.
4429 Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
4430 Address ArgSlot = Builder.CreateStructGEP(
4431 ArgMemory, ArgInfo.getInAllocaFieldIndex());
4432 Builder.CreateStore(Addr.getPointer(), ArgSlot);
4433 }
4434 deferPlaceholderReplacement(Placeholder, Addr.getPointer());
4435 } else if (ArgInfo.getInAllocaIndirect()) {
4436 // Make a temporary alloca and store the address of it into the argument
4437 // struct.
4438 Address Addr = CreateMemTempWithoutCast(
4439 I->Ty, getContext().getTypeAlignInChars(I->Ty),
4440 "indirect-arg-temp");
4441 I->copyInto(*this, Addr);
4442 Address ArgSlot =
4443 Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4444 Builder.CreateStore(Addr.getPointer(), ArgSlot);
4445 } else {
4446 // Store the RValue into the argument struct.
4447 Address Addr =
4448 Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4449 unsigned AS = Addr.getType()->getPointerAddressSpace();
4450 llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
4451 // There are some cases where a trivial bitcast is not avoidable. The
4452        // definition of a type later in a translation unit may change its type
4453 // from {}* to (%struct.foo*)*.
4454 if (Addr.getType() != MemType)
4455 Addr = Builder.CreateBitCast(Addr, MemType);
4456 I->copyInto(*this, Addr);
4457 }
4458 break;
4459 }
4460
4461 case ABIArgInfo::Indirect:
4462 case ABIArgInfo::IndirectAliased: {
4463      assert(NumIRArgs == 1);
4464 if (!I->isAggregate()) {
4465 // Make a temporary alloca to pass the argument.
4466 Address Addr = CreateMemTempWithoutCast(
4467 I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
4468 IRCallArgs[FirstIRArg] = Addr.getPointer();
4469
4470 I->copyInto(*this, Addr);
4471 } else {
4472 // We want to avoid creating an unnecessary temporary+copy here;
4473 // however, we need one in three cases:
4474 // 1. If the argument is not byval, and we are required to copy the
4475 // source. (This case doesn't occur on any common architecture.)
4476 // 2. If the argument is byval, RV is not sufficiently aligned, and
4477 // we cannot force it to be sufficiently aligned.
4478 // 3. If the argument is byval, but RV is not located in default
4479 // or alloca address space.
4480 Address Addr = I->hasLValue()
4481 ? I->getKnownLValue().getAddress(*this)
4482 : I->getKnownRValue().getAggregateAddress();
4483 llvm::Value *V = Addr.getPointer();
4484 CharUnits Align = ArgInfo.getIndirectAlign();
4485 const llvm::DataLayout *TD = &CGM.getDataLayout();
4486
4487        assert((FirstIRArg >= IRFuncTy->getNumParams() ||
4488                IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
4489                    TD->getAllocaAddrSpace()) &&
4490               "indirect argument must be in alloca address space");
4491
4492 bool NeedCopy = false;
4493
4494 if (Addr.getAlignment() < Align &&
4495 llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
4496 Align.getAsAlign()) {
4497 NeedCopy = true;
4498 } else if (I->hasLValue()) {
4499 auto LV = I->getKnownLValue();
4500 auto AS = LV.getAddressSpace();
4501
4502 if (!ArgInfo.getIndirectByVal() ||
4503 (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
4504 NeedCopy = true;
4505 }
4506 if (!getLangOpts().OpenCL) {
4507 if ((ArgInfo.getIndirectByVal() &&
4508 (AS != LangAS::Default &&
4509 AS != CGM.getASTAllocaAddressSpace()))) {
4510 NeedCopy = true;
4511 }
4512 }
4513 // For OpenCL even if RV is located in default or alloca address space
4514 // we don't want to perform address space cast for it.
4515 else if ((ArgInfo.getIndirectByVal() &&
4516 Addr.getType()->getAddressSpace() != IRFuncTy->
4517 getParamType(FirstIRArg)->getPointerAddressSpace())) {
4518 NeedCopy = true;
4519 }
4520 }
4521
4522 if (NeedCopy) {
4523 // Create an aligned temporary, and copy to it.
4524 Address AI = CreateMemTempWithoutCast(
4525 I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
4526 IRCallArgs[FirstIRArg] = AI.getPointer();
4527
4528 // Emit lifetime markers for the temporary alloca.
4529 uint64_t ByvalTempElementSize =
4530 CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
4531 llvm::Value *LifetimeSize =
4532 EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
4533
4534 // Add cleanup code to emit the end lifetime marker after the call.
4535 if (LifetimeSize) // In case we disabled lifetime markers.
4536 CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
4537
4538 // Generate the copy.
4539 I->copyInto(*this, AI);
4540 } else {
4541 // Skip the extra memcpy call.
4542 auto *T = V->getType()->getPointerElementType()->getPointerTo(
4543 CGM.getDataLayout().getAllocaAddrSpace());
4544 IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
4545 *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
4546 true);
4547 }
4548 }
4549 break;
4550 }
4551
4552 case ABIArgInfo::Ignore:
4553      assert(NumIRArgs == 0);
4554 break;
4555
4556 case ABIArgInfo::Extend:
4557 case ABIArgInfo::Direct: {
4558 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
4559 ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
4560 ArgInfo.getDirectOffset() == 0) {
4561        assert(NumIRArgs == 1);
4562 llvm::Value *V;
4563 if (!I->isAggregate())
4564 V = I->getKnownRValue().getScalarVal();
4565 else
4566 V = Builder.CreateLoad(
4567 I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4568 : I->getKnownRValue().getAggregateAddress());
4569
4570 // Implement swifterror by copying into a new swifterror argument.
4571 // We'll write back in the normal path out of the call.
4572 if (CallInfo.getExtParameterInfo(ArgNo).getABI()
4573 == ParameterABI::SwiftErrorResult) {
4574          assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4575
4576 QualType pointeeTy = I->Ty->getPointeeType();
4577 swiftErrorArg =
4578 Address(V, getContext().getTypeAlignInChars(pointeeTy));
4579
4580 swiftErrorTemp =
4581 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4582 V = swiftErrorTemp.getPointer();
4583 cast<llvm::AllocaInst>(V)->setSwiftError(true);
4584
4585 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4586 Builder.CreateStore(errorValue, swiftErrorTemp);
4587 }
4588
4589 // We might have to widen integers, but we should never truncate.
4590 if (ArgInfo.getCoerceToType() != V->getType() &&
4591 V->getType()->isIntegerTy())
4592 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4593
4594 // If the argument doesn't match, perform a bitcast to coerce it. This
4595 // can happen due to trivial type mismatches.
4596 if (FirstIRArg < IRFuncTy->getNumParams() &&
4597 V->getType() != IRFuncTy->getParamType(FirstIRArg))
4598 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4599
4600 IRCallArgs[FirstIRArg] = V;
4601 break;
4602 }
4603
4604 // FIXME: Avoid the conversion through memory if possible.
4605 Address Src = Address::invalid();
4606 if (!I->isAggregate()) {
4607 Src = CreateMemTemp(I->Ty, "coerce");
4608 I->copyInto(*this, Src);
4609 } else {
4610 Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4611 : I->getKnownRValue().getAggregateAddress();
4612 }
4613
4614 // If the value is offset in memory, apply the offset now.
4615 Src = emitAddressAtOffset(*this, Src, ArgInfo);
4616
4617 // Fast-isel and the optimizer generally like scalar values better than
4618 // FCAs, so we flatten them if this is safe to do for this argument.
4619 llvm::StructType *STy =
4620 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4621 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4622 llvm::Type *SrcTy = Src.getElementType();
4623 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4624 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4625
4626 // If the source type is smaller than the destination type of the
4627 // coerce-to logic, copy the source value into a temp alloca the size
4628 // of the destination type to allow loading all of it. The bits past
4629 // the source value are left undef.
4630 if (SrcSize < DstSize) {
4631 Address TempAlloca
4632 = CreateTempAlloca(STy, Src.getAlignment(),
4633 Src.getName() + ".coerce");
4634 Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4635 Src = TempAlloca;
4636 } else {
4637 Src = Builder.CreateBitCast(Src,
4638 STy->getPointerTo(Src.getAddressSpace()));
4639 }
4640
4641        assert(NumIRArgs == STy->getNumElements());
4642 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4643 Address EltPtr = Builder.CreateStructGEP(Src, i);
4644 llvm::Value *LI = Builder.CreateLoad(EltPtr);
4645 IRCallArgs[FirstIRArg + i] = LI;
4646 }
4647 } else {
4648 // In the simple case, just pass the coerced loaded value.
4649        assert(NumIRArgs == 1);
4650 llvm::Value *Load =
4651 CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4652
4653 if (CallInfo.isCmseNSCall()) {
4654 // For certain parameter types, clear padding bits, as they may reveal
4655 // sensitive information.
4656 // Small struct/union types are passed as integer arrays.
4657 auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
4658 if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
4659 Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
4660 }
4661 IRCallArgs[FirstIRArg] = Load;
4662 }
4663
4664 break;
4665 }
4666
4667 case ABIArgInfo::CoerceAndExpand: {
4668 auto coercionType = ArgInfo.getCoerceAndExpandType();
4669 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4670
4671 llvm::Value *tempSize = nullptr;
4672 Address addr = Address::invalid();
4673 Address AllocaAddr = Address::invalid();
4674 if (I->isAggregate()) {
4675 addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4676 : I->getKnownRValue().getAggregateAddress();
4677
4678 } else {
4679 RValue RV = I->getKnownRValue();
4680        assert(RV.isScalar()); // complex should always just be direct
4681
4682 llvm::Type *scalarType = RV.getScalarVal()->getType();
4683 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4684 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4685
4686 // Materialize to a temporary.
4687 addr = CreateTempAlloca(
4688 RV.getScalarVal()->getType(),
4689 CharUnits::fromQuantity(std::max(
4690 (unsigned)layout->getAlignment().value(), scalarAlign)),
4691 "tmp",
4692 /*ArraySize=*/nullptr, &AllocaAddr);
4693 tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4694
4695 Builder.CreateStore(RV.getScalarVal(), addr);
4696 }
4697
4698 addr = Builder.CreateElementBitCast(addr, coercionType);
4699
4700 unsigned IRArgPos = FirstIRArg;
4701 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4702 llvm::Type *eltType = coercionType->getElementType(i);
4703 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4704 Address eltAddr = Builder.CreateStructGEP(addr, i);
4705 llvm::Value *elt = Builder.CreateLoad(eltAddr);
4706 IRCallArgs[IRArgPos++] = elt;
4707 }
4708 assert(IRArgPos == FirstIRArg + NumIRArgs);
4709
4710 if (tempSize) {
4711 EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4712 }
4713
4714 break;
4715 }
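
// Illustrative sketch, not part of CGCall.cpp: the shape of the
// CoerceAndExpand walk above, restated over hypothetical stand-in types.
// Every non-padding element of the coercion struct becomes exactly one IR
// argument; padding elements are skipped without consuming a slot.
#include <vector>

struct Elt { int Value; bool IsPadding; };   // stand-in for a struct element

std::vector<int> expandToArgs(const std::vector<Elt> &CoercionElts) {
  std::vector<int> IRArgs;
  for (const Elt &E : CoercionElts) {
    if (E.IsPadding)
      continue;                   // mirrors isPaddingForCoerceAndExpand
    IRArgs.push_back(E.Value);    // mirrors CreateStructGEP + CreateLoad
  }
  return IRArgs;
}
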
4716
4717 case ABIArgInfo::Expand: {
4718 unsigned IRArgPos = FirstIRArg;
4719 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4720 assert(IRArgPos == FirstIRArg + NumIRArgs);
4721 break;
4722 }
4723 }
4724 }
4725
4726 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4727 llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4728
4729 // If we're using inalloca, set up that argument.
4730 if (ArgMemory.isValid()) {
4731 llvm::Value *Arg = ArgMemory.getPointer();
4732 if (CallInfo.isVariadic()) {
4733 // When passing non-POD arguments by value to variadic functions, we will
4734 // end up with a variadic prototype and an inalloca call site. In such
4735 // cases, we can't do any parameter mismatch checks. Give up and bitcast
4736 // the callee.
4737 unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4738 CalleePtr =
4739 Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
4740 } else {
4741 llvm::Type *LastParamTy =
4742 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4743 if (Arg->getType() != LastParamTy) {
4744#ifndef NDEBUG
4745 // Assert that these structs have equivalent element types.
4746 llvm::StructType *FullTy = CallInfo.getArgStruct();
4747 llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4748 cast<llvm::PointerType>(LastParamTy)->getElementType());
4749 assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4750 for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4751 DE = DeclaredTy->element_end(),
4752 FI = FullTy->element_begin();
4753 DI != DE; ++DI, ++FI)
4754 assert(*DI == *FI);
4755#endif
4756 Arg = Builder.CreateBitCast(Arg, LastParamTy);
4757 }
4758 }
4759 assert(IRFunctionArgs.hasInallocaArg());
4760 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4761 }
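
// Illustrative sketch, not part of CGCall.cpp: a standalone restatement of
// the NDEBUG check above using hypothetical descriptors. The argument struct
// the caller built and the struct type the callee declares may be distinct
// types, but the pointer bitcast between them is only sound when they agree
// element for element.
#include <cassert>
#include <cstddef>
#include <vector>

using StructElems = std::vector<int>;   // stand-in for struct element types

void checkInallocaCompatible(const StructElems &FullTy,
                             const StructElems &DeclaredTy) {
  assert(DeclaredTy.size() == FullTy.size());
  for (std::size_t I = 0; I != DeclaredTy.size(); ++I)
    assert(DeclaredTy[I] == FullTy[I]); // element types must match exactly
}
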
4762
4763 // 2. Prepare the function pointer.
4764
4765 // If the callee is a bitcast of a non-variadic function to have a
4766 // variadic function pointer type, check to see if we can remove the
4767 // bitcast. This comes up with unprototyped functions.
4768 //
4769 // This makes the IR nicer, but more importantly it ensures that we
4770 // can inline the function at -O0 if it is marked always_inline.
4771 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
4772 llvm::Value *Ptr) -> llvm::Function * {
4773 if (!CalleeFT->isVarArg())
4774 return nullptr;
4775
4776 // Get the underlying value if it's a bitcast.
4777 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
4778 if (CE->getOpcode() == llvm::Instruction::BitCast)
4779 Ptr = CE->getOperand(0);
4780 }
4781
4782 llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
4783 if (!OrigFn)
4784 return nullptr;
4785
4786 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4787
4788 // If the original type is variadic, or if any of the component types
4789 // disagree, we cannot remove the cast.
4790 if (OrigFT->isVarArg() ||
4791 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4792 OrigFT->getReturnType() != CalleeFT->getReturnType())
4793 return nullptr;
4794
4795 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4796 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4797 return nullptr;
4798
4799 return OrigFn;
4800 };
4801
4802 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
4803 CalleePtr = OrigFn;
4804 IRFuncTy = OrigFn->getFunctionType();
4805 }
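
// Illustrative sketch, not part of CGCall.cpp: the same removability test as
// simplifyVariadicCallee, restated over hypothetical plain-data function-type
// descriptors. A cast of a non-variadic function to a variadic pointer type
// can be stripped only when the return type and all fixed parameters match.
#include <vector>

struct FnTy { int Ret; std::vector<int> Params; bool IsVarArg; };

bool castIsRemovable(const FnTy &Orig, const FnTy &Callee) {
  if (!Callee.IsVarArg || Orig.IsVarArg)
    return false;                       // only variadic <- non-variadic casts
  return Orig.Ret == Callee.Ret && Orig.Params == Callee.Params;
}
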
4806
4807 // 3. Perform the actual call.
4808
4809 // Deactivate any cleanups that we're supposed to do immediately before
4810 // the call.
4811 if (!CallArgs.getCleanupsToDeactivate().empty())
4812 deactivateArgCleanupsBeforeCall(*this, CallArgs);
4813
4814 // Assert that the arguments we computed match up. The IR verifier
4815 // will catch this, but this is a common enough source of problems
4816 // during IRGen changes that it's way better for debugging to catch
4817 // it ourselves here.
4818#ifndef NDEBUG
4819 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4820 for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4821 // The inalloca argument can have a different type.
4822 if (IRFunctionArgs.hasInallocaArg() &&
4823 i == IRFunctionArgs.getInallocaArgNo())
4824 continue;
4825 if (i < IRFuncTy->getNumParams())
4826 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4827 }
4828#endif
4829
4830 // Update the largest vector width if any arguments have vector types.
4831 for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4832 if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
4833 LargestVectorWidth =
4834 std::max((uint64_t)LargestVectorWidth,
4835 VT->getPrimitiveSizeInBits().getKnownMinSize());
4836 }
4837
4838 // Compute the calling convention and attributes.
4839 unsigned CallingConv;
4840 llvm::AttributeList Attrs;
4841 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4842 Callee.getAbstractInfo(), Attrs, CallingConv,
4843 /*AttrOnCallSite=*/true);
4844
4845 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
4846 if (FD->usesFPIntrin())
4847 // All calls within a strictfp function are marked strictfp
4848 Attrs =
4849 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4850 llvm::Attribute::StrictFP);
4851
4852 // Add the call-site nomerge attribute if present.
4853 if (InNoMergeAttributedStmt)
4854 Attrs =
4855 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4856 llvm::Attribute::NoMerge);
4857
4858 // Apply some call-site-specific attributes.
4859 // TODO: work this into building the attribute set.
4860
4861 // Apply always_inline to all calls within flatten functions.
4862 // FIXME: should this really take priority over __try, below?
4863 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4864 !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
4865 Attrs =
4866 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4867 llvm::Attribute::AlwaysInline);
4868 }
4869
4870 // Disable inlining inside SEH __try blocks.
4871 if (isSEHTryScope()) {
4872 Attrs =
4873 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4874 llvm::Attribute::NoInline);
4875 }
4876
4877 // Decide whether to use a call or an invoke.
4878 bool CannotThrow;
4879 if (currentFunctionUsesSEHTry()) {
4880 // SEH cares about asynchronous exceptions, so everything can "throw."
4881 CannotThrow = false;
4882 } else if (isCleanupPadScope() &&
4883 EHPersonality::get(*this).isMSVCXXPersonality()) {
4884 // The MSVC++ personality will implicitly terminate the program if an
4885 // exception is thrown during a cleanup outside of a try/catch.
4886 // We don't need to model anything in IR to get this behavior.
4887 CannotThrow = true;
4888 } else {
4889 // Otherwise, nounwind call sites will never throw.
4890 CannotThrow = Attrs.hasFnAttribute(llvm::Attribute::NoUnwind);
4891
4892 if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
4893 if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
4894 CannotThrow = true;
4895 }
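
// Condensed restatement (illustrative, not part of CGCall.cpp) of the
// call-vs-invoke decision above: SEH makes everything potentially throwing,
// an MSVC++ cleanup pad terminates on throw anyway, and otherwise nounwind
// on either the call site or the callee rules out unwinding.
bool cannotThrow(bool UsesSEHTry, bool InMSVCCleanupPad,
                 bool SiteIsNoUnwind, bool CalleeIsNoUnwind) {
  if (UsesSEHTry)
    return false;                 // asynchronous exceptions can occur anywhere
  if (InMSVCCleanupPad)
    return true;                  // a throw here terminates; nothing to model
  return SiteIsNoUnwind || CalleeIsNoUnwind;
}
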
4896
4897 // If we made a temporary, be sure to clean up after ourselves. Note that we
4898 // can't depend on being inside of an ExprWithCleanups, so we need to manually
4899 // pop this cleanup later on. Being eager about this is OK, since this
4900 // temporary is 'invisible' outside of the callee.
4901 if (UnusedReturnSizePtr)
4902 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
4903 UnusedReturnSizePtr);
4904
4905 llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4906
4907 SmallVector<llvm::OperandBundleDef, 1> BundleList =
4908 getBundlesForFunclet(CalleePtr);
4909
4910 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
4911 if (FD->usesFPIntrin())
4912 // All calls within a strictfp function are marked strictfp
4913 Attrs =
4914 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4915 llvm::Attribute::StrictFP);
4916
4917 AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
4918 Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
4919
4920 AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
4921 Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
4922
4923 // Emit the actual call/invoke instruction.
4924 llvm::CallBase *CI;
4925 if (!InvokeDest) {
4926 CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
4927 } else {
4928 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4929 CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
4930 BundleList);
4931 EmitBlock(Cont);
4932 }
4933 if (callOrInvoke)
4934 *callOrInvoke = CI;
4935
4936 // If this is within a function that has the guard(nocf) attribute and is an
4937 // indirect call, add the "guard_nocf" attribute to this call to indicate that
4938 // Control Flow Guard checks should not be added, even if the call is inlined.
4939 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
4940 if (const auto *A = FD->getAttr<CFGuardAttr>()) {
4941 if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
4942 Attrs = Attrs.addAttribute(
4943 getLLVMContext(), llvm::AttributeList::FunctionIndex, "guard_nocf");
4944 }
4945 }
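
// Usage sketch (illustrative, MSVC-extension syntax; not part of CGCall.cpp):
// the source-level attribute that reaches this code as CFGuardAttr. Indirect
// calls inside such a function carry "guard_nocf" so no Control Flow Guard
// check is inserted for them, even after inlining.
__declspec(guard(nocf)) void fastIndirect(void (*Fn)()) {
  Fn();   // indirect call: CFG check suppressed via "guard_nocf"
}
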
4946
4947 // Apply the attributes and calling convention.
4948 CI->setAttributes(Attrs);
4949 CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4950
4951 // Apply various metadata.
4952
4953 if (!CI->getType()->isVoidTy())
4954 CI->setName("call");
4955
4956 // Update largest vector width from the return type.
4957 if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
4958 LargestVectorWidth =
4959 std::max((uint64_t)LargestVectorWidth,
4960 VT->getPrimitiveSizeInBits().getKnownMinSize());
4961
4962 // Insert instrumentation or attach profile metadata at indirect call sites.
4963 // For more details, see the comment before the definition of
4964 // IPVK_IndirectCallTarget in InstrProfData.inc.
4965 if (!CI->getCalledFunction())
4966 PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4967 CI, CalleePtr);
4968
4969 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4970 // optimizer it can aggressively ignore unwind edges.
4971 if (CGM.getLangOpts().ObjCAutoRefCount)
4972 AddObjCARCExceptionMetadata(CI);
4973
4974 // Suppress tail calls if requested.
4975 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4976 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4977 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4978 }
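
// Usage sketch (illustrative, not part of CGCall.cpp): the attribute that
// arrives here as NotTailCalledAttr. The call is emitted with TCK_NoTail so
// it can never be tail-called away, keeping the caller visible in backtraces.
[[clang::not_tail_called]] int mustShowInBacktrace(int X);
int mustShowInBacktrace(int X) { return X + 1; }  // hypothetical definition

int wrapper(int X) {
  return mustShowInBacktrace(X);   // lowered as a 'notail' call
}
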
4979
4980 // Add metadata for calls to MSAllocator functions.
4981 if (getDebugInfo() && TargetDecl &&
4982 TargetDecl->hasAttr<MSAllocatorAttr>())
4983 getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);
4984
4985 // 4. Finish the call.
4986
4987 // If the call doesn't return, finish the basic block and clear the
4988 // insertion point; this allows the rest of IRGen to discard
4989 // unreachable code.
4990 if (CI->doesNotReturn()) {
4991 if (UnusedReturnSizePtr)
4992 PopCleanupBlock();
4993
4994 // Strip away the noreturn attribute to better diagnose unreachable UB.
4995 if (SanOpts.has(SanitizerKind::Unreachable)) {
4996 // Also remove from function since CallBase::hasFnAttr additionally checks
4997 // attributes of the called function.
4998 if (auto *F = CI->getCalledFunction())
4999 F->removeFnAttr(llvm::Attribute::NoReturn);
5000 CI->removeAttribute(llvm::AttributeList::FunctionIndex,
5001 llvm::Attribute::NoReturn);
5002
5003 // Avoid incompatibility with ASan which relies on the `noreturn`
5004 // attribute to insert handler calls.
5005 if (SanOpts.hasOneOf(SanitizerKind::Address |
5006 SanitizerKind::KernelAddress)) {
5007 SanitizerScope SanScope(this);
5008 llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
5009 Builder.SetInsertPoint(CI);
5010 auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
5011 llvm::FunctionCallee Fn =
5012 CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
5013 EmitNounwindRuntimeCall(Fn);
5014 }
5015 }
5016
5017 EmitUnreachable(Loc);
5018 Builder.ClearInsertionPoint();
5019
5020 // FIXME: For now, emit a dummy basic block because expr emitters
5021 // generally are not ready to handle emitting expressions at unreachable
5022 // points.
5023 EnsureInsertPoint();
5024
5025 // Return a reasonable RValue.
5026 return GetUndefRValue(RetTy);
5027 }
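
// Why the noreturn attribute is stripped here (illustrative, not part of
// CGCall.cpp): if a [[noreturn]] function does return, that is undefined
// behavior, and -fsanitize=unreachable wants to report it. Keeping the
// attribute on the call site would let later passes delete the very code the
// sanitizer emits after the call. The declaration below is hypothetical.
[[noreturn]] void dies(int Code);

int caller(int Code) {
  dies(Code);   // under SanitizerKind::Unreachable the instrumentation after
  return Code;  // this call must survive so a bogus return can be reported
}
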
5028
5029 // Perform the swifterror writeback.
5030 if (swiftErrorTemp.isValid()) {
5031 llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
5032 Builder.CreateStore(errorResult, swiftErrorArg);
5033 }
5034
5035 // Emit any call-associated writebacks immediately. Arguably this
5036 // should happen after any return-value munging.
5037 if (CallArgs.hasWritebacks())
5038 emitWritebacks(*this, CallArgs);
5039
5040 // The stack cleanup for inalloca arguments has to run out of the normal
5041 // lexical order, so deactivate it and run it manually here.
5042 CallArgs.freeArgumentMemory(*this);
5043
5044 // Extract the return value.
5045 RValue Ret = [&] {
5046 switch (RetAI.getKind()) {
5047 case ABIArgInfo::CoerceAndExpand: {
5048 auto coercionType = RetAI.getCoerceAndExpandType();
5049
5050 Address addr = SRetPtr;
5051 addr = Builder.CreateElementBitCast(addr, coercionType);
5052
5053 assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
5054 bool requiresExtract = isa<llvm::StructType>(CI->getType());
5055
5056 unsigned unpaddedIndex = 0;
5057 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5058 llvm::Type *eltType = coercionType->getElementType(i);
5059 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
5060 Address eltAddr = Builder.CreateStructGEP(addr, i);
5061 llvm::Value *elt = CI;
5062 if (requiresExtract)
5063 elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
5064 else
5065 assert(unpaddedIndex == 0);
5066 Builder.CreateStore(elt, eltAddr);
5067 }
5068 // FALLTHROUGH
5069 LLVM_FALLTHROUGH;
5070 }
5071
5072 case ABIArgInfo::InAlloca:
5073 case ABIArgInfo::Indirect: {
5074 RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
5075 if (UnusedReturnSizePtr)
5076 PopCleanupBlock();
5077 return ret;
5078 }
5079
5080 case ABIArgInfo::Ignore:
5081 // Even when we are ignoring the call's result, make sure to construct
5082 // an appropriate return value for our caller.
5083 return GetUndefRValue(RetTy);
5084
5085 case ABIArgInfo::Extend:
5086 case ABIArgInfo::Direct: {
5087 llvm::Type *RetIRTy = ConvertType(RetTy);
5088 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
5089 switch (getEvaluationKind(RetTy)) {
5090 case TEK_Complex: {
5091 llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
5092 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
5093 return RValue::getComplex(std::make_pair(Real, Imag));
5094 }
5095 case TEK_Aggregate: {
5096 Address DestPtr = ReturnValue.getValue();
5097 bool DestIsVolatile = ReturnValue.isVolatile();
5098
5099 if (!DestPtr.isValid()) {
5100 DestPtr = CreateMemTemp(RetTy, "agg.tmp");
5101 DestIsVolatile = false;
5102 }
5103 EmitAggregateStore(CI, DestPtr, DestIsVolatile);
5104 return RValue::getAggregate(DestPtr);
5105 }
5106 case TEK_Scalar: {
5107 // If the return value doesn't match, perform a bitcast to coerce it.
5108 // This can happen due to trivial type mismatches.
5109 llvm::Value *V = CI;
5110 if (V->getType() != RetIRTy)
5111 V = Builder.CreateBitCast(V, RetIRTy);
5112 return RValue::get(V);
5113 }
5114 }
5115 llvm_unreachable("bad evaluation kind")::llvm::llvm_unreachable_internal("bad evaluation kind", "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/clang/lib/CodeGen/CGCall.cpp"
, 5115)
;
5116 }
5117
5118 Address DestPtr = ReturnValue.getValue();
5119 bool DestIsVolatile = ReturnValue.isVolatile();
5120
5121 if (!DestPtr.isValid()) {
5122 DestPtr = CreateMemTemp(RetTy, "coerce");
5123 DestIsVolatile = false;
5124 }
5125
5126 // If the value is offset in memory, apply the offset now.
5127 Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
5128 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
5129
5130 return convertTempToRValue(DestPtr, RetTy, SourceLocation());
5131 }
5132
5133 case ABIArgInfo::Expand:
5134 case ABIArgInfo::IndirectAliased:
5135 llvm_unreachable("Invalid ABI kind for return argument")::llvm::llvm_unreachable_internal("Invalid ABI kind for return argument"
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/clang/lib/CodeGen/CGCall.cpp"
, 5135)
;
5136 }
5137
5138 llvm_unreachable("Unhandled ABIArgInfo::Kind")::llvm::llvm_unreachable_internal("Unhandled ABIArgInfo::Kind"
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/clang/lib/CodeGen/CGCall.cpp"
, 5138)
;
5139 } ();
5140
5141 // Emit the assume_aligned check on the return value.
5142 if (Ret.isScalar() && TargetDecl) {
5143 AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5144 AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5145 }
5146
5147 // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
5148 // we can't use the full cleanup mechanism.
5149 for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
5150 LifetimeEnd.Emit(*this, /*Flags=*/{});
5151
5152 if (!ReturnValue.isExternallyDestructed() &&
5153 RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
5154 pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
5155 RetTy);
5156
5157 return Ret;
5158}
5159
5160CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
5161 if (isVirtual()) {
5162 const CallExpr *CE = getVirtualCallExpr();
5163 return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
5164 CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
5165 CE ? CE->getBeginLoc() : SourceLocation());
5166 }
5167
5168 return *this;
5169}
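
// Minimal analog (illustrative, not part of CGCall.cpp) of what resolving a
// virtual callee to a concrete function pointer means: the pointer is only
// known at the call site, read out of the object's vtable.
struct Base { virtual int f() { return 1; } virtual ~Base() = default; };
struct Derived : Base { int f() override { return 2; } };

int callVirtual(Base &B) {
  return B.f();   // emitted as: load vptr, load the f() slot, indirect call
}
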
5170
5171/* VarArg handling */
5172
5173Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
5174 VAListAddr = VE->isMicrosoftABI()
5175 ? EmitMSVAListRef(VE->getSubExpr())
5176 : EmitVAListRef(VE->getSubExpr());
5177 QualType Ty = VE->getType();
5178 if (VE->isMicrosoftABI())
5179 return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
5180 return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
5181}
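
// What EmitVAArg lowers, seen from the source level (illustrative, not part
// of CGCall.cpp): each va_arg consumes one value from the va_list whose
// address is computed above; the Microsoft variant differs only in how the
// list itself is represented and passed.
#include <cstdarg>

int sumInts(int N, ...) {
  va_list Args;
  va_start(Args, N);
  int Total = 0;
  for (int I = 0; I < N; ++I)
    Total += va_arg(Args, int);   // one EmitVAArg per read, advancing the list
  va_end(Args);
  return Total;                   // e.g. sumInts(3, 1, 2, 3) == 6
}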