Bug Summary

File: /build/source/clang/lib/CodeGen/CGCall.cpp
Warning: line 982, column 27
The left operand of '*' is a garbage value
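
In plain terms, the trace below says: getExpansionSize is entered and calls getTypeExpansion at line 980 (event 17); the argument type is neither a constant array, a record, nor a complex type, so getTypeExpansion constructs and returns a NoExpansion (events 17.1 through 38). Back at line 981 the analyzer nonetheless assumes the dyn_cast<ConstantArrayExpansion> succeeds (events 39 and 40), and on that assumed path CAExp->NumElts was never written, so the left operand of the multiplication at line 982 is reported as a garbage value (event 41). Whether that path is actually reachable depends on the classof checks shown in the listing, which should make the dyn_cast fail for a NoExpansion.

The following is a minimal, hypothetical sketch of the pattern the checker is reasoning about, not code from CGCall.cpp; it uses plain dynamic_cast in place of LLVM's dyn_cast/classof machinery, and every name in it is made up for illustration.

#include <cstdint>
#include <memory>

struct Expansion {
  virtual ~Expansion() = default;
};

struct ConstantArrayExpansion : Expansion {
  uint64_t NumElts; // only ever written when this derived type is constructed
  explicit ConstantArrayExpansion(uint64_t N) : NumElts(N) {}
};

struct NoExpansion : Expansion {}; // carries no NumElts at all

static std::unique_ptr<Expansion> getExpansion(bool IsArray) {
  if (IsArray)
    return std::make_unique<ConstantArrayExpansion>(4);
  return std::make_unique<NoExpansion>();
}

static int getSize(bool IsArray) {
  auto Exp = getExpansion(IsArray);
  // The downcast is what keeps the read of NumElts safe: it yields null for a
  // NoExpansion. The analyzer path above assumes the corresponding dyn_cast
  // succeeds even though the call just produced a NoExpansion, which is why
  // the multiplication's left operand is modeled as uninitialized memory.
  if (auto *CA = dynamic_cast<ConstantArrayExpansion *>(Exp.get()))
    return static_cast<int>(CA->NumElts) * 2;
  return 1;
}

int main() { return getSize(false); }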

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name CGCall.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -relaxed-aliasing -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/source/build-llvm -resource-dir /usr/lib/llvm-17/lib/clang/17 -I tools/clang/lib/CodeGen -I /build/source/clang/lib/CodeGen -I /build/source/clang/include -I tools/clang/include -I include -I /build/source/llvm/include -D _DEBUG -D _GLIBCXX_ASSERTIONS -D _GNU_SOURCE -D _LIBCPP_ENABLE_ASSERTIONS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-17/lib/clang/17/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/source/build-llvm=build-llvm -fmacro-prefix-map=/build/source/= -fcoverage-prefix-map=/build/source/build-llvm=build-llvm -fcoverage-prefix-map=/build/source/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/source/build-llvm -fdebug-prefix-map=/build/source/build-llvm=build-llvm -fdebug-prefix-map=/build/source/= -fdebug-prefix-map=/build/source/build-llvm=build-llvm -fdebug-prefix-map=/build/source/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2023-05-10-133810-16478-1 -x c++ /build/source/clang/lib/CodeGen/CGCall.cpp

/build/source/clang/lib/CodeGen/CGCall.cpp

1//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// These classes wrap the information about a call or function
10// definition used to handle ABI compliancy.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CGCall.h"
15#include "ABIInfo.h"
16#include "CGBlocks.h"
17#include "CGCXXABI.h"
18#include "CGCleanup.h"
19#include "CGRecordLayout.h"
20#include "CodeGenFunction.h"
21#include "CodeGenModule.h"
22#include "TargetInfo.h"
23#include "clang/AST/Attr.h"
24#include "clang/AST/Decl.h"
25#include "clang/AST/DeclCXX.h"
26#include "clang/AST/DeclObjC.h"
27#include "clang/Basic/CodeGenOptions.h"
28#include "clang/Basic/TargetInfo.h"
29#include "clang/CodeGen/CGFunctionInfo.h"
30#include "clang/CodeGen/SwiftCallingConv.h"
31#include "llvm/ADT/StringExtras.h"
32#include "llvm/Analysis/ValueTracking.h"
33#include "llvm/IR/Assumptions.h"
34#include "llvm/IR/Attributes.h"
35#include "llvm/IR/CallingConv.h"
36#include "llvm/IR/DataLayout.h"
37#include "llvm/IR/InlineAsm.h"
38#include "llvm/IR/IntrinsicInst.h"
39#include "llvm/IR/Intrinsics.h"
40#include "llvm/IR/Type.h"
41#include "llvm/Transforms/Utils/Local.h"
42#include <optional>
43using namespace clang;
44using namespace CodeGen;
45
46/***/
47
48unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
49 switch (CC) {
50 default: return llvm::CallingConv::C;
51 case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
52 case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
53 case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
54 case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
55 case CC_Win64: return llvm::CallingConv::Win64;
56 case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
57 case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
58 case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
59 case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
60 // TODO: Add support for __pascal to LLVM.
61 case CC_X86Pascal: return llvm::CallingConv::C;
62 // TODO: Add support for __vectorcall to LLVM.
63 case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
64 case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
65 case CC_AArch64SVEPCS: return llvm::CallingConv::AArch64_SVE_VectorCall;
66 case CC_AMDGPUKernelCall: return llvm::CallingConv::AMDGPU_KERNEL;
67 case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
68 case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
69 case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
70 case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
71 case CC_Swift: return llvm::CallingConv::Swift;
72 case CC_SwiftAsync: return llvm::CallingConv::SwiftTail;
73 }
74}
75
76/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
77/// qualification. Either or both of RD and MD may be null. A null RD indicates
78/// that there is no meaningful 'this' type, and a null MD can occur when
79/// calling a method pointer.
80CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
81 const CXXMethodDecl *MD) {
82 QualType RecTy;
83 if (RD)
84 RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
85 else
86 RecTy = Context.VoidTy;
87
88 if (MD)
89 RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace());
90 return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
91}
92
93/// Returns the canonical formal type of the given C++ method.
94static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
95 return MD->getType()->getCanonicalTypeUnqualified()
96 .getAs<FunctionProtoType>();
97}
98
99/// Returns the "extra-canonicalized" return type, which discards
100/// qualifiers on the return type. Codegen doesn't care about them,
101/// and it makes ABI code a little easier to be able to assume that
102/// all parameter and return types are top-level unqualified.
103static CanQualType GetReturnType(QualType RetTy) {
104 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
105}
106
107/// Arrange the argument and result information for a value of the given
108/// unprototyped freestanding function type.
109const CGFunctionInfo &
110CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
111 // When translating an unprototyped function type, always use a
112 // variadic type.
113 return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
114 /*instanceMethod=*/false,
115 /*chainCall=*/false, std::nullopt,
116 FTNP->getExtInfo(), {}, RequiredArgs(0));
117}
118
119static void addExtParameterInfosForCall(
120 llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
121 const FunctionProtoType *proto,
122 unsigned prefixArgs,
123 unsigned totalArgs) {
124 assert(proto->hasExtParameterInfos());
125 assert(paramInfos.size() <= prefixArgs);
126 assert(proto->getNumParams() + prefixArgs <= totalArgs);
127
128 paramInfos.reserve(totalArgs);
129
130 // Add default infos for any prefix args that don't already have infos.
131 paramInfos.resize(prefixArgs);
132
133 // Add infos for the prototype.
134 for (const auto &ParamInfo : proto->getExtParameterInfos()) {
135 paramInfos.push_back(ParamInfo);
136 // pass_object_size params have no parameter info.
137 if (ParamInfo.hasPassObjectSize())
138 paramInfos.emplace_back();
139 }
140
141 assert(paramInfos.size() <= totalArgs &&
142 "Did we forget to insert pass_object_size args?");
143 // Add default infos for the variadic and/or suffix arguments.
144 paramInfos.resize(totalArgs);
145}
146
147/// Adds the formal parameters in FPT to the given prefix. If any parameter in
148/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
149static void appendParameterTypes(const CodeGenTypes &CGT,
150 SmallVectorImpl<CanQualType> &prefix,
151 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
152 CanQual<FunctionProtoType> FPT) {
153 // Fast path: don't touch param info if we don't need to.
154 if (!FPT->hasExtParameterInfos()) {
155 assert(paramInfos.empty() &&
156 "We have paramInfos, but the prototype doesn't?");
157 prefix.append(FPT->param_type_begin(), FPT->param_type_end());
158 return;
159 }
160
161 unsigned PrefixSize = prefix.size();
162 // In the vast majority of cases, we'll have precisely FPT->getNumParams()
163 // parameters; the only thing that can change this is the presence of
164 // pass_object_size. So, we preallocate for the common case.
165 prefix.reserve(prefix.size() + FPT->getNumParams());
166
167 auto ExtInfos = FPT->getExtParameterInfos();
168 assert(ExtInfos.size() == FPT->getNumParams());
169 for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
170 prefix.push_back(FPT->getParamType(I));
171 if (ExtInfos[I].hasPassObjectSize())
172 prefix.push_back(CGT.getContext().getSizeType());
173 }
174
175 addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
176 prefix.size());
177}
178
179/// Arrange the LLVM function layout for a value of the given function
180/// type, on top of any implicit parameters already stored.
181static const CGFunctionInfo &
182arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
183 SmallVectorImpl<CanQualType> &prefix,
184 CanQual<FunctionProtoType> FTP) {
185 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
186 RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
187 // FIXME: Kill copy.
188 appendParameterTypes(CGT, prefix, paramInfos, FTP);
189 CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
190
191 return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
192 /*chainCall=*/false, prefix,
193 FTP->getExtInfo(), paramInfos,
194 Required);
195}
196
197/// Arrange the argument and result information for a value of the
198/// given freestanding function type.
199const CGFunctionInfo &
200CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
201 SmallVector<CanQualType, 16> argTypes;
202 return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
203 FTP);
204}
205
206static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
207 bool IsWindows) {
208 // Set the appropriate calling convention for the Function.
209 if (D->hasAttr<StdCallAttr>())
210 return CC_X86StdCall;
211
212 if (D->hasAttr<FastCallAttr>())
213 return CC_X86FastCall;
214
215 if (D->hasAttr<RegCallAttr>())
216 return CC_X86RegCall;
217
218 if (D->hasAttr<ThisCallAttr>())
219 return CC_X86ThisCall;
220
221 if (D->hasAttr<VectorCallAttr>())
222 return CC_X86VectorCall;
223
224 if (D->hasAttr<PascalAttr>())
225 return CC_X86Pascal;
226
227 if (PcsAttr *PCS = D->getAttr<PcsAttr>())
228 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
229
230 if (D->hasAttr<AArch64VectorPcsAttr>())
231 return CC_AArch64VectorCall;
232
233 if (D->hasAttr<AArch64SVEPcsAttr>())
234 return CC_AArch64SVEPCS;
235
236 if (D->hasAttr<AMDGPUKernelCallAttr>())
237 return CC_AMDGPUKernelCall;
238
239 if (D->hasAttr<IntelOclBiccAttr>())
240 return CC_IntelOclBicc;
241
242 if (D->hasAttr<MSABIAttr>())
243 return IsWindows ? CC_C : CC_Win64;
244
245 if (D->hasAttr<SysVABIAttr>())
246 return IsWindows ? CC_X86_64SysV : CC_C;
247
248 if (D->hasAttr<PreserveMostAttr>())
249 return CC_PreserveMost;
250
251 if (D->hasAttr<PreserveAllAttr>())
252 return CC_PreserveAll;
253
254 return CC_C;
255}
256
257/// Arrange the argument and result information for a call to an
258/// unknown C++ non-static member function of the given abstract type.
259/// (A null RD means we don't have any meaningful "this" argument type,
260/// so fall back to a generic pointer type).
261/// The member function must be an ordinary function, i.e. not a
262/// constructor or destructor.
263const CGFunctionInfo &
264CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
265 const FunctionProtoType *FTP,
266 const CXXMethodDecl *MD) {
267 SmallVector<CanQualType, 16> argTypes;
268
269 // Add the 'this' pointer.
270 argTypes.push_back(DeriveThisType(RD, MD));
271
272 return ::arrangeLLVMFunctionInfo(
273 *this, true, argTypes,
274 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
275}
276
277/// Set calling convention for CUDA/HIP kernel.
278static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
279 const FunctionDecl *FD) {
280 if (FD->hasAttr<CUDAGlobalAttr>()) {
281 const FunctionType *FT = FTy->getAs<FunctionType>();
282 CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
283 FTy = FT->getCanonicalTypeUnqualified();
284 }
285}
286
287/// Arrange the argument and result information for a declaration or
288/// definition of the given C++ non-static member function. The
289/// member function must be an ordinary function, i.e. not a
290/// constructor or destructor.
291const CGFunctionInfo &
292CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
293 assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
294 assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
295
296 CanQualType FT = GetFormalType(MD).getAs<Type>();
297 setCUDAKernelCallingConvention(FT, CGM, MD);
298 auto prototype = FT.getAs<FunctionProtoType>();
299
300 if (MD->isInstance()) {
301 // The abstract case is perfectly fine.
302 const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
303 return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
304 }
305
306 return arrangeFreeFunctionType(prototype);
307}
308
309bool CodeGenTypes::inheritingCtorHasParams(
310 const InheritedConstructor &Inherited, CXXCtorType Type) {
311 // Parameters are unnecessary if we're constructing a base class subobject
312 // and the inherited constructor lives in a virtual base.
313 return Type == Ctor_Complete ||
314 !Inherited.getShadowDecl()->constructsVirtualBase() ||
315 !Target.getCXXABI().hasConstructorVariants();
316}
317
318const CGFunctionInfo &
319CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
320 auto *MD = cast<CXXMethodDecl>(GD.getDecl());
321
322 SmallVector<CanQualType, 16> argTypes;
323 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
324
325 const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(GD);
326 argTypes.push_back(DeriveThisType(ThisType, MD));
327
328 bool PassParams = true;
329
330 if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
331 // A base class inheriting constructor doesn't get forwarded arguments
332 // needed to construct a virtual base (or base class thereof).
333 if (auto Inherited = CD->getInheritedConstructor())
334 PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
335 }
336
337 CanQual<FunctionProtoType> FTP = GetFormalType(MD);
338
339 // Add the formal parameters.
340 if (PassParams)
341 appendParameterTypes(*this, argTypes, paramInfos, FTP);
342
343 CGCXXABI::AddedStructorArgCounts AddedArgs =
344 TheCXXABI.buildStructorSignature(GD, argTypes);
345 if (!paramInfos.empty()) {
346 // Note: prefix implies after the first param.
347 if (AddedArgs.Prefix)
348 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
349 FunctionProtoType::ExtParameterInfo{});
350 if (AddedArgs.Suffix)
351 paramInfos.append(AddedArgs.Suffix,
352 FunctionProtoType::ExtParameterInfo{});
353 }
354
355 RequiredArgs required =
356 (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
357 : RequiredArgs::All);
358
359 FunctionType::ExtInfo extInfo = FTP->getExtInfo();
360 CanQualType resultType = TheCXXABI.HasThisReturn(GD)
361 ? argTypes.front()
362 : TheCXXABI.hasMostDerivedReturn(GD)
363 ? CGM.getContext().VoidPtrTy
364 : Context.VoidTy;
365 return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
366 /*chainCall=*/false, argTypes, extInfo,
367 paramInfos, required);
368}
369
370static SmallVector<CanQualType, 16>
371getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
372 SmallVector<CanQualType, 16> argTypes;
373 for (auto &arg : args)
374 argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
375 return argTypes;
376}
377
378static SmallVector<CanQualType, 16>
379getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
380 SmallVector<CanQualType, 16> argTypes;
381 for (auto &arg : args)
382 argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
383 return argTypes;
384}
385
386static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
387getExtParameterInfosForCall(const FunctionProtoType *proto,
388 unsigned prefixArgs, unsigned totalArgs) {
389 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
390 if (proto->hasExtParameterInfos()) {
391 addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
392 }
393 return result;
394}
395
396/// Arrange a call to a C++ method, passing the given arguments.
397///
398/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
399/// parameter.
400/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
401/// args.
402/// PassProtoArgs indicates whether `args` has args for the parameters in the
403/// given CXXConstructorDecl.
404const CGFunctionInfo &
405CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
406 const CXXConstructorDecl *D,
407 CXXCtorType CtorKind,
408 unsigned ExtraPrefixArgs,
409 unsigned ExtraSuffixArgs,
410 bool PassProtoArgs) {
411 // FIXME: Kill copy.
412 SmallVector<CanQualType, 16> ArgTypes;
413 for (const auto &Arg : args)
414 ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
415
416 // +1 for implicit this, which should always be args[0].
417 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
418
419 CanQual<FunctionProtoType> FPT = GetFormalType(D);
420 RequiredArgs Required = PassProtoArgs
421 ? RequiredArgs::forPrototypePlus(
422 FPT, TotalPrefixArgs + ExtraSuffixArgs)
423 : RequiredArgs::All;
424
425 GlobalDecl GD(D, CtorKind);
426 CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
427 ? ArgTypes.front()
428 : TheCXXABI.hasMostDerivedReturn(GD)
429 ? CGM.getContext().VoidPtrTy
430 : Context.VoidTy;
431
432 FunctionType::ExtInfo Info = FPT->getExtInfo();
433 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
434 // If the prototype args are elided, we should only have ABI-specific args,
435 // which never have param info.
436 if (PassProtoArgs && FPT->hasExtParameterInfos()) {
437 // ABI-specific suffix arguments are treated the same as variadic arguments.
438 addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
439 ArgTypes.size());
440 }
441 return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
442 /*chainCall=*/false, ArgTypes, Info,
443 ParamInfos, Required);
444}
445
446/// Arrange the argument and result information for the declaration or
447/// definition of the given function.
448const CGFunctionInfo &
449CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
450 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
451 if (MD->isInstance())
452 return arrangeCXXMethodDeclaration(MD);
453
454 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
455
456 assert(isa<FunctionType>(FTy));
457 setCUDAKernelCallingConvention(FTy, CGM, FD);
458
459 // When declaring a function without a prototype, always use a
460 // non-variadic type.
461 if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
462 return arrangeLLVMFunctionInfo(
463 noProto->getReturnType(), /*instanceMethod=*/false,
464 /*chainCall=*/false, std::nullopt, noProto->getExtInfo(), {},
465 RequiredArgs::All);
466 }
467
468 return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
469}
470
471/// Arrange the argument and result information for the declaration or
472/// definition of an Objective-C method.
473const CGFunctionInfo &
474CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
475 // It happens that this is the same as a call with no optional
476 // arguments, except also using the formal 'self' type.
477 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
478}
479
480/// Arrange the argument and result information for the function type
481/// through which to perform a send to the given Objective-C method,
482/// using the given receiver type. The receiver type is not always
483/// the 'self' type of the method or even an Objective-C pointer type.
484/// This is *not* the right method for actually performing such a
485/// message send, due to the possibility of optional arguments.
486const CGFunctionInfo &
487CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
488 QualType receiverType) {
489 SmallVector<CanQualType, 16> argTys;
490 SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(
491 MD->isDirectMethod() ? 1 : 2);
492 argTys.push_back(Context.getCanonicalParamType(receiverType));
493 if (!MD->isDirectMethod())
494 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
495 // FIXME: Kill copy?
496 for (const auto *I : MD->parameters()) {
497 argTys.push_back(Context.getCanonicalParamType(I->getType()));
498 auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
499 I->hasAttr<NoEscapeAttr>());
500 extParamInfos.push_back(extParamInfo);
501 }
502
503 FunctionType::ExtInfo einfo;
504 bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
505 einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
506
507 if (getContext().getLangOpts().ObjCAutoRefCount &&
508 MD->hasAttr<NSReturnsRetainedAttr>())
509 einfo = einfo.withProducesResult(true);
510
511 RequiredArgs required =
512 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
513
514 return arrangeLLVMFunctionInfo(
515 GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
516 /*chainCall=*/false, argTys, einfo, extParamInfos, required);
517}
518
519const CGFunctionInfo &
520CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
521 const CallArgList &args) {
522 auto argTypes = getArgTypesForCall(Context, args);
523 FunctionType::ExtInfo einfo;
524
525 return arrangeLLVMFunctionInfo(
526 GetReturnType(returnType), /*instanceMethod=*/false,
527 /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
528}
529
530const CGFunctionInfo &
531CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
532 // FIXME: Do we need to handle ObjCMethodDecl?
533 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
534
535 if (isa<CXXConstructorDecl>(GD.getDecl()) ||
536 isa<CXXDestructorDecl>(GD.getDecl()))
537 return arrangeCXXStructorDeclaration(GD);
538
539 return arrangeFunctionDeclaration(FD);
540}
541
542/// Arrange a thunk that takes 'this' as the first parameter followed by
543/// varargs. Return a void pointer, regardless of the actual return type.
544/// The body of the thunk will end in a musttail call to a function of the
545/// correct type, and the caller will bitcast the function to the correct
546/// prototype.
547const CGFunctionInfo &
548CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
549 assert(MD->isVirtual() && "only methods have thunks");
550 CanQual<FunctionProtoType> FTP = GetFormalType(MD);
551 CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
552 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
553 /*chainCall=*/false, ArgTys,
554 FTP->getExtInfo(), {}, RequiredArgs(1));
555}
556
557const CGFunctionInfo &
558CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
559 CXXCtorType CT) {
560 assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
561
562 CanQual<FunctionProtoType> FTP = GetFormalType(CD);
563 SmallVector<CanQualType, 2> ArgTys;
564 const CXXRecordDecl *RD = CD->getParent();
565 ArgTys.push_back(DeriveThisType(RD, CD));
566 if (CT == Ctor_CopyingClosure)
567 ArgTys.push_back(*FTP->param_type_begin());
568 if (RD->getNumVBases() > 0)
569 ArgTys.push_back(Context.IntTy);
570 CallingConv CC = Context.getDefaultCallingConvention(
571 /*IsVariadic=*/false, /*IsCXXMethod=*/true);
572 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
573 /*chainCall=*/false, ArgTys,
574 FunctionType::ExtInfo(CC), {},
575 RequiredArgs::All);
576}
577
578/// Arrange a call as unto a free function, except possibly with an
579/// additional number of formal parameters considered required.
580static const CGFunctionInfo &
581arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
582 CodeGenModule &CGM,
583 const CallArgList &args,
584 const FunctionType *fnType,
585 unsigned numExtraRequiredArgs,
586 bool chainCall) {
587 assert(args.size() >= numExtraRequiredArgs);
588
589 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
590
591 // In most cases, there are no optional arguments.
592 RequiredArgs required = RequiredArgs::All;
593
594 // If we have a variadic prototype, the required arguments are the
595 // extra prefix plus the arguments in the prototype.
596 if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
597 if (proto->isVariadic())
598 required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);
599
600 if (proto->hasExtParameterInfos())
601 addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
602 args.size());
603
604 // If we don't have a prototype at all, but we're supposed to
605 // explicitly use the variadic convention for unprototyped calls,
606 // treat all of the arguments as required but preserve the nominal
607 // possibility of variadics.
608 } else if (CGM.getTargetCodeGenInfo()
609 .isNoProtoCallVariadic(args,
610 cast<FunctionNoProtoType>(fnType))) {
611 required = RequiredArgs(args.size());
612 }
613
614 // FIXME: Kill copy.
615 SmallVector<CanQualType, 16> argTypes;
616 for (const auto &arg : args)
617 argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
618 return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
619 /*instanceMethod=*/false, chainCall,
620 argTypes, fnType->getExtInfo(), paramInfos,
621 required);
622}
623
624/// Figure out the rules for calling a function with the given formal
625/// type using the given arguments. The arguments are necessary
626/// because the function might be unprototyped, in which case it's
627/// target-dependent in crazy ways.
628const CGFunctionInfo &
629CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
630 const FunctionType *fnType,
631 bool chainCall) {
632 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
633 chainCall ? 1 : 0, chainCall);
634}
635
636/// A block function is essentially a free function with an
637/// extra implicit argument.
638const CGFunctionInfo &
639CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
640 const FunctionType *fnType) {
641 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
642 /*chainCall=*/false);
643}
644
645const CGFunctionInfo &
646CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
647 const FunctionArgList &params) {
648 auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
649 auto argTypes = getArgTypesForDeclaration(Context, params);
650
651 return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
652 /*instanceMethod*/ false, /*chainCall*/ false,
653 argTypes, proto->getExtInfo(), paramInfos,
654 RequiredArgs::forPrototypePlus(proto, 1));
655}
656
657const CGFunctionInfo &
658CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
659 const CallArgList &args) {
660 // FIXME: Kill copy.
661 SmallVector<CanQualType, 16> argTypes;
662 for (const auto &Arg : args)
663 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
664 return arrangeLLVMFunctionInfo(
665 GetReturnType(resultType), /*instanceMethod=*/false,
666 /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
667 /*paramInfos=*/ {}, RequiredArgs::All);
668}
669
670const CGFunctionInfo &
671CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
672 const FunctionArgList &args) {
673 auto argTypes = getArgTypesForDeclaration(Context, args);
674
675 return arrangeLLVMFunctionInfo(
676 GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
677 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
678}
679
680const CGFunctionInfo &
681CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
682 ArrayRef<CanQualType> argTypes) {
683 return arrangeLLVMFunctionInfo(
684 resultType, /*instanceMethod=*/false, /*chainCall=*/false,
685 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
686}
687
688/// Arrange a call to a C++ method, passing the given arguments.
689///
690/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
691/// does not count `this`.
692const CGFunctionInfo &
693CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
694 const FunctionProtoType *proto,
695 RequiredArgs required,
696 unsigned numPrefixArgs) {
697 assert(numPrefixArgs + 1 <= args.size() &&
698 "Emitting a call with less args than the required prefix?");
699 // Add one to account for `this`. It's a bit awkward here, but we don't count
700 // `this` in similar places elsewhere.
701 auto paramInfos =
702 getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
703
704 // FIXME: Kill copy.
705 auto argTypes = getArgTypesForCall(Context, args);
706
707 FunctionType::ExtInfo info = proto->getExtInfo();
708 return arrangeLLVMFunctionInfo(
709 GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
710 /*chainCall=*/false, argTypes, info, paramInfos, required);
711}
712
713const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
714 return arrangeLLVMFunctionInfo(
715 getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
716 std::nullopt, FunctionType::ExtInfo(), {}, RequiredArgs::All);
717}
718
719const CGFunctionInfo &
720CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
721 const CallArgList &args) {
722 assert(signature.arg_size() <= args.size());
723 if (signature.arg_size() == args.size())
724 return signature;
725
726 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
727 auto sigParamInfos = signature.getExtParameterInfos();
728 if (!sigParamInfos.empty()) {
729 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
730 paramInfos.resize(args.size());
731 }
732
733 auto argTypes = getArgTypesForCall(Context, args);
734
735 assert(signature.getRequiredArgs().allowsOptionalArgs());
736 return arrangeLLVMFunctionInfo(signature.getReturnType(),
737 signature.isInstanceMethod(),
738 signature.isChainCall(),
739 argTypes,
740 signature.getExtInfo(),
741 paramInfos,
742 signature.getRequiredArgs());
743}
744
745namespace clang {
746namespace CodeGen {
747void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
748}
749}
750
751/// Arrange the argument and result information for an abstract value
752/// of a given function type. This is the method which all of the
753/// above functions ultimately defer to.
754const CGFunctionInfo &
755CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
756 bool instanceMethod,
757 bool chainCall,
758 ArrayRef<CanQualType> argTypes,
759 FunctionType::ExtInfo info,
760 ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
761 RequiredArgs required) {
762 assert(llvm::all_of(argTypes,
763 [](CanQualType T) { return T.isCanonicalAsParam(); }));
764
765 // Lookup or create unique function info.
766 llvm::FoldingSetNodeID ID;
767 CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
768 required, resultType, argTypes);
769
770 void *insertPos = nullptr;
771 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
772 if (FI)
773 return *FI;
774
775 unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
776
777 // Construct the function info. We co-allocate the ArgInfos.
778 FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
779 paramInfos, resultType, argTypes, required);
780 FunctionInfos.InsertNode(FI, insertPos);
781
782 bool inserted = FunctionsBeingProcessed.insert(FI).second;
783 (void)inserted;
784 assert(inserted && "Recursively being processed?");
785
786 // Compute ABI information.
787 if (CC == llvm::CallingConv::SPIR_KERNEL) {
788 // Force target independent argument handling for the host visible
789 // kernel functions.
790 computeSPIRKernelABIInfo(CGM, *FI);
791 } else if (info.getCC() == CC_Swift || info.getCC() == CC_SwiftAsync) {
792 swiftcall::computeABIInfo(CGM, *FI);
793 } else {
794 getABIInfo().computeInfo(*FI);
795 }
796
797 // Loop over all of the computed argument and return value info. If any of
798 // them are direct or extend without a specified coerce type, specify the
799 // default now.
800 ABIArgInfo &retInfo = FI->getReturnInfo();
801 if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
802 retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
803
804 for (auto &I : FI->arguments())
805 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
806 I.info.setCoerceToType(ConvertType(I.type));
807
808 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
809 assert(erased && "Not in set?");
810
811 return *FI;
812}
813
814CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
815 bool instanceMethod,
816 bool chainCall,
817 const FunctionType::ExtInfo &info,
818 ArrayRef<ExtParameterInfo> paramInfos,
819 CanQualType resultType,
820 ArrayRef<CanQualType> argTypes,
821 RequiredArgs required) {
822 assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
823 assert(!required.allowsOptionalArgs() ||
824 required.getNumRequiredArgs() <= argTypes.size());
825
826 void *buffer =
827 operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
828 argTypes.size() + 1, paramInfos.size()));
829
830 CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
831 FI->CallingConvention = llvmCC;
832 FI->EffectiveCallingConvention = llvmCC;
833 FI->ASTCallingConvention = info.getCC();
834 FI->InstanceMethod = instanceMethod;
835 FI->ChainCall = chainCall;
836 FI->CmseNSCall = info.getCmseNSCall();
837 FI->NoReturn = info.getNoReturn();
838 FI->ReturnsRetained = info.getProducesResult();
839 FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
840 FI->NoCfCheck = info.getNoCfCheck();
841 FI->Required = required;
842 FI->HasRegParm = info.getHasRegParm();
843 FI->RegParm = info.getRegParm();
844 FI->ArgStruct = nullptr;
845 FI->ArgStructAlign = 0;
846 FI->NumArgs = argTypes.size();
847 FI->HasExtParameterInfos = !paramInfos.empty();
848 FI->getArgsBuffer()[0].type = resultType;
849 FI->MaxVectorWidth = 0;
850 for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
851 FI->getArgsBuffer()[i + 1].type = argTypes[i];
852 for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
853 FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
854 return FI;
855}
856
857/***/
858
859namespace {
860// ABIArgInfo::Expand implementation.
861
862// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
863struct TypeExpansion {
864 enum TypeExpansionKind {
865 // Elements of constant arrays are expanded recursively.
866 TEK_ConstantArray,
867 // Record fields are expanded recursively (but if record is a union, only
868 // the field with the largest size is expanded).
869 TEK_Record,
870 // For complex types, real and imaginary parts are expanded recursively.
871 TEK_Complex,
872 // All other types are not expandable.
873 TEK_None
874 };
875
876 const TypeExpansionKind Kind;
877
878 TypeExpansion(TypeExpansionKind K) : Kind(K) {}
879 virtual ~TypeExpansion() {}
880};
881
882struct ConstantArrayExpansion : TypeExpansion {
883 QualType EltTy;
884 uint64_t NumElts;
885
886 ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
887 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
888 static bool classof(const TypeExpansion *TE) {
889 return TE->Kind == TEK_ConstantArray;
890 }
891};
892
893struct RecordExpansion : TypeExpansion {
894 SmallVector<const CXXBaseSpecifier *, 1> Bases;
895
896 SmallVector<const FieldDecl *, 1> Fields;
897
898 RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
899 SmallVector<const FieldDecl *, 1> &&Fields)
900 : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
901 Fields(std::move(Fields)) {}
902 static bool classof(const TypeExpansion *TE) {
903 return TE->Kind == TEK_Record;
904 }
905};
906
907struct ComplexExpansion : TypeExpansion {
908 QualType EltTy;
909
910 ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
911 static bool classof(const TypeExpansion *TE) {
912 return TE->Kind == TEK_Complex;
913 }
914};
915
916struct NoExpansion : TypeExpansion {
917 NoExpansion() : TypeExpansion(TEK_None) {}
918 static bool classof(const TypeExpansion *TE) {
919 return TE->Kind == TEK_None;
920 }
921};
922} // namespace
923
924static std::unique_ptr<TypeExpansion>
925getTypeExpansion(QualType Ty, const ASTContext &Context) {
926 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
17.1: 'AT' is null
18: Taking false branch
927 return std::make_unique<ConstantArrayExpansion>(
928 AT->getElementType(), AT->getSize().getZExtValue());
929 }
930 if (const RecordType *RT = Ty->getAs<RecordType>()) {
19: Assuming the object is not a 'const RecordType *'
19.1: 'RT' is null
20: Taking false branch
931 SmallVector<const CXXBaseSpecifier *, 1> Bases;
932 SmallVector<const FieldDecl *, 1> Fields;
933 const RecordDecl *RD = RT->getDecl();
934 assert(!RD->hasFlexibleArrayMember() &&
935 "Cannot expand structure with flexible array.");
936 if (RD->isUnion()) {
937 // Unions can be here only in degenerative cases - all the fields are same
938 // after flattening. Thus we have to use the "largest" field.
939 const FieldDecl *LargestFD = nullptr;
940 CharUnits UnionSize = CharUnits::Zero();
941
942 for (const auto *FD : RD->fields()) {
943 if (FD->isZeroLengthBitField(Context))
944 continue;
945 assert(!FD->isBitField() &&
946 "Cannot expand structure with bit-field members.");
947 CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
948 if (UnionSize < FieldSize) {
949 UnionSize = FieldSize;
950 LargestFD = FD;
951 }
952 }
953 if (LargestFD)
954 Fields.push_back(LargestFD);
955 } else {
956 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
957 assert(!CXXRD->isDynamicClass() &&
958 "cannot expand vtable pointers in dynamic classes");
959 llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
960 }
961
962 for (const auto *FD : RD->fields()) {
963 if (FD->isZeroLengthBitField(Context))
964 continue;
965 assert(!FD->isBitField() &&
966 "Cannot expand structure with bit-field members.");
967 Fields.push_back(FD);
968 }
969 }
970 return std::make_unique<RecordExpansion>(std::move(Bases),
971 std::move(Fields));
972 }
973 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
21: Assuming the object is not a 'const class clang::ComplexType *'
21.1: 'CT' is null
22: Taking false branch
974 return std::make_unique<ComplexExpansion>(CT->getElementType());
975 }
976 return std::make_unique<NoExpansion>();
23: Calling 'make_unique<(anonymous namespace)::NoExpansion, >'
31: Returning from 'make_unique<(anonymous namespace)::NoExpansion, >'
32: Calling constructor for 'unique_ptr<(anonymous namespace)::TypeExpansion, std::default_delete<(anonymous namespace)::TypeExpansion>>'
37: Returning from constructor for 'unique_ptr<(anonymous namespace)::TypeExpansion, std::default_delete<(anonymous namespace)::TypeExpansion>>'
977}
978
979static int getExpansionSize(QualType Ty, const ASTContext &Context) {
980 auto Exp = getTypeExpansion(Ty, Context);
17: Calling 'getTypeExpansion'
38: Returning from 'getTypeExpansion'
981 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
39: Assuming the object is a 'CastReturnType'
39.1: 'CAExp' is non-null
40: Taking true branch
982 return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
41: The left operand of '*' is a garbage value
983 }
984 if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
985 int Res = 0;
986 for (auto BS : RExp->Bases)
987 Res += getExpansionSize(BS->getType(), Context);
988 for (auto FD : RExp->Fields)
989 Res += getExpansionSize(FD->getType(), Context);
990 return Res;
991 }
992 if (isa<ComplexExpansion>(Exp.get()))
993 return 2;
994 assert(isa<NoExpansion>(Exp.get()));
995 return 1;
996}
997
998void
999CodeGenTypes::getExpandedTypes(QualType Ty,
1000 SmallVectorImpl<llvm::Type *>::iterator &TI) {
1001 auto Exp = getTypeExpansion(Ty, Context);
1002 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1003 for (int i = 0, n = CAExp->NumElts; i < n; i++) {
1004 getExpandedTypes(CAExp->EltTy, TI);
1005 }
1006 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1007 for (auto BS : RExp->Bases)
1008 getExpandedTypes(BS->getType(), TI);
1009 for (auto FD : RExp->Fields)
1010 getExpandedTypes(FD->getType(), TI);
1011 } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
1012 llvm::Type *EltTy = ConvertType(CExp->EltTy);
1013 *TI++ = EltTy;
1014 *TI++ = EltTy;
1015 } else {
1016 assert(isa<NoExpansion>(Exp.get()));
1017 *TI++ = ConvertType(Ty);
1018 }
1019}
1020
1021static void forConstantArrayExpansion(CodeGenFunction &CGF,
1022 ConstantArrayExpansion *CAE,
1023 Address BaseAddr,
1024 llvm::function_ref<void(Address)> Fn) {
1025 CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
1026 CharUnits EltAlign =
1027 BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
1028 llvm::Type *EltTy = CGF.ConvertTypeForMem(CAE->EltTy);
1029
1030 for (int i = 0, n = CAE->NumElts; i < n; i++) {
1031 llvm::Value *EltAddr = CGF.Builder.CreateConstGEP2_32(
1032 BaseAddr.getElementType(), BaseAddr.getPointer(), 0, i);
1033 Fn(Address(EltAddr, EltTy, EltAlign));
1034 }
1035}
1036
1037void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
1038 llvm::Function::arg_iterator &AI) {
1039 assert(LV.isSimple() &&
1040 "Unexpected non-simple lvalue during struct expansion.");
1041
1042 auto Exp = getTypeExpansion(Ty, getContext());
1043 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1044 forConstantArrayExpansion(
1045 *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
1046 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1047 ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1048 });
1049 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1050 Address This = LV.getAddress(*this);
1051 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1052 // Perform a single step derived-to-base conversion.
1053 Address Base =
1054 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1055 /*NullCheckValue=*/false, SourceLocation());
1056 LValue SubLV = MakeAddrLValue(Base, BS->getType());
1057
1058 // Recurse onto bases.
1059 ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1060 }
1061 for (auto FD : RExp->Fields) {
1062 // FIXME: What are the right qualifiers here?
1063 LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1064 ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1065 }
1066 } else if (isa<ComplexExpansion>(Exp.get())) {
1067 auto realValue = &*AI++;
1068 auto imagValue = &*AI++;
1069 EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1070 } else {
1071 // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
1072 // primitive store.
1073 assert(isa<NoExpansion>(Exp.get()));
1074 llvm::Value *Arg = &*AI++;
1075 if (LV.isBitField()) {
1076 EmitStoreThroughLValue(RValue::get(Arg), LV);
1077 } else {
1078 // TODO: currently there are some places are inconsistent in what LLVM
1079 // pointer type they use (see D118744). Once clang uses opaque pointers
1080 // all LLVM pointer types will be the same and we can remove this check.
1081 if (Arg->getType()->isPointerTy()) {
1082 Address Addr = LV.getAddress(*this);
1083 Arg = Builder.CreateBitCast(Arg, Addr.getElementType());
1084 }
1085 EmitStoreOfScalar(Arg, LV);
1086 }
1087 }
1088}
1089
1090void CodeGenFunction::ExpandTypeToArgs(
1091 QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1092 SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1093 auto Exp = getTypeExpansion(Ty, getContext());
1094 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1095 Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
1096 : Arg.getKnownRValue().getAggregateAddress();
1097 forConstantArrayExpansion(
1098 *this, CAExp, Addr, [&](Address EltAddr) {
1099 CallArg EltArg = CallArg(
1100 convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1101 CAExp->EltTy);
1102 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1103 IRCallArgPos);
1104 });
1105 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1106 Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
1107 : Arg.getKnownRValue().getAggregateAddress();
1108 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1109 // Perform a single step derived-to-base conversion.
1110 Address Base =
1111 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1112 /*NullCheckValue=*/false, SourceLocation());
1113 CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1114
1115 // Recurse onto bases.
1116 ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1117 IRCallArgPos);
1118 }
1119
1120 LValue LV = MakeAddrLValue(This, Ty);
1121 for (auto FD : RExp->Fields) {
1122 CallArg FldArg =
1123 CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1124 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1125 IRCallArgPos);
1126 }
1127 } else if (isa<ComplexExpansion>(Exp.get())) {
1128 ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
1129 IRCallArgs[IRCallArgPos++] = CV.first;
1130 IRCallArgs[IRCallArgPos++] = CV.second;
1131 } else {
1132    assert(isa<NoExpansion>(Exp.get()));
1133    auto RV = Arg.getKnownRValue();
1134    assert(RV.isScalar() &&
1135           "Unexpected non-scalar rvalue during struct expansion.");
1136
1137 // Insert a bitcast as needed.
1138 llvm::Value *V = RV.getScalarVal();
1139 if (IRCallArgPos < IRFuncTy->getNumParams() &&
1140 V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1141 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1142
1143 IRCallArgs[IRCallArgPos++] = V;
1144 }
1145}
1146
1147/// Create a temporary allocation for the purposes of coercion.
1148static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1149 CharUnits MinAlign,
1150 const Twine &Name = "tmp") {
1151 // Don't use an alignment that's worse than what LLVM would prefer.
1152 auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(Ty);
1153 CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1154
1155 return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
1156}
1157
1158/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1159/// accessing some number of bytes out of it, try to gep into the struct to get
1160/// at its inner goodness. Dive as deep as possible without entering an element
1161/// with an in-memory size smaller than DstSize.
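// Illustrative note (not part of the original source): given, say,
//   %outer = type { %inner, i32 }   and   %inner = type { i64 }
// a request to access 8 bytes dives through both levels and yields the
// address of the leading i64, because at each level the first element's
// store size is not smaller than the 8 bytes being accessed.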
1162static Address
1163EnterStructPointerForCoercedAccess(Address SrcPtr,
1164 llvm::StructType *SrcSTy,
1165 uint64_t DstSize, CodeGenFunction &CGF) {
1166 // We can't dive into a zero-element struct.
1167 if (SrcSTy->getNumElements() == 0) return SrcPtr;
1168
1169 llvm::Type *FirstElt = SrcSTy->getElementType(0);
1170
1171 // If the first elt is at least as large as what we're looking for, or if the
1172 // first element is the same size as the whole struct, we can enter it. The
1173 // comparison must be made on the store size and not the alloca size. Using
1174 // the alloca size may overstate the size of the load.
1175 uint64_t FirstEltSize =
1176 CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1177 if (FirstEltSize < DstSize &&
1178 FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1179 return SrcPtr;
1180
1181 // GEP into the first element.
1182 SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");
1183
1184 // If the first element is a struct, recurse.
1185 llvm::Type *SrcTy = SrcPtr.getElementType();
1186 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1187 return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1188
1189 return SrcPtr;
1190}
1191
1192/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1193/// are either integers or pointers. This does a truncation of the value if it
1194/// is too large or a zero extension if it is too small.
1195///
1196/// This behaves as if the value were coerced through memory, so on big-endian
1197/// targets the high bits are preserved in a truncation, while little-endian
1198/// targets preserve the low bits.
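// Illustrative note (not part of the original source): coercing an i64 value
// to i32 keeps the low 32 bits on a little-endian target (a plain trunc),
// while on a big-endian target the value is first shifted right by 32 so the
// high 32 bits survive the truncation, which is the same result a store
// followed by a narrower load from the same address would give.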
1199static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1200 llvm::Type *Ty,
1201 CodeGenFunction &CGF) {
1202 if (Val->getType() == Ty)
1203 return Val;
1204
1205 if (isa<llvm::PointerType>(Val->getType())) {
1206 // If this is Pointer->Pointer avoid conversion to and from int.
1207 if (isa<llvm::PointerType>(Ty))
1208 return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1209
1210 // Convert the pointer to an integer so we can play with its width.
1211 Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1212 }
1213
1214 llvm::Type *DestIntTy = Ty;
1215 if (isa<llvm::PointerType>(DestIntTy))
1216 DestIntTy = CGF.IntPtrTy;
1217
1218 if (Val->getType() != DestIntTy) {
1219 const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1220 if (DL.isBigEndian()) {
1221 // Preserve the high bits on big-endian targets.
1222 // That is what memory coercion does.
1223 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1224 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1225
1226 if (SrcSize > DstSize) {
1227 Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1228 Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1229 } else {
1230 Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1231 Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1232 }
1233 } else {
1234 // Little-endian targets preserve the low bits. No shifts required.
1235 Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1236 }
1237 }
1238
1239 if (isa<llvm::PointerType>(Ty))
1240 Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1241 return Val;
1242}
1243
1244
1245
1246/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1247/// a pointer to an object of type \arg Ty, known to be aligned to
1248/// \arg SrcAlign bytes.
1249///
1250/// This safely handles the case when the src type is smaller than the
1251/// destination type; in this situation the values of bits which are not
1252/// present in the src are undefined.
1253static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1254 CodeGenFunction &CGF) {
1255 llvm::Type *SrcTy = Src.getElementType();
1256
1257 // If SrcTy and Ty are the same, just do a load.
1258 if (SrcTy == Ty)
1259 return CGF.Builder.CreateLoad(Src);
1260
1261 llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1262
1263 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1264 Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
1265 DstSize.getFixedValue(), CGF);
1266 SrcTy = Src.getElementType();
1267 }
1268
1269 llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1270
1271 // If the source and destination are integer or pointer types, just do an
1272 // extension or truncation to the desired type.
1273 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1274 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1275 llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1276 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1277 }
1278
1279 // If load is legal, just bitcast the src pointer.
1280 if (!SrcSize.isScalable() && !DstSize.isScalable() &&
1281 SrcSize.getFixedValue() >= DstSize.getFixedValue()) {
1282 // Generally SrcSize is never greater than DstSize, since this means we are
1283 // losing bits. However, this can happen in cases where the structure has
1284 // additional padding, for example due to a user specified alignment.
1285 //
1286 // FIXME: Assert that we aren't truncating non-padding bits when we have access
1287 // to that information.
1288 Src = CGF.Builder.CreateElementBitCast(Src, Ty);
1289 return CGF.Builder.CreateLoad(Src);
1290 }
1291
1292 // If coercing a fixed vector to a scalable vector for ABI compatibility, and
1293 // the types match, use the llvm.vector.insert intrinsic to perform the
1294 // conversion.
1295 if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
1296 if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
1297 // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
1298 // vector, use a vector insert and bitcast the result.
1299 bool NeedsBitcast = false;
1300 auto PredType =
1301 llvm::ScalableVectorType::get(CGF.Builder.getInt1Ty(), 16);
1302 llvm::Type *OrigType = Ty;
1303 if (ScalableDst == PredType &&
1304 FixedSrc->getElementType() == CGF.Builder.getInt8Ty()) {
1305 ScalableDst = llvm::ScalableVectorType::get(CGF.Builder.getInt8Ty(), 2);
1306 NeedsBitcast = true;
1307 }
1308 if (ScalableDst->getElementType() == FixedSrc->getElementType()) {
1309 auto *Load = CGF.Builder.CreateLoad(Src);
1310 auto *UndefVec = llvm::UndefValue::get(ScalableDst);
1311 auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
1312 llvm::Value *Result = CGF.Builder.CreateInsertVector(
1313 ScalableDst, UndefVec, Load, Zero, "castScalableSve");
1314 if (NeedsBitcast)
1315 Result = CGF.Builder.CreateBitCast(Result, OrigType);
1316 return Result;
1317 }
1318 }
1319 }
1320
1321 // Otherwise do coercion through memory. This is stupid, but simple.
1322 Address Tmp =
1323 CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
1324 CGF.Builder.CreateMemCpy(
1325 Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
1326 Src.getAlignment().getAsAlign(),
1327 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinValue()));
1328 return CGF.Builder.CreateLoad(Tmp);
1329}
1330
1331// Function to store a first-class aggregate into memory. We prefer to
1332// store the elements rather than the aggregate to be more friendly to
1333// fast-isel.
1334// FIXME: Do we need to recurse here?
1335void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
1336 bool DestIsVolatile) {
1337 // Prefer scalar stores to first-class aggregate stores.
1338 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
1339 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1340 Address EltPtr = Builder.CreateStructGEP(Dest, i);
1341 llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
1342 Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1343 }
1344 } else {
1345 Builder.CreateStore(Val, Dest, DestIsVolatile);
1346 }
1347}
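// Illustrative note (not part of the original source): storing a first-class
// aggregate of type { i32, float } this way becomes two extractvalue
// instructions feeding two scalar stores at struct-GEP'd addresses, rather
// than a single aggregate store.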
1348
1349/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1350/// where the source and destination may have different types. The
1351/// destination is known to be aligned to \arg DstAlign bytes.
1352///
1353/// This safely handles the case when the src type is larger than the
1354/// destination type; the upper bits of the src will be lost.
1355static void CreateCoercedStore(llvm::Value *Src,
1356 Address Dst,
1357 bool DstIsVolatile,
1358 CodeGenFunction &CGF) {
1359 llvm::Type *SrcTy = Src->getType();
1360 llvm::Type *DstTy = Dst.getElementType();
1361 if (SrcTy == DstTy) {
1362 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1363 return;
1364 }
1365
1366 llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1367
1368 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1369 Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
1370 SrcSize.getFixedValue(), CGF);
1371 DstTy = Dst.getElementType();
1372 }
1373
1374 llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
1375 llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
1376 if (SrcPtrTy && DstPtrTy &&
1377 SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
1378 Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
1379 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1380 return;
1381 }
1382
1383 // If the source and destination are integer or pointer types, just do an
1384 // extension or truncation to the desired type.
1385 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1386 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1387 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1388 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1389 return;
1390 }
1391
1392 llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1393
1394 // If store is legal, just bitcast the src pointer.
1395 if (isa<llvm::ScalableVectorType>(SrcTy) ||
1396 isa<llvm::ScalableVectorType>(DstTy) ||
1397 SrcSize.getFixedValue() <= DstSize.getFixedValue()) {
1398 Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1399 CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
1400 } else {
1401 // Otherwise do coercion through memory. This is stupid, but
1402 // simple.
1403
1404 // Generally SrcSize is never greater than DstSize, since this means we are
1405 // losing bits. However, this can happen in cases where the structure has
1406 // additional padding, for example due to a user specified alignment.
1407 //
1408 // FIXME: Assert that we aren't truncating non-padding bits when we have access
1409 // to that information.
1410 Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1411 CGF.Builder.CreateStore(Src, Tmp);
1412 CGF.Builder.CreateMemCpy(
1413 Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
1414 Tmp.getAlignment().getAsAlign(),
1415 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedValue()));
1416 }
1417}
1418
1419static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1420 const ABIArgInfo &info) {
1421 if (unsigned offset = info.getDirectOffset()) {
1422 addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1423 addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1424 CharUnits::fromQuantity(offset));
1425 addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1426 }
1427 return addr;
1428}
1429
1430namespace {
1431
1432/// Encapsulates information about the way function arguments from
1433/// CGFunctionInfo should be passed to the actual LLVM IR function.
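// Illustrative note (not part of the original source, and ABI-dependent):
// for a function whose aggregate return is passed indirectly and that takes
// (int, double _Complex), the mapping might be: IR arg 0 is the sret pointer,
// IR arg 1 is the int, and IR args 2-3 are the two doubles of the expanded
// complex value, so getIRArgs(1) would return {2, 2}.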
1434class ClangToLLVMArgMapping {
1435 static const unsigned InvalidIndex = ~0U;
1436 unsigned InallocaArgNo;
1437 unsigned SRetArgNo;
1438 unsigned TotalIRArgs;
1439
1440 /// Arguments of LLVM IR function corresponding to single Clang argument.
1441 struct IRArgs {
1442 unsigned PaddingArgIndex;
1443 // Argument is expanded to IR arguments at positions
1444 // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1445 unsigned FirstArgIndex;
1446 unsigned NumberOfArgs;
1447
1448 IRArgs()
1449 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1450 NumberOfArgs(0) {}
1451 };
1452
1453 SmallVector<IRArgs, 8> ArgInfo;
1454
1455public:
1456 ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1457 bool OnlyRequiredArgs = false)
1458 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1459 ArgInfo(OnlyRequiredArgs
4.1
'OnlyRequiredArgs' is false
? FI.getNumRequiredArgs() : FI.arg_size()) {
5
'?' condition is false
1460 construct(Context, FI, OnlyRequiredArgs);
6
Calling 'ClangToLLVMArgMapping::construct'
1461 }
1462
1463 bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1464 unsigned getInallocaArgNo() const {
1465    assert(hasInallocaArg());
1466 return InallocaArgNo;
1467 }
1468
1469 bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1470 unsigned getSRetArgNo() const {
1471    assert(hasSRetArg());
1472 return SRetArgNo;
1473 }
1474
1475 unsigned totalIRArgs() const { return TotalIRArgs; }
1476
1477 bool hasPaddingArg(unsigned ArgNo) const {
1478    assert(ArgNo < ArgInfo.size());
1479 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1480 }
1481 unsigned getPaddingArgNo(unsigned ArgNo) const {
1482    assert(hasPaddingArg(ArgNo));
1483 return ArgInfo[ArgNo].PaddingArgIndex;
1484 }
1485
1486 /// Returns index of first IR argument corresponding to ArgNo, and their
1487 /// quantity.
1488 std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1489    assert(ArgNo < ArgInfo.size());
1490 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1491 ArgInfo[ArgNo].NumberOfArgs);
1492 }
1493
1494private:
1495 void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1496 bool OnlyRequiredArgs);
1497};
1498
1499void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1500 const CGFunctionInfo &FI,
1501 bool OnlyRequiredArgs) {
1502 unsigned IRArgNo = 0;
1503 bool SwapThisWithSRet = false;
1504 const ABIArgInfo &RetAI = FI.getReturnInfo();
1505
1506 if (RetAI.getKind() == ABIArgInfo::Indirect) {
7
Assuming the condition is false
8
Taking false branch
1507 SwapThisWithSRet = RetAI.isSRetAfterThis();
1508 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1509 }
1510
1511 unsigned ArgNo = 0;
1512 unsigned NumArgs = OnlyRequiredArgs
8.1
'OnlyRequiredArgs' is false
? FI.getNumRequiredArgs() : FI.arg_size();
9
'?' condition is false
1513 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
10
Assuming 'ArgNo' is < 'NumArgs'
11
Loop condition is true. Entering loop body
1514 ++I, ++ArgNo) {
1515    assert(I != FI.arg_end());
12
'?' condition is true
1516 QualType ArgType = I->type;
1517 const ABIArgInfo &AI = I->info;
1518 // Collect data about IR arguments corresponding to Clang argument ArgNo.
1519 auto &IRArgs = ArgInfo[ArgNo];
1520
1521 if (AI.getPaddingType())
13
Assuming the condition is false
14
Taking false branch
1522 IRArgs.PaddingArgIndex = IRArgNo++;
1523
1524 switch (AI.getKind()) {
15
Control jumps to 'case Expand:' at line 1548
1525 case ABIArgInfo::Extend:
1526 case ABIArgInfo::Direct: {
1527 // FIXME: handle sseregparm someday...
1528 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1529 if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1530 IRArgs.NumberOfArgs = STy->getNumElements();
1531 } else {
1532 IRArgs.NumberOfArgs = 1;
1533 }
1534 break;
1535 }
1536 case ABIArgInfo::Indirect:
1537 case ABIArgInfo::IndirectAliased:
1538 IRArgs.NumberOfArgs = 1;
1539 break;
1540 case ABIArgInfo::Ignore:
1541 case ABIArgInfo::InAlloca:
1542      // Ignore and InAlloca don't have matching LLVM parameters.
1543 IRArgs.NumberOfArgs = 0;
1544 break;
1545 case ABIArgInfo::CoerceAndExpand:
1546 IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1547 break;
1548 case ABIArgInfo::Expand:
1549 IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
16
Calling 'getExpansionSize'
1550 break;
1551 }
1552
1553 if (IRArgs.NumberOfArgs > 0) {
1554 IRArgs.FirstArgIndex = IRArgNo;
1555 IRArgNo += IRArgs.NumberOfArgs;
1556 }
1557
1558 // Skip over the sret parameter when it comes second. We already handled it
1559 // above.
1560 if (IRArgNo == 1 && SwapThisWithSRet)
1561 IRArgNo++;
1562 }
1563  assert(ArgNo == ArgInfo.size());
1564
1565 if (FI.usesInAlloca())
1566 InallocaArgNo = IRArgNo++;
1567
1568 TotalIRArgs = IRArgNo;
1569}
1570} // namespace
1571
1572/***/
1573
1574bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1575 const auto &RI = FI.getReturnInfo();
1576 return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1577}
1578
1579bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1580 return ReturnTypeUsesSRet(FI) &&
1581 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1582}
1583
1584bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1585 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1586 switch (BT->getKind()) {
1587 default:
1588 return false;
1589 case BuiltinType::Float:
1590 return getTarget().useObjCFPRetForRealType(FloatModeKind::Float);
1591 case BuiltinType::Double:
1592 return getTarget().useObjCFPRetForRealType(FloatModeKind::Double);
1593 case BuiltinType::LongDouble:
1594 return getTarget().useObjCFPRetForRealType(FloatModeKind::LongDouble);
1595 }
1596 }
1597
1598 return false;
1599}
1600
1601bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1602 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1603 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1604 if (BT->getKind() == BuiltinType::LongDouble)
1605 return getTarget().useObjCFP2RetForComplexLongDouble();
1606 }
1607 }
1608
1609 return false;
1610}
1611
1612llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1613 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1614 return GetFunctionType(FI);
1615}
1616
1617llvm::FunctionType *
1618CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1619
1620 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1621 (void)Inserted;
1622  assert(Inserted && "Recursively being processed?");
1623
1624 llvm::Type *resultType = nullptr;
1625 const ABIArgInfo &retAI = FI.getReturnInfo();
1626 switch (retAI.getKind()) {
1627 case ABIArgInfo::Expand:
1628 case ABIArgInfo::IndirectAliased:
1629    llvm_unreachable("Invalid ABI kind for return argument");
1630
1631 case ABIArgInfo::Extend:
1632 case ABIArgInfo::Direct:
1633 resultType = retAI.getCoerceToType();
1634 break;
1635
1636 case ABIArgInfo::InAlloca:
1637 if (retAI.getInAllocaSRet()) {
1638      // sret things on win32 aren't void; they return the sret pointer.
1639 QualType ret = FI.getReturnType();
1640 llvm::Type *ty = ConvertType(ret);
1641 unsigned addressSpace = CGM.getTypes().getTargetAddressSpace(ret);
1642 resultType = llvm::PointerType::get(ty, addressSpace);
1643 } else {
1644 resultType = llvm::Type::getVoidTy(getLLVMContext());
1645 }
1646 break;
1647
1648 case ABIArgInfo::Indirect:
1649 case ABIArgInfo::Ignore:
1650 resultType = llvm::Type::getVoidTy(getLLVMContext());
1651 break;
1652
1653 case ABIArgInfo::CoerceAndExpand:
1654 resultType = retAI.getUnpaddedCoerceAndExpandType();
1655 break;
1656 }
1657
1658 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1659 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1660
1661 // Add type for sret argument.
1662 if (IRFunctionArgs.hasSRetArg()) {
1663 QualType Ret = FI.getReturnType();
1664 llvm::Type *Ty = ConvertType(Ret);
1665 unsigned AddressSpace = CGM.getTypes().getTargetAddressSpace(Ret);
1666 ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1667 llvm::PointerType::get(Ty, AddressSpace);
1668 }
1669
1670 // Add type for inalloca argument.
1671 if (IRFunctionArgs.hasInallocaArg()) {
1672 auto ArgStruct = FI.getArgStruct();
1673    assert(ArgStruct);
1674 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1675 }
1676
1677 // Add in all of the required arguments.
1678 unsigned ArgNo = 0;
1679 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1680 ie = it + FI.getNumRequiredArgs();
1681 for (; it != ie; ++it, ++ArgNo) {
1682 const ABIArgInfo &ArgInfo = it->info;
1683
1684 // Insert a padding type to ensure proper alignment.
1685 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1686 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1687 ArgInfo.getPaddingType();
1688
1689 unsigned FirstIRArg, NumIRArgs;
1690 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1691
1692 switch (ArgInfo.getKind()) {
1693 case ABIArgInfo::Ignore:
1694 case ABIArgInfo::InAlloca:
1695      assert(NumIRArgs == 0);
1696 break;
1697
1698 case ABIArgInfo::Indirect: {
1699      assert(NumIRArgs == 1);
1700 // indirect arguments are always on the stack, which is alloca addr space.
1701 llvm::Type *LTy = ConvertTypeForMem(it->type);
1702 ArgTypes[FirstIRArg] = LTy->getPointerTo(
1703 CGM.getDataLayout().getAllocaAddrSpace());
1704 break;
1705 }
1706 case ABIArgInfo::IndirectAliased: {
1707      assert(NumIRArgs == 1);
1708 llvm::Type *LTy = ConvertTypeForMem(it->type);
1709 ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace());
1710 break;
1711 }
1712 case ABIArgInfo::Extend:
1713 case ABIArgInfo::Direct: {
1714 // Fast-isel and the optimizer generally like scalar values better than
1715 // FCAs, so we flatten them if this is safe to do for this argument.
1716 llvm::Type *argType = ArgInfo.getCoerceToType();
1717 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1718 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1719        assert(NumIRArgs == st->getNumElements());
1720 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1721 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1722 } else {
1723        assert(NumIRArgs == 1);
1724 ArgTypes[FirstIRArg] = argType;
1725 }
1726 break;
1727 }
1728
1729 case ABIArgInfo::CoerceAndExpand: {
1730 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1731 for (auto *EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1732 *ArgTypesIter++ = EltTy;
1733 }
1734      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1735 break;
1736 }
1737
1738 case ABIArgInfo::Expand:
1739 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1740 getExpandedTypes(it->type, ArgTypesIter);
1741      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1742 break;
1743 }
1744 }
1745
1746 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1747  assert(Erased && "Not in set?");
1748
1749 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1750}
1751
1752llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1753 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1754 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1755
1756 if (!isFuncTypeConvertible(FPT))
1757 return llvm::StructType::get(getLLVMContext());
1758
1759 return GetFunctionType(GD);
1760}
1761
1762static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1763 llvm::AttrBuilder &FuncAttrs,
1764 const FunctionProtoType *FPT) {
1765 if (!FPT)
1766 return;
1767
1768 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1769 FPT->isNothrow())
1770 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1771}
1772
1773static void AddAttributesFromAssumes(llvm::AttrBuilder &FuncAttrs,
1774 const Decl *Callee) {
1775 if (!Callee)
1776 return;
1777
1778 SmallVector<StringRef, 4> Attrs;
1779
1780 for (const AssumptionAttr *AA : Callee->specific_attrs<AssumptionAttr>())
1781 AA->getAssumption().split(Attrs, ",");
1782
1783 if (!Attrs.empty())
1784 FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
1785 llvm::join(Attrs.begin(), Attrs.end(), ","));
1786}
1787
1788bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context,
1789 QualType ReturnType) const {
1790 // We can't just discard the return value for a record type with a
1791 // complex destructor or a non-trivially copyable type.
1792 if (const RecordType *RT =
1793 ReturnType.getCanonicalType()->getAs<RecordType>()) {
1794 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1795 return ClassDecl->hasTrivialDestructor();
1796 }
1797 return ReturnType.isTriviallyCopyableType(Context);
1798}
1799
1800static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy,
1801 const Decl *TargetDecl) {
1802 // As-is, msan cannot tolerate a noundef mismatch between caller and
1803 // implementation. A mismatch is possible for e.g. indirect calls from a C
1804 // caller into C++. Such mismatches lead to confusing false reports. To avoid
1805 // an expensive workaround in msan we enforce initialization even in uncommon
1806 // cases where it's allowed.
1807 if (Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))
1808 return true;
1809 // C++ explicitly makes returning undefined values UB. C's rule only applies
1810 // to used values, so we never mark them noundef for now.
1811 if (!Module.getLangOpts().CPlusPlus)
1812 return false;
1813 if (TargetDecl) {
1814 if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) {
1815 if (FDecl->isExternC())
1816 return false;
1817 } else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) {
1818 // Function pointer.
1819 if (VDecl->isExternC())
1820 return false;
1821 }
1822 }
1823
1824 // We don't want to be too aggressive with the return checking, unless
1825 // it's explicit in the code opts or we're using an appropriate sanitizer.
1826 // Try to respect what the programmer intended.
1827 return Module.getCodeGenOpts().StrictReturn ||
1828 !Module.MayDropFunctionReturn(Module.getContext(), RetTy) ||
1829 Module.getLangOpts().Sanitize.has(SanitizerKind::Return);
1830}
1831
1832/// Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the
1833/// requested denormal behavior, accounting for the overriding behavior of the
1834/// -f32 case.
1835static void addDenormalModeAttrs(llvm::DenormalMode FPDenormalMode,
1836 llvm::DenormalMode FP32DenormalMode,
1837 llvm::AttrBuilder &FuncAttrs) {
1838 if (FPDenormalMode != llvm::DenormalMode::getDefault())
1839 FuncAttrs.addAttribute("denormal-fp-math", FPDenormalMode.str());
1840
1841 if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid())
1842 FuncAttrs.addAttribute("denormal-fp-math-f32", FP32DenormalMode.str());
1843}
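// Illustrative note (not part of the original source): with
// -fdenormal-fp-math=preserve-sign and -fdenormal-fp-math-f32=ieee this emits
// "denormal-fp-math"="preserve-sign,preserve-sign" plus
// "denormal-fp-math-f32"="ieee,ieee"; if the f32 mode matches the general
// mode, the -f32 attribute is omitted as redundant.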
1844
1845/// Add default attributes to a function, which have merge semantics under
1846/// -mlink-builtin-bitcode and should not simply overwrite any existing
1847/// attributes in the linked library.
1848static void
1849addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts,
1850 llvm::AttrBuilder &FuncAttrs) {
1851 addDenormalModeAttrs(CodeGenOpts.FPDenormalMode, CodeGenOpts.FP32DenormalMode,
1852 FuncAttrs);
1853}
1854
1855void CodeGenModule::getTrivialDefaultFunctionAttributes(
1856 StringRef Name, bool HasOptnone, bool AttrOnCallSite,
1857 llvm::AttrBuilder &FuncAttrs) {
1858 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1859 if (!HasOptnone) {
1860 if (CodeGenOpts.OptimizeSize)
1861 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1862 if (CodeGenOpts.OptimizeSize == 2)
1863 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1864 }
1865
1866 if (CodeGenOpts.DisableRedZone)
1867 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1868 if (CodeGenOpts.IndirectTlsSegRefs)
1869 FuncAttrs.addAttribute("indirect-tls-seg-refs");
1870 if (CodeGenOpts.NoImplicitFloat)
1871 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1872
1873 if (AttrOnCallSite) {
1874 // Attributes that should go on the call site only.
1875 // FIXME: Look for 'BuiltinAttr' on the function rather than re-checking
1876 // the -fno-builtin-foo list.
1877 if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name))
1878 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1879 if (!CodeGenOpts.TrapFuncName.empty())
1880 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1881 } else {
1882 switch (CodeGenOpts.getFramePointer()) {
1883 case CodeGenOptions::FramePointerKind::None:
1884 // This is the default behavior.
1885 break;
1886 case CodeGenOptions::FramePointerKind::NonLeaf:
1887 case CodeGenOptions::FramePointerKind::All:
1888 FuncAttrs.addAttribute("frame-pointer",
1889 CodeGenOptions::getFramePointerKindName(
1890 CodeGenOpts.getFramePointer()));
1891 }
1892
1893 if (CodeGenOpts.LessPreciseFPMAD)
1894 FuncAttrs.addAttribute("less-precise-fpmad", "true");
1895
1896 if (CodeGenOpts.NullPointerIsValid)
1897 FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
1898
1899 if (LangOpts.getDefaultExceptionMode() == LangOptions::FPE_Ignore)
1900 FuncAttrs.addAttribute("no-trapping-math", "true");
1901
1902 // TODO: Are these all needed?
1903 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1904 if (LangOpts.NoHonorInfs)
1905 FuncAttrs.addAttribute("no-infs-fp-math", "true");
1906 if (LangOpts.NoHonorNaNs)
1907 FuncAttrs.addAttribute("no-nans-fp-math", "true");
1908 if (LangOpts.ApproxFunc)
1909 FuncAttrs.addAttribute("approx-func-fp-math", "true");
1910 if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
1911 LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
1912 (LangOpts.getDefaultFPContractMode() ==
1913 LangOptions::FPModeKind::FPM_Fast ||
1914 LangOpts.getDefaultFPContractMode() ==
1915 LangOptions::FPModeKind::FPM_FastHonorPragmas))
1916 FuncAttrs.addAttribute("unsafe-fp-math", "true");
1917 if (CodeGenOpts.SoftFloat)
1918 FuncAttrs.addAttribute("use-soft-float", "true");
1919 FuncAttrs.addAttribute("stack-protector-buffer-size",
1920 llvm::utostr(CodeGenOpts.SSPBufferSize));
1921 if (LangOpts.NoSignedZero)
1922 FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");
1923
1924 // TODO: Reciprocal estimate codegen options should apply to instructions?
1925 const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1926 if (!Recips.empty())
1927 FuncAttrs.addAttribute("reciprocal-estimates",
1928 llvm::join(Recips, ","));
1929
1930 if (!CodeGenOpts.PreferVectorWidth.empty() &&
1931 CodeGenOpts.PreferVectorWidth != "none")
1932 FuncAttrs.addAttribute("prefer-vector-width",
1933 CodeGenOpts.PreferVectorWidth);
1934
1935 if (CodeGenOpts.StackRealignment)
1936 FuncAttrs.addAttribute("stackrealign");
1937 if (CodeGenOpts.Backchain)
1938 FuncAttrs.addAttribute("backchain");
1939 if (CodeGenOpts.EnableSegmentedStacks)
1940 FuncAttrs.addAttribute("split-stack");
1941
1942 if (CodeGenOpts.SpeculativeLoadHardening)
1943 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1944
1945 // Add zero-call-used-regs attribute.
1946 switch (CodeGenOpts.getZeroCallUsedRegs()) {
1947 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
1948 FuncAttrs.removeAttribute("zero-call-used-regs");
1949 break;
1950 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
1951 FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr-arg");
1952 break;
1953 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
1954 FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr");
1955 break;
1956 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
1957 FuncAttrs.addAttribute("zero-call-used-regs", "used-arg");
1958 break;
1959 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
1960 FuncAttrs.addAttribute("zero-call-used-regs", "used");
1961 break;
1962 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
1963 FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr-arg");
1964 break;
1965 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
1966 FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr");
1967 break;
1968 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
1969 FuncAttrs.addAttribute("zero-call-used-regs", "all-arg");
1970 break;
1971 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
1972 FuncAttrs.addAttribute("zero-call-used-regs", "all");
1973 break;
1974 }
1975 }
1976
1977 if (getLangOpts().assumeFunctionsAreConvergent()) {
1978 // Conservatively, mark all functions and calls in CUDA and OpenCL as
1979 // convergent (meaning, they may call an intrinsically convergent op, such
1980 // as __syncthreads() / barrier(), and so can't have certain optimizations
1981 // applied around them). LLVM will remove this attribute where it safely
1982 // can.
1983 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1984 }
1985
1986 // TODO: NoUnwind attribute should be added for other GPU modes HIP,
1987 // OpenMP offload. AFAIK, neither of them support exceptions in device code.
1988 if ((getLangOpts().CUDA && getLangOpts().CUDAIsDevice) ||
1989 getLangOpts().OpenCL || getLangOpts().SYCLIsDevice) {
1990 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1991 }
1992
1993 for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1994 StringRef Var, Value;
1995 std::tie(Var, Value) = Attr.split('=');
1996 FuncAttrs.addAttribute(Var, Value);
1997 }
1998}
1999
2000void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
2001 bool HasOptnone,
2002 bool AttrOnCallSite,
2003 llvm::AttrBuilder &FuncAttrs) {
2004 getTrivialDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite,
2005 FuncAttrs);
2006 if (!AttrOnCallSite) {
2007 // If we're just getting the default, get the default values for mergeable
2008 // attributes.
2009 addMergableDefaultFunctionAttributes(CodeGenOpts, FuncAttrs);
2010 }
2011}
2012
2013void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
2014 llvm::AttrBuilder FuncAttrs(F.getContext());
2015 getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
2016 /* AttrOnCallSite = */ false, FuncAttrs);
2017 // TODO: call GetCPUAndFeaturesAttributes?
2018 F.addFnAttrs(FuncAttrs);
2019}
2020
2021/// Apply default attributes to \p F, accounting for merge semantics of
2022/// attributes that should not overwrite existing attributes.
2023void CodeGenModule::mergeDefaultFunctionDefinitionAttributes(
2024 llvm::Function &F, bool WillInternalize) {
2025 llvm::AttrBuilder FuncAttrs(F.getContext());
2026 getTrivialDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
2027 /*AttrOnCallSite=*/false, FuncAttrs);
2028 GetCPUAndFeaturesAttributes(GlobalDecl(), FuncAttrs);
2029
2030 if (!WillInternalize && F.isInterposable()) {
2031 // Do not promote "dynamic" denormal-fp-math to this translation unit's
2032 // setting for weak functions that won't be internalized. The user has no
2033 // real control for how builtin bitcode is linked, so we shouldn't assume
2034 // later copies will use a consistent mode.
2035 F.addFnAttrs(FuncAttrs);
2036 return;
2037 }
2038
2039 llvm::AttributeMask AttrsToRemove;
2040
2041 llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw();
2042 llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw();
2043 llvm::DenormalMode Merged =
2044 CodeGenOpts.FPDenormalMode.mergeCalleeMode(DenormModeToMerge);
2045 llvm::DenormalMode MergedF32 = CodeGenOpts.FP32DenormalMode;
2046
2047 if (DenormModeToMergeF32.isValid()) {
2048 MergedF32 =
2049 CodeGenOpts.FP32DenormalMode.mergeCalleeMode(DenormModeToMergeF32);
2050 }
2051
2052 if (Merged == llvm::DenormalMode::getDefault()) {
2053 AttrsToRemove.addAttribute("denormal-fp-math");
2054 } else if (Merged != DenormModeToMerge) {
2055 // Overwrite existing attribute
2056 FuncAttrs.addAttribute("denormal-fp-math",
2057 CodeGenOpts.FPDenormalMode.str());
2058 }
2059
2060 if (MergedF32 == llvm::DenormalMode::getDefault()) {
2061 AttrsToRemove.addAttribute("denormal-fp-math-f32");
2062 } else if (MergedF32 != DenormModeToMergeF32) {
2063 // Overwrite existing attribute
2064 FuncAttrs.addAttribute("denormal-fp-math-f32",
2065 CodeGenOpts.FP32DenormalMode.str());
2066 }
2067
2068 F.removeFnAttrs(AttrsToRemove);
2069 addDenormalModeAttrs(Merged, MergedF32, FuncAttrs);
2070 F.addFnAttrs(FuncAttrs);
2071}
2072
2073void CodeGenModule::addDefaultFunctionDefinitionAttributes(
2074 llvm::AttrBuilder &attrs) {
2075 getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
2076 /*for call*/ false, attrs);
2077 GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
2078}
2079
2080static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
2081 const LangOptions &LangOpts,
2082 const NoBuiltinAttr *NBA = nullptr) {
2083 auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
2084 SmallString<32> AttributeName;
2085 AttributeName += "no-builtin-";
2086 AttributeName += BuiltinName;
2087 FuncAttrs.addAttribute(AttributeName);
2088 };
2089
2090 // First, handle the language options passed through -fno-builtin.
2091 if (LangOpts.NoBuiltin) {
2092 // -fno-builtin disables them all.
2093 FuncAttrs.addAttribute("no-builtins");
2094 return;
2095 }
2096
2097 // Then, add attributes for builtins specified through -fno-builtin-<name>.
2098 llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);
2099
2100 // Now, let's check the __attribute__((no_builtin("...")) attribute added to
2101 // the source.
2102 if (!NBA)
2103 return;
2104
2105 // If there is a wildcard in the builtin names specified through the
2106 // attribute, disable them all.
2107 if (llvm::is_contained(NBA->builtinNames(), "*")) {
2108 FuncAttrs.addAttribute("no-builtins");
2109 return;
2110 }
2111
2112 // And last, add the rest of the builtin names.
2113 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
2114}
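// Illustrative note (not part of the original source): compiling with
// -fno-builtin-memcpy -fno-builtin-memset produces the string attributes
// "no-builtin-memcpy" and "no-builtin-memset" on the function, while either
// -fno-builtin or __attribute__((no_builtin("*"))) collapses everything into
// the single "no-builtins" attribute.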
2115
2116static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types,
2117 const llvm::DataLayout &DL, const ABIArgInfo &AI,
2118 bool CheckCoerce = true) {
2119 llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
2120 if (AI.getKind() == ABIArgInfo::Indirect)
2121 return true;
2122 if (AI.getKind() == ABIArgInfo::Extend)
2123 return true;
2124 if (!DL.typeSizeEqualsStoreSize(Ty))
2125 // TODO: This will result in a modest amount of values not marked noundef
2126 // when they could be. We care about values that *invisibly* contain undef
2127 // bits from the perspective of LLVM IR.
2128 return false;
2129 if (CheckCoerce && AI.canHaveCoerceToType()) {
2130 llvm::Type *CoerceTy = AI.getCoerceToType();
2131 if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
2132 DL.getTypeSizeInBits(Ty)))
2133 // If we're coercing to a type with a greater size than the canonical one,
2134 // we're introducing new undef bits.
2135 // Coercing to a type of smaller or equal size is ok, as we know that
2136 // there's no internal padding (typeSizeEqualsStoreSize).
2137 return false;
2138 }
2139 if (QTy->isBitIntType())
2140 return true;
2141 if (QTy->isReferenceType())
2142 return true;
2143 if (QTy->isNullPtrType())
2144 return false;
2145 if (QTy->isMemberPointerType())
2146 // TODO: Some member pointers are `noundef`, but it depends on the ABI. For
2147 // now, never mark them.
2148 return false;
2149 if (QTy->isScalarType()) {
2150 if (const ComplexType *Complex = dyn_cast<ComplexType>(QTy))
2151 return DetermineNoUndef(Complex->getElementType(), Types, DL, AI, false);
2152 return true;
2153 }
2154 if (const VectorType *Vector = dyn_cast<VectorType>(QTy))
2155 return DetermineNoUndef(Vector->getElementType(), Types, DL, AI, false);
2156 if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
2157 return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false);
2158 if (const ArrayType *Array = dyn_cast<ArrayType>(QTy))
2159 return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false);
2160
2161 // TODO: Some structs may be `noundef`, in specific situations.
2162 return false;
2163}
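// Illustrative note (not part of the original source): under this logic a
// plain int, a reference, or a _BitInt parameter is considered safe to mark
// noundef, while a std::nullptr_t, a member pointer, or an arbitrary struct
// is not, since the latter may carry padding or ABI-specific undef bits.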
2164
2165/// Check if the argument of a function has maybe_undef attribute.
2166static bool IsArgumentMaybeUndef(const Decl *TargetDecl,
2167 unsigned NumRequiredArgs, unsigned ArgNo) {
2168 const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
2169 if (!FD)
2170 return false;
2171
2172 // Assume variadic arguments do not have maybe_undef attribute.
2173 if (ArgNo >= NumRequiredArgs)
2174 return false;
2175
2176 // Check if argument has maybe_undef attribute.
2177 if (ArgNo < FD->getNumParams()) {
2178 const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
2179 if (Param && Param->hasAttr<MaybeUndefAttr>())
2180 return true;
2181 }
2182
2183 return false;
2184}
2185
2186/// Test if it's legal to apply nofpclass for the given parameter type and its
2187/// lowered IR type.
2188static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType,
2189 bool IsReturn) {
2190 // Should only apply to FP types in the source, not ABI promoted.
2191 if (!ParamType->hasFloatingRepresentation())
2192 return false;
2193
2194 // The promoted-to IR type also needs to support nofpclass.
2195 llvm::Type *IRTy = AI.getCoerceToType();
2196 if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))
2197 return true;
2198
2199 if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {
2200 return !IsReturn && AI.getCanBeFlattened() &&
2201 llvm::all_of(ST->elements(), [](llvm::Type *Ty) {
2202 return llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty);
2203 });
2204 }
2205
2206 return false;
2207}
2208
2209/// Return the nofpclass mask that can be applied to floating-point parameters.
2210static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts) {
2211 llvm::FPClassTest Mask = llvm::fcNone;
2212 if (LangOpts.NoHonorInfs)
2213 Mask |= llvm::fcInf;
2214 if (LangOpts.NoHonorNaNs)
2215 Mask |= llvm::fcNan;
2216 return Mask;
2217}
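// Illustrative note (not part of the original source): under
// -ffinite-math-only (which sets both NoHonorInfs and NoHonorNaNs) the mask
// becomes fcInf | fcNan, so eligible floating-point parameters and returns
// end up carrying a nofpclass attribute that covers both infinities and NaNs.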
2218
2219/// Construct the IR attribute list of a function or call.
2220///
2221/// When adding an attribute, please consider where it should be handled:
2222///
2223/// - getDefaultFunctionAttributes is for attributes that are essentially
2224/// part of the global target configuration (but perhaps can be
2225/// overridden on a per-function basis). Adding attributes there
2226/// will cause them to also be set in frontends that build on Clang's
2227/// target-configuration logic, as well as for code defined in library
2228/// modules such as CUDA's libdevice.
2229///
2230/// - ConstructAttributeList builds on top of getDefaultFunctionAttributes
2231/// and adds declaration-specific, convention-specific, and
2232/// frontend-specific logic. The last is of particular importance:
2233/// attributes that restrict how the frontend generates code must be
2234/// added here rather than getDefaultFunctionAttributes.
2235///
2236void CodeGenModule::ConstructAttributeList(StringRef Name,
2237 const CGFunctionInfo &FI,
2238 CGCalleeInfo CalleeInfo,
2239 llvm::AttributeList &AttrList,
2240 unsigned &CallingConv,
2241 bool AttrOnCallSite, bool IsThunk) {
2242 llvm::AttrBuilder FuncAttrs(getLLVMContext());
2243 llvm::AttrBuilder RetAttrs(getLLVMContext());
2244
2245 // Collect function IR attributes from the CC lowering.
2246 // We'll collect the parameter and result attributes later.
2247 CallingConv = FI.getEffectiveCallingConvention();
2248 if (FI.isNoReturn())
2249 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2250 if (FI.isCmseNSCall())
2251 FuncAttrs.addAttribute("cmse_nonsecure_call");
2252
2253 // Collect function IR attributes from the callee prototype if we have one.
2254 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
2255 CalleeInfo.getCalleeFunctionProtoType());
2256
2257 const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
2258
2259 // Attach assumption attributes to the declaration. If this is a call
2260 // site, attach assumptions from the caller to the call as well.
2261 AddAttributesFromAssumes(FuncAttrs, TargetDecl);
2262
2263 bool HasOptnone = false;
2264 // The NoBuiltinAttr attached to the target FunctionDecl.
2265 const NoBuiltinAttr *NBA = nullptr;
2266
2267 // Some ABIs may result in additional accesses to arguments that may
2268 // otherwise not be present.
2269 auto AddPotentialArgAccess = [&]() {
2270 llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);
2271 if (A.isValid())
2272 FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
2273 llvm::MemoryEffects::argMemOnly());
2274 };
2275
2276 // Collect function IR attributes based on declaration-specific
2277 // information.
2278 // FIXME: handle sseregparm someday...
2279 if (TargetDecl) {
2280 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
2281 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2282 if (TargetDecl->hasAttr<NoThrowAttr>())
2283 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2284 if (TargetDecl->hasAttr<NoReturnAttr>())
2285 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2286 if (TargetDecl->hasAttr<ColdAttr>())
2287 FuncAttrs.addAttribute(llvm::Attribute::Cold);
2288 if (TargetDecl->hasAttr<HotAttr>())
2289 FuncAttrs.addAttribute(llvm::Attribute::Hot);
2290 if (TargetDecl->hasAttr<NoDuplicateAttr>())
2291 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
2292 if (TargetDecl->hasAttr<ConvergentAttr>())
2293 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2294
2295 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2296 AddAttributesFromFunctionProtoType(
2297 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
2298 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2299 // A sane operator new returns a non-aliasing pointer.
2300 auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2301 if (getCodeGenOpts().AssumeSaneOperatorNew &&
2302 (Kind == OO_New || Kind == OO_Array_New))
2303 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2304 }
2305 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
2306 const bool IsVirtualCall = MD && MD->isVirtual();
2307 // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
2308 // virtual function. These attributes are not inherited by overloads.
2309 if (!(AttrOnCallSite && IsVirtualCall)) {
2310 if (Fn->isNoReturn())
2311 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2312 NBA = Fn->getAttr<NoBuiltinAttr>();
2313 }
2314 // Only place nomerge attribute on call sites, never functions. This
2315 // allows it to work on indirect virtual function calls.
2316 if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
2317 FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
2318 }
2319
2320 // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
2321 if (TargetDecl->hasAttr<ConstAttr>()) {
2322 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
2323 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2324 // gcc specifies that 'const' functions have greater restrictions than
2325 // 'pure' functions, so they also cannot have infinite loops.
2326 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2327 } else if (TargetDecl->hasAttr<PureAttr>()) {
2328 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
2329 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2330 // gcc specifies that 'pure' functions cannot have infinite loops.
2331 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2332 } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
2333 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::argMemOnly());
2334 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2335 }
2336 if (TargetDecl->hasAttr<RestrictAttr>())
2337 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2338 if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
2339 !CodeGenOpts.NullPointerIsValid)
2340 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2341 if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2342 FuncAttrs.addAttribute("no_caller_saved_registers");
2343 if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
2344 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2345 if (TargetDecl->hasAttr<LeafAttr>())
2346 FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2347
2348 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
2349 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
2350 std::optional<unsigned> NumElemsParam;
2351 if (AllocSize->getNumElemsParam().isValid())
2352 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2353 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2354 NumElemsParam);
2355 }
2356
2357 if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
2358 if (getLangOpts().OpenCLVersion <= 120) {
2359 // OpenCL v1.2 Work groups are always uniform
2360 FuncAttrs.addAttribute("uniform-work-group-size", "true");
2361 } else {
2362 // OpenCL v2.0: work groups may or may not be uniform. The
2363 // '-cl-uniform-work-group-size' compile option gives the compiler a hint
2364 // that the global work-size is a multiple of the work-group size
2365 // specified to clEnqueueNDRangeKernel
2366 // (i.e. work groups are uniform).
2367 FuncAttrs.addAttribute("uniform-work-group-size",
2368 llvm::toStringRef(CodeGenOpts.UniformWGSize));
2369 }
2370 }
2371 }
2372
2373 // Attach "no-builtins" attributes to:
2374 // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
2375 // * definitions: "no-builtins" or "no-builtin-<name>" only.
2376 // The attributes can come from:
2377 // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
2378 // * FunctionDecl attributes: __attribute__((no_builtin(...)))
2379 addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);
2380
2381 // Collect function IR attributes based on global settings.
2382 getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2383
2384 // Override some default IR attributes based on declaration-specific
2385 // information.
2386 if (TargetDecl) {
2387 if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
2388 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2389 if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
2390 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2391 if (TargetDecl->hasAttr<NoSplitStackAttr>())
2392 FuncAttrs.removeAttribute("split-stack");
2393 if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) {
2394 // A function "__attribute__((...))" overrides the command-line flag.
2395 auto Kind =
2396 TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
2397 FuncAttrs.removeAttribute("zero-call-used-regs");
2398 FuncAttrs.addAttribute(
2399 "zero-call-used-regs",
2400 ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));
2401 }
2402
2403 // Add NonLazyBind attribute to function declarations when -fno-plt
2404 // is used.
2405 // FIXME: what if we just haven't processed the function definition
2406 // yet, or if it's an external definition like C99 inline?
2407 if (CodeGenOpts.NoPLT) {
2408 if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2409 if (!Fn->isDefined() && !AttrOnCallSite) {
2410 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2411 }
2412 }
2413 }
2414 }
2415
2416 // Add "sample-profile-suffix-elision-policy" attribute for internal linkage
2417 // functions with -funique-internal-linkage-names.
2418 if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
2419 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
2420 if (!FD->isExternallyVisible())
2421 FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",
2422 "selected");
2423 }
2424 }
2425
2426 // Collect non-call-site function IR attributes from declaration-specific
2427 // information.
2428 if (!AttrOnCallSite) {
2429 if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
2430 FuncAttrs.addAttribute("cmse_nonsecure_entry");
2431
2432 // Whether tail calls are enabled.
2433 auto shouldDisableTailCalls = [&] {
2434 // Should this be honored in getDefaultFunctionAttributes?
2435 if (CodeGenOpts.DisableTailCalls)
2436 return true;
2437
2438 if (!TargetDecl)
2439 return false;
2440
2441 if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
2442 TargetDecl->hasAttr<AnyX86InterruptAttr>())
2443 return true;
2444
2445 if (CodeGenOpts.NoEscapingBlockTailCalls) {
2446 if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2447 if (!BD->doesNotEscape())
2448 return true;
2449 }
2450
2451 return false;
2452 };
2453 if (shouldDisableTailCalls())
2454 FuncAttrs.addAttribute("disable-tail-calls", "true");
2455
2456 // CPU/feature overrides. addDefaultFunctionDefinitionAttributes
2457 // handles these separately to set them based on the global defaults.
2458 GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
2459 }
2460
2461 // Collect attributes from arguments and return values.
2462 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
2463
2464 QualType RetTy = FI.getReturnType();
2465 const ABIArgInfo &RetAI = FI.getReturnInfo();
2466 const llvm::DataLayout &DL = getDataLayout();
2467
2468 // Determine if the return type could be partially undef
2469 if (CodeGenOpts.EnableNoundefAttrs &&
2470 HasStrictReturn(*this, RetTy, TargetDecl)) {
2471 if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect &&
2472 DetermineNoUndef(RetTy, getTypes(), DL, RetAI))
2473 RetAttrs.addAttribute(llvm::Attribute::NoUndef);
2474 }
2475
2476 switch (RetAI.getKind()) {
2477 case ABIArgInfo::Extend:
2478 if (RetAI.isSignExt())
2479 RetAttrs.addAttribute(llvm::Attribute::SExt);
2480 else
2481 RetAttrs.addAttribute(llvm::Attribute::ZExt);
2482 [[fallthrough]];
2483 case ABIArgInfo::Direct:
2484 if (RetAI.getInReg())
2485 RetAttrs.addAttribute(llvm::Attribute::InReg);
2486
2487 if (canApplyNoFPClass(RetAI, RetTy, true))
2488 RetAttrs.addNoFPClassAttr(getNoFPClassTestMask(getLangOpts()));
2489
2490 break;
2491 case ABIArgInfo::Ignore:
2492 break;
2493
2494 case ABIArgInfo::InAlloca:
2495 case ABIArgInfo::Indirect: {
2496 // inalloca and sret disable readnone and readonly
2497 AddPotentialArgAccess();
2498 break;
2499 }
2500
2501 case ABIArgInfo::CoerceAndExpand:
2502 break;
2503
2504 case ABIArgInfo::Expand:
2505 case ABIArgInfo::IndirectAliased:
2506 llvm_unreachable("Invalid ABI kind for return argument")::llvm::llvm_unreachable_internal("Invalid ABI kind for return argument"
, "clang/lib/CodeGen/CGCall.cpp", 2506)
;
2507 }
2508
2509 if (!IsThunk) {
2510 // FIXME: fix this properly, https://reviews.llvm.org/D100388
2511 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2512 QualType PTy = RefTy->getPointeeType();
2513 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2514 RetAttrs.addDereferenceableAttr(
2515 getMinimumObjectSize(PTy).getQuantity());
2516 if (getTypes().getTargetAddressSpace(PTy) == 0 &&
2517 !CodeGenOpts.NullPointerIsValid)
2518 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2519 if (PTy->isObjectType()) {
2520 llvm::Align Alignment =
2521 getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
2522 RetAttrs.addAlignmentAttr(Alignment);
2523 }
2524 }
2525 }
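Illustrative aside (not part of CGCall.cpp): a function this block applies to; the name is hypothetical.

// A function returning a C++ reference. Per the block above, the IR return
// value gets dereferenceable(sizeof(int)), nonnull (in address space 0 when a
// null pointer is not considered valid), and an align attribute derived from
// the pointee's natural alignment.
int &min_ref(int &a, int &b) { return b < a ? b : a; }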
2526
2527 bool hasUsedSRet = false;
2528 SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2529
2530 // Attach attributes to sret.
2531 if (IRFunctionArgs.hasSRetArg()) {
2532 llvm::AttrBuilder SRETAttrs(getLLVMContext());
2533 SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
2534 hasUsedSRet = true;
2535 if (RetAI.getInReg())
2536 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2537 SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
2538 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2539 llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2540 }
2541
2542 // Attach attributes to inalloca argument.
2543 if (IRFunctionArgs.hasInallocaArg()) {
2544 llvm::AttrBuilder Attrs(getLLVMContext());
2545 Attrs.addInAllocaAttr(FI.getArgStruct());
2546 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2547 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2548 }
2549
2550 // Apply `nonnull`, `dereferenceable(N)` and `align N` to the `this` argument,
2551 // unless this is a thunk function.
2552 // FIXME: fix this properly, https://reviews.llvm.org/D100388
2553 if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() &&
2554 !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) {
2555 auto IRArgs = IRFunctionArgs.getIRArgs(0);
2556
2557 assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");
2558
2559 llvm::AttrBuilder Attrs(getLLVMContext());
2560
2561 QualType ThisTy =
2562 FI.arg_begin()->type.castAs<PointerType>()->getPointeeType();
2563
2564 if (!CodeGenOpts.NullPointerIsValid &&
2565 getTypes().getTargetAddressSpace(FI.arg_begin()->type) == 0) {
2566 Attrs.addAttribute(llvm::Attribute::NonNull);
2567 Attrs.addDereferenceableAttr(getMinimumObjectSize(ThisTy).getQuantity());
2568 } else {
2569 // FIXME dereferenceable should be correct here, regardless of
2570 // NullPointerIsValid. However, dereferenceable currently does not always
2571 // respect NullPointerIsValid and may imply nonnull and break the program.
2572 // See https://reviews.llvm.org/D66618 for discussions.
2573 Attrs.addDereferenceableOrNullAttr(
2574 getMinimumObjectSize(
2575 FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
2576 .getQuantity());
2577 }
2578
2579 llvm::Align Alignment =
2580 getNaturalTypeAlignment(ThisTy, /*BaseInfo=*/nullptr,
2581 /*TBAAInfo=*/nullptr, /*forPointeeType=*/true)
2582 .getAsAlign();
2583 Attrs.addAlignmentAttr(Alignment);
2584
2585 ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);
2586 }
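Illustrative aside (not part of CGCall.cpp): the kind of implicit 'this' parameter the block above decorates; the type below is hypothetical.

// For Widget::key(), the implicit 'this' argument typically becomes
// nonnull + dereferenceable(8) (or dereferenceable_or_null(8) when null
// pointers are treated as valid) plus align 4, taken from Widget's size
// and alignment on a typical target where int is 4 bytes.
struct Widget {
  int id;
  int state;
  int key() const { return id; }
};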
2587
2588 unsigned ArgNo = 0;
2589 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2590 E = FI.arg_end();
2591 I != E; ++I, ++ArgNo) {
2592 QualType ParamType = I->type;
2593 const ABIArgInfo &AI = I->info;
2594 llvm::AttrBuilder Attrs(getLLVMContext());
2595
2596 // Add attribute for padding argument, if necessary.
2597 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2598 if (AI.getPaddingInReg()) {
2599 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2600 llvm::AttributeSet::get(
2601 getLLVMContext(),
2602 llvm::AttrBuilder(getLLVMContext()).addAttribute(llvm::Attribute::InReg));
2603 }
2604 }
2605
2606 // Decide whether the argument we're handling could be partially undef
2607 if (CodeGenOpts.EnableNoundefAttrs &&
2608 DetermineNoUndef(ParamType, getTypes(), DL, AI)) {
2609 Attrs.addAttribute(llvm::Attribute::NoUndef);
2610 }
2611
2612 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2613 // have the corresponding parameter variable. It doesn't make
2614 // sense to do it here because parameters are so messed up.
2615 switch (AI.getKind()) {
2616 case ABIArgInfo::Extend:
2617 if (AI.isSignExt())
2618 Attrs.addAttribute(llvm::Attribute::SExt);
2619 else
2620 Attrs.addAttribute(llvm::Attribute::ZExt);
2621 [[fallthrough]];
2622 case ABIArgInfo::Direct:
2623 if (ArgNo == 0 && FI.isChainCall())
2624 Attrs.addAttribute(llvm::Attribute::Nest);
2625 else if (AI.getInReg())
2626 Attrs.addAttribute(llvm::Attribute::InReg);
2627 Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign()));
2628
2629 if (canApplyNoFPClass(AI, ParamType, false))
2630 Attrs.addNoFPClassAttr(getNoFPClassTestMask(getLangOpts()));
2631 break;
2632 case ABIArgInfo::Indirect: {
2633 if (AI.getInReg())
2634 Attrs.addAttribute(llvm::Attribute::InReg);
2635
2636 if (AI.getIndirectByVal())
2637 Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
2638
2639 auto *Decl = ParamType->getAsRecordDecl();
2640 if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
2641 Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs)
2642 // When calling the function, the pointer passed in will be the only
2643 // reference to the underlying object. Mark it accordingly.
2644 Attrs.addAttribute(llvm::Attribute::NoAlias);
2645
2646 // TODO: We could add the byref attribute if not byval, but it would
2647 // require updating many testcases.
2648
2649 CharUnits Align = AI.getIndirectAlign();
2650
2651 // In a byval argument, it is important that the required
2652 // alignment of the type is honored, as LLVM might be creating a
2653 // *new* stack object, and needs to know what alignment to give
2654 // it. (Sometimes it can deduce a sensible alignment on its own,
2655 // but not if clang decides it must emit a packed struct, or the
2656 // user specifies increased alignment requirements.)
2657 //
2658 // This is different from indirect *not* byval, where the object
2659 // exists already, and the align attribute is purely
2660 // informative.
2661 assert(!Align.isZero());
2662
2663 // For now, only add this when we have a byval argument.
2664 // TODO: be less lazy about updating test cases.
2665 if (AI.getIndirectByVal())
2666 Attrs.addAlignmentAttr(Align.getQuantity());
2667
2668 // byval disables readnone and readonly.
2669 AddPotentialArgAccess();
2670 break;
2671 }
2672 case ABIArgInfo::IndirectAliased: {
2673 CharUnits Align = AI.getIndirectAlign();
2674 Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));
2675 Attrs.addAlignmentAttr(Align.getQuantity());
2676 break;
2677 }
2678 case ABIArgInfo::Ignore:
2679 case ABIArgInfo::Expand:
2680 case ABIArgInfo::CoerceAndExpand:
2681 break;
2682
2683 case ABIArgInfo::InAlloca:
2684 // inalloca disables readnone and readonly.
2685 AddPotentialArgAccess();
2686 continue;
2687 }
2688
2689 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2690 QualType PTy = RefTy->getPointeeType();
2691 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2692 Attrs.addDereferenceableAttr(
2693 getMinimumObjectSize(PTy).getQuantity());
2694 if (getTypes().getTargetAddressSpace(PTy) == 0 &&
2695 !CodeGenOpts.NullPointerIsValid)
2696 Attrs.addAttribute(llvm::Attribute::NonNull);
2697 if (PTy->isObjectType()) {
2698 llvm::Align Alignment =
2699 getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
2700 Attrs.addAlignmentAttr(Alignment);
2701 }
2702 }
2703
2704 // From OpenCL spec v3.0.10 section 6.3.5 Alignment of Types:
2705 // > For arguments to a __kernel function declared to be a pointer to a
2706 // > data type, the OpenCL compiler can assume that the pointee is always
2707 // > appropriately aligned as required by the data type.
2708 if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>() &&
2709 ParamType->isPointerType()) {
2710 QualType PTy = ParamType->getPointeeType();
2711 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2712 llvm::Align Alignment =
2713 getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
2714 Attrs.addAlignmentAttr(Alignment);
2715 }
2716 }
2717
2718 switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2719 case ParameterABI::Ordinary:
2720 break;
2721
2722 case ParameterABI::SwiftIndirectResult: {
2723 // Add 'sret' if we haven't already used it for something, but
2724 // only if the result is void.
2725 if (!hasUsedSRet && RetTy->isVoidType()) {
2726 Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));
2727 hasUsedSRet = true;
2728 }
2729
2730 // Add 'noalias' in either case.
2731 Attrs.addAttribute(llvm::Attribute::NoAlias);
2732
2733 // Add 'dereferenceable' and 'alignment'.
2734 auto PTy = ParamType->getPointeeType();
2735 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2736 auto info = getContext().getTypeInfoInChars(PTy);
2737 Attrs.addDereferenceableAttr(info.Width.getQuantity());
2738 Attrs.addAlignmentAttr(info.Align.getAsAlign());
2739 }
2740 break;
2741 }
2742
2743 case ParameterABI::SwiftErrorResult:
2744 Attrs.addAttribute(llvm::Attribute::SwiftError);
2745 break;
2746
2747 case ParameterABI::SwiftContext:
2748 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2749 break;
2750
2751 case ParameterABI::SwiftAsyncContext:
2752 Attrs.addAttribute(llvm::Attribute::SwiftAsync);
2753 break;
2754 }
2755
2756 if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2757 Attrs.addAttribute(llvm::Attribute::NoCapture);
2758
2759 if (Attrs.hasAttributes()) {
2760 unsigned FirstIRArg, NumIRArgs;
2761 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2762 for (unsigned i = 0; i < NumIRArgs; i++)
2763 ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(
2764 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), Attrs));
2765 }
2766 }
2767 assert(ArgNo == FI.arg_size());
2768
2769 AttrList = llvm::AttributeList::get(
2770 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2771 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2772}
2773
2774/// An argument came in as a promoted argument; demote it back to its
2775/// declared type.
2776static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2777 const VarDecl *var,
2778 llvm::Value *value) {
2779 llvm::Type *varType = CGF.ConvertType(var->getType());
2780
2781 // This can happen with promotions that actually don't change the
2782 // underlying type, like the enum promotions.
2783 if (value->getType() == varType) return value;
2784
2785 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2786        && "unexpected promotion type");
2787
2788 if (isa<llvm::IntegerType>(varType))
2789 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2790
2791 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2792}
2793
2794/// Returns the attribute (either a parameter attribute or a function
2795/// attribute) that declares argument ArgNo to be non-null.
2796static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2797 QualType ArgType, unsigned ArgNo) {
2798 // FIXME: __attribute__((nonnull)) can also be applied to:
2799 // - references to pointers, where the pointee is known to be
2800 // nonnull (apparently a Clang extension)
2801 // - transparent unions containing pointers
2802 // In the former case, LLVM IR cannot represent the constraint. In
2803 // the latter case, we have no guarantee that the transparent union
2804 // is in fact passed as a pointer.
2805 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2806 return nullptr;
2807 // First, check attribute on parameter itself.
2808 if (PVD) {
2809 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2810 return ParmNNAttr;
2811 }
2812 // Check function attributes.
2813 if (!FD)
2814 return nullptr;
2815 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2816 if (NNAttr->isNonNull(ArgNo))
2817 return NNAttr;
2818 }
2819 return nullptr;
2820}
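Illustrative aside (not part of CGCall.cpp): the two spellings getNonNullAttr searches for; the declarations below are hypothetical.

// The first form attaches NonNullAttr directly to the parameter; the second
// attaches it to the function with the 1-based indices of the pointer
// arguments it covers.
void use_parm(int *p __attribute__((nonnull)));
__attribute__((nonnull(1, 3)))
void use_func(int *dst, int flags, int *src);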
2821
2822namespace {
2823 struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2824 Address Temp;
2825 Address Arg;
2826 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2827 void Emit(CodeGenFunction &CGF, Flags flags) override {
2828 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2829 CGF.Builder.CreateStore(errorValue, Arg);
2830 }
2831 };
2832}
2833
2834void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2835 llvm::Function *Fn,
2836 const FunctionArgList &Args) {
2837 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
1: Assuming field 'CurCodeDecl' is null
2838 // Naked functions don't have prologues.
2839 return;
2840
2841 // If this is an implicit-return-zero function, go ahead and
2842 // initialize the return value. TODO: it might be nice to have
2843 // a more general mechanism for this that didn't require synthesized
2844 // return statements.
2845 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2: Assuming null pointer is passed into cast
2.1: 'FD' is null
3: Taking false branch
2846 if (FD->hasImplicitReturnZero()) {
2847 QualType RetTy = FD->getReturnType().getUnqualifiedType();
2848 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2849 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2850 Builder.CreateStore(Zero, ReturnValue);
2851 }
2852 }
2853
2854 // FIXME: We no longer need the types from FunctionArgList; lift up and
2855 // simplify.
2856
2857 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
4: Calling constructor for 'ClangToLLVMArgMapping'
2858 assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
2859
2860 // If we're using inalloca, all the memory arguments are GEPs off of the last
2861 // parameter, which is a pointer to the complete memory area.
2862 Address ArgStruct = Address::invalid();
2863 if (IRFunctionArgs.hasInallocaArg()) {
2864 ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
2865 FI.getArgStruct(), FI.getArgStructAlignment());
2866
2867 assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2868 }
2869
2870 // Name the struct return parameter.
2871 if (IRFunctionArgs.hasSRetArg()) {
2872 auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
2873 AI->setName("agg.result");
2874 AI->addAttr(llvm::Attribute::NoAlias);
2875 }
2876
2877 // Track if we received the parameter as a pointer (indirect, byval, or
2878 // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
2879 // into a local alloca for us.
2880 SmallVector<ParamValue, 16> ArgVals;
2881 ArgVals.reserve(Args.size());
2882
2883 // Create a pointer value for every parameter declaration. This usually
2884 // entails copying one or more LLVM IR arguments into an alloca. Don't push
2885 // any cleanups or do anything that might unwind. We do that separately, so
2886 // we can push the cleanups in the correct order for the ABI.
2887 assert(FI.arg_size() == Args.size() &&
2888        "Mismatch between function signature & arguments.");
2889 unsigned ArgNo = 0;
2890 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2891 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2892 i != e; ++i, ++info_it, ++ArgNo) {
2893 const VarDecl *Arg = *i;
2894 const ABIArgInfo &ArgI = info_it->info;
2895
2896 bool isPromoted =
2897 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2898 // We are converting from ABIArgInfo type to VarDecl type directly, unless
2899 // the parameter is promoted. In this case we convert to
2900 // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2901 QualType Ty = isPromoted ? info_it->type : Arg->getType();
2902 assert(hasScalarEvaluationKind(Ty) ==
2903        hasScalarEvaluationKind(Arg->getType()));
2904
2905 unsigned FirstIRArg, NumIRArgs;
2906 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2907
2908 switch (ArgI.getKind()) {
2909 case ABIArgInfo::InAlloca: {
2910 assert(NumIRArgs == 0);
2911 auto FieldIndex = ArgI.getInAllocaFieldIndex();
2912 Address V =
2913 Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2914 if (ArgI.getInAllocaIndirect())
2915 V = Address(Builder.CreateLoad(V), ConvertTypeForMem(Ty),
2916 getContext().getTypeAlignInChars(Ty));
2917 ArgVals.push_back(ParamValue::forIndirect(V));
2918 break;
2919 }
2920
2921 case ABIArgInfo::Indirect:
2922 case ABIArgInfo::IndirectAliased: {
2923 assert(NumIRArgs == 1);
2924 Address ParamAddr = Address(Fn->getArg(FirstIRArg), ConvertTypeForMem(Ty),
2925 ArgI.getIndirectAlign(), KnownNonNull);
2926
2927 if (!hasScalarEvaluationKind(Ty)) {
2928 // Aggregates and complex variables are accessed by reference. All we
2929 // need to do is realign the value, if requested. Also, if the address
2930 // may be aliased, copy it to ensure that the parameter variable is
2931 // mutable and has a unique address, as C requires.
2932 Address V = ParamAddr;
2933 if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
2934 Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2935
2936 // Copy from the incoming argument pointer to the temporary with the
2937 // appropriate alignment.
2938 //
2939 // FIXME: We should have a common utility for generating an aggregate
2940 // copy.
2941 CharUnits Size = getContext().getTypeSizeInChars(Ty);
2942 Builder.CreateMemCpy(
2943 AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
2944 ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
2945 llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
2946 V = AlignedTemp;
2947 }
2948 ArgVals.push_back(ParamValue::forIndirect(V));
2949 } else {
2950 // Load scalar value from indirect argument.
2951 llvm::Value *V =
2952 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2953
2954 if (isPromoted)
2955 V = emitArgumentDemotion(*this, Arg, V);
2956 ArgVals.push_back(ParamValue::forDirect(V));
2957 }
2958 break;
2959 }
2960
2961 case ABIArgInfo::Extend:
2962 case ABIArgInfo::Direct: {
2963 auto AI = Fn->getArg(FirstIRArg);
2964 llvm::Type *LTy = ConvertType(Arg->getType());
2965
2966 // Prepare parameter attributes. So far, only attributes for pointer
2967 // parameters are prepared. See
2968 // http://llvm.org/docs/LangRef.html#paramattrs.
2969 if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
2970 ArgI.getCoerceToType()->isPointerTy()) {
2971 assert(NumIRArgs == 1);
2972
2973 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2974 // Set `nonnull` attribute if any.
2975 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2976 PVD->getFunctionScopeIndex()) &&
2977 !CGM.getCodeGenOpts().NullPointerIsValid)
2978 AI->addAttr(llvm::Attribute::NonNull);
2979
2980 QualType OTy = PVD->getOriginalType();
2981 if (const auto *ArrTy =
2982 getContext().getAsConstantArrayType(OTy)) {
2983 // A C99 array parameter declaration with the static keyword also
2984 // indicates dereferenceability, and if the size is constant we can
2985 // use the dereferenceable attribute (which requires the size in
2986 // bytes).
2987 if (ArrTy->getSizeModifier() == ArrayType::Static) {
2988 QualType ETy = ArrTy->getElementType();
2989 llvm::Align Alignment =
2990 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
2991 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
2992 uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2993 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2994 ArrSize) {
2995 llvm::AttrBuilder Attrs(getLLVMContext());
2996 Attrs.addDereferenceableAttr(
2997 getContext().getTypeSizeInChars(ETy).getQuantity() *
2998 ArrSize);
2999 AI->addAttrs(Attrs);
3000 } else if (getContext().getTargetInfo().getNullPointerValue(
3001 ETy.getAddressSpace()) == 0 &&
3002 !CGM.getCodeGenOpts().NullPointerIsValid) {
3003 AI->addAttr(llvm::Attribute::NonNull);
3004 }
3005 }
3006 } else if (const auto *ArrTy =
3007 getContext().getAsVariableArrayType(OTy)) {
3008 // For C99 VLAs with the static keyword, we don't know the size so
3009 // we can't use the dereferenceable attribute, but in addrspace(0)
3010 // we know that it must be nonnull.
3011 if (ArrTy->getSizeModifier() == VariableArrayType::Static) {
3012 QualType ETy = ArrTy->getElementType();
3013 llvm::Align Alignment =
3014 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
3015 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
3016 if (!getTypes().getTargetAddressSpace(ETy) &&
3017 !CGM.getCodeGenOpts().NullPointerIsValid)
3018 AI->addAttr(llvm::Attribute::NonNull);
3019 }
3020 }
3021
3022 // Set `align` attribute if any.
3023 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
3024 if (!AVAttr)
3025 if (const auto *TOTy = OTy->getAs<TypedefType>())
3026 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
3027 if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
3028 // If alignment-assumption sanitizer is enabled, we do *not* add
3029 // alignment attribute here, but emit normal alignment assumption,
3030 // so the UBSAN check could function.
3031 llvm::ConstantInt *AlignmentCI =
3032 cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment()));
3033 uint64_t AlignmentInt =
3034 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
3035 if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
3036 AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
3037 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(
3038 llvm::Align(AlignmentInt)));
3039 }
3040 }
3041 }
3042
3043 // Set 'noalias' if an argument type has the `restrict` qualifier.
3044 if (Arg->getType().isRestrictQualified())
3045 AI->addAttr(llvm::Attribute::NoAlias);
3046 }
3047
3048 // Prepare the argument value. If we have the trivial case, handle it
3049 // with no muss and fuss.
3050 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
3051 ArgI.getCoerceToType() == ConvertType(Ty) &&
3052 ArgI.getDirectOffset() == 0) {
3053 assert(NumIRArgs == 1);
3054
3055 // LLVM expects swifterror parameters to be used in very restricted
3056 // ways. Copy the value into a less-restricted temporary.
3057 llvm::Value *V = AI;
3058 if (FI.getExtParameterInfo(ArgNo).getABI()
3059 == ParameterABI::SwiftErrorResult) {
3060 QualType pointeeTy = Ty->getPointeeType();
3061 assert(pointeeTy->isPointerType());
3062 Address temp =
3063 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
3064 Address arg(V, ConvertTypeForMem(pointeeTy),
3065 getContext().getTypeAlignInChars(pointeeTy));
3066 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
3067 Builder.CreateStore(incomingErrorValue, temp);
3068 V = temp.getPointer();
3069
3070 // Push a cleanup to copy the value back at the end of the function.
3071 // The convention does not guarantee that the value will be written
3072 // back if the function exits with an unwind exception.
3073 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
3074 }
3075
3076 // Ensure the argument is the correct type.
3077 if (V->getType() != ArgI.getCoerceToType())
3078 V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
3079
3080 if (isPromoted)
3081 V = emitArgumentDemotion(*this, Arg, V);
3082
3083 // Because of merging of function types from multiple decls it is
3084 // possible for the type of an argument to not match the corresponding
3085 // type in the function type. Since we are codegening the callee
3086 // in here, add a cast to the argument type.
3087 llvm::Type *LTy = ConvertType(Arg->getType());
3088 if (V->getType() != LTy)
3089 V = Builder.CreateBitCast(V, LTy);
3090
3091 ArgVals.push_back(ParamValue::forDirect(V));
3092 break;
3093 }
3094
3095 // VLST arguments are coerced to VLATs at the function boundary for
3096 // ABI consistency. If this is a VLST that was coerced to
3097 // a VLAT at the function boundary and the types match up, use
3098 // llvm.vector.extract to convert back to the original VLST.
3099 if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
3100 llvm::Value *Coerced = Fn->getArg(FirstIRArg);
3101 if (auto *VecTyFrom =
3102 dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
3103 // If we are casting a scalable 16 x i1 predicate vector to a fixed i8
3104 // vector, bitcast the source and use a vector extract.
3105 auto PredType =
3106 llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
3107 if (VecTyFrom == PredType &&
3108 VecTyTo->getElementType() == Builder.getInt8Ty()) {
3109 VecTyFrom = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
3110 Coerced = Builder.CreateBitCast(Coerced, VecTyFrom);
3111 }
3112 if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
3113 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
3114
3115 assert(NumIRArgs == 1);
3116 Coerced->setName(Arg->getName() + ".coerce");
3117 ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector(
3118 VecTyTo, Coerced, Zero, "castFixedSve")));
3119 break;
3120 }
3121 }
3122 }
3123
3124 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
3125 Arg->getName());
3126
3127 // Pointer to store into.
3128 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
3129
3130 // Fast-isel and the optimizer generally like scalar values better than
3131 // FCAs, so we flatten them if this is safe to do for this argument.
3132 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
3133 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
3134 STy->getNumElements() > 1) {
3135 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
3136 llvm::Type *DstTy = Ptr.getElementType();
3137 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
3138
3139 Address AddrToStoreInto = Address::invalid();
3140 if (SrcSize <= DstSize) {
3141 AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
3142 } else {
3143 AddrToStoreInto =
3144 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
3145 }
3146
3147 assert(STy->getNumElements() == NumIRArgs);
3148 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3149 auto AI = Fn->getArg(FirstIRArg + i);
3150 AI->setName(Arg->getName() + ".coerce" + Twine(i));
3151 Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
3152 Builder.CreateStore(AI, EltPtr);
3153 }
3154
3155 if (SrcSize > DstSize) {
3156 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
3157 }
3158
3159 } else {
3160 // Simple case, just do a coerced store of the argument into the alloca.
3161 assert(NumIRArgs == 1);
3162 auto AI = Fn->getArg(FirstIRArg);
3163 AI->setName(Arg->getName() + ".coerce");
3164 CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
3165 }
3166
3167 // Match to what EmitParmDecl is expecting for this type.
3168 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
3169 llvm::Value *V =
3170 EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
3171 if (isPromoted)
3172 V = emitArgumentDemotion(*this, Arg, V);
3173 ArgVals.push_back(ParamValue::forDirect(V));
3174 } else {
3175 ArgVals.push_back(ParamValue::forIndirect(Alloca));
3176 }
3177 break;
3178 }
3179
3180 case ABIArgInfo::CoerceAndExpand: {
3181 // Reconstruct into a temporary.
3182 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
3183 ArgVals.push_back(ParamValue::forIndirect(alloca));
3184
3185 auto coercionType = ArgI.getCoerceAndExpandType();
3186 alloca = Builder.CreateElementBitCast(alloca, coercionType);
3187
3188 unsigned argIndex = FirstIRArg;
3189 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3190 llvm::Type *eltType = coercionType->getElementType(i);
3191 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
3192 continue;
3193
3194 auto eltAddr = Builder.CreateStructGEP(alloca, i);
3195 auto elt = Fn->getArg(argIndex++);
3196 Builder.CreateStore(elt, eltAddr);
3197 }
3198 assert(argIndex == FirstIRArg + NumIRArgs);
3199 break;
3200 }
3201
3202 case ABIArgInfo::Expand: {
3203 // If this structure was expanded into multiple arguments then
3204 // we need to create a temporary and reconstruct it from the
3205 // arguments.
3206 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
3207 LValue LV = MakeAddrLValue(Alloca, Ty);
3208 ArgVals.push_back(ParamValue::forIndirect(Alloca));
3209
3210 auto FnArgIter = Fn->arg_begin() + FirstIRArg;
3211 ExpandTypeFromArgs(Ty, LV, FnArgIter);
3212 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
3213 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
3214 auto AI = Fn->getArg(FirstIRArg + i);
3215 AI->setName(Arg->getName() + "." + Twine(i));
3216 }
3217 break;
3218 }
3219
3220 case ABIArgInfo::Ignore:
3221 assert(NumIRArgs == 0);
3222 // Initialize the local variable appropriately.
3223 if (!hasScalarEvaluationKind(Ty)) {
3224 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
3225 } else {
3226 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
3227 ArgVals.push_back(ParamValue::forDirect(U));
3228 }
3229 break;
3230 }
3231 }
3232
3233 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3234 for (int I = Args.size() - 1; I >= 0; --I)
3235 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
3236 } else {
3237 for (unsigned I = 0, E = Args.size(); I != E; ++I)
3238 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
3239 }
3240}
3241
3242static void eraseUnusedBitCasts(llvm::Instruction *insn) {
3243 while (insn->use_empty()) {
3244 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
3245 if (!bitcast) return;
3246
3247 // This is "safe" because we would have used a ConstantExpr otherwise.
3248 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
3249 bitcast->eraseFromParent();
3250 }
3251}
3252
3253/// Try to emit a fused autorelease of a return result.
3254static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
3255 llvm::Value *result) {
3256 // We must be immediately followed by the cast.
3257 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
3258 if (BB->empty()) return nullptr;
3259 if (&BB->back() != result) return nullptr;
3260
3261 llvm::Type *resultType = result->getType();
3262
3263 // result is in a BasicBlock and is therefore an Instruction.
3264 llvm::Instruction *generator = cast<llvm::Instruction>(result);
3265
3266 SmallVector<llvm::Instruction *, 4> InstsToKill;
3267
3268 // Look for:
3269 // %generator = bitcast %type1* %generator2 to %type2*
3270 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
3271 // We would have emitted this as a constant if the operand weren't
3272 // an Instruction.
3273 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
3274
3275 // Require the generator to be immediately followed by the cast.
3276 if (generator->getNextNode() != bitcast)
3277 return nullptr;
3278
3279 InstsToKill.push_back(bitcast);
3280 }
3281
3282 // Look for:
3283 // %generator = call i8* @objc_retain(i8* %originalResult)
3284 // or
3285 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
3286 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
3287 if (!call) return nullptr;
3288
3289 bool doRetainAutorelease;
3290
3291 if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
3292 doRetainAutorelease = true;
3293 } else if (call->getCalledOperand() ==
3294 CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
3295 doRetainAutorelease = false;
3296
3297 // If we emitted an assembly marker for this call (and the
3298 // ARCEntrypoints field should have been set if so), go looking
3299 // for that call. If we can't find it, we can't do this
3300 // optimization. But it should always be the immediately previous
3301 // instruction, unless we needed bitcasts around the call.
3302 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
3303 llvm::Instruction *prev = call->getPrevNode();
3304 assert(prev);
3305 if (isa<llvm::BitCastInst>(prev)) {
3306 prev = prev->getPrevNode();
3307 assert(prev);
3308 }
3309 assert(isa<llvm::CallInst>(prev));
3310 assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
3311        CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
3312 InstsToKill.push_back(prev);
3313 }
3314 } else {
3315 return nullptr;
3316 }
3317
3318 result = call->getArgOperand(0);
3319 InstsToKill.push_back(call);
3320
3321 // Keep killing bitcasts, for sanity. Note that we no longer care
3322 // about precise ordering as long as there's exactly one use.
3323 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
3324 if (!bitcast->hasOneUse()) break;
3325 InstsToKill.push_back(bitcast);
3326 result = bitcast->getOperand(0);
3327 }
3328
3329 // Delete all the unnecessary instructions, from latest to earliest.
3330 for (auto *I : InstsToKill)
3331 I->eraseFromParent();
3332
3333 // Do the fused retain/autorelease if we were asked to.
3334 if (doRetainAutorelease)
3335 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
3336
3337 // Cast back to the result type.
3338 return CGF.Builder.CreateBitCast(result, resultType);
3339}
3340
3341/// If this is a +1 of the value of an immutable 'self', remove it.
3342static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
3343 llvm::Value *result) {
3344 // This is only applicable to a method with an immutable 'self'.
3345 const ObjCMethodDecl *method =
3346 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
3347 if (!method) return nullptr;
3348 const VarDecl *self = method->getSelfDecl();
3349 if (!self->getType().isConstQualified()) return nullptr;
3350
3351 // Look for a retain call.
3352 llvm::CallInst *retainCall =
3353 dyn_cast<llvm::CallInst>(result->stripPointerCasts());
3354 if (!retainCall || retainCall->getCalledOperand() !=
3355 CGF.CGM.getObjCEntrypoints().objc_retain)
3356 return nullptr;
3357
3358 // Look for an ordinary load of 'self'.
3359 llvm::Value *retainedValue = retainCall->getArgOperand(0);
3360 llvm::LoadInst *load =
3361 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
3362 if (!load || load->isAtomic() || load->isVolatile() ||
3363 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
3364 return nullptr;
3365
3366 // Okay! Burn it all down. This relies for correctness on the
3367 // assumption that the retain is emitted as part of the return and
3368 // that thereafter everything is used "linearly".
3369 llvm::Type *resultType = result->getType();
3370 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
3371 assert(retainCall->use_empty());
3372 retainCall->eraseFromParent();
3373 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
3374
3375 return CGF.Builder.CreateBitCast(load, resultType);
3376}
3377
3378/// Emit an ARC autorelease of the result of a function.
3379///
3380/// \return the value to actually return from the function
3381static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
3382 llvm::Value *result) {
3383 // If we're returning 'self', kill the initial retain. This is a
3384 // heuristic attempt to "encourage correctness" in the really unfortunate
3385 // case where we have a return of self during a dealloc and we desperately
3386 // need to avoid the possible autorelease.
3387 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
3388 return self;
3389
3390 // At -O0, try to emit a fused retain/autorelease.
3391 if (CGF.shouldUseFusedARCCalls())
3392 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
3393 return fused;
3394
3395 return CGF.EmitARCAutoreleaseReturnValue(result);
3396}
3397
3398/// Heuristically search for a dominating store to the return-value slot.
3399static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
3400 // Check if a User is a store whose pointer operand is the ReturnValue.
3401 // We are looking for stores to the ReturnValue, not for stores of the
3402 // ReturnValue to some other location.
3403 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
3404 auto *SI = dyn_cast<llvm::StoreInst>(U);
3405 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer() ||
3406 SI->getValueOperand()->getType() != CGF.ReturnValue.getElementType())
3407 return nullptr;
3408 // These aren't actually possible for non-coerced returns, and we
3409 // only care about non-coerced returns on this code path.
3410 assert(!SI->isAtomic() && !SI->isVolatile());
3411 return SI;
3412 };
3413 // If there are multiple uses of the return-value slot, just check
3414 // for something immediately preceding the IP. Sometimes this can
3415 // happen with how we generate implicit-returns; it can also happen
3416 // with noreturn cleanups.
3417 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
3418 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3419 if (IP->empty()) return nullptr;
3420
3421 // Look at directly preceding instruction, skipping bitcasts and lifetime
3422 // markers.
3423 for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) {
3424 if (isa<llvm::BitCastInst>(&I))
3425 continue;
3426 if (auto *II = dyn_cast<llvm::IntrinsicInst>(&I))
3427 if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
3428 continue;
3429
3430 return GetStoreIfValid(&I);
3431 }
3432 return nullptr;
3433 }
3434
3435 llvm::StoreInst *store =
3436 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
3437 if (!store) return nullptr;
3438
3439 // Now do a quick-and-dirty dominance check: just walk up the
3440 // single-predecessor chain from the current insertion point.
3441 llvm::BasicBlock *StoreBB = store->getParent();
3442 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3443 while (IP != StoreBB) {
3444 if (!(IP = IP->getSinglePredecessor()))
3445 return nullptr;
3446 }
3447
3448 // Okay, the store's basic block dominates the insertion point; we
3449 // can do our thing.
3450 return store;
3451}
3452
3453// Helper functions for EmitCMSEClearRecord
3454
3455// Set the bits corresponding to a field having width `BitWidth` and located at
3456// offset `BitOffset` (from the least significant bit) within a storage unit of
3457// `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
3458// Use little-endian layout, i.e.`Bits[0]` is the LSB.
3459static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset,
3460 int BitWidth, int CharWidth) {
3461 assert(CharWidth <= 64);
3462 assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
3463
3464 int Pos = 0;
3465 if (BitOffset >= CharWidth) {
3466 Pos += BitOffset / CharWidth;
3467 BitOffset = BitOffset % CharWidth;
3468 }
3469
3470 const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
3471 if (BitOffset + BitWidth >= CharWidth) {
3472 Bits[Pos++] |= (Used << BitOffset) & Used;
3473 BitWidth -= CharWidth - BitOffset;
3474 BitOffset = 0;
3475 }
3476
3477 while (BitWidth >= CharWidth) {
3478 Bits[Pos++] = Used;
3479 BitWidth -= CharWidth;
3480 }
3481
3482 if (BitWidth > 0)
3483 Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
3484}
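Illustrative aside (not part of CGCall.cpp): a worked example of the helper above. Because the real setBitRange is file-local, the sketch below re-states its little-endian logic in a self-contained program so the expected bytes can be checked directly; the name setBitRangeSketch is hypothetical.

#include <cassert>
#include <cstdint>
#include <vector>

// Re-statement of the little-endian helper, used only to show its effect.
static void setBitRangeSketch(std::vector<uint64_t> &Bits, int BitOffset,
                              int BitWidth, int CharWidth) {
  int Pos = BitOffset / CharWidth;          // first storage byte touched
  BitOffset %= CharWidth;                   // offset within that byte
  const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
  if (BitOffset + BitWidth >= CharWidth) {  // fill the partial leading byte
    Bits[Pos++] |= (Used << BitOffset) & Used;
    BitWidth -= CharWidth - BitOffset;
    BitOffset = 0;
  }
  while (BitWidth >= CharWidth) {           // fill whole bytes
    Bits[Pos++] = Used;
    BitWidth -= CharWidth;
  }
  if (BitWidth > 0)                         // fill the partial trailing byte
    Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
}

int main() {
  // A 12-bit field at bit offset 10 within a 4-byte storage unit.
  std::vector<uint64_t> Bits(4, 0);
  setBitRangeSketch(Bits, /*BitOffset=*/10, /*BitWidth=*/12, /*CharWidth=*/8);
  assert(Bits[0] == 0x00 && Bits[1] == 0xFC && Bits[2] == 0x3F &&
         Bits[3] == 0x00); // bits 10..21 set; Bits[0] is the LSB byte
}

Feeding those four bytes to buildMultiCharMask (defined further down) with BigEndian == false collapses them into the single mask 0x003FFC00, i.e. exactly bits 10..21.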
3485
3486// Set the bits corresponding to a field having width `BitWidth` and located at
3487// offset `BitOffset` (from the least significant bit) within a storage unit of
3488// `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of
3489// `Bits` corresponds to one target byte. Use target endian layout.
3490static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset,
3491 int StorageSize, int BitOffset, int BitWidth,
3492 int CharWidth, bool BigEndian) {
3493
3494 SmallVector<uint64_t, 8> TmpBits(StorageSize);
3495 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
3496
3497 if (BigEndian)
3498 std::reverse(TmpBits.begin(), TmpBits.end());
3499
3500 for (uint64_t V : TmpBits)
3501 Bits[StorageOffset++] |= V;
3502}
3503
3504static void setUsedBits(CodeGenModule &, QualType, int,
3505 SmallVectorImpl<uint64_t> &);
3506
3507// Set the bits in `Bits`, which correspond to the value representations of
3508// the actual members of the record type `RTy`. Note that this function does
3509// not handle base classes, virtual tables, etc, since they cannot happen in
3510 // CMSE function arguments or return values. The bit mask corresponds to the target
3511// memory layout, i.e. it's endian dependent.
3512static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
3513 SmallVectorImpl<uint64_t> &Bits) {
3514 ASTContext &Context = CGM.getContext();
3515 int CharWidth = Context.getCharWidth();
3516 const RecordDecl *RD = RTy->getDecl()->getDefinition();
3517 const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
3518 const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);
3519
3520 int Idx = 0;
3521 for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
3522 const FieldDecl *F = *I;
3523
3524 if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) ||
3525 F->getType()->isIncompleteArrayType())
3526 continue;
3527
3528 if (F->isBitField()) {
3529 const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F);
3530 setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(),
3531 BFI.StorageSize / CharWidth, BFI.Offset,
3532 BFI.Size, CharWidth,
3533 CGM.getDataLayout().isBigEndian());
3534 continue;
3535 }
3536
3537 setUsedBits(CGM, F->getType(),
3538 Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits);
3539 }
3540}
3541
3542// Set the bits in `Bits`, which correspond to the value representations of
3543// the elements of an array type `ATy`.
3544static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
3545 int Offset, SmallVectorImpl<uint64_t> &Bits) {
3546 const ASTContext &Context = CGM.getContext();
3547
3548 QualType ETy = Context.getBaseElementType(ATy);
3549 int Size = Context.getTypeSizeInChars(ETy).getQuantity();
3550 SmallVector<uint64_t, 4> TmpBits(Size);
3551 setUsedBits(CGM, ETy, 0, TmpBits);
3552
3553 for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
3554 auto Src = TmpBits.begin();
3555 auto Dst = Bits.begin() + Offset + I * Size;
3556 for (int J = 0; J < Size; ++J)
3557 *Dst++ |= *Src++;
3558 }
3559}
3560
3561// Set the bits in `Bits`, which correspond to the value representations of
3562// the type `QTy`.
3563static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
3564 SmallVectorImpl<uint64_t> &Bits) {
3565 if (const auto *RTy = QTy->getAs<RecordType>())
3566 return setUsedBits(CGM, RTy, Offset, Bits);
3567
3568 ASTContext &Context = CGM.getContext();
3569 if (const auto *ATy = Context.getAsConstantArrayType(QTy))
3570 return setUsedBits(CGM, ATy, Offset, Bits);
3571
3572 int Size = Context.getTypeSizeInChars(QTy).getQuantity();
3573 if (Size <= 0)
3574 return;
3575
3576 std::fill_n(Bits.begin() + Offset, Size,
3577 (uint64_t(1) << Context.getCharWidth()) - 1);
3578}
3579
3580static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
3581 int Pos, int Size, int CharWidth,
3582 bool BigEndian) {
3583 assert(Size > 0);
3584 uint64_t Mask = 0;
3585 if (BigEndian) {
3586 for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
3587 ++P)
3588 Mask = (Mask << CharWidth) | *P;
3589 } else {
3590 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
3591 do
3592 Mask = (Mask << CharWidth) | *--P;
3593 while (P != End);
3594 }
3595 return Mask;
3596}
3597
3598 // Emit code to clear the bits in a record that aren't part of any
3599 // user-declared member, when the record is returned from a function.
3600llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3601 llvm::IntegerType *ITy,
3602 QualType QTy) {
3603 assert(Src->getType() == ITy);
3604 assert(ITy->getScalarSizeInBits() <= 64);
3605
3606 const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3607 int Size = DataLayout.getTypeStoreSize(ITy);
3608 SmallVector<uint64_t, 4> Bits(Size);
3609 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3610
3611 int CharWidth = CGM.getContext().getCharWidth();
3612 uint64_t Mask =
3613 buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());
3614
3615 return Builder.CreateAnd(Src, Mask, "cmse.clear");
3616}
3617
3618 // Emit code to clear the bits in a record that aren't part of any
3619 // user-declared member, when the record is passed as a function argument.
3620llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3621 llvm::ArrayType *ATy,
3622 QualType QTy) {
3623 const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3624 int Size = DataLayout.getTypeStoreSize(ATy);
3625 SmallVector<uint64_t, 16> Bits(Size);
3626 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3627
3628 // Clear each element of the LLVM array.
3629 int CharWidth = CGM.getContext().getCharWidth();
3630 int CharsPerElt =
3631 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3632 int MaskIndex = 0;
3633 llvm::Value *R = llvm::PoisonValue::get(ATy);
3634 for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3635 uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
3636 DataLayout.isBigEndian());
3637 MaskIndex += CharsPerElt;
3638 llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
3639 llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
3640 R = Builder.CreateInsertValue(R, T1, I);
3641 }
3642
3643 return R;
3644}
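Illustrative aside (not part of CGCall.cpp; requires an ARMv8-M target built with -mcmse): the kind of declaration the CMSE-clearing code above serves; the type and function names are hypothetical.

// A non-secure-callable entry point returning a record with an unnamed
// bit-field and padding. EmitCMSEClearRecord masks the returned value so that
// only the bits of 'a' and 'b' survive; everything else is zeroed before the
// value crosses to the non-secure caller, avoiding leaks of secure-state data.
struct Flags {
  unsigned a : 3;
  unsigned   : 5; // unnamed bit-field: always cleared on return
  unsigned b : 6; // bits beyond 'b' up to the storage size are cleared too
};

extern "C" __attribute__((cmse_nonsecure_entry)) Flags get_flags(void);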
3645
3646void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
3647 bool EmitRetDbgLoc,
3648 SourceLocation EndLoc) {
3649 if (FI.isNoReturn()) {
3650 // Noreturn functions don't return.
3651 EmitUnreachable(EndLoc);
3652 return;
3653 }
3654
3655 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
3656 // Naked functions don't have epilogues.
3657 Builder.CreateUnreachable();
3658 return;
3659 }
3660
3661 // Functions with no result always return void.
3662 if (!ReturnValue.isValid()) {
3663 Builder.CreateRetVoid();
3664 return;
3665 }
3666
3667 llvm::DebugLoc RetDbgLoc;
3668 llvm::Value *RV = nullptr;
3669 QualType RetTy = FI.getReturnType();
3670 const ABIArgInfo &RetAI = FI.getReturnInfo();
3671
3672 switch (RetAI.getKind()) {
3673 case ABIArgInfo::InAlloca:
3674 // Aggregates get evaluated directly into the destination. Sometimes we
3675 // need to return the sret value in a register, though.
3676 assert(hasAggregateEvaluationKind(RetTy));
3677 if (RetAI.getInAllocaSRet()) {
3678 llvm::Function::arg_iterator EI = CurFn->arg_end();
3679 --EI;
3680 llvm::Value *ArgStruct = &*EI;
3681 llvm::Value *SRet = Builder.CreateStructGEP(
3682 FI.getArgStruct(), ArgStruct, RetAI.getInAllocaFieldIndex());
3683 llvm::Type *Ty =
3684 cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
3685 RV = Builder.CreateAlignedLoad(Ty, SRet, getPointerAlign(), "sret");
3686 }
3687 break;
3688
3689 case ABIArgInfo::Indirect: {
3690 auto AI = CurFn->arg_begin();
3691 if (RetAI.isSRetAfterThis())
3692 ++AI;
3693 switch (getEvaluationKind(RetTy)) {
3694 case TEK_Complex: {
3695 ComplexPairTy RT =
3696 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
3697 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
3698 /*isInit*/ true);
3699 break;
3700 }
3701 case TEK_Aggregate:
3702 // Do nothing; aggregates get evaluated directly into the destination.
3703 break;
3704 case TEK_Scalar: {
3705 LValueBaseInfo BaseInfo;
3706 TBAAAccessInfo TBAAInfo;
3707 CharUnits Alignment =
3708 CGM.getNaturalTypeAlignment(RetTy, &BaseInfo, &TBAAInfo);
3709 Address ArgAddr(&*AI, ConvertType(RetTy), Alignment);
3710 LValue ArgVal =
3711 LValue::MakeAddr(ArgAddr, RetTy, getContext(), BaseInfo, TBAAInfo);
3712 EmitStoreOfScalar(
3713 Builder.CreateLoad(ReturnValue), ArgVal, /*isInit*/ true);
3714 break;
3715 }
3716 }
3717 break;
3718 }
3719
3720 case ABIArgInfo::Extend:
3721 case ABIArgInfo::Direct:
3722 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
3723 RetAI.getDirectOffset() == 0) {
3724 // The internal return value temp will always have pointer-to-return-type
3725 // type; just do a load.
3726
3727 // If there is a dominating store to ReturnValue, we can elide
3728 // the load, zap the store, and usually zap the alloca.
3729 if (llvm::StoreInst *SI =
3730 findDominatingStoreToReturnValue(*this)) {
3731 // Reuse the debug location from the store unless there is
3732 // cleanup code to be emitted between the store and return
3733 // instruction.
3734 if (EmitRetDbgLoc && !AutoreleaseResult)
3735 RetDbgLoc = SI->getDebugLoc();
3736 // Get the stored value and nuke the now-dead store.
3737 RV = SI->getValueOperand();
3738 SI->eraseFromParent();
3739
3740 // Otherwise, we have to do a simple load.
3741 } else {
3742 RV = Builder.CreateLoad(ReturnValue);
3743 }
3744 } else {
3745 // If the value is offset in memory, apply the offset now.
3746 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
3747
3748 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
3749 }
3750
3751 // In ARC, end functions that return a retainable type with a call
3752 // to objc_autoreleaseReturnValue.
3753 if (AutoreleaseResult) {
3754#ifndef NDEBUG
3755 // Type::isObjCRetainableType has to be called on a QualType that hasn't
3756 // been stripped of typedefs, so we cannot use RetTy here. Get the
3757 // original return type of the FunctionDecl, ObjCMethodDecl, or BlockDecl
3758 // from CurCodeDecl or BlockInfo.
3759 QualType RT;
3760
3761 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
3762 RT = FD->getReturnType();
3763 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
3764 RT = MD->getReturnType();
3765 else if (isa<BlockDecl>(CurCodeDecl))
3766 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
3767 else
3768 llvm_unreachable("Unexpected function/method type");
3769
3770 assert(getLangOpts().ObjCAutoRefCount &&
3771 !FI.isReturnsRetained() &&
3772 RT->isObjCRetainableType());
3773#endif
3774 RV = emitAutoreleaseOfResult(*this, RV);
3775 }
3776
3777 break;
3778
3779 case ABIArgInfo::Ignore:
3780 break;
3781
3782 case ABIArgInfo::CoerceAndExpand: {
3783 auto coercionType = RetAI.getCoerceAndExpandType();
3784
3785 // Load all of the coerced elements out into results.
3786 llvm::SmallVector<llvm::Value*, 4> results;
3787 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
3788 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3789 auto coercedEltType = coercionType->getElementType(i);
3790 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
3791 continue;
3792
3793 auto eltAddr = Builder.CreateStructGEP(addr, i);
3794 auto elt = Builder.CreateLoad(eltAddr);
3795 results.push_back(elt);
3796 }
3797
3798 // If we have one result, it's the single direct result type.
3799 if (results.size() == 1) {
3800 RV = results[0];
3801
3802 // Otherwise, we need to make a first-class aggregate.
3803 } else {
3804 // Construct a return type that lacks padding elements.
3805 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
3806
3807 RV = llvm::PoisonValue::get(returnType);
3808 for (unsigned i = 0, e = results.size(); i != e; ++i) {
3809 RV = Builder.CreateInsertValue(RV, results[i], i);
3810 }
3811 }
3812 break;
3813 }
3814 case ABIArgInfo::Expand:
3815 case ABIArgInfo::IndirectAliased:
3816 llvm_unreachable("Invalid ABI kind for return argument");
3817 }
3818
3819 llvm::Instruction *Ret;
3820 if (RV) {
3821 if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
3822 // For certain return types, clear padding bits, as they may reveal
3823 // sensitive information.
3824 // Small struct/union types are passed as integers.
3825 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
3826 if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType()))
3827 RV = EmitCMSEClearRecord(RV, ITy, RetTy);
3828 }
3829 EmitReturnValueCheck(RV);
3830 Ret = Builder.CreateRet(RV);
3831 } else {
3832 Ret = Builder.CreateRetVoid();
3833 }
3834
3835 if (RetDbgLoc)
3836 Ret->setDebugLoc(std::move(RetDbgLoc));
3837}
3838
3839void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
3840 // A current decl may not be available when emitting vtable thunks.
3841 if (!CurCodeDecl)
3842 return;
3843
3844 // If the return block isn't reachable, neither is this check, so don't emit
3845 // it.
3846 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty())
3847 return;
3848
3849 ReturnsNonNullAttr *RetNNAttr = nullptr;
3850 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
3851 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
3852
3853 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
3854 return;
3855
3856 // Prefer the returns_nonnull attribute if it's present.
3857 SourceLocation AttrLoc;
3858 SanitizerMask CheckKind;
3859 SanitizerHandler Handler;
3860 if (RetNNAttr) {
3861 assert(!requiresReturnValueNullabilityCheck() &&
3862 "Cannot check nullability and the nonnull attribute");
3863 AttrLoc = RetNNAttr->getLocation();
3864 CheckKind = SanitizerKind::ReturnsNonnullAttribute;
3865 Handler = SanitizerHandler::NonnullReturn;
3866 } else {
3867 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3868 if (auto *TSI = DD->getTypeSourceInfo())
3869 if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>())
3870 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3871 CheckKind = SanitizerKind::NullabilityReturn;
3872 Handler = SanitizerHandler::NullabilityReturn;
3873 }
3874
3875 SanitizerScope SanScope(this);
3876
3877 // Make sure the "return" source location is valid. If we're checking a
3878 // nullability annotation, make sure the preconditions for the check are met.
3879 llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3880 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3881 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3882 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3883 if (requiresReturnValueNullabilityCheck())
3884 CanNullCheck =
3885 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3886 Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3887 EmitBlock(Check);
3888
3889 // Now do the null check.
3890 llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3891 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3892 llvm::Value *DynamicData[] = {SLocPtr};
3893 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3894
3895 EmitBlock(NoCheck);
3896
3897#ifndef NDEBUG
3898 // The return location should not be used after the check has been emitted.
3899 ReturnLocation = Address::invalid();
3900#endif
3901}
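
A sketch of a function whose epilogue picks up the check emitted by EmitReturnValueCheck when built with -fsanitize=returns-nonnull-attribute; the function name is made up:

// Assumed illustrative example, not part of CGCall.cpp.
__attribute__((returns_nonnull)) const char *label_for(int id) {
  return id ? "set" : "unset";   // the returned pointer is null-checked before 'ret'
}
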
3902
3903static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3904 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3905 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3906}
3907
3908static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3909 QualType Ty) {
3910 // FIXME: Generate IR in one pass, rather than going back and fixing up these
3911 // placeholders.
3912 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3913 llvm::Type *IRPtrTy = IRTy->getPointerTo();
3914 llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy->getPointerTo());
3915
3916 // FIXME: When we generate this IR in one pass, we shouldn't need
3917 // this win32-specific alignment hack.
3918 CharUnits Align = CharUnits::fromQuantity(4);
3919 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3920
3921 return AggValueSlot::forAddr(Address(Placeholder, IRTy, Align),
3922 Ty.getQualifiers(),
3923 AggValueSlot::IsNotDestructed,
3924 AggValueSlot::DoesNotNeedGCBarriers,
3925 AggValueSlot::IsNotAliased,
3926 AggValueSlot::DoesNotOverlap);
3927}
3928
3929void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3930 const VarDecl *param,
3931 SourceLocation loc) {
3932 // StartFunction converted the ABI-lowered parameter(s) into a
3933 // local alloca. We need to turn that into an r-value suitable
3934 // for EmitCall.
3935 Address local = GetAddrOfLocalVar(param);
3936
3937 QualType type = param->getType();
3938
3939 if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3940 CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3941 }
3942
3943 // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3944 // but the argument needs to be the original pointer.
3945 if (type->isReferenceType()) {
3946 args.add(RValue::get(Builder.CreateLoad(local)), type);
3947
3948 // In ARC, move out of consumed arguments so that the release cleanup
3949 // entered by StartFunction doesn't cause an over-release. This isn't
3950 // optimal -O0 code generation, but it should get cleaned up when
3951 // optimization is enabled. This also assumes that delegate calls are
3952 // performed exactly once for a set of arguments, but that should be safe.
3953 } else if (getLangOpts().ObjCAutoRefCount &&
3954 param->hasAttr<NSConsumedAttr>() &&
3955 type->isObjCRetainableType()) {
3956 llvm::Value *ptr = Builder.CreateLoad(local);
3957 auto null =
3958 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3959 Builder.CreateStore(null, local);
3960 args.add(RValue::get(ptr), type);
3961
3962 // For the most part, we just need to load the alloca, except that
3963 // aggregate r-values are actually pointers to temporaries.
3964 } else {
3965 args.add(convertTempToRValue(local, type, loc), type);
3966 }
3967
3968 // Deactivate the cleanup for the callee-destructed param that was pushed.
3969 if (type->isRecordType() && !CurFuncIsThunk &&
3970 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3971 param->needsDestruction(getContext())) {
3972 EHScopeStack::stable_iterator cleanup =
3973 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3974 assert(cleanup.isValid() &&
3975 "cleanup for callee-destructed param not recorded");
3976 // This unreachable is a temporary marker which will be removed later.
3977 llvm::Instruction *isActive = Builder.CreateUnreachable();
3978 args.addArgCleanupDeactivation(cleanup, isActive);
3979 }
3980}
3981
3982static bool isProvablyNull(llvm::Value *addr) {
3983 return isa<llvm::ConstantPointerNull>(addr);
3984}
3985
3986/// Emit the actual writing-back of a writeback.
3987static void emitWriteback(CodeGenFunction &CGF,
3988 const CallArgList::Writeback &writeback) {
3989 const LValue &srcLV = writeback.Source;
3990 Address srcAddr = srcLV.getAddress(CGF);
3991 assert(!isProvablyNull(srcAddr.getPointer()) &&
3992 "shouldn't have writeback for provably null argument");
3993
3994 llvm::BasicBlock *contBB = nullptr;
3995
3996 // If the argument wasn't provably non-null, we need to null check
3997 // before doing the store.
3998 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3999 CGF.CGM.getDataLayout());
4000 if (!provablyNonNull) {
4001 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
4002 contBB = CGF.createBasicBlock("icr.done");
4003
4004 llvm::Value *isNull =
4005 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
4006 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
4007 CGF.EmitBlock(writebackBB);
4008 }
4009
4010 // Load the value to writeback.
4011 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
4012
4013 // Cast it back, in case we're writing an id to a Foo* or something.
4014 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
4015 "icr.writeback-cast");
4016
4017 // Perform the writeback.
4018
4019 // If we have a "to use" value, it's something we need to emit a use
4020 // of. This has to be carefully threaded in: if it's done after the
4021 // release it's potentially undefined behavior (and the optimizer
4022 // will ignore it), and if it happens before the retain then the
4023 // optimizer could move the release there.
4024 if (writeback.ToUse) {
4025 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
4026
4027 // Retain the new value. No need to block-copy here: the block's
4028 // being passed up the stack.
4029 value = CGF.EmitARCRetainNonBlock(value);
4030
4031 // Emit the intrinsic use here.
4032 CGF.EmitARCIntrinsicUse(writeback.ToUse);
4033
4034 // Load the old value (primitively).
4035 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
4036
4037 // Put the new value in place (primitively).
4038 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
4039
4040 // Release the old value.
4041 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
4042
4043 // Otherwise, we can just do a normal lvalue store.
4044 } else {
4045 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
4046 }
4047
4048 // Jump to the continuation block.
4049 if (!provablyNonNull)
4050 CGF.EmitBlock(contBB);
4051}
4052
4053static void emitWritebacks(CodeGenFunction &CGF,
4054 const CallArgList &args) {
4055 for (const auto &I : args.writebacks())
4056 emitWriteback(CGF, I);
4057}
4058
4059static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
4060 const CallArgList &CallArgs) {
4061 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
4062 CallArgs.getCleanupsToDeactivate();
4063 // Iterate in reverse to increase the likelihood of popping the cleanup.
4064 for (const auto &I : llvm::reverse(Cleanups)) {
4065 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
4066 I.IsActiveIP->eraseFromParent();
4067 }
4068}
4069
4070static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
4071 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
4072 if (uop->getOpcode() == UO_AddrOf)
4073 return uop->getSubExpr();
4074 return nullptr;
4075}
4076
4077/// Emit an argument that's being passed call-by-writeback. That is,
4078/// we are passing the address of an __autoreleased temporary; it
4079/// might be copy-initialized with the current value of the given
4080/// address, but it will definitely be copied out of after the call.
4081static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
4082 const ObjCIndirectCopyRestoreExpr *CRE) {
4083 LValue srcLV;
4084
4085 // Make an optimistic effort to emit the address as an l-value.
4086 // This can fail if the argument expression is more complicated.
4087 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
4088 srcLV = CGF.EmitLValue(lvExpr);
4089
4090 // Otherwise, just emit it as a scalar.
4091 } else {
4092 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
4093
4094 QualType srcAddrType =
4095 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
4096 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
4097 }
4098 Address srcAddr = srcLV.getAddress(CGF);
4099
4100 // The dest and src types don't necessarily match in LLVM terms
4101 // because of the crazy ObjC compatibility rules.
4102
4103 llvm::PointerType *destType =
4104 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
4105 llvm::Type *destElemType =
4106 CGF.ConvertTypeForMem(CRE->getType()->getPointeeType());
4107
4108 // If the address is a constant null, just pass the appropriate null.
4109 if (isProvablyNull(srcAddr.getPointer())) {
4110 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
4111 CRE->getType());
4112 return;
4113 }
4114
4115 // Create the temporary.
4116 Address temp =
4117 CGF.CreateTempAlloca(destElemType, CGF.getPointerAlign(), "icr.temp");
4118 // Loading an l-value can introduce a cleanup if the l-value is __weak,
4119 // and that cleanup will be conditional if we can't prove that the l-value
4120 // isn't null, so we need to register a dominating point so that the cleanups
4121 // system will make valid IR.
4122 CodeGenFunction::ConditionalEvaluation condEval(CGF);
4123
4124 // Zero-initialize it if we're not doing a copy-initialization.
4125 bool shouldCopy = CRE->shouldCopy();
4126 if (!shouldCopy) {
4127 llvm::Value *null =
4128 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType));
4129 CGF.Builder.CreateStore(null, temp);
4130 }
4131
4132 llvm::BasicBlock *contBB = nullptr;
4133 llvm::BasicBlock *originBB = nullptr;
4134
4135 // If the address is *not* known to be non-null, we need to switch.
4136 llvm::Value *finalArgument;
4137
4138 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
4139 CGF.CGM.getDataLayout());
4140 if (provablyNonNull) {
4141 finalArgument = temp.getPointer();
4142 } else {
4143 llvm::Value *isNull =
4144 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
4145
4146 finalArgument = CGF.Builder.CreateSelect(isNull,
4147 llvm::ConstantPointerNull::get(destType),
4148 temp.getPointer(), "icr.argument");
4149
4150 // If we need to copy, then the load has to be conditional, which
4151 // means we need control flow.
4152 if (shouldCopy) {
4153 originBB = CGF.Builder.GetInsertBlock();
4154 contBB = CGF.createBasicBlock("icr.cont");
4155 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
4156 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
4157 CGF.EmitBlock(copyBB);
4158 condEval.begin(CGF);
4159 }
4160 }
4161
4162 llvm::Value *valueToUse = nullptr;
4163
4164 // Perform a copy if necessary.
4165 if (shouldCopy) {
4166 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
4167 assert(srcRV.isScalar());
4168
4169 llvm::Value *src = srcRV.getScalarVal();
4170 src = CGF.Builder.CreateBitCast(src, destElemType, "icr.cast");
4171
4172 // Use an ordinary store, not a store-to-lvalue.
4173 CGF.Builder.CreateStore(src, temp);
4174
4175 // If optimization is enabled, and the value was held in a
4176 // __strong variable, we need to tell the optimizer that this
4177 // value has to stay alive until we're doing the store back.
4178 // This is because the temporary is effectively unretained,
4179 // and so otherwise we can violate the high-level semantics.
4180 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
4181 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
4182 valueToUse = src;
4183 }
4184 }
4185
4186 // Finish the control flow if we needed it.
4187 if (shouldCopy && !provablyNonNull) {
4188 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
4189 CGF.EmitBlock(contBB);
4190
4191 // Make a phi for the value to intrinsically use.
4192 if (valueToUse) {
4193 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
4194 "icr.to-use");
4195 phiToUse->addIncoming(valueToUse, copyBB);
4196 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
4197 originBB);
4198 valueToUse = phiToUse;
4199 }
4200
4201 condEval.end(CGF);
4202 }
4203
4204 args.addWriteback(srcLV, temp, valueToUse);
4205 args.add(RValue::get(finalArgument), CRE->getType());
4206}
4207
4208void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
4209 assert(!StackBase);
4210
4211 // Save the stack.
4212 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
4213 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
4214}
4215
4216void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
4217 if (StackBase) {
4218 // Restore the stack after the call.
4219 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
4220 CGF.Builder.CreateCall(F, StackBase);
4221 }
4222}
4223
4224void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
4225 SourceLocation ArgLoc,
4226 AbstractCallee AC,
4227 unsigned ParmNum) {
4228 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
4229 SanOpts.has(SanitizerKind::NullabilityArg)))
4230 return;
4231
4232 // The param decl may be missing in a variadic function.
4233 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
4234 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
4235
4236 // Prefer the nonnull attribute if it's present.
4237 const NonNullAttr *NNAttr = nullptr;
4238 if (SanOpts.has(SanitizerKind::NonnullAttribute))
4239 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
4240
4241 bool CanCheckNullability = false;
4242 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
4243 auto Nullability = PVD->getType()->getNullability();
4244 CanCheckNullability = Nullability &&
4245 *Nullability == NullabilityKind::NonNull &&
4246 PVD->getTypeSourceInfo();
4247 }
4248
4249 if (!NNAttr && !CanCheckNullability)
4250 return;
4251
4252 SourceLocation AttrLoc;
4253 SanitizerMask CheckKind;
4254 SanitizerHandler Handler;
4255 if (NNAttr) {
4256 AttrLoc = NNAttr->getLocation();
4257 CheckKind = SanitizerKind::NonnullAttribute;
4258 Handler = SanitizerHandler::NonnullArg;
4259 } else {
4260 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
4261 CheckKind = SanitizerKind::NullabilityArg;
4262 Handler = SanitizerHandler::NullabilityArg;
4263 }
4264
4265 SanitizerScope SanScope(this);
4266 llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType);
4267 llvm::Constant *StaticData[] = {
4268 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
4269 llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
4270 };
4271 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, std::nullopt);
4272}
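
A sketch of a call that EmitNonNullArgCheck instruments when built with -fsanitize=nonnull-attribute; the declarations are made up:

// Assumed illustrative example, not part of CGCall.cpp.
__attribute__((nonnull(1))) void fill(int *dst, int n);
void use(int *p) {
  fill(p, 4);   // 'p' is checked against null before the call
}
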
4273
4274// Check if the call is going to use the inalloca convention. This needs to
4275// agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged
4276// later, so we can't check it directly.
4277static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC,
4278 ArrayRef<QualType> ArgTypes) {
4279 // The Swift calling conventions don't go through the target-specific
4280 // argument classification, they never use inalloca.
4281 // TODO: Consider limiting inalloca use to only calling conventions supported
4282 // by MSVC.
4283 if (ExplicitCC == CC_Swift || ExplicitCC == CC_SwiftAsync)
4284 return false;
4285 if (!CGM.getTarget().getCXXABI().isMicrosoft())
4286 return false;
4287 return llvm::any_of(ArgTypes, [&](QualType Ty) {
4288 return isInAllocaArgument(CGM.getCXXABI(), Ty);
4289 });
4290}
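
A sketch of an argument type for which hasInAllocaArgs returns true on 32-bit Microsoft targets (for example i686-pc-windows-msvc); the type and function names are made up:

// Assumed illustrative example, not part of CGCall.cpp.
struct NonTrivial {
  NonTrivial(const NonTrivial &);   // non-trivial copy constructor
  int data[4];
};
void consume(NonTrivial v);
void caller(const NonTrivial &v) {
  consume(v);   // the copy is constructed directly in inalloca argument memory
}
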
4291
4292#ifndef NDEBUG
4293// Determine whether the given argument is an Objective-C method
4294// that may have type parameters in its signature.
4295static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
4296 const DeclContext *dc = method->getDeclContext();
4297 if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) {
4298 return classDecl->getTypeParamListAsWritten();
4299 }
4300
4301 if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
4302 return catDecl->getTypeParamList();
4303 }
4304
4305 return false;
4306}
4307#endif
4308
4309/// EmitCallArgs - Emit call arguments for a function.
4310void CodeGenFunction::EmitCallArgs(
4311 CallArgList &Args, PrototypeWrapper Prototype,
4312 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4313 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
4314 SmallVector<QualType, 16> ArgTypes;
4315
4316 assert((ParamsToSkip == 0 || Prototype.P) &&
4317 "Can't skip parameters if type info is not provided");
4318
4319 // This variable only captures *explicitly* written conventions, not those
4320 // applied by default via command line flags or target defaults, such as
4321 // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would
4322 // require knowing if this is a C++ instance method or being able to see
4323 // unprototyped FunctionTypes.
4324 CallingConv ExplicitCC = CC_C;
4325
4326 // First, if a prototype was provided, use those argument types.
4327 bool IsVariadic = false;
4328 if (Prototype.P) {
4329 const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>();
4330 if (MD) {
4331 IsVariadic = MD->isVariadic();
4332 ExplicitCC = getCallingConventionForDecl(
4333 MD, CGM.getTarget().getTriple().isOSWindows());
4334 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4335 MD->param_type_end());
4336 } else {
4337 const auto *FPT = Prototype.P.get<const FunctionProtoType *>();
4338 IsVariadic = FPT->isVariadic();
4339 ExplicitCC = FPT->getExtInfo().getCC();
4340 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4341 FPT->param_type_end());
4342 }
4343
4344#ifndef NDEBUG
4345 // Check that the prototyped types match the argument expression types.
4346 bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD);
4347 CallExpr::const_arg_iterator Arg = ArgRange.begin();
4348 for (QualType Ty : ArgTypes) {
4349 assert(Arg != ArgRange.end() && "Running over edge of argument list!");
4350 assert(
4351 (isGenericMethod || Ty->isVariablyModifiedType() ||
4352 Ty.getNonReferenceType()->isObjCRetainableType() ||
4353 getContext()
4354 .getCanonicalType(Ty.getNonReferenceType())
4355 .getTypePtr() ==
4356 getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
4357 "type mismatch in call argument!");
4358 ++Arg;
4359 }
4360
4361 // Either we've emitted all the call args, or we have a call to a
4362 // variadic function.
4363 assert((Arg == ArgRange.end() || IsVariadic) &&
4364 "Extra arguments in non-variadic function!");
4365#endif
4366 }
4367
4368 // If we still have any arguments, emit them using the type of the argument.
4369 for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
4370 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4371 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4372
4373 // We must evaluate arguments from right to left in the MS C++ ABI,
4374 // because arguments are destroyed left to right in the callee. As a special
4375 // case, there are certain language constructs that require left-to-right
4376 // evaluation, and in those cases we consider the evaluation order requirement
4377 // to trump the "destruction order is reverse construction order" guarantee.
4378 bool LeftToRight =
4379 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
4380 ? Order == EvaluationOrder::ForceLeftToRight
4381 : Order != EvaluationOrder::ForceRightToLeft;
4382
4383 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
4384 RValue EmittedArg) {
4385 if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
4386 return;
4387 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
4388 if (PS == nullptr)
4389 return;
4390
4391 const auto &Context = getContext();
4392 auto SizeTy = Context.getSizeType();
4393 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
4394 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
4395 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
4396 EmittedArg.getScalarVal(),
4397 PS->isDynamic());
4398 Args.add(RValue::get(V), SizeTy);
4399 // If we're emitting args in reverse, be sure to do so with
4400 // pass_object_size, as well.
4401 if (!LeftToRight)
4402 std::swap(Args.back(), *(&Args.back() - 1));
4403 };
4404
4405 // Insert a stack save if we're going to need any inalloca args.
4406 if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
4407 assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4408 "inalloca only supported on x86");
4409 Args.allocateArgumentMemory(*this);
4410 }
4411
4412 // Evaluate each argument in the appropriate order.
4413 size_t CallArgsStart = Args.size();
4414 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4415 unsigned Idx = LeftToRight ? I : E - I - 1;
4416 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
4417 unsigned InitialArgSize = Args.size();
4418 // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
4419 // the argument and parameter match or the objc method is parameterized.
4420 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
4421 getContext().hasSameUnqualifiedType((*Arg)->getType(),
4422 ArgTypes[Idx]) ||
4423 (isa<ObjCMethodDecl>(AC.getDecl()) &&
4424 isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
4425 "Argument and parameter types don't match");
4426 EmitCallArg(Args, *Arg, ArgTypes[Idx]);
4427 // In particular, we depend on it being the last arg in Args, and the
4428 // objectsize bits depend on there only being one arg if !LeftToRight.
4429 assert(InitialArgSize + 1 == Args.size() &&
4430 "The code below depends on only adding one arg per EmitCallArg");
4431 (void)InitialArgSize;
4432 // Since pointer arguments are never emitted as LValues, it is safe to
4433 // emit the non-null argument check for r-values only.
4434 if (!Args.back().hasLValue()) {
4435 RValue RVArg = Args.back().getKnownRValue();
4436 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
4437 ParamsToSkip + Idx);
4438 // @llvm.objectsize should never have side-effects and shouldn't need
4439 // destruction/cleanups, so we can safely "emit" it after its arg,
4440 // regardless of right-to-leftness
4441 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4442 }
4443 }
4444
4445 if (!LeftToRight) {
4446 // Un-reverse the arguments we just evaluated so they match up with the LLVM
4447 // IR function.
4448 std::reverse(Args.begin() + CallArgsStart, Args.end());
4449 }
4450}
4451
4452namespace {
4453
4454struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
4455 DestroyUnpassedArg(Address Addr, QualType Ty)
4456 : Addr(Addr), Ty(Ty) {}
4457
4458 Address Addr;
4459 QualType Ty;
4460
4461 void Emit(CodeGenFunction &CGF, Flags flags) override {
4462 QualType::DestructionKind DtorKind = Ty.isDestructedType();
4463 if (DtorKind == QualType::DK_cxx_destructor) {
4464 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
4465 assert(!Dtor->isTrivial());
4466 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
4467 /*Delegating=*/false, Addr, Ty);
4468 } else {
4469 CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
4470 }
4471 }
4472};
4473
4474struct DisableDebugLocationUpdates {
4475 CodeGenFunction &CGF;
4476 bool disabledDebugInfo;
4477 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
4478 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
4479 CGF.disableDebugInfo();
4480 }
4481 ~DisableDebugLocationUpdates() {
4482 if (disabledDebugInfo)
4483 CGF.enableDebugInfo();
4484 }
4485};
4486
4487} // end anonymous namespace
4488
4489RValue CallArg::getRValue(CodeGenFunction &CGF) const {
4490 if (!HasLV)
4491 return RV;
4492 LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
4493 CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
4494 LV.isVolatile());
4495 IsUsed = true;
4496 return RValue::getAggregate(Copy.getAddress(CGF));
4497}
4498
4499void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
4500 LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
4501 if (!HasLV && RV.isScalar())
4502 CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
4503 else if (!HasLV && RV.isComplex())
4504 CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
4505 else {
4506 auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
4507 LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
4508 // We assume that call args are never copied into subobjects.
4509 CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
4510 HasLV ? LV.isVolatileQualified()
4511 : RV.isVolatileQualified());
4512 }
4513 IsUsed = true;
4514}
4515
4516void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
4517 QualType type) {
4518 DisableDebugLocationUpdates Dis(*this, E);
4519 if (const ObjCIndirectCopyRestoreExpr *CRE
4520 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
4521 assert(getLangOpts().ObjCAutoRefCount);
4522 return emitWritebackArg(*this, args, CRE);
4523 }
4524
4525 assert(type->isReferenceType() == E->isGLValue() &&
4526 "reference binding to unmaterialized r-value!");
4527
4528 if (E->isGLValue()) {
4529 assert(E->getObjectKind() == OK_Ordinary);
4530 return args.add(EmitReferenceBindingToExpr(E), type);
4531 }
4532
4533 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
4534
4535 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
4536 // However, we still have to push an EH-only cleanup in case we unwind before
4537 // we make it to the call.
4538 if (type->isRecordType() &&
4539 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
4540 // If we're using inalloca, use the argument memory. Otherwise, use a
4541 // temporary.
4542 AggValueSlot Slot = args.isUsingInAlloca()
4543 ? createPlaceholderSlot(*this, type) : CreateAggTemp(type, "agg.tmp");
4544
4545 bool DestroyedInCallee = true, NeedsEHCleanup = true;
4546 if (const auto *RD = type->getAsCXXRecordDecl())
4547 DestroyedInCallee = RD->hasNonTrivialDestructor();
4548 else
4549 NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
4550
4551 if (DestroyedInCallee)
4552 Slot.setExternallyDestructed();
4553
4554 EmitAggExpr(E, Slot);
4555 RValue RV = Slot.asRValue();
4556 args.add(RV, type);
4557
4558 if (DestroyedInCallee && NeedsEHCleanup) {
4559 // Create a no-op GEP between the placeholder and the cleanup so we can
4560 // RAUW it successfully. It also serves as a marker of the first
4561 // instruction where the cleanup is active.
4562 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
4563 type);
4564 // This unreachable is a temporary marker which will be removed later.
4565 llvm::Instruction *IsActive = Builder.CreateUnreachable();
4566 args.addArgCleanupDeactivation(EHStack.stable_begin(), IsActive);
4567 }
4568 return;
4569 }
4570
4571 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
4572 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
4573 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
4574 assert(L.isSimple());
4575 args.addUncopiedAggregate(L, type);
4576 return;
4577 }
4578
4579 args.add(EmitAnyExprToTemp(E), type);
4580}
4581
4582QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
4583 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
4584 // implicitly widens null pointer constants that are arguments to varargs
4585 // functions to pointer-sized ints.
4586 if (!getTarget().getTriple().isOSWindows())
4587 return Arg->getType();
4588
4589 if (Arg->getType()->isIntegerType() &&
4590 getContext().getTypeSize(Arg->getType()) <
4591 getContext().getTargetInfo().getPointerWidth(LangAS::Default) &&
4592 Arg->isNullPointerConstant(getContext(),
4593 Expr::NPC_ValueDependentIsNotNull)) {
4594 return getContext().getIntPtrType();
4595 }
4596
4597 return Arg->getType();
4598}
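
A sketch of the Win64 situation the comment above describes; without the widening performed by getVarArgType, the varargs slot would receive only a 32-bit zero. The function name is made up:

// Assumed illustrative example, not part of CGCall.cpp
// (x86_64-pc-windows-msvc, where NULL expands to a plain 0).
#include <stdio.h>
void log_null(void) { printf("%p\n", NULL); }   // NULL is widened to intptr_t
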
4599
4600// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4601// optimizer it can aggressively ignore unwind edges.
4602void
4603CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
4604 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
4605 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
4606 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
4607 CGM.getNoObjCARCExceptionsMetadata());
4608}
4609
4610/// Emits a call to the given no-arguments nounwind runtime function.
4611llvm::CallInst *
4612CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4613 const llvm::Twine &name) {
4614 return EmitNounwindRuntimeCall(callee, std::nullopt, name);
4615}
4616
4617/// Emits a call to the given nounwind runtime function.
4618llvm::CallInst *
4619CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4620 ArrayRef<llvm::Value *> args,
4621 const llvm::Twine &name) {
4622 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
4623 call->setDoesNotThrow();
4624 return call;
4625}
4626
4627/// Emits a simple call (never an invoke) to the given no-arguments
4628/// runtime function.
4629llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4630 const llvm::Twine &name) {
4631 return EmitRuntimeCall(callee, std::nullopt, name);
4632}
4633
4634// Calls which may throw must have operand bundles indicating which funclet
4635// they are nested within.
4636SmallVector<llvm::OperandBundleDef, 1>
4637CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
4638 // There is no need for a funclet operand bundle if we aren't inside a
4639 // funclet.
4640 if (!CurrentFuncletPad)
4641 return (SmallVector<llvm::OperandBundleDef, 1>());
4642
4643 // Skip intrinsics which cannot throw (as long as they don't lower into
4644 // regular function calls in the course of IR transformations).
4645 if (auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
4646 if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
4647 auto IID = CalleeFn->getIntrinsicID();
4648 if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
4649 return (SmallVector<llvm::OperandBundleDef, 1>());
4650 }
4651 }
4652
4653 SmallVector<llvm::OperandBundleDef, 1> BundleList;
4654 BundleList.emplace_back("funclet", CurrentFuncletPad);
4655 return BundleList;
4656}
4657
4658/// Emits a simple call (never an invoke) to the given runtime function.
4659llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4660 ArrayRef<llvm::Value *> args,
4661 const llvm::Twine &name) {
4662 llvm::CallInst *call = Builder.CreateCall(
4663 callee, args, getBundlesForFunclet(callee.getCallee()), name);
4664 call->setCallingConv(getRuntimeCC());
4665 return call;
4666}
4667
4668/// Emits a call or invoke to the given noreturn runtime function.
4669void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
4670 llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
4671 SmallVector<llvm::OperandBundleDef, 1> BundleList =
4672 getBundlesForFunclet(callee.getCallee());
4673
4674 if (getInvokeDest()) {
4675 llvm::InvokeInst *invoke =
4676 Builder.CreateInvoke(callee,
4677 getUnreachableBlock(),
4678 getInvokeDest(),
4679 args,
4680 BundleList);
4681 invoke->setDoesNotReturn();
4682 invoke->setCallingConv(getRuntimeCC());
4683 } else {
4684 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
4685 call->setDoesNotReturn();
4686 call->setCallingConv(getRuntimeCC());
4687 Builder.CreateUnreachable();
4688 }
4689}
4690
4691/// Emits a call or invoke instruction to the given nullary runtime function.
4692llvm::CallBase *
4693CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4694 const Twine &name) {
4695 return EmitRuntimeCallOrInvoke(callee, std::nullopt, name);
4696}
4697
4698/// Emits a call or invoke instruction to the given runtime function.
4699llvm::CallBase *
4700CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4701 ArrayRef<llvm::Value *> args,
4702 const Twine &name) {
4703 llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
4704 call->setCallingConv(getRuntimeCC());
4705 return call;
4706}
4707
4708/// Emits a call or invoke instruction to the given function, depending
4709/// on the current state of the EH stack.
4710llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
4711 ArrayRef<llvm::Value *> Args,
4712 const Twine &Name) {
4713 llvm::BasicBlock *InvokeDest = getInvokeDest();
4714 SmallVector<llvm::OperandBundleDef, 1> BundleList =
4715 getBundlesForFunclet(Callee.getCallee());
4716
4717 llvm::CallBase *Inst;
4718 if (!InvokeDest)
4719 Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
4720 else {
4721 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
4722 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
4723 Name);
4724 EmitBlock(ContBB);
4725 }
4726
4727 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4728 // optimizer it can aggressively ignore unwind edges.
4729 if (CGM.getLangOpts().ObjCAutoRefCount)
4730 AddObjCARCExceptionMetadata(Inst);
4731
4732 return Inst;
4733}
4734
4735void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
4736 llvm::Value *New) {
4737 DeferredReplacements.push_back(
4738 std::make_pair(llvm::WeakTrackingVH(Old), New));
4739}
4740
4741namespace {
4742
4743/// Specify the given \p NewAlign as the alignment of the return value attribute.
4744/// If such an attribute already exists, reset it to the larger of the two.
4745[[nodiscard]] llvm::AttributeList
4746maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
4747 const llvm::AttributeList &Attrs,
4748 llvm::Align NewAlign) {
4749 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
4750 if (CurAlign >= NewAlign)
4751 return Attrs;
4752 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
4753 return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
4754 .addRetAttribute(Ctx, AlignAttr);
4755}
4756
4757template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
4758protected:
4759 CodeGenFunction &CGF;
4760
4761 /// We do nothing if this is, or becomes, nullptr.
4762 const AlignedAttrTy *AA = nullptr;
4763
4764 llvm::Value *Alignment = nullptr; // May or may not be a constant.
4765 llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.
4766
4767 AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4768 : CGF(CGF_) {
4769 if (!FuncDecl)
4770 return;
4771 AA = FuncDecl->getAttr<AlignedAttrTy>();
4772 }
4773
4774public:
4775 /// If we can, materialize the alignment as an attribute on return value.
4776 [[nodiscard]] llvm::AttributeList
4777 TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
4778 if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
4779 return Attrs;
4780 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
4781 if (!AlignmentCI)
4782 return Attrs;
4783 // We may legitimately have non-power-of-2 alignment here.
4784 // If so, this is UB land, emit it via `@llvm.assume` instead.
4785 if (!AlignmentCI->getValue().isPowerOf2())
4786 return Attrs;
4787 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
4788 CGF.getLLVMContext(), Attrs,
4789 llvm::Align(
4790 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
4791 AA = nullptr; // We're done. Disallow doing anything else.
4792 return NewAttrs;
4793 }
4794
4795 /// Emit alignment assumption.
4796 /// This is a general fallback that we take if either there is an offset,
4797 /// or the alignment is variable or we are sanitizing for alignment.
4798 void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
4799 if (!AA)
4800 return;
4801 CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
4802 AA->getLocation(), Alignment, OffsetCI);
4803 AA = nullptr; // We're done. Disallow doing anything else.
4804 }
4805};
4806
4807/// Helper data structure to emit `AssumeAlignedAttr`.
4808class AssumeAlignedAttrEmitter final
4809 : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
4810public:
4811 AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4812 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4813 if (!AA)
4814 return;
4815 // It is guaranteed that the alignment/offset are constants.
4816 Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
4817 if (Expr *Offset = AA->getOffset()) {
4818 OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
4819 if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
4820 OffsetCI = nullptr;
4821 }
4822 }
4823};
4824
4825/// Helper data structure to emit `AllocAlignAttr`.
4826class AllocAlignAttrEmitter final
4827 : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
4828public:
4829 AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
4830 const CallArgList &CallArgs)
4831 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4832 if (!AA)
4833 return;
4834 // Alignment may or may not be a constant, and that is okay.
4835 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
4836 .getRValue(CGF)
4837 .getScalarVal();
4838 }
4839};
4840
4841} // namespace
4842
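The two emitter classes above handle the assume_aligned and alloc_align function attributes at call sites. As a rough illustration (hypothetical declarations, not taken from this file), calls to functions declared like these are what AssumeAlignedAttrEmitter and AllocAlignAttrEmitter respectively act on:

    #include <stddef.h>

    // Hypothetical declaration: assume_aligned promises a 64-byte-aligned result.
    void *get_buffer(void) __attribute__((assume_aligned(64)));

    // Hypothetical declaration: alloc_align says parameter 1 gives the alignment.
    void *my_aligned_alloc(size_t alignment, size_t size)
        __attribute__((alloc_align(1)));

When the alignment is a suitable power-of-two constant it becomes an align attribute on the call's return value; otherwise it is emitted as an @llvm.assume, as the code above describes.
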
4843static unsigned getMaxVectorWidth(const llvm::Type *Ty) {
4844 if (auto *VT = dyn_cast<llvm::VectorType>(Ty))
4845 return VT->getPrimitiveSizeInBits().getKnownMinValue();
4846 if (auto *AT = dyn_cast<llvm::ArrayType>(Ty))
4847 return getMaxVectorWidth(AT->getElementType());
4848
4849 unsigned MaxVectorWidth = 0;
4850 if (auto *ST = dyn_cast<llvm::StructType>(Ty))
4851 for (auto *I : ST->elements())
4852 MaxVectorWidth = std::max(MaxVectorWidth, getMaxVectorWidth(I));
4853 return MaxVectorWidth;
4854}
4855
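getMaxVectorWidth reports, in bits, the widest vector contained anywhere in a type; EmitCall below uses it to keep LargestVectorWidth up to date for both the arguments and the return value. A small sketch of what it computes (illustrative only; Ctx stands for an llvm::LLVMContext assumed to be in scope):

    // Ctx is an llvm::LLVMContext assumed to be in scope (illustrative only).
    // <4 x float> is 128 bits; a struct containing it reports the same width.
    auto *V4F = llvm::FixedVectorType::get(llvm::Type::getFloatTy(Ctx), 4);
    auto *S = llvm::StructType::get(Ctx, {llvm::Type::getInt32Ty(Ctx), V4F});
    unsigned W1 = getMaxVectorWidth(V4F); // 128
    unsigned W2 = getMaxVectorWidth(S);   // also 128, from the vector element
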
4856RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
4857 const CGCallee &Callee,
4858 ReturnValueSlot ReturnValue,
4859 const CallArgList &CallArgs,
4860 llvm::CallBase **callOrInvoke, bool IsMustTail,
4861 SourceLocation Loc) {
4862 // FIXME: We no longer need the types from CallArgs; lift up and simplify.
4863
4864 assert(Callee.isOrdinary() || Callee.isVirtual());
4865
4866 // Handle struct-return functions by passing a pointer to the
4867 // location that we would like to return into.
4868 QualType RetTy = CallInfo.getReturnType();
4869 const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
4870
4871 llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
4872
4873 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4874 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
4875 // We can only guarantee that a function is called from the correct
4876 // context/function based on the appropriate target attributes,
4877 // so only check in the case where we have both always_inline and target
4878 // since otherwise we could be making a conditional call after a check for
4879 // the proper cpu features (and it won't cause code generation issues due to
4880 // function based code generation).
4881 if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
4882 (TargetDecl->hasAttr<TargetAttr>() ||
4883 (CurFuncDecl && CurFuncDecl->hasAttr<TargetAttr>())))
4884 checkTargetFeatures(Loc, FD);
4885
4886 // Some architectures (such as x86-64) have the ABI changed based on
4887 // attribute-target/features. Give them a chance to diagnose.
4888 CGM.getTargetCodeGenInfo().checkFunctionCallABI(
4889 CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
4890 }
4891
4892#ifndef NDEBUG
4893 if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
4894 // For an inalloca varargs function, we don't expect CallInfo to match the
4895 // function pointer's type, because the inalloca struct will have extra
4896 // fields in it for the varargs parameters. Code later in this function
4897 // bitcasts the function pointer to the type derived from CallInfo.
4898 //
4899 // In other cases, we assert that the types match up (until pointers stop
4900 // having pointee types).
4901 if (Callee.isVirtual())
4902 assert(IRFuncTy == Callee.getVirtualFunctionType());
4903 else {
4904 llvm::PointerType *PtrTy =
4905 llvm::cast<llvm::PointerType>(Callee.getFunctionPointer()->getType());
4906 assert(PtrTy->isOpaqueOrPointeeTypeMatches(IRFuncTy));
4907 }
4908 }
4909#endif
4910
4911 // 1. Set up the arguments.
4912
4913 // If we're using inalloca, insert the allocation after the stack save.
4914 // FIXME: Do this earlier rather than hacking it in here!
4915 Address ArgMemory = Address::invalid();
4916 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
4917 const llvm::DataLayout &DL = CGM.getDataLayout();
4918 llvm::Instruction *IP = CallArgs.getStackBase();
4919 llvm::AllocaInst *AI;
4920 if (IP) {
4921 IP = IP->getNextNode();
4922 AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
4923 "argmem", IP);
4924 } else {
4925 AI = CreateTempAlloca(ArgStruct, "argmem");
4926 }
4927 auto Align = CallInfo.getArgStructAlignment();
4928 AI->setAlignment(Align.getAsAlign());
4929 AI->setUsedWithInAlloca(true);
4930 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
4931 ArgMemory = Address(AI, ArgStruct, Align);
4932 }
4933
4934 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
4935 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
4936
4937 // If the call returns a temporary with struct return, create a temporary
4938 // alloca to hold the result, unless one is given to us.
4939 Address SRetPtr = Address::invalid();
4940 Address SRetAlloca = Address::invalid();
4941 llvm::Value *UnusedReturnSizePtr = nullptr;
4942 if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
4943 if (!ReturnValue.isNull()) {
4944 SRetPtr = ReturnValue.getValue();
4945 } else {
4946 SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
4947 if (HaveInsertPoint() && ReturnValue.isUnused()) {
4948 llvm::TypeSize size =
4949 CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
4950 UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
4951 }
4952 }
4953 if (IRFunctionArgs.hasSRetArg()) {
4954 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
4955 } else if (RetAI.isInAlloca()) {
4956 Address Addr =
4957 Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
4958 Builder.CreateStore(SRetPtr.getPointer(), Addr);
4959 }
4960 }
4961
4962 Address swiftErrorTemp = Address::invalid();
4963 Address swiftErrorArg = Address::invalid();
4964
4965 // When passing arguments using temporary allocas, we need to add the
4966 // appropriate lifetime markers. This vector keeps track of all the lifetime
4967 // markers that need to be ended right after the call.
4968 SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;
4969
4970 // Translate all of the arguments as necessary to match the IR lowering.
4971 assert(CallInfo.arg_size() == CallArgs.size() &&
4972 "Mismatch between function signature & arguments.");
4973 unsigned ArgNo = 0;
4974 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
4975 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
4976 I != E; ++I, ++info_it, ++ArgNo) {
4977 const ABIArgInfo &ArgInfo = info_it->info;
4978
4979 // Insert a padding argument to ensure proper alignment.
4980 if (IRFunctionArgs.hasPaddingArg(ArgNo))
4981 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
4982 llvm::UndefValue::get(ArgInfo.getPaddingType());
4983
4984 unsigned FirstIRArg, NumIRArgs;
4985 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
4986
4987 bool ArgHasMaybeUndefAttr =
4988 IsArgumentMaybeUndef(TargetDecl, CallInfo.getNumRequiredArgs(), ArgNo);
4989
4990 switch (ArgInfo.getKind()) {
4991 case ABIArgInfo::InAlloca: {
4992 assert(NumIRArgs == 0);
4993 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
4994 if (I->isAggregate()) {
4995 Address Addr = I->hasLValue()
4996 ? I->getKnownLValue().getAddress(*this)
4997 : I->getKnownRValue().getAggregateAddress();
4998 llvm::Instruction *Placeholder =
4999 cast<llvm::Instruction>(Addr.getPointer());
5000
5001 if (!ArgInfo.getInAllocaIndirect()) {
5002 // Replace the placeholder with the appropriate argument slot GEP.
5003 CGBuilderTy::InsertPoint IP = Builder.saveIP();
5004 Builder.SetInsertPoint(Placeholder);
5005 Addr = Builder.CreateStructGEP(ArgMemory,
5006 ArgInfo.getInAllocaFieldIndex());
5007 Builder.restoreIP(IP);
5008 } else {
5009 // For indirect things such as overaligned structs, replace the
5010 // placeholder with a regular aggregate temporary alloca. Store the
5011 // address of this alloca into the struct.
5012 Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
5013 Address ArgSlot = Builder.CreateStructGEP(
5014 ArgMemory, ArgInfo.getInAllocaFieldIndex());
5015 Builder.CreateStore(Addr.getPointer(), ArgSlot);
5016 }
5017 deferPlaceholderReplacement(Placeholder, Addr.getPointer());
5018 } else if (ArgInfo.getInAllocaIndirect()) {
5019 // Make a temporary alloca and store the address of it into the argument
5020 // struct.
5021 Address Addr = CreateMemTempWithoutCast(
5022 I->Ty, getContext().getTypeAlignInChars(I->Ty),
5023 "indirect-arg-temp");
5024 I->copyInto(*this, Addr);
5025 Address ArgSlot =
5026 Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
5027 Builder.CreateStore(Addr.getPointer(), ArgSlot);
5028 } else {
5029 // Store the RValue into the argument struct.
5030 Address Addr =
5031 Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
5032 // There are some cases where a trivial bitcast is not avoidable. The
5033 // definition of a type later in a translation unit may change its type
5034 // from {}* to (%struct.foo*)*.
5035 Addr = Builder.CreateElementBitCast(Addr, ConvertTypeForMem(I->Ty));
5036 I->copyInto(*this, Addr);
5037 }
5038 break;
5039 }
5040
5041 case ABIArgInfo::Indirect:
5042 case ABIArgInfo::IndirectAliased: {
5043 assert(NumIRArgs == 1);
5044 if (!I->isAggregate()) {
5045 // Make a temporary alloca to pass the argument.
5046 Address Addr = CreateMemTempWithoutCast(
5047 I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
5048
5049 llvm::Value *Val = Addr.getPointer();
5050 if (ArgHasMaybeUndefAttr)
5051 Val = Builder.CreateFreeze(Addr.getPointer());
5052 IRCallArgs[FirstIRArg] = Val;
5053
5054 I->copyInto(*this, Addr);
5055 } else {
5056 // We want to avoid creating an unnecessary temporary+copy here;
5057 // however, we need one in three cases:
5058 // 1. If the argument is not byval, and we are required to copy the
5059 // source. (This case doesn't occur on any common architecture.)
5060 // 2. If the argument is byval, RV is not sufficiently aligned, and
5061 // we cannot force it to be sufficiently aligned.
5062 // 3. If the argument is byval, but RV is not located in default
5063 // or alloca address space.
5064 Address Addr = I->hasLValue()
5065 ? I->getKnownLValue().getAddress(*this)
5066 : I->getKnownRValue().getAggregateAddress();
5067 llvm::Value *V = Addr.getPointer();
5068 CharUnits Align = ArgInfo.getIndirectAlign();
5069 const llvm::DataLayout *TD = &CGM.getDataLayout();
5070
5071 assert((FirstIRArg >= IRFuncTy->getNumParams() ||
5072 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
5073 TD->getAllocaAddrSpace()) &&
5074 "indirect argument must be in alloca address space");
5075
5076 bool NeedCopy = false;
5077
5078 if (Addr.getAlignment() < Align &&
5079 llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
5080 Align.getAsAlign()) {
5081 NeedCopy = true;
5082 } else if (I->hasLValue()) {
5083 auto LV = I->getKnownLValue();
5084 auto AS = LV.getAddressSpace();
5085
5086 if (!ArgInfo.getIndirectByVal() ||
5087 (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
5088 NeedCopy = true;
5089 }
5090 if (!getLangOpts().OpenCL) {
5091 if ((ArgInfo.getIndirectByVal() &&
5092 (AS != LangAS::Default &&
5093 AS != CGM.getASTAllocaAddressSpace()))) {
5094 NeedCopy = true;
5095 }
5096 }
5097 // For OpenCL even if RV is located in default or alloca address space
5098 // we don't want to perform address space cast for it.
5099 else if ((ArgInfo.getIndirectByVal() &&
5100 Addr.getType()->getAddressSpace() != IRFuncTy->
5101 getParamType(FirstIRArg)->getPointerAddressSpace())) {
5102 NeedCopy = true;
5103 }
5104 }
5105
5106 if (NeedCopy) {
5107 // Create an aligned temporary, and copy to it.
5108 Address AI = CreateMemTempWithoutCast(
5109 I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
5110 llvm::Value *Val = AI.getPointer();
5111 if (ArgHasMaybeUndefAttr)
5112 Val = Builder.CreateFreeze(AI.getPointer());
5113 IRCallArgs[FirstIRArg] = Val;
5114
5115 // Emit lifetime markers for the temporary alloca.
5116 llvm::TypeSize ByvalTempElementSize =
5117 CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
5118 llvm::Value *LifetimeSize =
5119 EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
5120
5121 // Add cleanup code to emit the end lifetime marker after the call.
5122 if (LifetimeSize) // In case we disabled lifetime markers.
5123 CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
5124
5125 // Generate the copy.
5126 I->copyInto(*this, AI);
5127 } else {
5128 // Skip the extra memcpy call.
5129 auto *T = llvm::PointerType::getWithSamePointeeType(
5130 cast<llvm::PointerType>(V->getType()),
5131 CGM.getDataLayout().getAllocaAddrSpace());
5132
5133 llvm::Value *Val = getTargetHooks().performAddrSpaceCast(
5134 *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
5135 true);
5136 if (ArgHasMaybeUndefAttr)
5137 Val = Builder.CreateFreeze(Val);
5138 IRCallArgs[FirstIRArg] = Val;
5139 }
5140 }
5141 break;
5142 }
5143
5144 case ABIArgInfo::Ignore:
5145 assert(NumIRArgs == 0);
5146 break;
5147
5148 case ABIArgInfo::Extend:
5149 case ABIArgInfo::Direct: {
5150 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
5151 ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
5152 ArgInfo.getDirectOffset() == 0) {
5153 assert(NumIRArgs == 1);
5154 llvm::Value *V;
5155 if (!I->isAggregate())
5156 V = I->getKnownRValue().getScalarVal();
5157 else
5158 V = Builder.CreateLoad(
5159 I->hasLValue() ? I->getKnownLValue().getAddress(*this)
5160 : I->getKnownRValue().getAggregateAddress());
5161
5162 // Implement swifterror by copying into a new swifterror argument.
5163 // We'll write back in the normal path out of the call.
5164 if (CallInfo.getExtParameterInfo(ArgNo).getABI()
5165 == ParameterABI::SwiftErrorResult) {
5166 assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
5167
5168 QualType pointeeTy = I->Ty->getPointeeType();
5169 swiftErrorArg = Address(V, ConvertTypeForMem(pointeeTy),
5170 getContext().getTypeAlignInChars(pointeeTy));
5171
5172 swiftErrorTemp =
5173 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
5174 V = swiftErrorTemp.getPointer();
5175 cast<llvm::AllocaInst>(V)->setSwiftError(true);
5176
5177 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
5178 Builder.CreateStore(errorValue, swiftErrorTemp);
5179 }
5180
5181 // We might have to widen integers, but we should never truncate.
5182 if (ArgInfo.getCoerceToType() != V->getType() &&
5183 V->getType()->isIntegerTy())
5184 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
5185
5186 // If the argument doesn't match, perform a bitcast to coerce it. This
5187 // can happen due to trivial type mismatches.
5188 if (FirstIRArg < IRFuncTy->getNumParams() &&
5189 V->getType() != IRFuncTy->getParamType(FirstIRArg))
5190 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
5191
5192 if (ArgHasMaybeUndefAttr)
5193 V = Builder.CreateFreeze(V);
5194 IRCallArgs[FirstIRArg] = V;
5195 break;
5196 }
5197
5198 // FIXME: Avoid the conversion through memory if possible.
5199 Address Src = Address::invalid();
5200 if (!I->isAggregate()) {
5201 Src = CreateMemTemp(I->Ty, "coerce");
5202 I->copyInto(*this, Src);
5203 } else {
5204 Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
5205 : I->getKnownRValue().getAggregateAddress();
5206 }
5207
5208 // If the value is offset in memory, apply the offset now.
5209 Src = emitAddressAtOffset(*this, Src, ArgInfo);
5210
5211 // Fast-isel and the optimizer generally like scalar values better than
5212 // FCAs, so we flatten them if this is safe to do for this argument.
5213 llvm::StructType *STy =
5214 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
5215 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
5216 llvm::Type *SrcTy = Src.getElementType();
5217 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
5218 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
5219
5220 // If the source type is smaller than the destination type of the
5221 // coerce-to logic, copy the source value into a temp alloca the size
5222 // of the destination type to allow loading all of it. The bits past
5223 // the source value are left undef.
5224 if (SrcSize < DstSize) {
5225 Address TempAlloca
5226 = CreateTempAlloca(STy, Src.getAlignment(),
5227 Src.getName() + ".coerce");
5228 Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
5229 Src = TempAlloca;
5230 } else {
5231 Src = Builder.CreateElementBitCast(Src, STy);
5232 }
5233
5234 assert(NumIRArgs == STy->getNumElements());
5235 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5236 Address EltPtr = Builder.CreateStructGEP(Src, i);
5237 llvm::Value *LI = Builder.CreateLoad(EltPtr);
5238 if (ArgHasMaybeUndefAttr)
5239 LI = Builder.CreateFreeze(LI);
5240 IRCallArgs[FirstIRArg + i] = LI;
5241 }
5242 } else {
5243 // In the simple case, just pass the coerced loaded value.
5244 assert(NumIRArgs == 1);
5245 llvm::Value *Load =
5246 CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
5247
5248 if (CallInfo.isCmseNSCall()) {
5249 // For certain parameter types, clear padding bits, as they may reveal
5250 // sensitive information.
5251 // Small struct/union types are passed as integer arrays.
5252 auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
5253 if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
5254 Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
5255 }
5256
5257 if (ArgHasMaybeUndefAttr)
5258 Load = Builder.CreateFreeze(Load);
5259 IRCallArgs[FirstIRArg] = Load;
5260 }
5261
5262 break;
5263 }
5264
5265 case ABIArgInfo::CoerceAndExpand: {
5266 auto coercionType = ArgInfo.getCoerceAndExpandType();
5267 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
5268
5269 llvm::Value *tempSize = nullptr;
5270 Address addr = Address::invalid();
5271 Address AllocaAddr = Address::invalid();
5272 if (I->isAggregate()) {
5273 addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
5274 : I->getKnownRValue().getAggregateAddress();
5275
5276 } else {
5277 RValue RV = I->getKnownRValue();
5278 assert(RV.isScalar()); // complex should always just be direct
5279
5280 llvm::Type *scalarType = RV.getScalarVal()->getType();
5281 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
5282 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlign(scalarType);
5283
5284 // Materialize to a temporary.
5285 addr = CreateTempAlloca(
5286 RV.getScalarVal()->getType(),
5287 CharUnits::fromQuantity(std::max(layout->getAlignment(), scalarAlign)),
5288 "tmp",
5289 /*ArraySize=*/nullptr, &AllocaAddr);
5290 tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
5291
5292 Builder.CreateStore(RV.getScalarVal(), addr);
5293 }
5294
5295 addr = Builder.CreateElementBitCast(addr, coercionType);
5296
5297 unsigned IRArgPos = FirstIRArg;
5298 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5299 llvm::Type *eltType = coercionType->getElementType(i);
5300 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
5301 Address eltAddr = Builder.CreateStructGEP(addr, i);
5302 llvm::Value *elt = Builder.CreateLoad(eltAddr);
5303 if (ArgHasMaybeUndefAttr)
5304 elt = Builder.CreateFreeze(elt);
5305 IRCallArgs[IRArgPos++] = elt;
5306 }
5307 assert(IRArgPos == FirstIRArg + NumIRArgs);
5308
5309 if (tempSize) {
5310 EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
5311 }
5312
5313 break;
5314 }
5315
5316 case ABIArgInfo::Expand: {
5317 unsigned IRArgPos = FirstIRArg;
5318 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
5319 assert(IRArgPos == FirstIRArg + NumIRArgs);
5320 break;
5321 }
5322 }
5323 }
5324
5325 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
5326 llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
5327
5328 // If we're using inalloca, set up that argument.
5329 if (ArgMemory.isValid()) {
5330 llvm::Value *Arg = ArgMemory.getPointer();
5331 if (CallInfo.isVariadic()) {
5332 // When passing non-POD arguments by value to variadic functions, we will
5333 // end up with a variadic prototype and an inalloca call site. In such
5334 // cases, we can't do any parameter mismatch checks. Give up and bitcast
5335 // the callee.
5336 unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
5337 CalleePtr =
5338 Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
5339 } else {
5340 llvm::Type *LastParamTy =
5341 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
5342 if (Arg->getType() != LastParamTy) {
5343#ifndef NDEBUG
5344 // Assert that these structs have equivalent element types.
5345 llvm::StructType *FullTy = CallInfo.getArgStruct();
5346 if (!LastParamTy->isOpaquePointerTy()) {
5347 llvm::StructType *DeclaredTy = cast<llvm::StructType>(
5348 LastParamTy->getNonOpaquePointerElementType());
5349 assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
5350 for (auto DI = DeclaredTy->element_begin(),
5351 DE = DeclaredTy->element_end(),
5352 FI = FullTy->element_begin();
5353 DI != DE; ++DI, ++FI)
5354 assert(*DI == *FI);
5355 }
5356#endif
5357 Arg = Builder.CreateBitCast(Arg, LastParamTy);
5358 }
5359 }
5360 assert(IRFunctionArgs.hasInallocaArg());
5361 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
5362 }
5363
5364 // 2. Prepare the function pointer.
5365
5366 // If the callee is a bitcast of a non-variadic function to have a
5367 // variadic function pointer type, check to see if we can remove the
5368 // bitcast. This comes up with unprototyped functions.
5369 //
5370 // This makes the IR nicer, but more importantly it ensures that we
5371 // can inline the function at -O0 if it is marked always_inline.
5372 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
5373 llvm::Value *Ptr) -> llvm::Function * {
5374 if (!CalleeFT->isVarArg())
5375 return nullptr;
5376
5377 // Get underlying value if it's a bitcast
5378 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
5379 if (CE->getOpcode() == llvm::Instruction::BitCast)
5380 Ptr = CE->getOperand(0);
5381 }
5382
5383 llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
5384 if (!OrigFn)
5385 return nullptr;
5386
5387 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
5388
5389 // If the original type is variadic, or if any of the component types
5390 // disagree, we cannot remove the cast.
5391 if (OrigFT->isVarArg() ||
5392 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
5393 OrigFT->getReturnType() != CalleeFT->getReturnType())
5394 return nullptr;
5395
5396 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
5397 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
5398 return nullptr;
5399
5400 return OrigFn;
5401 };
5402
5403 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
5404 CalleePtr = OrigFn;
5405 IRFuncTy = OrigFn->getFunctionType();
5406 }
5407
5408 // 3. Perform the actual call.
5409
5410 // Deactivate any cleanups that we're supposed to do immediately before
5411 // the call.
5412 if (!CallArgs.getCleanupsToDeactivate().empty())
5413 deactivateArgCleanupsBeforeCall(*this, CallArgs);
5414
5415 // Assert that the arguments we computed match up. The IR verifier
5416 // will catch this, but this is a common enough source of problems
5417 // during IRGen changes that it's way better for debugging to catch
5418 // it ourselves here.
5419#ifndef NDEBUG
5420 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
5421 for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
5422 // An inalloca argument can have a different type.
5423 if (IRFunctionArgs.hasInallocaArg() &&
5424 i == IRFunctionArgs.getInallocaArgNo())
5425 continue;
5426 if (i < IRFuncTy->getNumParams())
5427 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
5428 }
5429#endif
5430
5431 // Update the largest vector width if any arguments have vector types.
5432 for (unsigned i = 0; i < IRCallArgs.size(); ++i)
5433 LargestVectorWidth = std::max(LargestVectorWidth,
5434 getMaxVectorWidth(IRCallArgs[i]->getType()));
5435
5436 // Compute the calling convention and attributes.
5437 unsigned CallingConv;
5438 llvm::AttributeList Attrs;
5439 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
5440 Callee.getAbstractInfo(), Attrs, CallingConv,
5441 /*AttrOnCallSite=*/true,
5442 /*IsThunk=*/false);
5443
5444 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
5445 if (FD->hasAttr<StrictFPAttr>())
5446 // All calls within a strictfp function are marked strictfp
5447 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
5448
5449 // Add call-site nomerge attribute if exists.
5450 if (InNoMergeAttributedStmt)
5451 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoMerge);
5452
5453 // Add call-site noinline attribute if exists.
5454 if (InNoInlineAttributedStmt)
5455 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
5456
5457 // Add call-site always_inline attribute if exists.
5458 if (InAlwaysInlineAttributedStmt)
5459 Attrs =
5460 Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
5461
5462 // Apply some call-site-specific attributes.
5463 // TODO: work this into building the attribute set.
5464
5465 // Apply always_inline to all calls within flatten functions.
5466 // FIXME: should this really take priority over __try, below?
5467 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
5468 !InNoInlineAttributedStmt &&
5469 !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
5470 Attrs =
5471 Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
5472 }
5473
5474 // Disable inlining inside SEH __try blocks.
5475 if (isSEHTryScope()) {
5476 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
5477 }
5478
5479 // Decide whether to use a call or an invoke.
5480 bool CannotThrow;
5481 if (currentFunctionUsesSEHTry()) {
5482 // SEH cares about asynchronous exceptions, so everything can "throw."
5483 CannotThrow = false;
5484 } else if (isCleanupPadScope() &&
5485 EHPersonality::get(*this).isMSVCXXPersonality()) {
5486 // The MSVC++ personality will implicitly terminate the program if an
5487 // exception is thrown during a cleanup outside of a try/catch.
5488 // We don't need to model anything in IR to get this behavior.
5489 CannotThrow = true;
5490 } else {
5491 // Otherwise, nounwind call sites will never throw.
5492 CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
5493
5494 if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
5495 if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
5496 CannotThrow = true;
5497 }
5498
5499 // If we made a temporary, be sure to clean up after ourselves. Note that we
5500 // can't depend on being inside of an ExprWithCleanups, so we need to manually
5501 // pop this cleanup later on. Being eager about this is OK, since this
5502 // temporary is 'invisible' outside of the callee.
5503 if (UnusedReturnSizePtr)
5504 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
5505 UnusedReturnSizePtr);
5506
5507 llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
5508
5509 SmallVector<llvm::OperandBundleDef, 1> BundleList =
5510 getBundlesForFunclet(CalleePtr);
5511
5512 if (SanOpts.has(SanitizerKind::KCFI) &&
5513 !isa_and_nonnull<FunctionDecl>(TargetDecl))
5514 EmitKCFIOperandBundle(ConcreteCallee, BundleList);
5515
5516 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
5517 if (FD->hasAttr<StrictFPAttr>())
5518 // All calls within a strictfp function are marked strictfp
5519 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
5520
5521 AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
5522 Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5523
5524 AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
5525 Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5526
5527 // Emit the actual call/invoke instruction.
5528 llvm::CallBase *CI;
5529 if (!InvokeDest) {
5530 CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
5531 } else {
5532 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
5533 CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
5534 BundleList);
5535 EmitBlock(Cont);
5536 }
5537 if (callOrInvoke)
5538 *callOrInvoke = CI;
5539
5540 // If this is within a function that has the guard(nocf) attribute and is an
5541 // indirect call, add the "guard_nocf" attribute to this call to indicate that
5542 // Control Flow Guard checks should not be added, even if the call is inlined.
5543 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
5544 if (const auto *A = FD->getAttr<CFGuardAttr>()) {
5545 if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
5546 Attrs = Attrs.addFnAttribute(getLLVMContext(), "guard_nocf");
5547 }
5548 }
5549
5550 // Apply the attributes and calling convention.
5551 CI->setAttributes(Attrs);
5552 CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
5553
5554 // Apply various metadata.
5555
5556 if (!CI->getType()->isVoidTy())
5557 CI->setName("call");
5558
5559 // Update largest vector width from the return type.
5560 LargestVectorWidth =
5561 std::max(LargestVectorWidth, getMaxVectorWidth(CI->getType()));
5562
5563 // Insert instrumentation or attach profile metadata at indirect call sites.
5564 // For more details, see the comment before the definition of
5565 // IPVK_IndirectCallTarget in InstrProfData.inc.
5566 if (!CI->getCalledFunction())
5567 PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
5568 CI, CalleePtr);
5569
5570 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
5571 // optimizer it can aggressively ignore unwind edges.
5572 if (CGM.getLangOpts().ObjCAutoRefCount)
5573 AddObjCARCExceptionMetadata(CI);
5574
5575 // Set tail call kind if necessary.
5576 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
5577 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
5578 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
5579 else if (IsMustTail)
5580 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
5581 }
5582
5583 // Add metadata for calls to MSAllocator functions
5584 if (getDebugInfo() && TargetDecl &&
5585 TargetDecl->hasAttr<MSAllocatorAttr>())
5586 getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);
5587
5588 // Add metadata if calling an __attribute__((error(""))) or warning fn.
5589 if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) {
5590 llvm::ConstantInt *Line =
5591 llvm::ConstantInt::get(Int32Ty, Loc.getRawEncoding());
5592 llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(Line);
5593 llvm::MDTuple *MDT = llvm::MDNode::get(getLLVMContext(), {MD});
5594 CI->setMetadata("srcloc", MDT);
5595 }
5596
5597 // 4. Finish the call.
5598
5599 // If the call doesn't return, finish the basic block and clear the
5600 // insertion point; this allows the rest of IRGen to discard
5601 // unreachable code.
5602 if (CI->doesNotReturn()) {
5603 if (UnusedReturnSizePtr)
5604 PopCleanupBlock();
5605
5606 // Strip away the noreturn attribute to better diagnose unreachable UB.
5607 if (SanOpts.has(SanitizerKind::Unreachable)) {
5608 // Also remove from function since CallBase::hasFnAttr additionally checks
5609 // attributes of the called function.
5610 if (auto *F = CI->getCalledFunction())
5611 F->removeFnAttr(llvm::Attribute::NoReturn);
5612 CI->removeFnAttr(llvm::Attribute::NoReturn);
5613
5614 // Avoid incompatibility with ASan which relies on the `noreturn`
5615 // attribute to insert handler calls.
5616 if (SanOpts.hasOneOf(SanitizerKind::Address |
5617 SanitizerKind::KernelAddress)) {
5618 SanitizerScope SanScope(this);
5619 llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
5620 Builder.SetInsertPoint(CI);
5621 auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
5622 llvm::FunctionCallee Fn =
5623 CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
5624 EmitNounwindRuntimeCall(Fn);
5625 }
5626 }
5627
5628 EmitUnreachable(Loc);
5629 Builder.ClearInsertionPoint();
5630
5631 // FIXME: For now, emit a dummy basic block because expr emitters in
5632 // general are not ready to handle emitting expressions at unreachable
5633 // points.
5634 EnsureInsertPoint();
5635
5636 // Return a reasonable RValue.
5637 return GetUndefRValue(RetTy);
5638 }
5639
5640 // If this is a musttail call, return immediately. We do not branch to the
5641 // epilogue in this case.
5642 if (IsMustTail) {
5643 for (auto it = EHStack.find(CurrentCleanupScopeDepth); it != EHStack.end();
5644 ++it) {
5645 EHCleanupScope *Cleanup = dyn_cast<EHCleanupScope>(&*it);
5646 if (!(Cleanup && Cleanup->getCleanup()->isRedundantBeforeReturn()))
5647 CGM.ErrorUnsupported(MustTailCall, "tail call skipping over cleanups");
5648 }
5649 if (CI->getType()->isVoidTy())
5650 Builder.CreateRetVoid();
5651 else
5652 Builder.CreateRet(CI);
5653 Builder.ClearInsertionPoint();
5654 EnsureInsertPoint();
5655 return GetUndefRValue(RetTy);
5656 }
5657
5658 // Perform the swifterror writeback.
5659 if (swiftErrorTemp.isValid()) {
5660 llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
5661 Builder.CreateStore(errorResult, swiftErrorArg);
5662 }
5663
5664 // Emit any call-associated writebacks immediately. Arguably this
5665 // should happen after any return-value munging.
5666 if (CallArgs.hasWritebacks())
5667 emitWritebacks(*this, CallArgs);
5668
5669 // The stack cleanup for inalloca arguments has to run out of the normal
5670 // lexical order, so deactivate it and run it manually here.
5671 CallArgs.freeArgumentMemory(*this);
5672
5673 // Extract the return value.
5674 RValue Ret = [&] {
5675 switch (RetAI.getKind()) {
5676 case ABIArgInfo::CoerceAndExpand: {
5677 auto coercionType = RetAI.getCoerceAndExpandType();
5678
5679 Address addr = SRetPtr;
5680 addr = Builder.CreateElementBitCast(addr, coercionType);
5681
5682 assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
5683 bool requiresExtract = isa<llvm::StructType>(CI->getType());
5684
5685 unsigned unpaddedIndex = 0;
5686 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5687 llvm::Type *eltType = coercionType->getElementType(i);
5688 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
5689 Address eltAddr = Builder.CreateStructGEP(addr, i);
5690 llvm::Value *elt = CI;
5691 if (requiresExtract)
5692 elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
5693 else
5694 assert(unpaddedIndex == 0);
5695 Builder.CreateStore(elt, eltAddr);
5696 }
5697 // FALLTHROUGH
5698 [[fallthrough]];
5699 }
5700
5701 case ABIArgInfo::InAlloca:
5702 case ABIArgInfo::Indirect: {
5703 RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
5704 if (UnusedReturnSizePtr)
5705 PopCleanupBlock();
5706 return ret;
5707 }
5708
5709 case ABIArgInfo::Ignore:
5710 // If we are ignoring an argument that had a result, make sure to
5711 // construct the appropriate return value for our caller.
5712 return GetUndefRValue(RetTy);
5713
5714 case ABIArgInfo::Extend:
5715 case ABIArgInfo::Direct: {
5716 llvm::Type *RetIRTy = ConvertType(RetTy);
5717 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
5718 switch (getEvaluationKind(RetTy)) {
5719 case TEK_Complex: {
5720 llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
5721 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
5722 return RValue::getComplex(std::make_pair(Real, Imag));
5723 }
5724 case TEK_Aggregate: {
5725 Address DestPtr = ReturnValue.getValue();
5726 bool DestIsVolatile = ReturnValue.isVolatile();
5727
5728 if (!DestPtr.isValid()) {
5729 DestPtr = CreateMemTemp(RetTy, "agg.tmp");
5730 DestIsVolatile = false;
5731 }
5732 EmitAggregateStore(CI, DestPtr, DestIsVolatile);
5733 return RValue::getAggregate(DestPtr);
5734 }
5735 case TEK_Scalar: {
5736 // If the argument doesn't match, perform a bitcast to coerce it. This
5737 // can happen due to trivial type mismatches.
5738 llvm::Value *V = CI;
5739 if (V->getType() != RetIRTy)
5740 V = Builder.CreateBitCast(V, RetIRTy);
5741 return RValue::get(V);
5742 }
5743 }
5744 llvm_unreachable("bad evaluation kind")::llvm::llvm_unreachable_internal("bad evaluation kind", "clang/lib/CodeGen/CGCall.cpp"
, 5744)
;
5745 }
5746
5747 Address DestPtr = ReturnValue.getValue();
5748 bool DestIsVolatile = ReturnValue.isVolatile();
5749
5750 if (!DestPtr.isValid()) {
5751 DestPtr = CreateMemTemp(RetTy, "coerce");
5752 DestIsVolatile = false;
5753 }
5754
5755 // If the value is offset in memory, apply the offset now.
5756 Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
5757 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
5758
5759 return convertTempToRValue(DestPtr, RetTy, SourceLocation());
5760 }
5761
5762 case ABIArgInfo::Expand:
5763 case ABIArgInfo::IndirectAliased:
5764 llvm_unreachable("Invalid ABI kind for return argument")::llvm::llvm_unreachable_internal("Invalid ABI kind for return argument"
, "clang/lib/CodeGen/CGCall.cpp", 5764)
;
5765 }
5766
5767 llvm_unreachable("Unhandled ABIArgInfo::Kind")::llvm::llvm_unreachable_internal("Unhandled ABIArgInfo::Kind"
, "clang/lib/CodeGen/CGCall.cpp", 5767)
;
5768 } ();
5769
5770 // Emit the assume_aligned check on the return value.
5771 if (Ret.isScalar() && TargetDecl) {
5772 AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5773 AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5774 }
5775
5776 // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
5777 // we can't use the full cleanup mechanism.
5778 for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
5779 LifetimeEnd.Emit(*this, /*Flags=*/{});
5780
5781 if (!ReturnValue.isExternallyDestructed() &&
5782 RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
5783 pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
5784 RetTy);
5785
5786 return Ret;
5787}
5788
5789CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
5790 if (isVirtual()) {
5791 const CallExpr *CE = getVirtualCallExpr();
5792 return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
5793 CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
5794 CE ? CE->getBeginLoc() : SourceLocation());
5795 }
5796
5797 return *this;
5798}
5799
5800/* VarArg handling */
5801
5802Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
5803 VAListAddr = VE->isMicrosoftABI()
5804 ? EmitMSVAListRef(VE->getSubExpr())
5805 : EmitVAListRef(VE->getSubExpr());
5806 QualType Ty = VE->getType();
5807 if (VE->isMicrosoftABI())
5808 return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
5809 return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
5810}

/usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/bits/unique_ptr.h

1// unique_ptr implementation -*- C++ -*-
2
3// Copyright (C) 2008-2020 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/unique_ptr.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{memory}
28 */
29
30#ifndef _UNIQUE_PTR_H
31#define _UNIQUE_PTR_H 1
32
33#include <bits/c++config.h>
34#include <debug/assertions.h>
35#include <type_traits>
36#include <utility>
37#include <tuple>
38#include <bits/stl_function.h>
39#include <bits/functional_hash.h>
40#if __cplusplus > 201703L
41# include <compare>
42# include <ostream>
43#endif
44
45namespace std _GLIBCXX_VISIBILITY(default)
46{
47_GLIBCXX_BEGIN_NAMESPACE_VERSION
48
49 /**
50 * @addtogroup pointer_abstractions
51 * @{
52 */
53
54#if _GLIBCXX_USE_DEPRECATED
55#pragma GCC diagnostic push
56#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
57 template<typename> class auto_ptr;
58#pragma GCC diagnostic pop
59#endif
60
61 /// Primary template of default_delete, used by unique_ptr for single objects
62 template<typename _Tp>
63 struct default_delete
64 {
65 /// Default constructor
66 constexpr default_delete() noexcept = default;
67
68 /** @brief Converting constructor.
69 *
70 * Allows conversion from a deleter for objects of another type, `_Up`,
71 * only if `_Up*` is convertible to `_Tp*`.
72 */
73 template<typename _Up,
74 typename = _Require<is_convertible<_Up*, _Tp*>>>
75 default_delete(const default_delete<_Up>&) noexcept { }
76
77 /// Calls `delete __ptr`
78 void
79 operator()(_Tp* __ptr) const
80 {
81 static_assert(!is_void<_Tp>::value,
82 "can't delete pointer to incomplete type");
83 static_assert(sizeof(_Tp)>0,
84 "can't delete pointer to incomplete type");
85 delete __ptr;
86 }
87 };
88
89 // _GLIBCXX_RESOLVE_LIB_DEFECTS
90 // DR 740 - omit specialization for array objects with a compile time length
91
92 /// Specialization of default_delete for arrays, used by `unique_ptr<T[]>`
93 template<typename _Tp>
94 struct default_delete<_Tp[]>
95 {
96 public:
97 /// Default constructor
98 constexpr default_delete() noexcept = default;
99
100 /** @brief Converting constructor.
101 *
102 * Allows conversion from a deleter for arrays of another type, such as
103 * a const-qualified version of `_Tp`.
104 *
105 * Conversions from types derived from `_Tp` are not allowed because
106 * it is undefined to `delete[]` an array of derived types through a
107 * pointer to the base type.
108 */
109 template<typename _Up,
110 typename = _Require<is_convertible<_Up(*)[], _Tp(*)[]>>>
111 default_delete(const default_delete<_Up[]>&) noexcept { }
112
113 /// Calls `delete[] __ptr`
114 template<typename _Up>
115 typename enable_if<is_convertible<_Up(*)[], _Tp(*)[]>::value>::type
116 operator()(_Up* __ptr) const
117 {
118 static_assert(sizeof(_Tp)>0,
119 "can't delete pointer to incomplete type");
120 delete [] __ptr;
121 }
122 };
123
124 /// @cond undocumented
125
126 // Manages the pointer and deleter of a unique_ptr
127 template <typename _Tp, typename _Dp>
128 class __uniq_ptr_impl
129 {
130 template <typename _Up, typename _Ep, typename = void>
131 struct _Ptr
132 {
133 using type = _Up*;
134 };
135
136 template <typename _Up, typename _Ep>
137 struct
138 _Ptr<_Up, _Ep, __void_t<typename remove_reference<_Ep>::type::pointer>>
139 {
140 using type = typename remove_reference<_Ep>::type::pointer;
141 };
142
143 public:
144 using _DeleterConstraint = enable_if<
145 __and_<__not_<is_pointer<_Dp>>,
146 is_default_constructible<_Dp>>::value>;
147
148 using pointer = typename _Ptr<_Tp, _Dp>::type;
149
150 static_assert( !is_rvalue_reference<_Dp>::value,
151 "unique_ptr's deleter type must be a function object type"
152 " or an lvalue reference type" );
153
154 __uniq_ptr_impl() = default;
155 __uniq_ptr_impl(pointer __p) : _M_t() { _M_ptr() = __p; }
156
157 template<typename _Del>
158 __uniq_ptr_impl(pointer __p, _Del&& __d)
159 : _M_t(__p, std::forward<_Del>(__d)) { }
160
161 __uniq_ptr_impl(__uniq_ptr_impl&& __u) noexcept
162 : _M_t(std::move(__u._M_t))
163 { __u._M_ptr() = nullptr; }
164
165 __uniq_ptr_impl& operator=(__uniq_ptr_impl&& __u) noexcept
166 {
167 reset(__u.release());
168 _M_deleter() = std::forward<_Dp>(__u._M_deleter());
169 return *this;
170 }
171
172 pointer& _M_ptr() { return std::get<0>(_M_t); }
173 pointer _M_ptr() const { return std::get<0>(_M_t); }
174 _Dp& _M_deleter() { return std::get<1>(_M_t); }
175 const _Dp& _M_deleter() const { return std::get<1>(_M_t); }
176
177 void reset(pointer __p) noexcept
178 {
179 const pointer __old_p = _M_ptr();
180 _M_ptr() = __p;
181 if (__old_p)
182 _M_deleter()(__old_p);
183 }
184
185 pointer release() noexcept
186 {
187 pointer __p = _M_ptr();
188 _M_ptr() = nullptr;
189 return __p;
190 }
191
192 void
193 swap(__uniq_ptr_impl& __rhs) noexcept
194 {
195 using std::swap;
196 swap(this->_M_ptr(), __rhs._M_ptr());
197 swap(this->_M_deleter(), __rhs._M_deleter());
198 }
199
200 private:
201 tuple<pointer, _Dp> _M_t;
202 };
203
204 // Defines move construction + assignment as either defaulted or deleted.
205 template <typename _Tp, typename _Dp,
206 bool = is_move_constructible<_Dp>::value,
207 bool = is_move_assignable<_Dp>::value>
208 struct __uniq_ptr_data : __uniq_ptr_impl<_Tp, _Dp>
209 {
210 using __uniq_ptr_impl<_Tp, _Dp>::__uniq_ptr_impl;
211 __uniq_ptr_data(__uniq_ptr_data&&) = default;
212 __uniq_ptr_data& operator=(__uniq_ptr_data&&) = default;
213 };
214
215 template <typename _Tp, typename _Dp>
216 struct __uniq_ptr_data<_Tp, _Dp, true, false> : __uniq_ptr_impl<_Tp, _Dp>
217 {
218 using __uniq_ptr_impl<_Tp, _Dp>::__uniq_ptr_impl;
219 __uniq_ptr_data(__uniq_ptr_data&&) = default;
220 __uniq_ptr_data& operator=(__uniq_ptr_data&&) = delete;
221 };
222
223 template <typename _Tp, typename _Dp>
224 struct __uniq_ptr_data<_Tp, _Dp, false, true> : __uniq_ptr_impl<_Tp, _Dp>
225 {
226 using __uniq_ptr_impl<_Tp, _Dp>::__uniq_ptr_impl;
227 __uniq_ptr_data(__uniq_ptr_data&&) = delete;
228 __uniq_ptr_data& operator=(__uniq_ptr_data&&) = default;
229 };
230
231 template <typename _Tp, typename _Dp>
232 struct __uniq_ptr_data<_Tp, _Dp, false, false> : __uniq_ptr_impl<_Tp, _Dp>
233 {
234 using __uniq_ptr_impl<_Tp, _Dp>::__uniq_ptr_impl;
235 __uniq_ptr_data(__uniq_ptr_data&&) = delete;
236 __uniq_ptr_data& operator=(__uniq_ptr_data&&) = delete;
237 };
238 /// @endcond
239
240 /// 20.7.1.2 unique_ptr for single objects.
241 template <typename _Tp, typename _Dp = default_delete<_Tp>>
242 class unique_ptr
243 {
244 template <typename _Up>
245 using _DeleterConstraint =
246 typename __uniq_ptr_impl<_Tp, _Up>::_DeleterConstraint::type;
247
248 __uniq_ptr_data<_Tp, _Dp> _M_t;
249
250 public:
251 using pointer = typename __uniq_ptr_impl<_Tp, _Dp>::pointer;
252 using element_type = _Tp;
253 using deleter_type = _Dp;
254
255 private:
256 // helper template for detecting a safe conversion from another
257 // unique_ptr
258 template<typename _Up, typename _Ep>
259 using __safe_conversion_up = __and_<
260 is_convertible<typename unique_ptr<_Up, _Ep>::pointer, pointer>,
261 __not_<is_array<_Up>>
262 >;
263
264 public:
265 // Constructors.
266
267 /// Default constructor, creates a unique_ptr that owns nothing.
268 template<typename _Del = _Dp, typename = _DeleterConstraint<_Del>>
269 constexpr unique_ptr() noexcept
270 : _M_t()
271 { }
272
273 /** Takes ownership of a pointer.
274 *
275 * @param __p A pointer to an object of @c element_type
276 *
277 * The deleter will be value-initialized.
278 */
279 template<typename _Del = _Dp, typename = _DeleterConstraint<_Del>>
280 explicit
281 unique_ptr(pointer __p) noexcept
282 : _M_t(__p)
26
Calling constructor for '__uniq_ptr_data<(anonymous namespace)::NoExpansion, std::default_delete<(anonymous namespace)::NoExpansion>, true, true>'
27
Calling constructor for '__uniq_ptr_impl<(anonymous namespace)::NoExpansion, std::default_delete<(anonymous namespace)::NoExpansion>>'
28
Returning from constructor for '__uniq_ptr_impl<(anonymous namespace)::NoExpansion, std::default_delete<(anonymous namespace)::NoExpansion>>'
29
Returning from constructor for '__uniq_ptr_data<(anonymous namespace)::NoExpansion, std::default_delete<(anonymous namespace)::NoExpansion>, true, true>'
283 { }
284
285 /** Takes ownership of a pointer.
286 *
287 * @param __p A pointer to an object of @c element_type
288 * @param __d A reference to a deleter.
289 *
290 * The deleter will be initialized with @p __d
291 */
292 template<typename _Del = deleter_type,
293 typename = _Require<is_copy_constructible<_Del>>>
294 unique_ptr(pointer __p, const deleter_type& __d) noexcept
295 : _M_t(__p, __d) { }
296
297 /** Takes ownership of a pointer.
298 *
299 * @param __p A pointer to an object of @c element_type
300 * @param __d An rvalue reference to a (non-reference) deleter.
301 *
302 * The deleter will be initialized with @p std::move(__d)
303 */
304 template<typename _Del = deleter_type,
305 typename = _Require<is_move_constructible<_Del>>>
306 unique_ptr(pointer __p,
307 __enable_if_t<!is_lvalue_reference<_Del>::value,
308 _Del&&> __d) noexcept
309 : _M_t(__p, std::move(__d))
310 { }
311
312 template<typename _Del = deleter_type,
313 typename _DelUnref = typename remove_reference<_Del>::type>
314 unique_ptr(pointer,
315 __enable_if_t<is_lvalue_reference<_Del>::value,
316 _DelUnref&&>) = delete;
317
318 /// Creates a unique_ptr that owns nothing.
319 template<typename _Del = _Dp, typename = _DeleterConstraint<_Del>>
320 constexpr unique_ptr(nullptr_t) noexcept
321 : _M_t()
322 { }
323
324 // Move constructors.
325
326 /// Move constructor.
327 unique_ptr(unique_ptr&&) = default;
328
329 /** @brief Converting constructor from another type
330 *
331 * Requires that the pointer owned by @p __u is convertible to the
332 * type of pointer owned by this object, @p __u does not own an array,
333 * and @p __u has a compatible deleter type.
334 */
335 template<typename _Up, typename _Ep, typename = _Require<
336 __safe_conversion_up<_Up, _Ep>,
337 typename conditional<is_reference<_Dp>::value,
338 is_same<_Ep, _Dp>,
339 is_convertible<_Ep, _Dp>>::type>>
340 unique_ptr(unique_ptr<_Up, _Ep>&& __u) noexcept
341 : _M_t(__u.release(), std::forward<_Ep>(__u.get_deleter()))
33
Calling constructor for '__uniq_ptr_data<(anonymous namespace)::TypeExpansion, std::default_delete<(anonymous namespace)::TypeExpansion>, true, true>'
34
Calling constructor for '__uniq_ptr_impl<(anonymous namespace)::TypeExpansion, std::default_delete<(anonymous namespace)::TypeExpansion>>'
35
Returning from constructor for '__uniq_ptr_impl<(anonymous namespace)::TypeExpansion, std::default_delete<(anonymous namespace)::TypeExpansion>>'
36
Returning from constructor for '__uniq_ptr_data<(anonymous namespace)::TypeExpansion, std::default_delete<(anonymous namespace)::TypeExpansion>, true, true>'
342 { }
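A brief usage sketch of this converting constructor (not part of the header; Base and Derived are illustrative names): it is what lets ownership move from a unique_ptr to a derived object into a unique_ptr to its base.

    #include <memory>

    struct Base { virtual ~Base() = default; };
    struct Derived : Base { };

    std::unique_ptr<Base> makeBase()
    {
      auto d = std::make_unique<Derived>();
      // Derived* is convertible to Base*, Derived is not an array type, and
      // default_delete<Derived> converts to default_delete<Base>, so the
      // converting constructor above is selected.
      return std::unique_ptr<Base>(std::move(d));
    }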
343
344#if _GLIBCXX_USE_DEPRECATED
345#pragma GCC diagnostic push
346#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
347 /// Converting constructor from @c auto_ptr
348 template<typename _Up, typename = _Require<
349 is_convertible<_Up*, _Tp*>, is_same<_Dp, default_delete<_Tp>>>>
350 unique_ptr(auto_ptr<_Up>&& __u) noexcept;
351#pragma GCC diagnostic pop
352#endif
353
354 /// Destructor, invokes the deleter if the stored pointer is not null.
355 ~unique_ptr() noexcept
356 {
357 static_assert(__is_invocable<deleter_type&, pointer>::value,
358 "unique_ptr's deleter must be invocable with a pointer");
359 auto& __ptr = _M_t._M_ptr();
360 if (__ptr != nullptr)
361 get_deleter()(std::move(__ptr));
362 __ptr = pointer();
363 }
364
365 // Assignment.
366
367 /** @brief Move assignment operator.
368 *
369 * Invokes the deleter if this object owns a pointer.
370 */
371 unique_ptr& operator=(unique_ptr&&) = default;
372
373 /** @brief Assignment from another type.
374 *
375 * @param __u The object to transfer ownership from, which owns a
376 * convertible pointer to a non-array object.
377 *
378 * Invokes the deleter if this object owns a pointer.
379 */
380 template<typename _Up, typename _Ep>
381 typename enable_if< __and_<
382 __safe_conversion_up<_Up, _Ep>,
383 is_assignable<deleter_type&, _Ep&&>
384 >::value,
385 unique_ptr&>::type
386 operator=(unique_ptr<_Up, _Ep>&& __u) noexcept
387 {
388 reset(__u.release());
389 get_deleter() = std::forward<_Ep>(__u.get_deleter());
390 return *this;
391 }
392
393 /// Reset the %unique_ptr to empty, invoking the deleter if necessary.
394 unique_ptr&
395 operator=(nullptr_t) noexcept
396 {
397 reset();
398 return *this;
399 }
400
401 // Observers.
402
403 /// Dereference the stored pointer.
404 typename add_lvalue_reference<element_type>::type
405 operator*() const
406 {
407 __glibcxx_assert(get() != pointer());
408 return *get();
409 }
410
411 /// Return the stored pointer.
412 pointer
413 operator->() const noexcept
414 {
415 _GLIBCXX_DEBUG_PEDASSERT(get() != pointer());
416 return get();
417 }
418
419 /// Return the stored pointer.
420 pointer
421 get() const noexcept
422 { return _M_t._M_ptr(); }
423
424 /// Return a reference to the stored deleter.
425 deleter_type&
426 get_deleter() noexcept
427 { return _M_t._M_deleter(); }
428
429 /// Return a reference to the stored deleter.
430 const deleter_type&
431 get_deleter() const noexcept
432 { return _M_t._M_deleter(); }
433
434 /// Return @c true if the stored pointer is not null.
435 explicit operator bool() const noexcept
436 { return get() == pointer() ? false : true; }
437
438 // Modifiers.
439
440 /// Release ownership of any stored pointer.
441 pointer
442 release() noexcept
443 { return _M_t.release(); }
444
445 /** @brief Replace the stored pointer.
446 *
447 * @param __p The new pointer to store.
448 *
449 * The deleter will be invoked if a pointer is already owned.
450 */
451 void
452 reset(pointer __p = pointer()) noexcept
453 {
454 static_assert(__is_invocable<deleter_type&, pointer>::value,
455 "unique_ptr's deleter must be invocable with a pointer");
456 _M_t.reset(std::move(__p));
457 }
458
459 /// Exchange the pointer and deleter with another object.
460 void
461 swap(unique_ptr& __u) noexcept
462 {
463 static_assert(__is_swappable<_Dp>::value, "deleter must be swappable");
464 _M_t.swap(__u._M_t);
465 }
466
467 // Disable copy from lvalue.
468 unique_ptr(const unique_ptr&) = delete;
469 unique_ptr& operator=(const unique_ptr&) = delete;
470 };
471
472 /// 20.7.1.3 unique_ptr for array objects with a runtime length
473 // [unique.ptr.runtime]
474 // _GLIBCXX_RESOLVE_LIB_DEFECTS
475 // DR 740 - omit specialization for array objects with a compile time length
476 template<typename _Tp, typename _Dp>
477 class unique_ptr<_Tp[], _Dp>
478 {
479 template <typename _Up>
480 using _DeleterConstraint =
481 typename __uniq_ptr_impl<_Tp, _Up>::_DeleterConstraint::type;
482
483 __uniq_ptr_data<_Tp, _Dp> _M_t;
484
485 template<typename _Up>
486 using __remove_cv = typename remove_cv<_Up>::type;
487
488 // like is_base_of<_Tp, _Up> but false if unqualified types are the same
489 template<typename _Up>
490 using __is_derived_Tp
491 = __and_< is_base_of<_Tp, _Up>,
492 __not_<is_same<__remove_cv<_Tp>, __remove_cv<_Up>>> >;
493
494 public:
495 using pointer = typename __uniq_ptr_impl<_Tp, _Dp>::pointer;
496 using element_type = _Tp;
497 using deleter_type = _Dp;
498
499 // helper template for detecting a safe conversion from another
500 // unique_ptr
501 template<typename _Up, typename _Ep,
502 typename _UPtr = unique_ptr<_Up, _Ep>,
503 typename _UP_pointer = typename _UPtr::pointer,
504 typename _UP_element_type = typename _UPtr::element_type>
505 using __safe_conversion_up = __and_<
506 is_array<_Up>,
507 is_same<pointer, element_type*>,
508 is_same<_UP_pointer, _UP_element_type*>,
509 is_convertible<_UP_element_type(*)[], element_type(*)[]>
510 >;
511
512 // helper template for detecting a safe conversion from a raw pointer
513 template<typename _Up>
514 using __safe_conversion_raw = __and_<
515 __or_<__or_<is_same<_Up, pointer>,
516 is_same<_Up, nullptr_t>>,
517 __and_<is_pointer<_Up>,
518 is_same<pointer, element_type*>,
519 is_convertible<
520 typename remove_pointer<_Up>::type(*)[],
521 element_type(*)[]>
522 >
523 >
524 >;
525
526 // Constructors.
527
528 /// Default constructor, creates a unique_ptr that owns nothing.
529 template<typename _Del = _Dp, typename = _DeleterConstraint<_Del>>
530 constexpr unique_ptr() noexcept
531 : _M_t()
532 { }
533
534 /** Takes ownership of a pointer.
535 *
536 * @param __p A pointer to an array of a type safely convertible
537 * to an array of @c element_type
538 *
539 * The deleter will be value-initialized.
540 */
541 template<typename _Up,
542 typename _Vp = _Dp,
543 typename = _DeleterConstraint<_Vp>,
544 typename = typename enable_if<
545 __safe_conversion_raw<_Up>::value, bool>::type>
546 explicit
547 unique_ptr(_Up __p) noexcept
548 : _M_t(__p)
549 { }
550
551 /** Takes ownership of a pointer.
552 *
553 * @param __p A pointer to an array of a type safely convertible
554 * to an array of @c element_type
555 * @param __d A reference to a deleter.
556 *
557 * The deleter will be initialized with @p __d
558 */
559 template<typename _Up, typename _Del = deleter_type,
560 typename = _Require<__safe_conversion_raw<_Up>,
561 is_copy_constructible<_Del>>>
562 unique_ptr(_Up __p, const deleter_type& __d) noexcept
563 : _M_t(__p, __d) { }
564
565 /** Takes ownership of a pointer.
566 *
567 * @param __p A pointer to an array of a type safely convertible
568 * to an array of @c element_type
569 * @param __d A reference to a deleter.
570 *
571 * The deleter will be initialized with @p std::move(__d)
572 */
573 template<typename _Up, typename _Del = deleter_type,
574 typename = _Require<__safe_conversion_raw<_Up>,
575 is_move_constructible<_Del>>>
576 unique_ptr(_Up __p,
577 __enable_if_t<!is_lvalue_reference<_Del>::value,
578 _Del&&> __d) noexcept
579 : _M_t(std::move(__p), std::move(__d))
580 { }
581
582 template<typename _Up, typename _Del = deleter_type,
583 typename _DelUnref = typename remove_reference<_Del>::type,
584 typename = _Require<__safe_conversion_raw<_Up>>>
585 unique_ptr(_Up,
586 __enable_if_t<is_lvalue_reference<_Del>::value,
587 _DelUnref&&>) = delete;
588
589 /// Move constructor.
590 unique_ptr(unique_ptr&&) = default;
591
592 /// Creates a unique_ptr that owns nothing.
593 template<typename _Del = _Dp, typename = _DeleterConstraint<_Del>>
594 constexpr unique_ptr(nullptr_t) noexcept
595 : _M_t()
596 { }
597
598 template<typename _Up, typename _Ep, typename = _Require<
599 __safe_conversion_up<_Up, _Ep>,
600 typename conditional<is_reference<_Dp>::value,
601 is_same<_Ep, _Dp>,
602 is_convertible<_Ep, _Dp>>::type>>
603 unique_ptr(unique_ptr<_Up, _Ep>&& __u) noexcept
604 : _M_t(__u.release(), std::forward<_Ep>(__u.get_deleter()))
605 { }
606
607 /// Destructor, invokes the deleter if the stored pointer is not null.
608 ~unique_ptr()
609 {
610 auto& __ptr = _M_t._M_ptr();
611 if (__ptr != nullptr)
612 get_deleter()(__ptr);
613 __ptr = pointer();
614 }
615
616 // Assignment.
617
618 /** @brief Move assignment operator.
619 *
620 * Invokes the deleter if this object owns a pointer.
621 */
622 unique_ptr&
623 operator=(unique_ptr&&) = default;
624
625 /** @brief Assignment from another type.
626 *
627 * @param __u The object to transfer ownership from, which owns a
628 * convertible pointer to an array object.
629 *
630 * Invokes the deleter if this object owns a pointer.
631 */
632 template<typename _Up, typename _Ep>
633 typename
634 enable_if<__and_<__safe_conversion_up<_Up, _Ep>,
635 is_assignable<deleter_type&, _Ep&&>
636 >::value,
637 unique_ptr&>::type
638 operator=(unique_ptr<_Up, _Ep>&& __u) noexcept
639 {
640 reset(__u.release());
641 get_deleter() = std::forward<_Ep>(__u.get_deleter());
642 return *this;
643 }
644
645 /// Reset the %unique_ptr to empty, invoking the deleter if necessary.
646 unique_ptr&
647 operator=(nullptr_t) noexcept
648 {
649 reset();
650 return *this;
651 }
652
653 // Observers.
654
655 /// Access an element of owned array.
656 typename std::add_lvalue_reference<element_type>::type
657 operator[](size_t __i) const
658 {
659 __glibcxx_assert(get() != pointer());
660 return get()[__i];
661 }
662
663 /// Return the stored pointer.
664 pointer
665 get() const noexcept
666 { return _M_t._M_ptr(); }
667
668 /// Return a reference to the stored deleter.
669 deleter_type&
670 get_deleter() noexcept
671 { return _M_t._M_deleter(); }
672
673 /// Return a reference to the stored deleter.
674 const deleter_type&
675 get_deleter() const noexcept
676 { return _M_t._M_deleter(); }
677
678 /// Return @c true if the stored pointer is not null.
679 explicit operator bool() const noexcept
680 { return get() == pointer() ? false : true; }
681
682 // Modifiers.
683
684 /// Release ownership of any stored pointer.
685 pointer
686 release() noexcept
687 { return _M_t.release(); }
688
689 /** @brief Replace the stored pointer.
690 *
691 * @param __p The new pointer to store.
692 *
693 * The deleter will be invoked if a pointer is already owned.
694 */
695 template <typename _Up,
696 typename = _Require<
697 __or_<is_same<_Up, pointer>,
698 __and_<is_same<pointer, element_type*>,
699 is_pointer<_Up>,
700 is_convertible<
701 typename remove_pointer<_Up>::type(*)[],
702 element_type(*)[]
703 >
704 >
705 >
706 >>
707 void
708 reset(_Up __p) noexcept
709 { _M_t.reset(std::move(__p)); }
710
711 void reset(nullptr_t = nullptr) noexcept
712 { reset(pointer()); }
713
714 /// Exchange the pointer and deleter with another object.
715 void
716 swap(unique_ptr& __u) noexcept
717 {
718 static_assert(__is_swappable<_Dp>::value, "deleter must be swappable");
719 _M_t.swap(__u._M_t);
720 }
721
722 // Disable copy from lvalue.
723 unique_ptr(const unique_ptr&) = delete;
724 unique_ptr& operator=(const unique_ptr&) = delete;
725 };
726
727 /// @relates unique_ptr @{
728
729 /// Swap overload for unique_ptr
730 template<typename _Tp, typename _Dp>
731 inline
732#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
733 // Constrained free swap overload, see p0185r1
734 typename enable_if<__is_swappable<_Dp>::value>::type
735#else
736 void
737#endif
738 swap(unique_ptr<_Tp, _Dp>& __x,
739 unique_ptr<_Tp, _Dp>& __y) noexcept
740 { __x.swap(__y); }
741
742#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
743 template<typename _Tp, typename _Dp>
744 typename enable_if<!__is_swappable<_Dp>::value>::type
745 swap(unique_ptr<_Tp, _Dp>&,
746 unique_ptr<_Tp, _Dp>&) = delete;
747#endif
748
749 /// Equality operator for unique_ptr objects, compares the owned pointers
750 template<typename _Tp, typename _Dp,
751 typename _Up, typename _Ep>
752 _GLIBCXX_NODISCARD inline bool
753 operator==(const unique_ptr<_Tp, _Dp>& __x,
754 const unique_ptr<_Up, _Ep>& __y)
755 { return __x.get() == __y.get(); }
756
757 /// unique_ptr comparison with nullptr
758 template<typename _Tp, typename _Dp>
759 _GLIBCXX_NODISCARD inline bool
760 operator==(const unique_ptr<_Tp, _Dp>& __x, nullptr_t) noexcept
761 { return !__x; }
762
763#ifndef __cpp_lib_three_way_comparison
764 /// unique_ptr comparison with nullptr
765 template<typename _Tp, typename _Dp>
766 _GLIBCXX_NODISCARD inline bool
767 operator==(nullptr_t, const unique_ptr<_Tp, _Dp>& __x) noexcept
768 { return !__x; }
769
770 /// Inequality operator for unique_ptr objects, compares the owned pointers
771 template<typename _Tp, typename _Dp,
772 typename _Up, typename _Ep>
773 _GLIBCXX_NODISCARD inline bool
774 operator!=(const unique_ptr<_Tp, _Dp>& __x,
775 const unique_ptr<_Up, _Ep>& __y)
776 { return __x.get() != __y.get(); }
777
778 /// unique_ptr comparison with nullptr
779 template<typename _Tp, typename _Dp>
780 _GLIBCXX_NODISCARD inline bool
781 operator!=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t) noexcept
782 { return (bool)__x; }
783
784 /// unique_ptr comparison with nullptr
785 template<typename _Tp, typename _Dp>
786 _GLIBCXX_NODISCARD inline bool
787 operator!=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x) noexcept
788 { return (bool)__x; }
789#endif // three way comparison
790
791 /// Relational operator for unique_ptr objects, compares the owned pointers
792 template<typename _Tp, typename _Dp,
793 typename _Up, typename _Ep>
794 _GLIBCXX_NODISCARD inline bool
795 operator<(const unique_ptr<_Tp, _Dp>& __x,
796 const unique_ptr<_Up, _Ep>& __y)
797 {
798 typedef typename
799 std::common_type<typename unique_ptr<_Tp, _Dp>::pointer,
800 typename unique_ptr<_Up, _Ep>::pointer>::type _CT;
801 return std::less<_CT>()(__x.get(), __y.get());
802 }
803
804 /// unique_ptr comparison with nullptr
805 template<typename _Tp, typename _Dp>
806 _GLIBCXX_NODISCARD inline bool
807 operator<(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
808 {
809 return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(__x.get(),
810 nullptr);
811 }
812
813 /// unique_ptr comparison with nullptr
814 template<typename _Tp, typename _Dp>
815 _GLIBCXX_NODISCARD inline bool
816 operator<(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
817 {
818 return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(nullptr,
819 __x.get());
820 }
821
822 /// Relational operator for unique_ptr objects, compares the owned pointers
823 template<typename _Tp, typename _Dp,
824 typename _Up, typename _Ep>
825 _GLIBCXX_NODISCARD inline bool
826 operator<=(const unique_ptr<_Tp, _Dp>& __x,
827 const unique_ptr<_Up, _Ep>& __y)
828 { return !(__y < __x); }
829
830 /// unique_ptr comparison with nullptr
831 template<typename _Tp, typename _Dp>
832 _GLIBCXX_NODISCARD inline bool
833 operator<=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
834 { return !(nullptr < __x); }
835
836 /// unique_ptr comparison with nullptr
837 template<typename _Tp, typename _Dp>
838 _GLIBCXX_NODISCARD inline bool
839 operator<=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
840 { return !(__x < nullptr); }
841
842 /// Relational operator for unique_ptr objects, compares the owned pointers
843 template<typename _Tp, typename _Dp,
844 typename _Up, typename _Ep>
845 _GLIBCXX_NODISCARD inline bool
846 operator>(const unique_ptr<_Tp, _Dp>& __x,
847 const unique_ptr<_Up, _Ep>& __y)
848 { return (__y < __x); }
849
850 /// unique_ptr comparison with nullptr
851 template<typename _Tp, typename _Dp>
852 _GLIBCXX_NODISCARD inline bool
853 operator>(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
854 {
855 return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(nullptr,
856 __x.get());
857 }
858
859 /// unique_ptr comparison with nullptr
860 template<typename _Tp, typename _Dp>
861 _GLIBCXX_NODISCARD inline bool
862 operator>(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
863 {
864 return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(__x.get(),
865 nullptr);
866 }
867
868 /// Relational operator for unique_ptr objects, compares the owned pointers
869 template<typename _Tp, typename _Dp,
870 typename _Up, typename _Ep>
871 _GLIBCXX_NODISCARD inline bool
872 operator>=(const unique_ptr<_Tp, _Dp>& __x,
873 const unique_ptr<_Up, _Ep>& __y)
874 { return !(__x < __y); }
875
876 /// unique_ptr comparison with nullptr
877 template<typename _Tp, typename _Dp>
878 _GLIBCXX_NODISCARD inline bool
879 operator>=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
880 { return !(__x < nullptr); }
881
882 /// unique_ptr comparison with nullptr
883 template<typename _Tp, typename _Dp>
884 _GLIBCXX_NODISCARD inline bool
885 operator>=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
886 { return !(nullptr < __x); }
887
888#ifdef __cpp_lib_three_way_comparison
889 template<typename _Tp, typename _Dp, typename _Up, typename _Ep>
890 requires three_way_comparable_with<typename unique_ptr<_Tp, _Dp>::pointer,
891 typename unique_ptr<_Up, _Ep>::pointer>
892 inline
893 compare_three_way_result_t<typename unique_ptr<_Tp, _Dp>::pointer,
894 typename unique_ptr<_Up, _Ep>::pointer>
895 operator<=>(const unique_ptr<_Tp, _Dp>& __x,
896 const unique_ptr<_Up, _Ep>& __y)
897 { return compare_three_way()(__x.get(), __y.get()); }
898
899 template<typename _Tp, typename _Dp>
900 requires three_way_comparable<typename unique_ptr<_Tp, _Dp>::pointer>
901 inline
902 compare_three_way_result_t<typename unique_ptr<_Tp, _Dp>::pointer>
903 operator<=>(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
904 {
905 using pointer = typename unique_ptr<_Tp, _Dp>::pointer;
906 return compare_three_way()(__x.get(), static_cast<pointer>(nullptr));
907 }
908#endif
909 // @} relates unique_ptr
910
911 /// @cond undocumented
912 template<typename _Up, typename _Ptr = typename _Up::pointer,
913 bool = __poison_hash<_Ptr>::__enable_hash_call>
914 struct __uniq_ptr_hash
915#if ! _GLIBCXX_INLINE_VERSION
916 : private __poison_hash<_Ptr>
917#endif
918 {
919 size_t
920 operator()(const _Up& __u) const
921 noexcept(noexcept(std::declval<hash<_Ptr>>()(std::declval<_Ptr>())))
922 { return hash<_Ptr>()(__u.get()); }
923 };
924
925 template<typename _Up, typename _Ptr>
926 struct __uniq_ptr_hash<_Up, _Ptr, false>
927 : private __poison_hash<_Ptr>
928 { };
929 /// @endcond
930
931 /// std::hash specialization for unique_ptr.
932 template<typename _Tp, typename _Dp>
933 struct hash<unique_ptr<_Tp, _Dp>>
934 : public __hash_base<size_t, unique_ptr<_Tp, _Dp>>,
935 public __uniq_ptr_hash<unique_ptr<_Tp, _Dp>>
936 { };
937
938#if __cplusplus >= 201402L
939 /// @relates unique_ptr @{
940#define __cpp_lib_make_unique 201304
941
942 /// @cond undocumented
943
944 template<typename _Tp>
945 struct _MakeUniq
946 { typedef unique_ptr<_Tp> __single_object; };
947
948 template<typename _Tp>
949 struct _MakeUniq<_Tp[]>
950 { typedef unique_ptr<_Tp[]> __array; };
951
952 template<typename _Tp, size_t _Bound>
953 struct _MakeUniq<_Tp[_Bound]>
954 { struct __invalid_type { }; };
955
956 /// @endcond
957
958 /// std::make_unique for single objects
959 template<typename _Tp, typename... _Args>
960 inline typename _MakeUniq<_Tp>::__single_object
961 make_unique(_Args&&... __args)
962 { return unique_ptr<_Tp>(new _Tp(std::forward<_Args>(__args)...)); }
24
Uninitialized value stored to field 'NumElts'
25
Calling constructor for 'unique_ptr<(anonymous namespace)::NoExpansion, std::default_delete<(anonymous namespace)::NoExpansion>>'
30
Returning from constructor for 'unique_ptr<(anonymous namespace)::NoExpansion, std::default_delete<(anonymous namespace)::NoExpansion>>'
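The path above ends inside make_unique: the analyzer reports that an uninitialized value is stored to the field 'NumElts' while the owned object is being constructed, and that value is what is later read back as a garbage operand. A minimal sketch of that pattern (the struct and function names below are hypothetical, not the actual CGCall.cpp code):

    #include <memory>

    namespace {
      struct TypeExpansionLike {
        unsigned NumElts;
        explicit TypeExpansionLike(unsigned N) : NumElts(N) { }  // stores the caller's value
      };
    } // namespace

    unsigned expandedSize(unsigned PerEltSize)
    {
      unsigned N;                                       // never assigned on this path
      auto E = std::make_unique<TypeExpansionLike>(N);  // uninitialized value stored to field 'NumElts'
      return E->NumElts * PerEltSize;                   // the garbage field feeds the multiplication
    }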
963
964 /// std::make_unique for arrays of unknown bound
965 template<typename _Tp>
966 inline typename _MakeUniq<_Tp>::__array
967 make_unique(size_t __num)
968 { return unique_ptr<_Tp>(new remove_extent_t<_Tp>[__num]()); }
969
970 /// Disable std::make_unique for arrays of known bound
971 template<typename _Tp, typename... _Args>
972 inline typename _MakeUniq<_Tp>::__invalid_type
973 make_unique(_Args&&...) = delete;
974 // @} relates unique_ptr
975#endif // C++14
976
977#if __cplusplus > 201703L && __cpp_concepts
978 // _GLIBCXX_RESOLVE_LIB_DEFECTS
979 // 2948. unique_ptr does not define operator<< for stream output
980 /// Stream output operator for unique_ptr
981 template<typename _CharT, typename _Traits, typename _Tp, typename _Dp>
982 inline basic_ostream<_CharT, _Traits>&
983 operator<<(basic_ostream<_CharT, _Traits>& __os,
984 const unique_ptr<_Tp, _Dp>& __p)
985 requires requires { __os << __p.get(); }
986 {
987 __os << __p.get();
988 return __os;
989 }
990#endif // C++20
991
992 // @} group pointer_abstractions
993
994#if __cplusplus >= 201703L
995 namespace __detail::__variant
996 {
997 template<typename> struct _Never_valueless_alt; // see <variant>
998
999 // Provide the strong exception-safety guarantee when emplacing a
1000 // unique_ptr into a variant.
1001 template<typename _Tp, typename _Del>
1002 struct _Never_valueless_alt<std::unique_ptr<_Tp, _Del>>
1003 : std::true_type
1004 { };
1005 } // namespace __detail::__variant
1006#endif // C++17
1007
1008_GLIBCXX_END_NAMESPACE_VERSION
1009} // namespace
1010
1011#endif /* _UNIQUE_PTR_H */