File: build/source/clang/lib/CodeGen/CGCall.cpp
Warning: line 982, column 27: The left operand of '*' is a garbage value
1 | //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2 | //
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 | // See https://llvm.org/LICENSE.txt for license information.
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 | //
7 | //===----------------------------------------------------------------------===//
8 | //
9 | // These classes wrap the information about a call or function
10 | // definition used to handle ABI compliancy.
11 | //
12 | //===----------------------------------------------------------------------===//
13 |
14 | #include "CGCall.h"
15 | #include "ABIInfo.h"
16 | #include "CGBlocks.h"
17 | #include "CGCXXABI.h"
18 | #include "CGCleanup.h"
19 | #include "CGRecordLayout.h"
20 | #include "CodeGenFunction.h"
21 | #include "CodeGenModule.h"
22 | #include "TargetInfo.h"
23 | #include "clang/AST/Attr.h"
24 | #include "clang/AST/Decl.h"
25 | #include "clang/AST/DeclCXX.h"
26 | #include "clang/AST/DeclObjC.h"
27 | #include "clang/Basic/CodeGenOptions.h"
28 | #include "clang/Basic/TargetInfo.h"
29 | #include "clang/CodeGen/CGFunctionInfo.h"
30 | #include "clang/CodeGen/SwiftCallingConv.h"
31 | #include "llvm/ADT/StringExtras.h"
32 | #include "llvm/Analysis/ValueTracking.h"
33 | #include "llvm/IR/Assumptions.h"
34 | #include "llvm/IR/Attributes.h"
35 | #include "llvm/IR/CallingConv.h"
36 | #include "llvm/IR/DataLayout.h"
37 | #include "llvm/IR/InlineAsm.h"
38 | #include "llvm/IR/IntrinsicInst.h"
39 | #include "llvm/IR/Intrinsics.h"
40 | #include "llvm/IR/Type.h"
41 | #include "llvm/Transforms/Utils/Local.h"
42 | #include <optional>
43 | using namespace clang;
44 | using namespace CodeGen;
45 |
46 | /***/
47 |
48 | unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
49 |   switch (CC) {
50 |   default: return llvm::CallingConv::C;
51 |   case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
52 |   case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
53 |   case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
54 |   case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
55 |   case CC_Win64: return llvm::CallingConv::Win64;
56 |   case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
57 |   case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
58 |   case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
59 |   case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
60 |   // TODO: Add support for __pascal to LLVM.
61 |   case CC_X86Pascal: return llvm::CallingConv::C;
62 |   // TODO: Add support for __vectorcall to LLVM.
63 |   case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
64 |   case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
65 |   case CC_AArch64SVEPCS: return llvm::CallingConv::AArch64_SVE_VectorCall;
66 |   case CC_AMDGPUKernelCall: return llvm::CallingConv::AMDGPU_KERNEL;
67 |   case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
68 |   case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
69 |   case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
70 |   case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
71 |   case CC_Swift: return llvm::CallingConv::Swift;
72 |   case CC_SwiftAsync: return llvm::CallingConv::SwiftTail;
73 |   }
74 | }
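// Usage sketch (hypothetical caller, not taken from this file): the unsigned
// result is meant to be stamped onto an llvm::Function, e.g.
//
//   llvm::Function *Fn = ...;  // assumed to exist
//   Fn->setCallingConv(static_cast<llvm::CallingConv::ID>(
//       CGM.getTypes().ClangCallConvToLLVMCallConv(CC_Swift)));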
75 |
76 | /// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
77 | /// qualification. Either or both of RD and MD may be null. A null RD indicates
78 | /// that there is no meaningful 'this' type, and a null MD can occur when
79 | /// calling a method pointer.
80 | CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
81 |                                          const CXXMethodDecl *MD) {
82 |   QualType RecTy;
83 |   if (RD)
84 |     RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
85 |   else
86 |     RecTy = Context.VoidTy;
87 |
88 |   if (MD)
89 |     RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace());
90 |   return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
91 | }
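// Example: for `struct S { void f() const; };`, DeriveThisType yields `S *`
// for S::f. The `const` method qualifier is deliberately ignored, while an
// address space found in the method qualifiers is re-applied to the record
// type before taking the pointer.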
92 |
93 | /// Returns the canonical formal type of the given C++ method.
94 | static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
95 |   return MD->getType()->getCanonicalTypeUnqualified()
96 |            .getAs<FunctionProtoType>();
97 | }
98 |
99 | /// Returns the "extra-canonicalized" return type, which discards
100 | /// qualifiers on the return type. Codegen doesn't care about them,
101 | /// and it makes ABI code a little easier to be able to assume that
102 | /// all parameter and return types are top-level unqualified.
103 | static CanQualType GetReturnType(QualType RetTy) {
104 |   return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
105 | }
106 |
107 | /// Arrange the argument and result information for a value of the given
108 | /// unprototyped freestanding function type.
109 | const CGFunctionInfo &
110 | CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
111 |   // When translating an unprototyped function type, always use a
112 |   // variadic type.
113 |   return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
114 |                                  /*instanceMethod=*/false,
115 |                                  /*chainCall=*/false, std::nullopt,
116 |                                  FTNP->getExtInfo(), {}, RequiredArgs(0));
117 | }
118 |
119 | static void addExtParameterInfosForCall(
120 |     llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
121 |     const FunctionProtoType *proto,
122 |     unsigned prefixArgs,
123 |     unsigned totalArgs) {
124 |   assert(proto->hasExtParameterInfos());
125 |   assert(paramInfos.size() <= prefixArgs);
126 |   assert(proto->getNumParams() + prefixArgs <= totalArgs);
127 |
128 |   paramInfos.reserve(totalArgs);
129 |
130 |   // Add default infos for any prefix args that don't already have infos.
131 |   paramInfos.resize(prefixArgs);
132 |
133 |   // Add infos for the prototype.
134 |   for (const auto &ParamInfo : proto->getExtParameterInfos()) {
135 |     paramInfos.push_back(ParamInfo);
136 |     // pass_object_size params have no parameter info.
137 |     if (ParamInfo.hasPassObjectSize())
138 |       paramInfos.emplace_back();
139 |   }
140 |
141 |   assert(paramInfos.size() <= totalArgs &&
142 |          "Did we forget to insert pass_object_size args?");
143 |   // Add default infos for the variadic and/or suffix arguments.
144 |   paramInfos.resize(totalArgs);
145 | }
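// Concretely, a prototype such as
//
//   void fill(char *buf __attribute__((pass_object_size(0))));
//
// lowers `buf` to two arguments (the pointer plus an implicit size), so the
// loop above emplaces one extra default-constructed ExtParameterInfo for the
// synthesized size argument, and appendParameterTypes below pushes a size_t
// for it.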
146 |
147 | /// Adds the formal parameters in FPT to the given prefix. If any parameter in
148 | /// FPT has pass_object_size attrs, then we'll add parameters for those, too.
149 | static void appendParameterTypes(const CodeGenTypes &CGT,
150 |                                  SmallVectorImpl<CanQualType> &prefix,
151 |                                  SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
152 |                                  CanQual<FunctionProtoType> FPT) {
153 |   // Fast path: don't touch param info if we don't need to.
154 |   if (!FPT->hasExtParameterInfos()) {
155 |     assert(paramInfos.empty() &&
156 |            "We have paramInfos, but the prototype doesn't?");
157 |     prefix.append(FPT->param_type_begin(), FPT->param_type_end());
158 |     return;
159 |   }
160 |
161 |   unsigned PrefixSize = prefix.size();
162 |   // In the vast majority of cases, we'll have precisely FPT->getNumParams()
163 |   // parameters; the only thing that can change this is the presence of
164 |   // pass_object_size. So, we preallocate for the common case.
165 |   prefix.reserve(prefix.size() + FPT->getNumParams());
166 |
167 |   auto ExtInfos = FPT->getExtParameterInfos();
168 |   assert(ExtInfos.size() == FPT->getNumParams());
169 |   for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
170 |     prefix.push_back(FPT->getParamType(I));
171 |     if (ExtInfos[I].hasPassObjectSize())
172 |       prefix.push_back(CGT.getContext().getSizeType());
173 |   }
174 |
175 |   addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
176 |                               prefix.size());
177 | }
178 |
179 | /// Arrange the LLVM function layout for a value of the given function
180 | /// type, on top of any implicit parameters already stored.
181 | static const CGFunctionInfo &
182 | arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
183 |                         SmallVectorImpl<CanQualType> &prefix,
184 |                         CanQual<FunctionProtoType> FTP) {
185 |   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
186 |   RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
187 |   // FIXME: Kill copy.
188 |   appendParameterTypes(CGT, prefix, paramInfos, FTP);
189 |   CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
190 |
191 |   return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
192 |                                      /*chainCall=*/false, prefix,
193 |                                      FTP->getExtInfo(), paramInfos,
194 |                                      Required);
195 | }
196 |
197 | /// Arrange the argument and result information for a value of the
198 | /// given freestanding function type.
199 | const CGFunctionInfo &
200 | CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
201 |   SmallVector<CanQualType, 16> argTypes;
202 |   return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
203 |                                    FTP);
204 | }
205 |
206 | static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
207 |                                                bool IsWindows) {
208 |   // Set the appropriate calling convention for the Function.
209 |   if (D->hasAttr<StdCallAttr>())
210 |     return CC_X86StdCall;
211 |
212 |   if (D->hasAttr<FastCallAttr>())
213 |     return CC_X86FastCall;
214 |
215 |   if (D->hasAttr<RegCallAttr>())
216 |     return CC_X86RegCall;
217 |
218 |   if (D->hasAttr<ThisCallAttr>())
219 |     return CC_X86ThisCall;
220 |
221 |   if (D->hasAttr<VectorCallAttr>())
222 |     return CC_X86VectorCall;
223 |
224 |   if (D->hasAttr<PascalAttr>())
225 |     return CC_X86Pascal;
226 |
227 |   if (PcsAttr *PCS = D->getAttr<PcsAttr>())
228 |     return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
229 |
230 |   if (D->hasAttr<AArch64VectorPcsAttr>())
231 |     return CC_AArch64VectorCall;
232 |
233 |   if (D->hasAttr<AArch64SVEPcsAttr>())
234 |     return CC_AArch64SVEPCS;
235 |
236 |   if (D->hasAttr<AMDGPUKernelCallAttr>())
237 |     return CC_AMDGPUKernelCall;
238 |
239 |   if (D->hasAttr<IntelOclBiccAttr>())
240 |     return CC_IntelOclBicc;
241 |
242 |   if (D->hasAttr<MSABIAttr>())
243 |     return IsWindows ? CC_C : CC_Win64;
244 |
245 |   if (D->hasAttr<SysVABIAttr>())
246 |     return IsWindows ? CC_X86_64SysV : CC_C;
247 |
248 |   if (D->hasAttr<PreserveMostAttr>())
249 |     return CC_PreserveMost;
250 |
251 |   if (D->hasAttr<PreserveAllAttr>())
252 |     return CC_PreserveAll;
253 |
254 |   return CC_C;
255 | }
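// Example: an Objective-C method annotated __attribute__((stdcall)) maps to
// CC_X86StdCall here. Note that the ms_abi/sysv_abi cases are host-relative:
// ms_abi is already the default convention on Windows (so it maps back to
// CC_C there), and sysv_abi is the default everywhere else.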
256 |
257 | /// Arrange the argument and result information for a call to an
258 | /// unknown C++ non-static member function of the given abstract type.
259 | /// (A null RD means we don't have any meaningful "this" argument type,
260 | /// so fall back to a generic pointer type).
261 | /// The member function must be an ordinary function, i.e. not a
262 | /// constructor or destructor.
263 | const CGFunctionInfo &
264 | CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
265 |                                    const FunctionProtoType *FTP,
266 |                                    const CXXMethodDecl *MD) {
267 |   SmallVector<CanQualType, 16> argTypes;
268 |
269 |   // Add the 'this' pointer.
270 |   argTypes.push_back(DeriveThisType(RD, MD));
271 |
272 |   return ::arrangeLLVMFunctionInfo(
273 |       *this, true, argTypes,
274 |       FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
275 | }
276 |
277 | /// Set calling convention for CUDA/HIP kernel.
278 | static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
279 |                                            const FunctionDecl *FD) {
280 |   if (FD->hasAttr<CUDAGlobalAttr>()) {
281 |     const FunctionType *FT = FTy->getAs<FunctionType>();
282 |     CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
283 |     FTy = FT->getCanonicalTypeUnqualified();
284 |   }
285 | }
286 |
287 | /// Arrange the argument and result information for a declaration or
288 | /// definition of the given C++ non-static member function. The
289 | /// member function must be an ordinary function, i.e. not a
290 | /// constructor or destructor.
291 | const CGFunctionInfo &
292 | CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
293 |   assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
294 |   assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
295 |
296 |   CanQualType FT = GetFormalType(MD).getAs<Type>();
297 |   setCUDAKernelCallingConvention(FT, CGM, MD);
298 |   auto prototype = FT.getAs<FunctionProtoType>();
299 |
300 |   if (MD->isInstance()) {
301 |     // The abstract case is perfectly fine.
302 |     const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
303 |     return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
304 |   }
305 |
306 |   return arrangeFreeFunctionType(prototype);
307 | }
308 |
309 | bool CodeGenTypes::inheritingCtorHasParams(
310 |     const InheritedConstructor &Inherited, CXXCtorType Type) {
311 |   // Parameters are unnecessary if we're constructing a base class subobject
312 |   // and the inherited constructor lives in a virtual base.
313 |   return Type == Ctor_Complete ||
314 |          !Inherited.getShadowDecl()->constructsVirtualBase() ||
315 |          !Target.getCXXABI().hasConstructorVariants();
316 | }
317 |
318 | const CGFunctionInfo &
319 | CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
320 |   auto *MD = cast<CXXMethodDecl>(GD.getDecl());
321 |
322 |   SmallVector<CanQualType, 16> argTypes;
323 |   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
324 |
325 |   const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(GD);
326 |   argTypes.push_back(DeriveThisType(ThisType, MD));
327 |
328 |   bool PassParams = true;
329 |
330 |   if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
331 |     // A base class inheriting constructor doesn't get forwarded arguments
332 |     // needed to construct a virtual base (or base class thereof).
333 |     if (auto Inherited = CD->getInheritedConstructor())
334 |       PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
335 |   }
336 |
337 |   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
338 |
339 |   // Add the formal parameters.
340 |   if (PassParams)
341 |     appendParameterTypes(*this, argTypes, paramInfos, FTP);
342 |
343 |   CGCXXABI::AddedStructorArgCounts AddedArgs =
344 |       TheCXXABI.buildStructorSignature(GD, argTypes);
345 |   if (!paramInfos.empty()) {
346 |     // Note: prefix implies after the first param.
347 |     if (AddedArgs.Prefix)
348 |       paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
349 |                         FunctionProtoType::ExtParameterInfo{});
350 |     if (AddedArgs.Suffix)
351 |       paramInfos.append(AddedArgs.Suffix,
352 |                         FunctionProtoType::ExtParameterInfo{});
353 |   }
354 |
355 |   RequiredArgs required =
356 |       (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
357 |                                       : RequiredArgs::All);
358 |
359 |   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
360 |   CanQualType resultType = TheCXXABI.HasThisReturn(GD)
361 |                                ? argTypes.front()
362 |                                : TheCXXABI.hasMostDerivedReturn(GD)
363 |                                      ? CGM.getContext().VoidPtrTy
364 |                                      : Context.VoidTy;
365 |   return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
366 |                                  /*chainCall=*/false, argTypes, extInfo,
367 |                                  paramInfos, required);
368 | }
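// Worked example: for `struct T { T(int); };` the complete-object constructor
// is arranged as, conceptually, `void(T *this, int)`. On ABIs where
// HasThisReturn is true (e.g. ARM's rule that constructors return 'this'),
// the result type becomes the `this` type instead of void; MS destructor
// variants taking a most-derived flag get `void *` via hasMostDerivedReturn.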
369 |
370 | static SmallVector<CanQualType, 16>
371 | getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
372 |   SmallVector<CanQualType, 16> argTypes;
373 |   for (auto &arg : args)
374 |     argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
375 |   return argTypes;
376 | }
377 |
378 | static SmallVector<CanQualType, 16>
379 | getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
380 |   SmallVector<CanQualType, 16> argTypes;
381 |   for (auto &arg : args)
382 |     argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
383 |   return argTypes;
384 | }
385 |
386 | static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
387 | getExtParameterInfosForCall(const FunctionProtoType *proto,
388 |                             unsigned prefixArgs, unsigned totalArgs) {
389 |   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
390 |   if (proto->hasExtParameterInfos()) {
391 |     addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
392 |   }
393 |   return result;
394 | }
395 |
396 | /// Arrange a call to a C++ method, passing the given arguments.
397 | ///
398 | /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
399 | /// parameter.
400 | /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
401 | /// args.
402 | /// PassProtoArgs indicates whether `args` has args for the parameters in the
403 | /// given CXXConstructorDecl.
404 | const CGFunctionInfo &
405 | CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
406 |                                         const CXXConstructorDecl *D,
407 |                                         CXXCtorType CtorKind,
408 |                                         unsigned ExtraPrefixArgs,
409 |                                         unsigned ExtraSuffixArgs,
410 |                                         bool PassProtoArgs) {
411 |   // FIXME: Kill copy.
412 |   SmallVector<CanQualType, 16> ArgTypes;
413 |   for (const auto &Arg : args)
414 |     ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
415 |
416 |   // +1 for implicit this, which should always be args[0].
417 |   unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
418 |
419 |   CanQual<FunctionProtoType> FPT = GetFormalType(D);
420 |   RequiredArgs Required = PassProtoArgs
421 |                               ? RequiredArgs::forPrototypePlus(
422 |                                     FPT, TotalPrefixArgs + ExtraSuffixArgs)
423 |                               : RequiredArgs::All;
424 |
425 |   GlobalDecl GD(D, CtorKind);
426 |   CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
427 |                                ? ArgTypes.front()
428 |                                : TheCXXABI.hasMostDerivedReturn(GD)
429 |                                      ? CGM.getContext().VoidPtrTy
430 |                                      : Context.VoidTy;
431 |
432 |   FunctionType::ExtInfo Info = FPT->getExtInfo();
433 |   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
434 |   // If the prototype args are elided, we should only have ABI-specific args,
435 |   // which never have param info.
436 |   if (PassProtoArgs && FPT->hasExtParameterInfos()) {
437 |     // ABI-specific suffix arguments are treated the same as variadic arguments.
438 |     addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
439 |                                 ArgTypes.size());
440 |   }
441 |   return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
442 |                                  /*chainCall=*/false, ArgTypes, Info,
443 |                                  ParamInfos, Required);
444 | }
445 |
446 | /// Arrange the argument and result information for the declaration or
447 | /// definition of the given function.
448 | const CGFunctionInfo &
449 | CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
450 |   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
451 |     if (MD->isInstance())
452 |       return arrangeCXXMethodDeclaration(MD);
453 |
454 |   CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
455 |
456 |   assert(isa<FunctionType>(FTy));
457 |   setCUDAKernelCallingConvention(FTy, CGM, FD);
458 |
459 |   // When declaring a function without a prototype, always use a
460 |   // non-variadic type.
461 |   if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
462 |     return arrangeLLVMFunctionInfo(
463 |         noProto->getReturnType(), /*instanceMethod=*/false,
464 |         /*chainCall=*/false, std::nullopt, noProto->getExtInfo(), {},
465 |         RequiredArgs::All);
466 |   }
467 |
468 |   return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
469 | }
470 |
471 | /// Arrange the argument and result information for the declaration or
472 | /// definition of an Objective-C method.
473 | const CGFunctionInfo &
474 | CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
475 |   // It happens that this is the same as a call with no optional
476 |   // arguments, except also using the formal 'self' type.
477 |   return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
478 | }
479 |
480 | /// Arrange the argument and result information for the function type
481 | /// through which to perform a send to the given Objective-C method,
482 | /// using the given receiver type. The receiver type is not always
483 | /// the 'self' type of the method or even an Objective-C pointer type.
484 | /// This is *not* the right method for actually performing such a
485 | /// message send, due to the possibility of optional arguments.
486 | const CGFunctionInfo &
487 | CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
488 |                                               QualType receiverType) {
489 |   SmallVector<CanQualType, 16> argTys;
490 |   SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(
491 |       MD->isDirectMethod() ? 1 : 2);
492 |   argTys.push_back(Context.getCanonicalParamType(receiverType));
493 |   if (!MD->isDirectMethod())
494 |     argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
495 |   // FIXME: Kill copy?
496 |   for (const auto *I : MD->parameters()) {
497 |     argTys.push_back(Context.getCanonicalParamType(I->getType()));
498 |     auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
499 |         I->hasAttr<NoEscapeAttr>());
500 |     extParamInfos.push_back(extParamInfo);
501 |   }
502 |
503 |   FunctionType::ExtInfo einfo;
504 |   bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
505 |   einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
506 |
507 |   if (getContext().getLangOpts().ObjCAutoRefCount &&
508 |       MD->hasAttr<NSReturnsRetainedAttr>())
509 |     einfo = einfo.withProducesResult(true);
510 |
511 |   RequiredArgs required =
512 |       (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
513 |
514 |   return arrangeLLVMFunctionInfo(
515 |       GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
516 |       /*chainCall=*/false, argTys, einfo, extParamInfos, required);
517 | }
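// Example: a non-direct method `- (void)setX:(int)x` arranged with receiver
// type `id` yields, conceptually, `void(id self, SEL _cmd, int x)`. A direct
// method omits the implicit `_cmd` slot, which is why extParamInfos starts
// with one rather than two default entries above.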
518 |
519 | const CGFunctionInfo &
520 | CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
521 |                                                  const CallArgList &args) {
522 |   auto argTypes = getArgTypesForCall(Context, args);
523 |   FunctionType::ExtInfo einfo;
524 |
525 |   return arrangeLLVMFunctionInfo(
526 |       GetReturnType(returnType), /*instanceMethod=*/false,
527 |       /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
528 | }
529 |
530 | const CGFunctionInfo &
531 | CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
532 |   // FIXME: Do we need to handle ObjCMethodDecl?
533 |   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
534 |
535 |   if (isa<CXXConstructorDecl>(GD.getDecl()) ||
536 |       isa<CXXDestructorDecl>(GD.getDecl()))
537 |     return arrangeCXXStructorDeclaration(GD);
538 |
539 |   return arrangeFunctionDeclaration(FD);
540 | }
541 |
542 | /// Arrange a thunk that takes 'this' as the first parameter followed by
543 | /// varargs. Return a void pointer, regardless of the actual return type.
544 | /// The body of the thunk will end in a musttail call to a function of the
545 | /// correct type, and the caller will bitcast the function to the correct
546 | /// prototype.
547 | const CGFunctionInfo &
548 | CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
549 |   assert(MD->isVirtual() && "only methods have thunks");
550 |   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
551 |   CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
552 |   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
553 |                                  /*chainCall=*/false, ArgTys,
554 |                                  FTP->getExtInfo(), {}, RequiredArgs(1));
555 | }
556 |
557 | const CGFunctionInfo &
558 | CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
559 |                                    CXXCtorType CT) {
560 |   assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
561 |
562 |   CanQual<FunctionProtoType> FTP = GetFormalType(CD);
563 |   SmallVector<CanQualType, 2> ArgTys;
564 |   const CXXRecordDecl *RD = CD->getParent();
565 |   ArgTys.push_back(DeriveThisType(RD, CD));
566 |   if (CT == Ctor_CopyingClosure)
567 |     ArgTys.push_back(*FTP->param_type_begin());
568 |   if (RD->getNumVBases() > 0)
569 |     ArgTys.push_back(Context.IntTy);
570 |   CallingConv CC = Context.getDefaultCallingConvention(
571 |       /*IsVariadic=*/false, /*IsCXXMethod=*/true);
572 |   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
573 |                                  /*chainCall=*/false, ArgTys,
574 |                                  FunctionType::ExtInfo(CC), {},
575 |                                  RequiredArgs::All);
576 | }
577 |
578 | /// Arrange a call as unto a free function, except possibly with an
579 | /// additional number of formal parameters considered required.
580 | static const CGFunctionInfo &
581 | arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
582 |                             CodeGenModule &CGM,
583 |                             const CallArgList &args,
584 |                             const FunctionType *fnType,
585 |                             unsigned numExtraRequiredArgs,
586 |                             bool chainCall) {
587 |   assert(args.size() >= numExtraRequiredArgs);
588 |
589 |   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
590 |
591 |   // In most cases, there are no optional arguments.
592 |   RequiredArgs required = RequiredArgs::All;
593 |
594 |   // If we have a variadic prototype, the required arguments are the
595 |   // extra prefix plus the arguments in the prototype.
596 |   if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
597 |     if (proto->isVariadic())
598 |       required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);
599 |
600 |     if (proto->hasExtParameterInfos())
601 |       addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
602 |                                   args.size());
603 |
604 |     // If we don't have a prototype at all, but we're supposed to
605 |     // explicitly use the variadic convention for unprototyped calls,
606 |     // treat all of the arguments as required but preserve the nominal
607 |     // possibility of variadics.
608 |   } else if (CGM.getTargetCodeGenInfo()
609 |                  .isNoProtoCallVariadic(args,
610 |                                         cast<FunctionNoProtoType>(fnType))) {
611 |     required = RequiredArgs(args.size());
612 |   }
613 |
614 |   // FIXME: Kill copy.
615 |   SmallVector<CanQualType, 16> argTypes;
616 |   for (const auto &arg : args)
617 |     argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
618 |   return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
619 |                                      /*instanceMethod=*/false, chainCall,
620 |                                      argTypes, fnType->getExtInfo(), paramInfos,
621 |                                      required);
622 | }
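// Example of the required-args math: for `int printf(const char *, ...)`
// called with three arguments and numExtraRequiredArgs == 0,
// RequiredArgs::forPrototypePlus(proto, 0) yields 1, so only the format
// string is required and the remaining two arguments are lowered as variadic.
// A chain call contributes one extra required prefix argument (see
// arrangeFreeFunctionCall below).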
623 |
624 | /// Figure out the rules for calling a function with the given formal
625 | /// type using the given arguments. The arguments are necessary
626 | /// because the function might be unprototyped, in which case it's
627 | /// target-dependent in crazy ways.
628 | const CGFunctionInfo &
629 | CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
630 |                                       const FunctionType *fnType,
631 |                                       bool chainCall) {
632 |   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
633 |                                      chainCall ? 1 : 0, chainCall);
634 | }
635 |
636 | /// A block function is essentially a free function with an
637 | /// extra implicit argument.
638 | const CGFunctionInfo &
639 | CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
640 |                                        const FunctionType *fnType) {
641 |   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
642 |                                      /*chainCall=*/false);
643 | }
644 |
645 | const CGFunctionInfo &
646 | CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
647 |                                               const FunctionArgList &params) {
648 |   auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
649 |   auto argTypes = getArgTypesForDeclaration(Context, params);
650 |
651 |   return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
652 |                                  /*instanceMethod*/ false, /*chainCall*/ false,
653 |                                  argTypes, proto->getExtInfo(), paramInfos,
654 |                                  RequiredArgs::forPrototypePlus(proto, 1));
655 | }
656 |
657 | const CGFunctionInfo &
658 | CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
659 |                                          const CallArgList &args) {
660 |   // FIXME: Kill copy.
661 |   SmallVector<CanQualType, 16> argTypes;
662 |   for (const auto &Arg : args)
663 |     argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
664 |   return arrangeLLVMFunctionInfo(
665 |       GetReturnType(resultType), /*instanceMethod=*/false,
666 |       /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
667 |       /*paramInfos=*/{}, RequiredArgs::All);
668 | }
669 |
670 | const CGFunctionInfo &
671 | CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
672 |                                                 const FunctionArgList &args) {
673 |   auto argTypes = getArgTypesForDeclaration(Context, args);
674 |
675 |   return arrangeLLVMFunctionInfo(
676 |       GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
677 |       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
678 | }
679 |
680 | const CGFunctionInfo &
681 | CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
682 |                                                 ArrayRef<CanQualType> argTypes) {
683 |   return arrangeLLVMFunctionInfo(
684 |       resultType, /*instanceMethod=*/false, /*chainCall=*/false,
685 |       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
686 | }
687 |
688 | /// Arrange a call to a C++ method, passing the given arguments.
689 | ///
690 | /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
691 | /// does not count `this`.
692 | const CGFunctionInfo &
693 | CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
694 |                                    const FunctionProtoType *proto,
695 |                                    RequiredArgs required,
696 |                                    unsigned numPrefixArgs) {
697 |   assert(numPrefixArgs + 1 <= args.size() &&
698 |          "Emitting a call with less args than the required prefix?");
699 |   // Add one to account for `this`. It's a bit awkward here, but we don't count
700 |   // `this` in similar places elsewhere.
701 |   auto paramInfos =
702 |       getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
703 |
704 |   // FIXME: Kill copy.
705 |   auto argTypes = getArgTypesForCall(Context, args);
706 |
707 |   FunctionType::ExtInfo info = proto->getExtInfo();
708 |   return arrangeLLVMFunctionInfo(
709 |       GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
710 |       /*chainCall=*/false, argTypes, info, paramInfos, required);
711 | }
712 |
713 | const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
714 |   return arrangeLLVMFunctionInfo(
715 |       getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
716 |       std::nullopt, FunctionType::ExtInfo(), {}, RequiredArgs::All);
717 | }
718 |
719 | const CGFunctionInfo &
720 | CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
721 |                           const CallArgList &args) {
722 |   assert(signature.arg_size() <= args.size());
723 |   if (signature.arg_size() == args.size())
724 |     return signature;
725 |
726 |   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
727 |   auto sigParamInfos = signature.getExtParameterInfos();
728 |   if (!sigParamInfos.empty()) {
729 |     paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
730 |     paramInfos.resize(args.size());
731 |   }
732 |
733 |   auto argTypes = getArgTypesForCall(Context, args);
734 |
735 |   assert(signature.getRequiredArgs().allowsOptionalArgs());
736 |   return arrangeLLVMFunctionInfo(signature.getReturnType(),
737 |                                  signature.isInstanceMethod(),
738 |                                  signature.isChainCall(),
739 |                                  argTypes,
740 |                                  signature.getExtInfo(),
741 |                                  paramInfos,
742 |                                  signature.getRequiredArgs());
743 | }
744 |
745 | namespace clang {
746 | namespace CodeGen {
747 | void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
748 | }
749 | }
750 |
751 | /// Arrange the argument and result information for an abstract value
752 | /// of a given function type. This is the method which all of the
753 | /// above functions ultimately defer to.
754 | const CGFunctionInfo &
755 | CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
756 |                                       bool instanceMethod,
757 |                                       bool chainCall,
758 |                                       ArrayRef<CanQualType> argTypes,
759 |                                       FunctionType::ExtInfo info,
760 |                                       ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
761 |                                       RequiredArgs required) {
762 |   assert(llvm::all_of(argTypes,
763 |                       [](CanQualType T) { return T.isCanonicalAsParam(); }));
764 |
765 |   // Lookup or create unique function info.
766 |   llvm::FoldingSetNodeID ID;
767 |   CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
768 |                           required, resultType, argTypes);
769 |
770 |   void *insertPos = nullptr;
771 |   CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
772 |   if (FI)
773 |     return *FI;
774 |
775 |   unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
776 |
777 |   // Construct the function info. We co-allocate the ArgInfos.
778 |   FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
779 |                               paramInfos, resultType, argTypes, required);
780 |   FunctionInfos.InsertNode(FI, insertPos);
781 |
782 |   bool inserted = FunctionsBeingProcessed.insert(FI).second;
783 |   (void)inserted;
784 |   assert(inserted && "Recursively being processed?");
785 |
786 |   // Compute ABI information.
787 |   if (CC == llvm::CallingConv::SPIR_KERNEL) {
788 |     // Force target independent argument handling for the host visible
789 |     // kernel functions.
790 |     computeSPIRKernelABIInfo(CGM, *FI);
791 |   } else if (info.getCC() == CC_Swift || info.getCC() == CC_SwiftAsync) {
792 |     swiftcall::computeABIInfo(CGM, *FI);
793 |   } else {
794 |     getABIInfo().computeInfo(*FI);
795 |   }
796 |
797 |   // Loop over all of the computed argument and return value info. If any of
798 |   // them are direct or extend without a specified coerce type, specify the
799 |   // default now.
800 |   ABIArgInfo &retInfo = FI->getReturnInfo();
801 |   if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
802 |     retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
803 |
804 |   for (auto &I : FI->arguments())
805 |     if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
806 |       I.info.setCoerceToType(ConvertType(I.type));
807 |
808 |   bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
809 |   assert(erased && "Not in set?");
810 |
811 |   return *FI;
812 | }
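// Note the memoization pattern above: the (convention, result, arguments,
// param infos, required-args) tuple is hashed into a FoldingSetNodeID, so
// each distinct signature is lowered through computeInfo exactly once and
// then shared. The FunctionsBeingProcessed set is a re-entrancy guard for
// the recursive ConvertType calls that fill in default coerce types.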
813 |
814 | CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
815 |                                        bool instanceMethod,
816 |                                        bool chainCall,
817 |                                        const FunctionType::ExtInfo &info,
818 |                                        ArrayRef<ExtParameterInfo> paramInfos,
819 |                                        CanQualType resultType,
820 |                                        ArrayRef<CanQualType> argTypes,
821 |                                        RequiredArgs required) {
822 |   assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
823 |   assert(!required.allowsOptionalArgs() ||
824 |          required.getNumRequiredArgs() <= argTypes.size());
825 |
826 |   void *buffer =
827 |       operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
828 |           argTypes.size() + 1, paramInfos.size()));
829 |
830 |   CGFunctionInfo *FI = new (buffer) CGFunctionInfo();
831 |   FI->CallingConvention = llvmCC;
832 |   FI->EffectiveCallingConvention = llvmCC;
833 |   FI->ASTCallingConvention = info.getCC();
834 |   FI->InstanceMethod = instanceMethod;
835 |   FI->ChainCall = chainCall;
836 |   FI->CmseNSCall = info.getCmseNSCall();
837 |   FI->NoReturn = info.getNoReturn();
838 |   FI->ReturnsRetained = info.getProducesResult();
839 |   FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
840 |   FI->NoCfCheck = info.getNoCfCheck();
841 |   FI->Required = required;
842 |   FI->HasRegParm = info.getHasRegParm();
843 |   FI->RegParm = info.getRegParm();
844 |   FI->ArgStruct = nullptr;
845 |   FI->ArgStructAlign = 0;
846 |   FI->NumArgs = argTypes.size();
847 |   FI->HasExtParameterInfos = !paramInfos.empty();
848 |   FI->getArgsBuffer()[0].type = resultType;
849 |   FI->MaxVectorWidth = 0;
850 |   for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
851 |     FI->getArgsBuffer()[i + 1].type = argTypes[i];
852 |   for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
853 |     FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
854 |   return FI;
855 | }
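// Allocation layout: a single buffer sized by totalSizeToAlloc<ArgInfo,
// ExtParameterInfo>(argTypes.size() + 1, paramInfos.size()) holds the
// CGFunctionInfo followed by its trailing arrays. Slot 0 of the ArgInfo
// array is the return value and slots 1..NumArgs are the parameters, which
// is why the loop above indexes getArgsBuffer()[i + 1].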
856 |
857 | /***/
858 |
859 | namespace {
860 | // ABIArgInfo::Expand implementation.
861 |
862 | // Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
863 | struct TypeExpansion {
864 |   enum TypeExpansionKind {
865 |     // Elements of constant arrays are expanded recursively.
866 |     TEK_ConstantArray,
867 |     // Record fields are expanded recursively (but if record is a union, only
868 |     // the field with the largest size is expanded).
869 |     TEK_Record,
870 |     // For complex types, real and imaginary parts are expanded recursively.
871 |     TEK_Complex,
872 |     // All other types are not expandable.
873 |     TEK_None
874 |   };
875 |
876 |   const TypeExpansionKind Kind;
877 |
878 |   TypeExpansion(TypeExpansionKind K) : Kind(K) {}
879 |   virtual ~TypeExpansion() {}
880 | };
881 |
882 | struct ConstantArrayExpansion : TypeExpansion {
883 |   QualType EltTy;
884 |   uint64_t NumElts;
885 |
886 |   ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
887 |       : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
888 |   static bool classof(const TypeExpansion *TE) {
889 |     return TE->Kind == TEK_ConstantArray;
890 |   }
891 | };
892 |
893 | struct RecordExpansion : TypeExpansion {
894 |   SmallVector<const CXXBaseSpecifier *, 1> Bases;
895 |
896 |   SmallVector<const FieldDecl *, 1> Fields;
897 |
898 |   RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
899 |                   SmallVector<const FieldDecl *, 1> &&Fields)
900 |       : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
901 |         Fields(std::move(Fields)) {}
902 |   static bool classof(const TypeExpansion *TE) {
903 |     return TE->Kind == TEK_Record;
904 |   }
905 | };
906 |
907 | struct ComplexExpansion : TypeExpansion {
908 |   QualType EltTy;
909 |
910 |   ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
911 |   static bool classof(const TypeExpansion *TE) {
912 |     return TE->Kind == TEK_Complex;
913 |   }
914 | };
915 |
916 | struct NoExpansion : TypeExpansion {
917 |   NoExpansion() : TypeExpansion(TEK_None) {}
918 |   static bool classof(const TypeExpansion *TE) {
919 |     return TE->Kind == TEK_None;
920 |   }
921 | };
922 | } // namespace
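// Quick reference, using a hypothetical `struct P { int xy[2]; _Complex float c; };`:
// P itself is TEK_Record; its field `xy` is TEK_ConstantArray over int; `c`
// is TEK_Complex (one real and one imaginary float); and a plain `double`
// would be TEK_None, passing through unexpanded.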
923 |
924 | static std::unique_ptr<TypeExpansion>
925 | getTypeExpansion(QualType Ty, const ASTContext &Context) {
926 |   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
927 |     return std::make_unique<ConstantArrayExpansion>(
928 |         AT->getElementType(), AT->getSize().getZExtValue());
929 |   }
930 |   if (const RecordType *RT = Ty->getAs<RecordType>()) {
931 |     SmallVector<const CXXBaseSpecifier *, 1> Bases;
932 |     SmallVector<const FieldDecl *, 1> Fields;
933 |     const RecordDecl *RD = RT->getDecl();
934 |     assert(!RD->hasFlexibleArrayMember() &&
935 |            "Cannot expand structure with flexible array.");
936 |     if (RD->isUnion()) {
937 |       // Unions can be here only in degenerative cases - all the fields are same
938 |       // after flattening. Thus we have to use the "largest" field.
939 |       const FieldDecl *LargestFD = nullptr;
940 |       CharUnits UnionSize = CharUnits::Zero();
941 |
942 |       for (const auto *FD : RD->fields()) {
943 |         if (FD->isZeroLengthBitField(Context))
944 |           continue;
945 |         assert(!FD->isBitField() &&
946 |                "Cannot expand structure with bit-field members.");
947 |         CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
948 |         if (UnionSize < FieldSize) {
949 |           UnionSize = FieldSize;
950 |           LargestFD = FD;
951 |         }
952 |       }
953 |       if (LargestFD)
954 |         Fields.push_back(LargestFD);
955 |     } else {
956 |       if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
957 |         assert(!CXXRD->isDynamicClass() &&
958 |                "cannot expand vtable pointers in dynamic classes");
959 |         llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
960 |       }
961 |
962 |       for (const auto *FD : RD->fields()) {
963 |         if (FD->isZeroLengthBitField(Context))
964 |           continue;
965 |         assert(!FD->isBitField() &&
966 |                "Cannot expand structure with bit-field members.");
967 |         Fields.push_back(FD);
968 |       }
969 |     }
970 |     return std::make_unique<RecordExpansion>(std::move(Bases),
971 |                                              std::move(Fields));
972 |   }
973 |   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
974 |     return std::make_unique<ComplexExpansion>(CT->getElementType());
975 |   }
976 |   return std::make_unique<NoExpansion>();
977 | }
978 |
979 | static int getExpansionSize(QualType Ty, const ASTContext &Context) {
980 |   auto Exp = getTypeExpansion(Ty, Context);
981 |   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
982 |     return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
983 |   }
984 |   if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
985 |     int Res = 0;
986 |     for (auto BS : RExp->Bases)
987 |       Res += getExpansionSize(BS->getType(), Context);
988 |     for (auto FD : RExp->Fields)
989 |       Res += getExpansionSize(FD->getType(), Context);
990 |     return Res;
991 |   }
992 |   if (isa<ComplexExpansion>(Exp.get()))
993 |     return 2;
994 |   assert(isa<NoExpansion>(Exp.get()));
995 |   return 1;
996 | }
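// Worked example: `_Complex double pair[3]` expands to 3 (array elements)
// * 2 (real + imaginary) = 6 scalar slots. The multiplication at line 982
// is the expression this report flags: the analyzer believes CAExp->NumElts,
// the left operand of '*', can hold a garbage value on some path.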
997 |
998 | void
999 | CodeGenTypes::getExpandedTypes(QualType Ty,
1000 |                                SmallVectorImpl<llvm::Type *>::iterator &TI) {
1001 |   auto Exp = getTypeExpansion(Ty, Context);
1002 |   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1003 |     for (int i = 0, n = CAExp->NumElts; i < n; i++) {
1004 |       getExpandedTypes(CAExp->EltTy, TI);
1005 |     }
1006 |   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1007 |     for (auto BS : RExp->Bases)
1008 |       getExpandedTypes(BS->getType(), TI);
1009 |     for (auto FD : RExp->Fields)
1010 |       getExpandedTypes(FD->getType(), TI);
1011 |   } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
1012 |     llvm::Type *EltTy = ConvertType(CExp->EltTy);
1013 |     *TI++ = EltTy;
1014 |     *TI++ = EltTy;
1015 |   } else {
1016 |     assert(isa<NoExpansion>(Exp.get()));
1017 |     *TI++ = ConvertType(Ty);
1018 |   }
1019 | }
1020 |
1021 | static void forConstantArrayExpansion(CodeGenFunction &CGF,
1022 |                                       ConstantArrayExpansion *CAE,
1023 |                                       Address BaseAddr,
1024 |                                       llvm::function_ref<void(Address)> Fn) {
1025 |   CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
1026 |   CharUnits EltAlign =
1027 |       BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
1028 |   llvm::Type *EltTy = CGF.ConvertTypeForMem(CAE->EltTy);
1029 |
1030 |   for (int i = 0, n = CAE->NumElts; i < n; i++) {
1031 |     llvm::Value *EltAddr = CGF.Builder.CreateConstGEP2_32(
1032 |         BaseAddr.getElementType(), BaseAddr.getPointer(), 0, i);
1033 |     Fn(Address(EltAddr, EltTy, EltAlign));
1034 |   }
1035 | }
1036 | |||||
1037 | void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV, | ||||
1038 | llvm::Function::arg_iterator &AI) { | ||||
1039 | assert(LV.isSimple() && | ||||
1040 | "Unexpected non-simple lvalue during struct expansion."); | ||||
1041 | |||||
1042 | auto Exp = getTypeExpansion(Ty, getContext()); | ||||
1043 | if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { | ||||
1044 | forConstantArrayExpansion( | ||||
1045 | *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) { | ||||
1046 | LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy); | ||||
1047 | ExpandTypeFromArgs(CAExp->EltTy, LV, AI); | ||||
1048 | }); | ||||
1049 | } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { | ||||
1050 | Address This = LV.getAddress(*this); | ||||
1051 | for (const CXXBaseSpecifier *BS : RExp->Bases) { | ||||
1052 | // Perform a single step derived-to-base conversion. | ||||
1053 | Address Base = | ||||
1054 | GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1, | ||||
1055 | /*NullCheckValue=*/false, SourceLocation()); | ||||
1056 | LValue SubLV = MakeAddrLValue(Base, BS->getType()); | ||||
1057 | |||||
1058 | // Recurse onto bases. | ||||
1059 | ExpandTypeFromArgs(BS->getType(), SubLV, AI); | ||||
1060 | } | ||||
1061 | for (auto FD : RExp->Fields) { | ||||
1062 | // FIXME: What are the right qualifiers here? | ||||
1063 | LValue SubLV = EmitLValueForFieldInitialization(LV, FD); | ||||
1064 | ExpandTypeFromArgs(FD->getType(), SubLV, AI); | ||||
1065 | } | ||||
1066 | } else if (isa<ComplexExpansion>(Exp.get())) { | ||||
1067 | auto realValue = &*AI++; | ||||
1068 | auto imagValue = &*AI++; | ||||
1069 | EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true); | ||||
1070 | } else { | ||||
1071 | // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a | ||||
1072 | // primitive store. | ||||
1073 | assert(isa<NoExpansion>(Exp.get())); | ||||
1074 | llvm::Value *Arg = &*AI++; | ||||
1075 | if (LV.isBitField()) { | ||||
1076 | EmitStoreThroughLValue(RValue::get(Arg), LV); | ||||
1077 | } else { | ||||
1078 | // TODO: currently there are some places are inconsistent in what LLVM | ||||
1079 | // pointer type they use (see D118744). Once clang uses opaque pointers | ||||
1080 | // all LLVM pointer types will be the same and we can remove this check. | ||||
1081 | if (Arg->getType()->isPointerTy()) { | ||||
1082 | Address Addr = LV.getAddress(*this); | ||||
1083 | Arg = Builder.CreateBitCast(Arg, Addr.getElementType()); | ||||
1084 | } | ||||
1085 | EmitStoreOfScalar(Arg, LV); | ||||
1086 | } | ||||
1087 | } | ||||
1088 | } | ||||
1089 | |||||
1090 | void CodeGenFunction::ExpandTypeToArgs( | ||||
1091 | QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy, | ||||
1092 | SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) { | ||||
1093 | auto Exp = getTypeExpansion(Ty, getContext()); | ||||
1094 | if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { | ||||
1095 | Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this) | ||||
1096 | : Arg.getKnownRValue().getAggregateAddress(); | ||||
1097 | forConstantArrayExpansion( | ||||
1098 | *this, CAExp, Addr, [&](Address EltAddr) { | ||||
1099 | CallArg EltArg = CallArg( | ||||
1100 | convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()), | ||||
1101 | CAExp->EltTy); | ||||
1102 | ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs, | ||||
1103 | IRCallArgPos); | ||||
1104 | }); | ||||
1105 | } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { | ||||
1106 | Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this) | ||||
1107 | : Arg.getKnownRValue().getAggregateAddress(); | ||||
1108 | for (const CXXBaseSpecifier *BS : RExp->Bases) { | ||||
1109 | // Perform a single step derived-to-base conversion. | ||||
1110 | Address Base = | ||||
1111 | GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1, | ||||
1112 | /*NullCheckValue=*/false, SourceLocation()); | ||||
1113 | CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType()); | ||||
1114 | |||||
1115 | // Recurse onto bases. | ||||
1116 | ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs, | ||||
1117 | IRCallArgPos); | ||||
1118 | } | ||||
1119 | |||||
1120 | LValue LV = MakeAddrLValue(This, Ty); | ||||
1121 | for (auto FD : RExp->Fields) { | ||||
1122 | CallArg FldArg = | ||||
1123 | CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType()); | ||||
1124 | ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs, | ||||
1125 | IRCallArgPos); | ||||
1126 | } | ||||
1127 | } else if (isa<ComplexExpansion>(Exp.get())) { | ||||
1128 | ComplexPairTy CV = Arg.getKnownRValue().getComplexVal(); | ||||
1129 | IRCallArgs[IRCallArgPos++] = CV.first; | ||||
1130 | IRCallArgs[IRCallArgPos++] = CV.second; | ||||
1131 | } else { | ||||
1132 | assert(isa<NoExpansion>(Exp.get())); | ||||
1133 | auto RV = Arg.getKnownRValue(); | ||||
1134 | assert(RV.isScalar() && | ||||
1135 | "Unexpected non-scalar rvalue during struct expansion."); | ||||
1136 | |||||
1137 | // Insert a bitcast as needed. | ||||
1138 | llvm::Value *V = RV.getScalarVal(); | ||||
1139 | if (IRCallArgPos < IRFuncTy->getNumParams() && | ||||
1140 | V->getType() != IRFuncTy->getParamType(IRCallArgPos)) | ||||
1141 | V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos)); | ||||
1142 | |||||
1143 | IRCallArgs[IRCallArgPos++] = V; | ||||
1144 | } | ||||
1145 | } | ||||
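// End-to-end sketch, assuming a hypothetical type (illustrative only):
//
//   struct P { int x; _Complex float c; };
//
// Expanding P as a call argument appends three IR values to IRCallArgs --
// x, then c's real and imaginary parts -- in the same order that
// getExpandedTypes laid out the corresponding parameters in the IR
// function type.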
1146 | |||||
1147 | /// Create a temporary allocation for the purposes of coercion. | ||||
1148 | static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, | ||||
1149 | CharUnits MinAlign, | ||||
1150 | const Twine &Name = "tmp") { | ||||
1151 | // Don't use an alignment that's worse than what LLVM would prefer. | ||||
1152 | auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(Ty); | ||||
1153 | CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign)); | ||||
1154 | |||||
1155 | return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce"); | ||||
1156 | } | ||||
1157 | |||||
1158 | /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are | ||||
1159 | /// accessing some number of bytes out of it, try to gep into the struct to get | ||||
1160 | /// at its inner goodness. Dive as deep as possible without entering an element | ||||
1161 | /// with an in-memory size smaller than DstSize. | ||||
1162 | static Address | ||||
1163 | EnterStructPointerForCoercedAccess(Address SrcPtr, | ||||
1164 | llvm::StructType *SrcSTy, | ||||
1165 | uint64_t DstSize, CodeGenFunction &CGF) { | ||||
1166 | // We can't dive into a zero-element struct. | ||||
1167 | if (SrcSTy->getNumElements() == 0) return SrcPtr; | ||||
1168 | |||||
1169 | llvm::Type *FirstElt = SrcSTy->getElementType(0); | ||||
1170 | |||||
1171 | // If the first elt is at least as large as what we're looking for, or if the | ||||
1172 | // first element is the same size as the whole struct, we can enter it. The | ||||
1173 | // comparison must be made on the store size and not the alloca size. Using | ||||
1174 | // the alloca size may overstate the size of the load. | ||||
1175 | uint64_t FirstEltSize = | ||||
1176 | CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt); | ||||
1177 | if (FirstEltSize < DstSize && | ||||
1178 | FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy)) | ||||
1179 | return SrcPtr; | ||||
1180 | |||||
1181 | // GEP into the first element. | ||||
1182 | SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive"); | ||||
1183 | |||||
1184 | // If the first element is a struct, recurse. | ||||
1185 | llvm::Type *SrcTy = SrcPtr.getElementType(); | ||||
1186 | if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) | ||||
1187 | return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF); | ||||
1188 | |||||
1189 | return SrcPtr; | ||||
1190 | } | ||||
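// Sketch of the dive, with hypothetical types (illustrative only). Loading
// DstSize == 8 bytes from:
//
//   %struct.Outer = type { %struct.Inner, i8 }   ; store size 9
//   %struct.Inner = type { i64 }                 ; store size 8
//
// The first element's store size (8) is not smaller than DstSize, so we GEP
// into %struct.Inner ("coerce.dive"), recurse, and GEP again to the i64,
// ending at the innermost element large enough for the access.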
1191 | |||||
1192 | /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both | ||||
1193 | /// are either integers or pointers. This does a truncation of the value if it | ||||
1194 | /// is too large or a zero extension if it is too small. | ||||
1195 | /// | ||||
1196 | /// This behaves as if the value were coerced through memory, so on big-endian | ||||
1197 | /// targets the high bits are preserved in a truncation, while little-endian | ||||
1198 | /// targets preserve the low bits. | ||||
1199 | static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, | ||||
1200 | llvm::Type *Ty, | ||||
1201 | CodeGenFunction &CGF) { | ||||
1202 | if (Val->getType() == Ty) | ||||
1203 | return Val; | ||||
1204 | |||||
1205 | if (isa<llvm::PointerType>(Val->getType())) { | ||||
1206 | // If this is Pointer->Pointer avoid conversion to and from int. | ||||
1207 | if (isa<llvm::PointerType>(Ty)) | ||||
1208 | return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val"); | ||||
1209 | |||||
1210 | // Convert the pointer to an integer so we can play with its width. | ||||
1211 | Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi"); | ||||
1212 | } | ||||
1213 | |||||
1214 | llvm::Type *DestIntTy = Ty; | ||||
1215 | if (isa<llvm::PointerType>(DestIntTy)) | ||||
1216 | DestIntTy = CGF.IntPtrTy; | ||||
1217 | |||||
1218 | if (Val->getType() != DestIntTy) { | ||||
1219 | const llvm::DataLayout &DL = CGF.CGM.getDataLayout(); | ||||
1220 | if (DL.isBigEndian()) { | ||||
1221 | // Preserve the high bits on big-endian targets. | ||||
1222 | // That is what memory coercion does. | ||||
1223 | uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType()); | ||||
1224 | uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy); | ||||
1225 | |||||
1226 | if (SrcSize > DstSize) { | ||||
1227 | Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits"); | ||||
1228 | Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii"); | ||||
1229 | } else { | ||||
1230 | Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii"); | ||||
1231 | Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits"); | ||||
1232 | } | ||||
1233 | } else { | ||||
1234 | // Little-endian targets preserve the low bits. No shifts required. | ||||
1235 | Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii"); | ||||
1236 | } | ||||
1237 | } | ||||
1238 | |||||
1239 | if (isa<llvm::PointerType>(Ty)) | ||||
1240 | Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip"); | ||||
1241 | return Val; | ||||
1242 | } | ||||
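// A minimal sketch of the endian rule above in plain C++ (the IRBuilder
// calls replaced with ordinary integer arithmetic; the values are made up):
//
//   uint64_t Src = 0x1122334455667788ULL; // 8-byte source value
//   // Big-endian target: keep the high bits, as a memory round-trip would.
//   uint32_t BE = uint32_t(Src >> 32);    // 0x11223344
//   // Little-endian target: keep the low bits; plain truncation suffices.
//   uint32_t LE = uint32_t(Src);          // 0x55667788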
1243 | |||||
1244 | |||||
1245 | |||||
1246 | /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as | ||||
1247 | /// a pointer to an object of type \arg Ty, known to be aligned to | ||||
1248 | /// \arg SrcAlign bytes. | ||||
1249 | /// | ||||
1250 | /// This safely handles the case when the src type is smaller than the | ||||
1251 | /// destination type; in this situation the values of bits which are not | ||||
1252 | /// present in the src are undefined. | ||||
1253 | static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty, | ||||
1254 | CodeGenFunction &CGF) { | ||||
1255 | llvm::Type *SrcTy = Src.getElementType(); | ||||
1256 | |||||
1257 | // If SrcTy and Ty are the same, just do a load. | ||||
1258 | if (SrcTy == Ty) | ||||
1259 | return CGF.Builder.CreateLoad(Src); | ||||
1260 | |||||
1261 | llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty); | ||||
1262 | |||||
1263 | if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) { | ||||
1264 | Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, | ||||
1265 | DstSize.getFixedValue(), CGF); | ||||
1266 | SrcTy = Src.getElementType(); | ||||
1267 | } | ||||
1268 | |||||
1269 | llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); | ||||
1270 | |||||
1271 | // If the source and destination are integer or pointer types, just do an | ||||
1272 | // extension or truncation to the desired type. | ||||
1273 | if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) && | ||||
1274 | (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) { | ||||
1275 | llvm::Value *Load = CGF.Builder.CreateLoad(Src); | ||||
1276 | return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF); | ||||
1277 | } | ||||
1278 | |||||
1279 | // If load is legal, just bitcast the src pointer. | ||||
1280 | if (!SrcSize.isScalable() && !DstSize.isScalable() && | ||||
1281 | SrcSize.getFixedValue() >= DstSize.getFixedValue()) { | ||||
1282 | // Generally SrcSize is never greater than DstSize, since this means we are | ||||
1283 | // losing bits. However, this can happen in cases where the structure has | ||||
1284 | // additional padding, for example due to a user specified alignment. | ||||
1285 | // | ||||
1286 | // FIXME: Assert that we aren't truncating non-padding bits when we have access | ||||
1287 | // to that information. | ||||
1288 | Src = CGF.Builder.CreateElementBitCast(Src, Ty); | ||||
1289 | return CGF.Builder.CreateLoad(Src); | ||||
1290 | } | ||||
1291 | |||||
1292 | // If coercing a fixed vector to a scalable vector for ABI compatibility, and | ||||
1293 | // the types match, use the llvm.vector.insert intrinsic to perform the | ||||
1294 | // conversion. | ||||
1295 | if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) { | ||||
1296 | if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) { | ||||
1297 | // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate | ||||
1298 | // vector, use a vector insert and bitcast the result. | ||||
1299 | bool NeedsBitcast = false; | ||||
1300 | auto PredType = | ||||
1301 | llvm::ScalableVectorType::get(CGF.Builder.getInt1Ty(), 16); | ||||
1302 | llvm::Type *OrigType = Ty; | ||||
1303 | if (ScalableDst == PredType && | ||||
1304 | FixedSrc->getElementType() == CGF.Builder.getInt8Ty()) { | ||||
1305 | ScalableDst = llvm::ScalableVectorType::get(CGF.Builder.getInt8Ty(), 2); | ||||
1306 | NeedsBitcast = true; | ||||
1307 | } | ||||
1308 | if (ScalableDst->getElementType() == FixedSrc->getElementType()) { | ||||
1309 | auto *Load = CGF.Builder.CreateLoad(Src); | ||||
1310 | auto *UndefVec = llvm::UndefValue::get(ScalableDst); | ||||
1311 | auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty); | ||||
1312 | llvm::Value *Result = CGF.Builder.CreateInsertVector( | ||||
1313 | ScalableDst, UndefVec, Load, Zero, "castScalableSve"); | ||||
1314 | if (NeedsBitcast) | ||||
1315 | Result = CGF.Builder.CreateBitCast(Result, OrigType); | ||||
1316 | return Result; | ||||
1317 | } | ||||
1318 | } | ||||
1319 | } | ||||
1320 | |||||
1321 | // Otherwise do coercion through memory. This is stupid, but simple. | ||||
1322 | Address Tmp = | ||||
1323 | CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName()); | ||||
1324 | CGF.Builder.CreateMemCpy( | ||||
1325 | Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(), | ||||
1326 | Src.getAlignment().getAsAlign(), | ||||
1327 | llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinValue())); | ||||
1328 | return CGF.Builder.CreateLoad(Tmp); | ||||
1329 | } | ||||
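// Fallback illustration with hypothetical types: loading a 12-byte source
// { i32, i32, i32 } as a 16-byte destination [2 x i64] takes the final path
// above (SrcSize < DstSize, nothing scalable) -- alloca a [2 x i64]
// "*.coerce" slot, memcpy the 12 known-valid source bytes in, then load the
// [2 x i64]; the remaining 4 bytes are undefined, as documented.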
1330 | |||||
1331 | // Function to store a first-class aggregate into memory. We prefer to | ||||
1332 | // store the elements rather than the aggregate to be more friendly to | ||||
1333 | // fast-isel. | ||||
1334 | // FIXME: Do we need to recurse here? | ||||
1335 | void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest, | ||||
1336 | bool DestIsVolatile) { | ||||
1337 | // Prefer scalar stores to first-class aggregate stores. | ||||
1338 | if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) { | ||||
1339 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { | ||||
1340 | Address EltPtr = Builder.CreateStructGEP(Dest, i); | ||||
1341 | llvm::Value *Elt = Builder.CreateExtractValue(Val, i); | ||||
1342 | Builder.CreateStore(Elt, EltPtr, DestIsVolatile); | ||||
1343 | } | ||||
1344 | } else { | ||||
1345 | Builder.CreateStore(Val, Dest, DestIsVolatile); | ||||
1346 | } | ||||
1347 | } | ||||
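// For illustration, storing a hypothetical FCA value %v = { i32, float }
// through this function emits per-element scalar stores (rough IR shape):
//
//   %e0 = extractvalue { i32, float } %v, 0
//   store i32 %e0, ...      ; element 0 of Dest via CreateStructGEP
//   %e1 = extractvalue { i32, float } %v, 1
//   store float %e1, ...    ; element 1 of Dest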
1348 | |||||
1349 | /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src, | ||||
1350 | /// where the source and destination may have different types. The | ||||
1351 | /// destination is known to be aligned to \arg DstAlign bytes. | ||||
1352 | /// | ||||
1353 | /// This safely handles the case when the src type is larger than the | ||||
1354 | /// destination type; the upper bits of the src will be lost. | ||||
1355 | static void CreateCoercedStore(llvm::Value *Src, | ||||
1356 | Address Dst, | ||||
1357 | bool DstIsVolatile, | ||||
1358 | CodeGenFunction &CGF) { | ||||
1359 | llvm::Type *SrcTy = Src->getType(); | ||||
1360 | llvm::Type *DstTy = Dst.getElementType(); | ||||
1361 | if (SrcTy == DstTy) { | ||||
1362 | CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); | ||||
1363 | return; | ||||
1364 | } | ||||
1365 | |||||
1366 | llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); | ||||
1367 | |||||
1368 | if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) { | ||||
1369 | Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, | ||||
1370 | SrcSize.getFixedValue(), CGF); | ||||
1371 | DstTy = Dst.getElementType(); | ||||
1372 | } | ||||
1373 | |||||
1374 | llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy); | ||||
1375 | llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy); | ||||
1376 | if (SrcPtrTy && DstPtrTy && | ||||
1377 | SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) { | ||||
1378 | Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy); | ||||
1379 | CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); | ||||
1380 | return; | ||||
1381 | } | ||||
1382 | |||||
1383 | // If the source and destination are integer or pointer types, just do an | ||||
1384 | // extension or truncation to the desired type. | ||||
1385 | if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) && | ||||
1386 | (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) { | ||||
1387 | Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF); | ||||
1388 | CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); | ||||
1389 | return; | ||||
1390 | } | ||||
1391 | |||||
1392 | llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy); | ||||
1393 | |||||
1394 | // If store is legal, just bitcast the src pointer. | ||||
1395 | if (isa<llvm::ScalableVectorType>(SrcTy) || | ||||
1396 | isa<llvm::ScalableVectorType>(DstTy) || | ||||
1397 | SrcSize.getFixedValue() <= DstSize.getFixedValue()) { | ||||
1398 | Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy); | ||||
1399 | CGF.EmitAggregateStore(Src, Dst, DstIsVolatile); | ||||
1400 | } else { | ||||
1401 | // Otherwise do coercion through memory. This is stupid, but | ||||
1402 | // simple. | ||||
1403 | |||||
1404 | // Generally SrcSize is never greater than DstSize, since this means we are | ||||
1405 | // losing bits. However, this can happen in cases where the structure has | ||||
1406 | // additional padding, for example due to a user specified alignment. | ||||
1407 | // | ||||
1408 | // FIXME: Assert that we aren't truncating non-padding bits when we have access | ||||
1409 | // to that information. | ||||
1410 | Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment()); | ||||
1411 | CGF.Builder.CreateStore(Src, Tmp); | ||||
1412 | CGF.Builder.CreateMemCpy( | ||||
1413 | Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(), | ||||
1414 | Tmp.getAlignment().getAsAlign(), | ||||
1415 | llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedValue())); | ||||
1416 | } | ||||
1417 | } | ||||
1418 | |||||
1419 | static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, | ||||
1420 | const ABIArgInfo &info) { | ||||
1421 | if (unsigned offset = info.getDirectOffset()) { | ||||
1422 | addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty); | ||||
1423 | addr = CGF.Builder.CreateConstInBoundsByteGEP(addr, | ||||
1424 | CharUnits::fromQuantity(offset)); | ||||
1425 | addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType()); | ||||
1426 | } | ||||
1427 | return addr; | ||||
1428 | } | ||||
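// E.g. (hypothetical ABIArgInfo): with getDirectOffset() == 4 and a coerce
// type of i64, the address is viewed as i8*, advanced by 4 bytes with an
// inbounds GEP, and then retyped to point at i64 before the coerced access.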
1429 | |||||
1430 | namespace { | ||||
1431 | |||||
1432 | /// Encapsulates information about the way function arguments from | ||||
1433 | /// CGFunctionInfo should be passed to the actual LLVM IR function. | ||||
1434 | class ClangToLLVMArgMapping { | ||||
1435 | static const unsigned InvalidIndex = ~0U; | ||||
1436 | unsigned InallocaArgNo; | ||||
1437 | unsigned SRetArgNo; | ||||
1438 | unsigned TotalIRArgs; | ||||
1439 | |||||
1440 | /// Arguments of the LLVM IR function corresponding to a single Clang argument. | ||||
1441 | struct IRArgs { | ||||
1442 | unsigned PaddingArgIndex; | ||||
1443 | // Argument is expanded to IR arguments at positions | ||||
1444 | // [FirstArgIndex, FirstArgIndex + NumberOfArgs). | ||||
1445 | unsigned FirstArgIndex; | ||||
1446 | unsigned NumberOfArgs; | ||||
1447 | |||||
1448 | IRArgs() | ||||
1449 | : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex), | ||||
1450 | NumberOfArgs(0) {} | ||||
1451 | }; | ||||
1452 | |||||
1453 | SmallVector<IRArgs, 8> ArgInfo; | ||||
1454 | |||||
1455 | public: | ||||
1456 | ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI, | ||||
1457 | bool OnlyRequiredArgs = false) | ||||
1458 | : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0), | ||||
1459 | ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) { | ||||
1460 | construct(Context, FI, OnlyRequiredArgs); | ||||
1461 | } | ||||
1462 | |||||
1463 | bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; } | ||||
1464 | unsigned getInallocaArgNo() const { | ||||
1465 | assert(hasInallocaArg()); | ||||
1466 | return InallocaArgNo; | ||||
1467 | } | ||||
1468 | |||||
1469 | bool hasSRetArg() const { return SRetArgNo != InvalidIndex; } | ||||
1470 | unsigned getSRetArgNo() const { | ||||
1471 | assert(hasSRetArg()); | ||||
1472 | return SRetArgNo; | ||||
1473 | } | ||||
1474 | |||||
1475 | unsigned totalIRArgs() const { return TotalIRArgs; } | ||||
1476 | |||||
1477 | bool hasPaddingArg(unsigned ArgNo) const { | ||||
1478 | assert(ArgNo < ArgInfo.size()); | ||||
1479 | return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex; | ||||
1480 | } | ||||
1481 | unsigned getPaddingArgNo(unsigned ArgNo) const { | ||||
1482 | assert(hasPaddingArg(ArgNo)); | ||||
1483 | return ArgInfo[ArgNo].PaddingArgIndex; | ||||
1484 | } | ||||
1485 | |||||
1486 | /// Returns the index of the first IR argument corresponding to ArgNo, and | ||||
1487 | /// the number of IR arguments it expands to. | ||||
1488 | std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const { | ||||
1489 | assert(ArgNo < ArgInfo.size()); | ||||
1490 | return std::make_pair(ArgInfo[ArgNo].FirstArgIndex, | ||||
1491 | ArgInfo[ArgNo].NumberOfArgs); | ||||
1492 | } | ||||
1493 | |||||
1494 | private: | ||||
1495 | void construct(const ASTContext &Context, const CGFunctionInfo &FI, | ||||
1496 | bool OnlyRequiredArgs); | ||||
1497 | }; | ||||
1498 | |||||
1499 | void ClangToLLVMArgMapping::construct(const ASTContext &Context, | ||||
1500 | const CGFunctionInfo &FI, | ||||
1501 | bool OnlyRequiredArgs) { | ||||
1502 | unsigned IRArgNo = 0; | ||||
1503 | bool SwapThisWithSRet = false; | ||||
1504 | const ABIArgInfo &RetAI = FI.getReturnInfo(); | ||||
1505 | |||||
1506 | if (RetAI.getKind() == ABIArgInfo::Indirect) { | ||||
1507 | SwapThisWithSRet = RetAI.isSRetAfterThis(); | ||||
1508 | SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++; | ||||
1509 | } | ||||
1510 | |||||
1511 | unsigned ArgNo = 0; | ||||
1512 | unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); | ||||
1513 | for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs; | ||||
1514 | ++I, ++ArgNo) { | ||||
1515 | assert(I != FI.arg_end()); | ||||
1516 | QualType ArgType = I->type; | ||||
1517 | const ABIArgInfo &AI = I->info; | ||||
1518 | // Collect data about IR arguments corresponding to Clang argument ArgNo. | ||||
1519 | auto &IRArgs = ArgInfo[ArgNo]; | ||||
1520 | |||||
1521 | if (AI.getPaddingType()) | ||||
1522 | IRArgs.PaddingArgIndex = IRArgNo++; | ||||
1523 | |||||
1524 | switch (AI.getKind()) { | ||||
1525 | case ABIArgInfo::Extend: | ||||
1526 | case ABIArgInfo::Direct: { | ||||
1527 | // FIXME: handle sseregparm someday... | ||||
1528 | llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType()); | ||||
1529 | if (AI.isDirect() && AI.getCanBeFlattened() && STy) { | ||||
1530 | IRArgs.NumberOfArgs = STy->getNumElements(); | ||||
1531 | } else { | ||||
1532 | IRArgs.NumberOfArgs = 1; | ||||
1533 | } | ||||
1534 | break; | ||||
1535 | } | ||||
1536 | case ABIArgInfo::Indirect: | ||||
1537 | case ABIArgInfo::IndirectAliased: | ||||
1538 | IRArgs.NumberOfArgs = 1; | ||||
1539 | break; | ||||
1540 | case ABIArgInfo::Ignore: | ||||
1541 | case ABIArgInfo::InAlloca: | ||||
1542 | // Ignore and InAlloca arguments don't have matching LLVM parameters. | ||||
1543 | IRArgs.NumberOfArgs = 0; | ||||
1544 | break; | ||||
1545 | case ABIArgInfo::CoerceAndExpand: | ||||
1546 | IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size(); | ||||
1547 | break; | ||||
1548 | case ABIArgInfo::Expand: | ||||
1549 | IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context); | ||||
1550 | break; | ||||
1551 | } | ||||
1552 | |||||
1553 | if (IRArgs.NumberOfArgs > 0) { | ||||
1554 | IRArgs.FirstArgIndex = IRArgNo; | ||||
1555 | IRArgNo += IRArgs.NumberOfArgs; | ||||
1556 | } | ||||
1557 | |||||
1558 | // Skip over the sret parameter when it comes second. We already handled it | ||||
1559 | // above. | ||||
1560 | if (IRArgNo == 1 && SwapThisWithSRet) | ||||
1561 | IRArgNo++; | ||||
1562 | } | ||||
1563 | assert(ArgNo == ArgInfo.size()); | ||||
1564 | |||||
1565 | if (FI.usesInAlloca()) | ||||
1566 | InallocaArgNo = IRArgNo++; | ||||
1567 | |||||
1568 | TotalIRArgs = IRArgNo; | ||||
1569 | } | ||||
1570 | } // namespace | ||||
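// Example mapping, assuming a hypothetical signature lowered with an
// indirect (sret, not sret-after-this) return:
//
//   struct Big f(int x, double y);
//
// IR argument layout:  0: sret pointer,  1: x,  2: y
// so getSRetArgNo() == 0, getIRArgs(0) == {1, 1}, getIRArgs(1) == {2, 1},
// and totalIRArgs() == 3.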
1571 | |||||
1572 | /***/ | ||||
1573 | |||||
1574 | bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) { | ||||
1575 | const auto &RI = FI.getReturnInfo(); | ||||
1576 | return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet()); | ||||
1577 | } | ||||
1578 | |||||
1579 | bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) { | ||||
1580 | return ReturnTypeUsesSRet(FI) && | ||||
1581 | getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs(); | ||||
1582 | } | ||||
1583 | |||||
1584 | bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) { | ||||
1585 | if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) { | ||||
1586 | switch (BT->getKind()) { | ||||
1587 | default: | ||||
1588 | return false; | ||||
1589 | case BuiltinType::Float: | ||||
1590 | return getTarget().useObjCFPRetForRealType(FloatModeKind::Float); | ||||
1591 | case BuiltinType::Double: | ||||
1592 | return getTarget().useObjCFPRetForRealType(FloatModeKind::Double); | ||||
1593 | case BuiltinType::LongDouble: | ||||
1594 | return getTarget().useObjCFPRetForRealType(FloatModeKind::LongDouble); | ||||
1595 | } | ||||
1596 | } | ||||
1597 | |||||
1598 | return false; | ||||
1599 | } | ||||
1600 | |||||
1601 | bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) { | ||||
1602 | if (const ComplexType *CT = ResultType->getAs<ComplexType>()) { | ||||
1603 | if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) { | ||||
1604 | if (BT->getKind() == BuiltinType::LongDouble) | ||||
1605 | return getTarget().useObjCFP2RetForComplexLongDouble(); | ||||
1606 | } | ||||
1607 | } | ||||
1608 | |||||
1609 | return false; | ||||
1610 | } | ||||
1611 | |||||
1612 | llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { | ||||
1613 | const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD); | ||||
1614 | return GetFunctionType(FI); | ||||
1615 | } | ||||
1616 | |||||
1617 | llvm::FunctionType * | ||||
1618 | CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) { | ||||
1619 | |||||
1620 | bool Inserted = FunctionsBeingProcessed.insert(&FI).second; | ||||
1621 | (void)Inserted; | ||||
1622 | assert(Inserted && "Recursively being processed?"); | ||||
1623 | |||||
1624 | llvm::Type *resultType = nullptr; | ||||
1625 | const ABIArgInfo &retAI = FI.getReturnInfo(); | ||||
1626 | switch (retAI.getKind()) { | ||||
1627 | case ABIArgInfo::Expand: | ||||
1628 | case ABIArgInfo::IndirectAliased: | ||||
1629 | llvm_unreachable("Invalid ABI kind for return argument"); | ||||
1630 | |||||
1631 | case ABIArgInfo::Extend: | ||||
1632 | case ABIArgInfo::Direct: | ||||
1633 | resultType = retAI.getCoerceToType(); | ||||
1634 | break; | ||||
1635 | |||||
1636 | case ABIArgInfo::InAlloca: | ||||
1637 | if (retAI.getInAllocaSRet()) { | ||||
1638 | // sret things on win32 aren't void; they return the sret pointer. | ||||
1639 | QualType ret = FI.getReturnType(); | ||||
1640 | llvm::Type *ty = ConvertType(ret); | ||||
1641 | unsigned addressSpace = CGM.getTypes().getTargetAddressSpace(ret); | ||||
1642 | resultType = llvm::PointerType::get(ty, addressSpace); | ||||
1643 | } else { | ||||
1644 | resultType = llvm::Type::getVoidTy(getLLVMContext()); | ||||
1645 | } | ||||
1646 | break; | ||||
1647 | |||||
1648 | case ABIArgInfo::Indirect: | ||||
1649 | case ABIArgInfo::Ignore: | ||||
1650 | resultType = llvm::Type::getVoidTy(getLLVMContext()); | ||||
1651 | break; | ||||
1652 | |||||
1653 | case ABIArgInfo::CoerceAndExpand: | ||||
1654 | resultType = retAI.getUnpaddedCoerceAndExpandType(); | ||||
1655 | break; | ||||
1656 | } | ||||
1657 | |||||
1658 | ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); | ||||
1659 | SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs()); | ||||
1660 | |||||
1661 | // Add type for sret argument. | ||||
1662 | if (IRFunctionArgs.hasSRetArg()) { | ||||
1663 | QualType Ret = FI.getReturnType(); | ||||
1664 | llvm::Type *Ty = ConvertType(Ret); | ||||
1665 | unsigned AddressSpace = CGM.getTypes().getTargetAddressSpace(Ret); | ||||
1666 | ArgTypes[IRFunctionArgs.getSRetArgNo()] = | ||||
1667 | llvm::PointerType::get(Ty, AddressSpace); | ||||
1668 | } | ||||
1669 | |||||
1670 | // Add type for inalloca argument. | ||||
1671 | if (IRFunctionArgs.hasInallocaArg()) { | ||||
1672 | auto ArgStruct = FI.getArgStruct(); | ||||
1673 | assert(ArgStruct); | ||||
1674 | ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo(); | ||||
1675 | } | ||||
1676 | |||||
1677 | // Add in all of the required arguments. | ||||
1678 | unsigned ArgNo = 0; | ||||
1679 | CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), | ||||
1680 | ie = it + FI.getNumRequiredArgs(); | ||||
1681 | for (; it != ie; ++it, ++ArgNo) { | ||||
1682 | const ABIArgInfo &ArgInfo = it->info; | ||||
1683 | |||||
1684 | // Insert a padding type to ensure proper alignment. | ||||
1685 | if (IRFunctionArgs.hasPaddingArg(ArgNo)) | ||||
1686 | ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = | ||||
1687 | ArgInfo.getPaddingType(); | ||||
1688 | |||||
1689 | unsigned FirstIRArg, NumIRArgs; | ||||
1690 | std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); | ||||
1691 | |||||
1692 | switch (ArgInfo.getKind()) { | ||||
1693 | case ABIArgInfo::Ignore: | ||||
1694 | case ABIArgInfo::InAlloca: | ||||
1695 | assert(NumIRArgs == 0); | ||||
1696 | break; | ||||
1697 | |||||
1698 | case ABIArgInfo::Indirect: { | ||||
1699 | assert(NumIRArgs == 1); | ||||
1700 | // Indirect arguments are always on the stack, which is the alloca addr space. | ||||
1701 | llvm::Type *LTy = ConvertTypeForMem(it->type); | ||||
1702 | ArgTypes[FirstIRArg] = LTy->getPointerTo( | ||||
1703 | CGM.getDataLayout().getAllocaAddrSpace()); | ||||
1704 | break; | ||||
1705 | } | ||||
1706 | case ABIArgInfo::IndirectAliased: { | ||||
1707 | assert(NumIRArgs == 1); | ||||
1708 | llvm::Type *LTy = ConvertTypeForMem(it->type); | ||||
1709 | ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace()); | ||||
1710 | break; | ||||
1711 | } | ||||
1712 | case ABIArgInfo::Extend: | ||||
1713 | case ABIArgInfo::Direct: { | ||||
1714 | // Fast-isel and the optimizer generally like scalar values better than | ||||
1715 | // FCAs, so we flatten them if this is safe to do for this argument. | ||||
1716 | llvm::Type *argType = ArgInfo.getCoerceToType(); | ||||
1717 | llvm::StructType *st = dyn_cast<llvm::StructType>(argType); | ||||
1718 | if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { | ||||
1719 | assert(NumIRArgs == st->getNumElements()); | ||||
1720 | for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) | ||||
1721 | ArgTypes[FirstIRArg + i] = st->getElementType(i); | ||||
1722 | } else { | ||||
1723 | assert(NumIRArgs == 1); | ||||
1724 | ArgTypes[FirstIRArg] = argType; | ||||
1725 | } | ||||
1726 | break; | ||||
1727 | } | ||||
1728 | |||||
1729 | case ABIArgInfo::CoerceAndExpand: { | ||||
1730 | auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; | ||||
1731 | for (auto *EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) { | ||||
1732 | *ArgTypesIter++ = EltTy; | ||||
1733 | } | ||||
1734 | assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); | ||||
1735 | break; | ||||
1736 | } | ||||
1737 | |||||
1738 | case ABIArgInfo::Expand: | ||||
1739 | auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; | ||||
1740 | getExpandedTypes(it->type, ArgTypesIter); | ||||
1741 | assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); | ||||
1742 | break; | ||||
1743 | } | ||||
1744 | } | ||||
1745 | |||||
1746 | bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased; | ||||
1747 | assert(Erased && "Not in set?"); | ||||
1748 | |||||
1749 | return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic()); | ||||
1750 | } | ||||
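// For illustration, the hypothetical declaration
//
//   struct Big g(int n);   // Big lowered as ABIArgInfo::Indirect
//
// produces the IR function type  void (%struct.Big*, i32)  -- the leading
// pointer is the sret slot typed above. The sret and alignment attributes
// themselves are attached later, in ConstructAttributeList, not here.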
1751 | |||||
1752 | llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { | ||||
1753 | const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); | ||||
1754 | const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); | ||||
1755 | |||||
1756 | if (!isFuncTypeConvertible(FPT)) | ||||
1757 | return llvm::StructType::get(getLLVMContext()); | ||||
1758 | |||||
1759 | return GetFunctionType(GD); | ||||
1760 | } | ||||
1761 | |||||
1762 | static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, | ||||
1763 | llvm::AttrBuilder &FuncAttrs, | ||||
1764 | const FunctionProtoType *FPT) { | ||||
1765 | if (!FPT) | ||||
1766 | return; | ||||
1767 | |||||
1768 | if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && | ||||
1769 | FPT->isNothrow()) | ||||
1770 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); | ||||
1771 | } | ||||
1772 | |||||
1773 | static void AddAttributesFromAssumes(llvm::AttrBuilder &FuncAttrs, | ||||
1774 | const Decl *Callee) { | ||||
1775 | if (!Callee) | ||||
1776 | return; | ||||
1777 | |||||
1778 | SmallVector<StringRef, 4> Attrs; | ||||
1779 | |||||
1780 | for (const AssumptionAttr *AA : Callee->specific_attrs<AssumptionAttr>()) | ||||
1781 | AA->getAssumption().split(Attrs, ","); | ||||
1782 | |||||
1783 | if (!Attrs.empty()) | ||||
1784 | FuncAttrs.addAttribute(llvm::AssumptionAttrKey, | ||||
1785 | llvm::join(Attrs.begin(), Attrs.end(), ",")); | ||||
1786 | } | ||||
1787 | |||||
1788 | bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context, | ||||
1789 | QualType ReturnType) const { | ||||
1790 | // We can't just discard the return value for a record type with a | ||||
1791 | // complex destructor or a non-trivially copyable type. | ||||
1792 | if (const RecordType *RT = | ||||
1793 | ReturnType.getCanonicalType()->getAs<RecordType>()) { | ||||
1794 | if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) | ||||
1795 | return ClassDecl->hasTrivialDestructor(); | ||||
1796 | } | ||||
1797 | return ReturnType.isTriviallyCopyableType(Context); | ||||
1798 | } | ||||
1799 | |||||
1800 | static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy, | ||||
1801 | const Decl *TargetDecl) { | ||||
1802 | // As-is, msan cannot tolerate a noundef mismatch between caller and | ||||
1803 | // implementation. A mismatch is possible for e.g. indirect calls from a C | ||||
1804 | // caller into C++. Such mismatches lead to confusing false reports. To avoid | ||||
1805 | // an expensive workaround in msan, we enforce initialization even in | ||||
1806 | // uncommon cases where it's allowed. | ||||
1807 | if (Module.getLangOpts().Sanitize.has(SanitizerKind::Memory)) | ||||
1808 | return true; | ||||
1809 | // C++ explicitly makes returning undefined values UB. C's rule only applies | ||||
1810 | // to used values, so we never mark them noundef for now. | ||||
1811 | if (!Module.getLangOpts().CPlusPlus) | ||||
1812 | return false; | ||||
1813 | if (TargetDecl) { | ||||
1814 | if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) { | ||||
1815 | if (FDecl->isExternC()) | ||||
1816 | return false; | ||||
1817 | } else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) { | ||||
1818 | // Function pointer. | ||||
1819 | if (VDecl->isExternC()) | ||||
1820 | return false; | ||||
1821 | } | ||||
1822 | } | ||||
1823 | |||||
1824 | // We don't want to be too aggressive with the return checking, unless | ||||
1825 | // it's explicit in the code opts or we're using an appropriate sanitizer. | ||||
1826 | // Try to respect what the programmer intended. | ||||
1827 | return Module.getCodeGenOpts().StrictReturn || | ||||
1828 | !Module.MayDropFunctionReturn(Module.getContext(), RetTy) || | ||||
1829 | Module.getLangOpts().Sanitize.has(SanitizerKind::Return); | ||||
1830 | } | ||||
1831 | |||||
1832 | void CodeGenModule::getDefaultFunctionAttributes(StringRef Name, | ||||
1833 | bool HasOptnone, | ||||
1834 | bool AttrOnCallSite, | ||||
1835 | llvm::AttrBuilder &FuncAttrs) { | ||||
1836 | // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. | ||||
1837 | if (!HasOptnone) { | ||||
1838 | if (CodeGenOpts.OptimizeSize) | ||||
1839 | FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); | ||||
1840 | if (CodeGenOpts.OptimizeSize == 2) | ||||
1841 | FuncAttrs.addAttribute(llvm::Attribute::MinSize); | ||||
1842 | } | ||||
1843 | |||||
1844 | if (CodeGenOpts.DisableRedZone) | ||||
1845 | FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); | ||||
1846 | if (CodeGenOpts.IndirectTlsSegRefs) | ||||
1847 | FuncAttrs.addAttribute("indirect-tls-seg-refs"); | ||||
1848 | if (CodeGenOpts.NoImplicitFloat) | ||||
1849 | FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); | ||||
1850 | |||||
1851 | if (AttrOnCallSite) { | ||||
1852 | // Attributes that should go on the call site only. | ||||
1853 | // FIXME: Look for 'BuiltinAttr' on the function rather than re-checking | ||||
1854 | // the -fno-builtin-foo list. | ||||
1855 | if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name)) | ||||
1856 | FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin); | ||||
1857 | if (!CodeGenOpts.TrapFuncName.empty()) | ||||
1858 | FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName); | ||||
1859 | } else { | ||||
1860 | switch (CodeGenOpts.getFramePointer()) { | ||||
1861 | case CodeGenOptions::FramePointerKind::None: | ||||
1862 | // This is the default behavior. | ||||
1863 | break; | ||||
1864 | case CodeGenOptions::FramePointerKind::NonLeaf: | ||||
1865 | case CodeGenOptions::FramePointerKind::All: | ||||
1866 | FuncAttrs.addAttribute("frame-pointer", | ||||
1867 | CodeGenOptions::getFramePointerKindName( | ||||
1868 | CodeGenOpts.getFramePointer())); | ||||
1869 | } | ||||
1870 | |||||
1871 | if (CodeGenOpts.LessPreciseFPMAD) | ||||
1872 | FuncAttrs.addAttribute("less-precise-fpmad", "true"); | ||||
1873 | |||||
1874 | if (CodeGenOpts.NullPointerIsValid) | ||||
1875 | FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid); | ||||
1876 | |||||
1877 | if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE()) | ||||
1878 | FuncAttrs.addAttribute("denormal-fp-math", | ||||
1879 | CodeGenOpts.FPDenormalMode.str()); | ||||
1880 | if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) { | ||||
1881 | FuncAttrs.addAttribute( | ||||
1882 | "denormal-fp-math-f32", | ||||
1883 | CodeGenOpts.FP32DenormalMode.str()); | ||||
1884 | } | ||||
1885 | |||||
1886 | if (LangOpts.getDefaultExceptionMode() == LangOptions::FPE_Ignore) | ||||
1887 | FuncAttrs.addAttribute("no-trapping-math", "true"); | ||||
1888 | |||||
1889 | // TODO: Are these all needed? | ||||
1890 | // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags. | ||||
1891 | if (LangOpts.NoHonorInfs) | ||||
1892 | FuncAttrs.addAttribute("no-infs-fp-math", "true"); | ||||
1893 | if (LangOpts.NoHonorNaNs) | ||||
1894 | FuncAttrs.addAttribute("no-nans-fp-math", "true"); | ||||
1895 | if (LangOpts.ApproxFunc) | ||||
1896 | FuncAttrs.addAttribute("approx-func-fp-math", "true"); | ||||
1897 | if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip && | ||||
1898 | LangOpts.NoSignedZero && LangOpts.ApproxFunc && | ||||
1899 | (LangOpts.getDefaultFPContractMode() == | ||||
1900 | LangOptions::FPModeKind::FPM_Fast || | ||||
1901 | LangOpts.getDefaultFPContractMode() == | ||||
1902 | LangOptions::FPModeKind::FPM_FastHonorPragmas)) | ||||
1903 | FuncAttrs.addAttribute("unsafe-fp-math", "true"); | ||||
1904 | if (CodeGenOpts.SoftFloat) | ||||
1905 | FuncAttrs.addAttribute("use-soft-float", "true"); | ||||
1906 | FuncAttrs.addAttribute("stack-protector-buffer-size", | ||||
1907 | llvm::utostr(CodeGenOpts.SSPBufferSize)); | ||||
1908 | if (LangOpts.NoSignedZero) | ||||
1909 | FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true"); | ||||
1910 | |||||
1911 | // TODO: Reciprocal estimate codegen options should apply to instructions? | ||||
1912 | const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals; | ||||
1913 | if (!Recips.empty()) | ||||
1914 | FuncAttrs.addAttribute("reciprocal-estimates", | ||||
1915 | llvm::join(Recips, ",")); | ||||
1916 | |||||
1917 | if (!CodeGenOpts.PreferVectorWidth.empty() && | ||||
1918 | CodeGenOpts.PreferVectorWidth != "none") | ||||
1919 | FuncAttrs.addAttribute("prefer-vector-width", | ||||
1920 | CodeGenOpts.PreferVectorWidth); | ||||
1921 | |||||
1922 | if (CodeGenOpts.StackRealignment) | ||||
1923 | FuncAttrs.addAttribute("stackrealign"); | ||||
1924 | if (CodeGenOpts.Backchain) | ||||
1925 | FuncAttrs.addAttribute("backchain"); | ||||
1926 | if (CodeGenOpts.EnableSegmentedStacks) | ||||
1927 | FuncAttrs.addAttribute("split-stack"); | ||||
1928 | |||||
1929 | if (CodeGenOpts.SpeculativeLoadHardening) | ||||
1930 | FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening); | ||||
1931 | |||||
1932 | // Add zero-call-used-regs attribute. | ||||
1933 | switch (CodeGenOpts.getZeroCallUsedRegs()) { | ||||
1934 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip: | ||||
1935 | FuncAttrs.removeAttribute("zero-call-used-regs"); | ||||
1936 | break; | ||||
1937 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg: | ||||
1938 | FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr-arg"); | ||||
1939 | break; | ||||
1940 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR: | ||||
1941 | FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr"); | ||||
1942 | break; | ||||
1943 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg: | ||||
1944 | FuncAttrs.addAttribute("zero-call-used-regs", "used-arg"); | ||||
1945 | break; | ||||
1946 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used: | ||||
1947 | FuncAttrs.addAttribute("zero-call-used-regs", "used"); | ||||
1948 | break; | ||||
1949 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg: | ||||
1950 | FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr-arg"); | ||||
1951 | break; | ||||
1952 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR: | ||||
1953 | FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr"); | ||||
1954 | break; | ||||
1955 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg: | ||||
1956 | FuncAttrs.addAttribute("zero-call-used-regs", "all-arg"); | ||||
1957 | break; | ||||
1958 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All: | ||||
1959 | FuncAttrs.addAttribute("zero-call-used-regs", "all"); | ||||
1960 | break; | ||||
1961 | } | ||||
1962 | } | ||||
1963 | |||||
1964 | if (getLangOpts().assumeFunctionsAreConvergent()) { | ||||
1965 | // Conservatively, mark all functions and calls in CUDA and OpenCL as | ||||
1966 | // convergent (meaning, they may call an intrinsically convergent op, such | ||||
1967 | // as __syncthreads() / barrier(), and so can't have certain optimizations | ||||
1968 | // applied around them). LLVM will remove this attribute where it safely | ||||
1969 | // can. | ||||
1970 | FuncAttrs.addAttribute(llvm::Attribute::Convergent); | ||||
1971 | } | ||||
1972 | |||||
1973 | // TODO: The NoUnwind attribute should be added for other GPU modes: HIP, | ||||
1974 | // SYCL, OpenMP offload. AFAIK, none of them support exceptions in device | ||||
1975 | // code. | ||||
1976 | if ((getLangOpts().CUDA && getLangOpts().CUDAIsDevice) || | ||||
1977 | getLangOpts().OpenCL) { | ||||
1978 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); | ||||
1979 | } | ||||
1980 | |||||
1981 | for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) { | ||||
1982 | StringRef Var, Value; | ||||
1983 | std::tie(Var, Value) = Attr.split('='); | ||||
1984 | FuncAttrs.addAttribute(Var, Value); | ||||
1985 | } | ||||
1986 | } | ||||
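// Abbreviated sample of the resulting string attributes, assuming a
// definition (not a call site) built with fast-math-style options:
//
//   LangOpts.NoHonorInfs                    -> "no-infs-fp-math"="true"
//   LangOpts.NoHonorNaNs                    -> "no-nans-fp-math"="true"
//   reassoc + recip + nsz + afn + fast FMA  -> "unsafe-fp-math"="true"
//
// plus "stack-protector-buffer-size"=<SSPBufferSize>, which is added
// unconditionally on this path.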
1987 | |||||
1988 | void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) { | ||||
1989 | llvm::AttrBuilder FuncAttrs(F.getContext()); | ||||
1990 | getDefaultFunctionAttributes(F.getName(), F.hasOptNone(), | ||||
1991 | /* AttrOnCallSite = */ false, FuncAttrs); | ||||
1992 | // TODO: call GetCPUAndFeaturesAttributes? | ||||
1993 | F.addFnAttrs(FuncAttrs); | ||||
1994 | } | ||||
1995 | |||||
1996 | void CodeGenModule::addDefaultFunctionDefinitionAttributes( | ||||
1997 | llvm::AttrBuilder &attrs) { | ||||
1998 | getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false, | ||||
1999 | /*for call*/ false, attrs); | ||||
2000 | GetCPUAndFeaturesAttributes(GlobalDecl(), attrs); | ||||
2001 | } | ||||
2002 | |||||
2003 | static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs, | ||||
2004 | const LangOptions &LangOpts, | ||||
2005 | const NoBuiltinAttr *NBA = nullptr) { | ||||
2006 | auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) { | ||||
2007 | SmallString<32> AttributeName; | ||||
2008 | AttributeName += "no-builtin-"; | ||||
2009 | AttributeName += BuiltinName; | ||||
2010 | FuncAttrs.addAttribute(AttributeName); | ||||
2011 | }; | ||||
2012 | |||||
2013 | // First, handle the language options passed through -fno-builtin. | ||||
2014 | if (LangOpts.NoBuiltin) { | ||||
2015 | // -fno-builtin disables them all. | ||||
2016 | FuncAttrs.addAttribute("no-builtins"); | ||||
2017 | return; | ||||
2018 | } | ||||
2019 | |||||
2020 | // Then, add attributes for builtins specified through -fno-builtin-<name>. | ||||
2021 | llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr); | ||||
2022 | |||||
2023 | // Now, let's check the __attribute__((no_builtin("..."))) attribute added to | ||||
2024 | // the source. | ||||
2025 | if (!NBA) | ||||
2026 | return; | ||||
2027 | |||||
2028 | // If there is a wildcard in the builtin names specified through the | ||||
2029 | // attribute, disable them all. | ||||
2030 | if (llvm::is_contained(NBA->builtinNames(), "*")) { | ||||
2031 | FuncAttrs.addAttribute("no-builtins"); | ||||
2032 | return; | ||||
2033 | } | ||||
2034 | |||||
2035 | // And last, add the rest of the builtin names. | ||||
2036 | llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr); | ||||
2037 | } | ||||
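// Examples of the resulting attributes for typical inputs:
//
//   -fno-builtin                           -> "no-builtins"
//   -fno-builtin-memcpy                    -> "no-builtin-memcpy"
//   __attribute__((no_builtin("malloc")))  -> "no-builtin-malloc"
//   __attribute__((no_builtin("*")))       -> "no-builtins"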
2038 | |||||
2039 | static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, | ||||
2040 | const llvm::DataLayout &DL, const ABIArgInfo &AI, | ||||
2041 | bool CheckCoerce = true) { | ||||
2042 | llvm::Type *Ty = Types.ConvertTypeForMem(QTy); | ||||
2043 | if (AI.getKind() == ABIArgInfo::Indirect) | ||||
2044 | return true; | ||||
2045 | if (AI.getKind() == ABIArgInfo::Extend) | ||||
2046 | return true; | ||||
2047 | if (!DL.typeSizeEqualsStoreSize(Ty)) | ||||
2048 | // TODO: This will result in a modest amount of values not marked noundef | ||||
2049 | // when they could be. We care about values that *invisibly* contain undef | ||||
2050 | // bits from the perspective of LLVM IR. | ||||
2051 | return false; | ||||
2052 | if (CheckCoerce && AI.canHaveCoerceToType()) { | ||||
2053 | llvm::Type *CoerceTy = AI.getCoerceToType(); | ||||
2054 | if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy), | ||||
2055 | DL.getTypeSizeInBits(Ty))) | ||||
2056 | // If we're coercing to a type with a greater size than the canonical one, | ||||
2057 | // we're introducing new undef bits. | ||||
2058 | // Coercing to a type of smaller or equal size is ok, as we know that | ||||
2059 | // there's no internal padding (typeSizeEqualsStoreSize). | ||||
2060 | return false; | ||||
2061 | } | ||||
2062 | if (QTy->isBitIntType()) | ||||
2063 | return true; | ||||
2064 | if (QTy->isReferenceType()) | ||||
2065 | return true; | ||||
2066 | if (QTy->isNullPtrType()) | ||||
2067 | return false; | ||||
2068 | if (QTy->isMemberPointerType()) | ||||
2069 | // TODO: Some member pointers are `noundef`, but it depends on the ABI. For | ||||
2070 | // now, never mark them. | ||||
2071 | return false; | ||||
2072 | if (QTy->isScalarType()) { | ||||
2073 | if (const ComplexType *Complex = dyn_cast<ComplexType>(QTy)) | ||||
2074 | return DetermineNoUndef(Complex->getElementType(), Types, DL, AI, false); | ||||
2075 | return true; | ||||
2076 | } | ||||
2077 | if (const VectorType *Vector = dyn_cast<VectorType>(QTy)) | ||||
2078 | return DetermineNoUndef(Vector->getElementType(), Types, DL, AI, false); | ||||
2079 | if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy)) | ||||
2080 | return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false); | ||||
2081 | if (const ArrayType *Array = dyn_cast<ArrayType>(QTy)) | ||||
2082 | return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false); | ||||
2083 | |||||
2084 | // TODO: Some structs may be `noundef`, in specific situations. | ||||
2085 | return false; | ||||
2086 | } | ||||
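// Examples of the rules above, assuming a typical data layout and a plain
// Direct lowering (Indirect and Extend return true unconditionally):
//
//   int, float, int&    -> noundef (scalar and reference cases)
//   std::nullptr_t      -> not noundef (explicitly excluded)
//   int S::*            -> not noundef (member pointers are ABI-dependent)
//   struct { int a; }   -> not noundef (records are conservatively skipped)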
2087 | |||||
2088 | /// Check if the argument of a function has the maybe_undef attribute. | ||||
2089 | static bool IsArgumentMaybeUndef(const Decl *TargetDecl, | ||||
2090 | unsigned NumRequiredArgs, unsigned ArgNo) { | ||||
2091 | const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl); | ||||
2092 | if (!FD) | ||||
2093 | return false; | ||||
2094 | |||||
2095 | // Assume variadic arguments do not have maybe_undef attribute. | ||||
2096 | if (ArgNo >= NumRequiredArgs) | ||||
2097 | return false; | ||||
2098 | |||||
2099 | // Check if argument has maybe_undef attribute. | ||||
2100 | if (ArgNo < FD->getNumParams()) { | ||||
2101 | const ParmVarDecl *Param = FD->getParamDecl(ArgNo); | ||||
2102 | if (Param && Param->hasAttr<MaybeUndefAttr>()) | ||||
2103 | return true; | ||||
2104 | } | ||||
2105 | |||||
2106 | return false; | ||||
2107 | } | ||||
2108 | |||||
2109 | /// Test if it's legal to apply nofpclass for the given parameter type and its | ||||
2110 | /// lowered IR type. | ||||
2111 | static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType, | ||||
2112 | bool IsReturn) { | ||||
2113 | // Should only apply to FP types in the source, not ABI promoted. | ||||
2114 | if (!ParamType->hasFloatingRepresentation()) | ||||
2115 | return false; | ||||
2116 | |||||
2117 | // The promoted-to IR type also needs to support nofpclass. | ||||
2118 | llvm::Type *IRTy = AI.getCoerceToType(); | ||||
2119 | if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy)) | ||||
2120 | return true; | ||||
2121 | |||||
2122 | if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) { | ||||
2123 | return !IsReturn && AI.getCanBeFlattened() && | ||||
2124 | llvm::all_of(ST->elements(), [](llvm::Type *Ty) { | ||||
2125 | return llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty); | ||||
2126 | }); | ||||
2127 | } | ||||
2128 | |||||
2129 | return false; | ||||
2130 | } | ||||
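// For illustration: a parameter lowered to float or <4 x float> qualifies
// directly; a flattenable { float, float } aggregate qualifies only in
// parameter position (IsReturn == false); and a floating-point value that
// the ABI coerces to an integer IR type fails the IR-type check above.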
2131 | |||||
2132 | /// Return the nofpclass mask that can be applied to floating-point parameters. | ||||
2133 | static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts) { | ||||
2134 | llvm::FPClassTest Mask = llvm::fcNone; | ||||
2135 | if (LangOpts.NoHonorInfs) | ||||
2136 | Mask |= llvm::fcInf; | ||||
2137 | if (LangOpts.NoHonorNaNs) | ||||
2138 | Mask |= llvm::fcNan; | ||||
2139 | return Mask; | ||||
2140 | } | ||||
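// A minimal sketch, assuming both -fno-honor-infinities and -fno-honor-nans
// (both implied by -ffast-math): the mask becomes fcInf | fcNan, so an
// eligible parameter would typically be emitted as
//
//   define float @f(float nofpclass(nan inf) %x)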
2141 | |||||
2142 | /// Construct the IR attribute list of a function or call. | ||||
2143 | /// | ||||
2144 | /// When adding an attribute, please consider where it should be handled: | ||||
2145 | /// | ||||
2146 | /// - getDefaultFunctionAttributes is for attributes that are essentially | ||||
2147 | /// part of the global target configuration (but perhaps can be | ||||
2148 | /// overridden on a per-function basis). Adding attributes there | ||||
2149 | /// will cause them to also be set in frontends that build on Clang's | ||||
2150 | /// target-configuration logic, as well as for code defined in library | ||||
2151 | /// modules such as CUDA's libdevice. | ||||
2152 | /// | ||||
2153 | /// - ConstructAttributeList builds on top of getDefaultFunctionAttributes | ||||
2154 | /// and adds declaration-specific, convention-specific, and | ||||
2155 | /// frontend-specific logic. The last is of particular importance: | ||||
2156 | /// attributes that restrict how the frontend generates code must be | ||||
2157 | /// added here rather than getDefaultFunctionAttributes. | ||||
2158 | /// | ||||
2159 | void CodeGenModule::ConstructAttributeList(StringRef Name, | ||||
2160 | const CGFunctionInfo &FI, | ||||
2161 | CGCalleeInfo CalleeInfo, | ||||
2162 | llvm::AttributeList &AttrList, | ||||
2163 | unsigned &CallingConv, | ||||
2164 | bool AttrOnCallSite, bool IsThunk) { | ||||
2165 | llvm::AttrBuilder FuncAttrs(getLLVMContext()); | ||||
2166 | llvm::AttrBuilder RetAttrs(getLLVMContext()); | ||||
2167 | |||||
2168 | // Collect function IR attributes from the CC lowering. | ||||
2169 | // We'll collect the parameter and result attributes later. | ||||
2170 | CallingConv = FI.getEffectiveCallingConvention(); | ||||
2171 | if (FI.isNoReturn()) | ||||
2172 | FuncAttrs.addAttribute(llvm::Attribute::NoReturn); | ||||
2173 | if (FI.isCmseNSCall()) | ||||
2174 | FuncAttrs.addAttribute("cmse_nonsecure_call"); | ||||
2175 | |||||
2176 | // Collect function IR attributes from the callee prototype if we have one. | ||||
2177 | AddAttributesFromFunctionProtoType(getContext(), FuncAttrs, | ||||
2178 | CalleeInfo.getCalleeFunctionProtoType()); | ||||
2179 | |||||
2180 | const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl(); | ||||
2181 | |||||
2182 | // Attach assumption attributes to the declaration. If this is a call | ||||
2183 | // site, attach assumptions from the caller to the call as well. | ||||
2184 | AddAttributesFromAssumes(FuncAttrs, TargetDecl); | ||||
2185 | |||||
2186 | bool HasOptnone = false; | ||||
2187 | // The NoBuiltinAttr attached to the target FunctionDecl. | ||||
2188 | const NoBuiltinAttr *NBA = nullptr; | ||||
2189 | |||||
2190 | // Some ABIs may introduce accesses to arguments that would otherwise | ||||
2191 | // not occur. | ||||
2192 | auto AddPotentialArgAccess = [&]() { | ||||
2193 | llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory); | ||||
2194 | if (A.isValid()) | ||||
2195 | FuncAttrs.addMemoryAttr(A.getMemoryEffects() | | ||||
2196 | llvm::MemoryEffects::argMemOnly()); | ||||
2197 | }; | ||||
2198 | |||||
2199 | // Collect function IR attributes based on declaration-specific | ||||
2200 | // information. | ||||
2201 | // FIXME: handle sseregparm someday... | ||||
2202 | if (TargetDecl) { | ||||
2203 | if (TargetDecl->hasAttr<ReturnsTwiceAttr>()) | ||||
2204 | FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice); | ||||
2205 | if (TargetDecl->hasAttr<NoThrowAttr>()) | ||||
2206 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); | ||||
2207 | if (TargetDecl->hasAttr<NoReturnAttr>()) | ||||
2208 | FuncAttrs.addAttribute(llvm::Attribute::NoReturn); | ||||
2209 | if (TargetDecl->hasAttr<ColdAttr>()) | ||||
2210 | FuncAttrs.addAttribute(llvm::Attribute::Cold); | ||||
2211 | if (TargetDecl->hasAttr<HotAttr>()) | ||||
2212 | FuncAttrs.addAttribute(llvm::Attribute::Hot); | ||||
2213 | if (TargetDecl->hasAttr<NoDuplicateAttr>()) | ||||
2214 | FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate); | ||||
2215 | if (TargetDecl->hasAttr<ConvergentAttr>()) | ||||
2216 | FuncAttrs.addAttribute(llvm::Attribute::Convergent); | ||||
2217 | |||||
2218 | if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { | ||||
2219 | AddAttributesFromFunctionProtoType( | ||||
2220 | getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>()); | ||||
2221 | if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) { | ||||
2222 | // A sane operator new returns a non-aliasing pointer. | ||||
2223 | auto Kind = Fn->getDeclName().getCXXOverloadedOperator(); | ||||
2224 | if (getCodeGenOpts().AssumeSaneOperatorNew && | ||||
2225 | (Kind == OO_New || Kind == OO_Array_New)) | ||||
2226 | RetAttrs.addAttribute(llvm::Attribute::NoAlias); | ||||
2227 | } | ||||
2228 | const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn); | ||||
2229 | const bool IsVirtualCall = MD && MD->isVirtual(); | ||||
2230 | // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a | ||||
2231 | // virtual function. These attributes are not inherited by overrides. | ||||
2232 | if (!(AttrOnCallSite && IsVirtualCall)) { | ||||
2233 | if (Fn->isNoReturn()) | ||||
2234 | FuncAttrs.addAttribute(llvm::Attribute::NoReturn); | ||||
2235 | NBA = Fn->getAttr<NoBuiltinAttr>(); | ||||
2236 | } | ||||
2237 | // Only place nomerge attribute on call sites, never functions. This | ||||
2238 | // allows it to work on indirect virtual function calls. | ||||
2239 | if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>()) | ||||
2240 | FuncAttrs.addAttribute(llvm::Attribute::NoMerge); | ||||
2241 | } | ||||
2242 | |||||
2243 | // 'const', 'pure' and 'noalias' attributed functions are also nounwind. | ||||
2244 | if (TargetDecl->hasAttr<ConstAttr>()) { | ||||
2245 | FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none()); | ||||
2246 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); | ||||
2247 | // gcc specifies that 'const' functions have greater restrictions than | ||||
2248 | // 'pure' functions, so they also cannot have infinite loops. | ||||
2249 | FuncAttrs.addAttribute(llvm::Attribute::WillReturn); | ||||
2250 | } else if (TargetDecl->hasAttr<PureAttr>()) { | ||||
2251 | FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly()); | ||||
2252 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); | ||||
2253 | // gcc specifies that 'pure' functions cannot have infinite loops. | ||||
2254 | FuncAttrs.addAttribute(llvm::Attribute::WillReturn); | ||||
2255 | } else if (TargetDecl->hasAttr<NoAliasAttr>()) { | ||||
2256 | FuncAttrs.addMemoryAttr(llvm::MemoryEffects::argMemOnly()); | ||||
2257 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); | ||||
2258 | } | ||||
2259 | if (TargetDecl->hasAttr<RestrictAttr>()) | ||||
2260 | RetAttrs.addAttribute(llvm::Attribute::NoAlias); | ||||
2261 | if (TargetDecl->hasAttr<ReturnsNonNullAttr>() && | ||||
2262 | !CodeGenOpts.NullPointerIsValid) | ||||
2263 | RetAttrs.addAttribute(llvm::Attribute::NonNull); | ||||
2264 | if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>()) | ||||
2265 | FuncAttrs.addAttribute("no_caller_saved_registers"); | ||||
2266 | if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>()) | ||||
2267 | FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck); | ||||
2268 | if (TargetDecl->hasAttr<LeafAttr>()) | ||||
2269 | FuncAttrs.addAttribute(llvm::Attribute::NoCallback); | ||||
2270 | |||||
2271 | HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>(); | ||||
2272 | if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) { | ||||
2273 | std::optional<unsigned> NumElemsParam; | ||||
2274 | if (AllocSize->getNumElemsParam().isValid()) | ||||
2275 | NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex(); | ||||
2276 | FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(), | ||||
2277 | NumElemsParam); | ||||
2278 | } | ||||
2279 | |||||
2280 | if (TargetDecl->hasAttr<OpenCLKernelAttr>()) { | ||||
2281 | if (getLangOpts().OpenCLVersion <= 120) { | ||||
2282 | // OpenCL v1.2: work groups are always uniform. | ||||
2283 | FuncAttrs.addAttribute("uniform-work-group-size", "true"); | ||||
2284 | } else { | ||||
2285 | // OpenCL v2.0: work groups may or may not be uniform. The | ||||
2286 | // '-cl-uniform-work-group-size' compile option hints to the | ||||
2287 | // compiler that the global work-size is a multiple of the | ||||
2288 | // work-group size specified to clEnqueueNDRangeKernel | ||||
2289 | // (i.e. work groups are uniform). | ||||
2290 | FuncAttrs.addAttribute("uniform-work-group-size", | ||||
2291 | llvm::toStringRef(CodeGenOpts.UniformWGSize)); | ||||
2292 | } | ||||
2293 | } | ||||
2294 | } | ||||
2295 | |||||
2296 | // Attach "no-builtins" attributes to: | ||||
2297 | // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>". | ||||
2298 | // * definitions: "no-builtins" or "no-builtin-<name>" only. | ||||
2299 | // The attributes can come from: | ||||
2300 | // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name> | ||||
2301 | // * FunctionDecl attributes: __attribute__((no_builtin(...))) | ||||
2302 | addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA); | ||||
2303 | |||||
2304 | // Collect function IR attributes based on global settings. | ||||
2305 | getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs); | ||||
2306 | |||||
2307 | // Override some default IR attributes based on declaration-specific | ||||
2308 | // information. | ||||
2309 | if (TargetDecl) { | ||||
2310 | if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>()) | ||||
2311 | FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening); | ||||
2312 | if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>()) | ||||
2313 | FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening); | ||||
2314 | if (TargetDecl->hasAttr<NoSplitStackAttr>()) | ||||
2315 | FuncAttrs.removeAttribute("split-stack"); | ||||
2316 | if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) { | ||||
2317 | // A function "__attribute__((...))" overrides the command-line flag. | ||||
2318 | auto Kind = | ||||
2319 | TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs(); | ||||
2320 | FuncAttrs.removeAttribute("zero-call-used-regs"); | ||||
2321 | FuncAttrs.addAttribute( | ||||
2322 | "zero-call-used-regs", | ||||
2323 | ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind)); | ||||
2324 | } | ||||
2325 | |||||
2326 | // Add NonLazyBind attribute to function declarations when -fno-plt | ||||
2327 | // is used. | ||||
2328 | // FIXME: what if we just haven't processed the function definition | ||||
2329 | // yet, or if it's an external definition like C99 inline? | ||||
2330 | if (CodeGenOpts.NoPLT) { | ||||
2331 | if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { | ||||
2332 | if (!Fn->isDefined() && !AttrOnCallSite) { | ||||
2333 | FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind); | ||||
2334 | } | ||||
2335 | } | ||||
2336 | } | ||||
2337 | } | ||||
2338 | |||||
2339 | // Add "sample-profile-suffix-elision-policy" attribute for internal linkage | ||||
2340 | // functions with -funique-internal-linkage-names. | ||||
2341 | if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) { | ||||
2342 | if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) { | ||||
2343 | if (!FD->isExternallyVisible()) | ||||
2344 | FuncAttrs.addAttribute("sample-profile-suffix-elision-policy", | ||||
2345 | "selected"); | ||||
2346 | } | ||||
2347 | } | ||||
2348 | |||||
2349 | // Collect non-call-site function IR attributes from declaration-specific | ||||
2350 | // information. | ||||
2351 | if (!AttrOnCallSite) { | ||||
2352 | if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>()) | ||||
2353 | FuncAttrs.addAttribute("cmse_nonsecure_entry"); | ||||
2354 | |||||
2355 | // Whether tail calls should be disabled. | ||||
2356 | auto shouldDisableTailCalls = [&] { | ||||
2357 | // Should this be honored in getDefaultFunctionAttributes? | ||||
2358 | if (CodeGenOpts.DisableTailCalls) | ||||
2359 | return true; | ||||
2360 | |||||
2361 | if (!TargetDecl) | ||||
2362 | return false; | ||||
2363 | |||||
2364 | if (TargetDecl->hasAttr<DisableTailCallsAttr>() || | ||||
2365 | TargetDecl->hasAttr<AnyX86InterruptAttr>()) | ||||
2366 | return true; | ||||
2367 | |||||
2368 | if (CodeGenOpts.NoEscapingBlockTailCalls) { | ||||
2369 | if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl)) | ||||
2370 | if (!BD->doesNotEscape()) | ||||
2371 | return true; | ||||
2372 | } | ||||
2373 | |||||
2374 | return false; | ||||
2375 | }; | ||||
2376 | if (shouldDisableTailCalls()) | ||||
2377 | FuncAttrs.addAttribute("disable-tail-calls", "true"); | ||||
2378 | |||||
2379 | // CPU/feature overrides. addDefaultFunctionDefinitionAttributes | ||||
2380 | // handles these separately to set them based on the global defaults. | ||||
2381 | GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs); | ||||
2382 | } | ||||
2383 | |||||
2384 | // Collect attributes from arguments and return values. | ||||
2385 | ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI); | ||||
2386 | |||||
2387 | QualType RetTy = FI.getReturnType(); | ||||
2388 | const ABIArgInfo &RetAI = FI.getReturnInfo(); | ||||
2389 | const llvm::DataLayout &DL = getDataLayout(); | ||||
2390 | |||||
2391 | // Determine if the return type could be partially undef | ||||
2392 | if (CodeGenOpts.EnableNoundefAttrs && | ||||
2393 | HasStrictReturn(*this, RetTy, TargetDecl)) { | ||||
2394 | if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect && | ||||
2395 | DetermineNoUndef(RetTy, getTypes(), DL, RetAI)) | ||||
2396 | RetAttrs.addAttribute(llvm::Attribute::NoUndef); | ||||
2397 | } | ||||
2398 | |||||
2399 | switch (RetAI.getKind()) { | ||||
2400 | case ABIArgInfo::Extend: | ||||
2401 | if (RetAI.isSignExt()) | ||||
2402 | RetAttrs.addAttribute(llvm::Attribute::SExt); | ||||
2403 | else | ||||
2404 | RetAttrs.addAttribute(llvm::Attribute::ZExt); | ||||
2405 | [[fallthrough]]; | ||||
2406 | case ABIArgInfo::Direct: | ||||
2407 | if (RetAI.getInReg()) | ||||
2408 | RetAttrs.addAttribute(llvm::Attribute::InReg); | ||||
2409 | |||||
2410 | if (canApplyNoFPClass(RetAI, RetTy, true)) | ||||
2411 | RetAttrs.addNoFPClassAttr(getNoFPClassTestMask(getLangOpts())); | ||||
2412 | |||||
2413 | break; | ||||
2414 | case ABIArgInfo::Ignore: | ||||
2415 | break; | ||||
2416 | |||||
2417 | case ABIArgInfo::InAlloca: | ||||
2418 | case ABIArgInfo::Indirect: { | ||||
2419 | // inalloca and sret disable readnone and readonly | ||||
2420 | AddPotentialArgAccess(); | ||||
2421 | break; | ||||
2422 | } | ||||
2423 | |||||
2424 | case ABIArgInfo::CoerceAndExpand: | ||||
2425 | break; | ||||
2426 | |||||
2427 | case ABIArgInfo::Expand: | ||||
2428 | case ABIArgInfo::IndirectAliased: | ||||
2429 | llvm_unreachable("Invalid ABI kind for return argument"); | ||||
2430 | } | ||||
2431 | |||||
2432 | if (!IsThunk) { | ||||
2433 | // FIXME: fix this properly, https://reviews.llvm.org/D100388 | ||||
2434 | if (const auto *RefTy = RetTy->getAs<ReferenceType>()) { | ||||
2435 | QualType PTy = RefTy->getPointeeType(); | ||||
2436 | if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) | ||||
2437 | RetAttrs.addDereferenceableAttr( | ||||
2438 | getMinimumObjectSize(PTy).getQuantity()); | ||||
2439 | if (getTypes().getTargetAddressSpace(PTy) == 0 && | ||||
2440 | !CodeGenOpts.NullPointerIsValid) | ||||
2441 | RetAttrs.addAttribute(llvm::Attribute::NonNull); | ||||
2442 | if (PTy->isObjectType()) { | ||||
2443 | llvm::Align Alignment = | ||||
2444 | getNaturalPointeeTypeAlignment(RetTy).getAsAlign(); | ||||
2445 | RetAttrs.addAlignmentAttr(Alignment); | ||||
2446 | } | ||||
2447 | } | ||||
2448 | } | ||||
2449 | |||||
2450 | bool hasUsedSRet = false; | ||||
2451 | SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs()); | ||||
2452 | |||||
2453 | // Attach attributes to sret. | ||||
2454 | if (IRFunctionArgs.hasSRetArg()) { | ||||
2455 | llvm::AttrBuilder SRETAttrs(getLLVMContext()); | ||||
2456 | SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy)); | ||||
2457 | hasUsedSRet = true; | ||||
2458 | if (RetAI.getInReg()) | ||||
2459 | SRETAttrs.addAttribute(llvm::Attribute::InReg); | ||||
2460 | SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity()); | ||||
2461 | ArgAttrs[IRFunctionArgs.getSRetArgNo()] = | ||||
2462 | llvm::AttributeSet::get(getLLVMContext(), SRETAttrs); | ||||
2463 | } | ||||
2464 | |||||
2465 | // Attach attributes to inalloca argument. | ||||
2466 | if (IRFunctionArgs.hasInallocaArg()) { | ||||
2467 | llvm::AttrBuilder Attrs(getLLVMContext()); | ||||
2468 | Attrs.addInAllocaAttr(FI.getArgStruct()); | ||||
2469 | ArgAttrs[IRFunctionArgs.getInallocaArgNo()] = | ||||
2470 | llvm::AttributeSet::get(getLLVMContext(), Attrs); | ||||
2471 | } | ||||
2472 | |||||
2473 | // Apply `nonnull`, `dereferenceable(N)` and `align N` to the `this` argument, | ||||
2474 | // unless this is a thunk function. | ||||
2475 | // FIXME: fix this properly, https://reviews.llvm.org/D100388 | ||||
2476 | if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() && | ||||
2477 | !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) { | ||||
2478 | auto IRArgs = IRFunctionArgs.getIRArgs(0); | ||||
2479 | |||||
2480 | assert(IRArgs.second == 1 && "Expected only a single `this` pointer."); | ||||
2481 | |||||
2482 | llvm::AttrBuilder Attrs(getLLVMContext()); | ||||
2483 | |||||
2484 | QualType ThisTy = | ||||
2485 | FI.arg_begin()->type.castAs<PointerType>()->getPointeeType(); | ||||
2486 | |||||
2487 | if (!CodeGenOpts.NullPointerIsValid && | ||||
2488 | getTypes().getTargetAddressSpace(FI.arg_begin()->type) == 0) { | ||||
2489 | Attrs.addAttribute(llvm::Attribute::NonNull); | ||||
2490 | Attrs.addDereferenceableAttr(getMinimumObjectSize(ThisTy).getQuantity()); | ||||
2491 | } else { | ||||
2492 | // FIXME dereferenceable should be correct here, regardless of | ||||
2493 | // NullPointerIsValid. However, dereferenceable currently does not always | ||||
2494 | // respect NullPointerIsValid and may imply nonnull and break the program. | ||||
2495 | // See https://reviews.llvm.org/D66618 for discussions. | ||||
2496 | Attrs.addDereferenceableOrNullAttr( | ||||
2497 | getMinimumObjectSize( | ||||
2498 | FI.arg_begin()->type.castAs<PointerType>()->getPointeeType()) | ||||
2499 | .getQuantity()); | ||||
2500 | } | ||||
2501 | |||||
2502 | llvm::Align Alignment = | ||||
2503 | getNaturalTypeAlignment(ThisTy, /*BaseInfo=*/nullptr, | ||||
2504 | /*TBAAInfo=*/nullptr, /*forPointeeType=*/true) | ||||
2505 | .getAsAlign(); | ||||
2506 | Attrs.addAlignmentAttr(Alignment); | ||||
2507 | |||||
2508 | ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs); | ||||
2509 | } | ||||
2510 | |||||
2511 | unsigned ArgNo = 0; | ||||
2512 | for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), | ||||
2513 | E = FI.arg_end(); | ||||
2514 | I != E; ++I, ++ArgNo) { | ||||
2515 | QualType ParamType = I->type; | ||||
2516 | const ABIArgInfo &AI = I->info; | ||||
2517 | llvm::AttrBuilder Attrs(getLLVMContext()); | ||||
2518 | |||||
2519 | // Add attribute for padding argument, if necessary. | ||||
2520 | if (IRFunctionArgs.hasPaddingArg(ArgNo)) { | ||||
2521 | if (AI.getPaddingInReg()) { | ||||
2522 | ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = | ||||
2523 | llvm::AttributeSet::get( | ||||
2524 | getLLVMContext(), | ||||
2525 | llvm::AttrBuilder(getLLVMContext()).addAttribute(llvm::Attribute::InReg)); | ||||
2526 | } | ||||
2527 | } | ||||
2528 | |||||
2529 | // Decide whether the argument we're handling could be partially undef | ||||
2530 | if (CodeGenOpts.EnableNoundefAttrs && | ||||
2531 | DetermineNoUndef(ParamType, getTypes(), DL, AI)) { | ||||
2532 | Attrs.addAttribute(llvm::Attribute::NoUndef); | ||||
2533 | } | ||||
2534 | |||||
2535 | // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we | ||||
2536 | // have the corresponding parameter variable. It doesn't make | ||||
2537 | // sense to do it here because parameters are so messed up. | ||||
2538 | switch (AI.getKind()) { | ||||
2539 | case ABIArgInfo::Extend: | ||||
2540 | if (AI.isSignExt()) | ||||
2541 | Attrs.addAttribute(llvm::Attribute::SExt); | ||||
2542 | else | ||||
2543 | Attrs.addAttribute(llvm::Attribute::ZExt); | ||||
2544 | [[fallthrough]]; | ||||
2545 | case ABIArgInfo::Direct: | ||||
2546 | if (ArgNo == 0 && FI.isChainCall()) | ||||
2547 | Attrs.addAttribute(llvm::Attribute::Nest); | ||||
2548 | else if (AI.getInReg()) | ||||
2549 | Attrs.addAttribute(llvm::Attribute::InReg); | ||||
2550 | Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign())); | ||||
2551 | |||||
2552 | if (canApplyNoFPClass(AI, ParamType, false)) | ||||
2553 | Attrs.addNoFPClassAttr(getNoFPClassTestMask(getLangOpts())); | ||||
2554 | break; | ||||
2555 | case ABIArgInfo::Indirect: { | ||||
2556 | if (AI.getInReg()) | ||||
2557 | Attrs.addAttribute(llvm::Attribute::InReg); | ||||
2558 | |||||
2559 | if (AI.getIndirectByVal()) | ||||
2560 | Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType)); | ||||
2561 | |||||
2562 | auto *Decl = ParamType->getAsRecordDecl(); | ||||
2563 | if (CodeGenOpts.PassByValueIsNoAlias && Decl && | ||||
2564 | Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs) | ||||
2565 | // When calling the function, the pointer passed in will be the only | ||||
2566 | // reference to the underlying object. Mark it accordingly. | ||||
2567 | Attrs.addAttribute(llvm::Attribute::NoAlias); | ||||
2568 | |||||
2569 | // TODO: We could add the byref attribute if not byval, but it would | ||||
2570 | // require updating many testcases. | ||||
2571 | |||||
2572 | CharUnits Align = AI.getIndirectAlign(); | ||||
2573 | |||||
2574 | // In a byval argument, it is important that the required | ||||
2575 | // alignment of the type is honored, as LLVM might be creating a | ||||
2576 | // *new* stack object, and needs to know what alignment to give | ||||
2577 | // it. (Sometimes it can deduce a sensible alignment on its own, | ||||
2578 | // but not if clang decides it must emit a packed struct, or the | ||||
2579 | // user specifies increased alignment requirements.) | ||||
2580 | // | ||||
2581 | // This is different from indirect *not* byval, where the object | ||||
2582 | // exists already, and the align attribute is purely | ||||
2583 | // informative. | ||||
2584 | assert(!Align.isZero()); | ||||
2585 | |||||
2586 | // For now, only add this when we have a byval argument. | ||||
2587 | // TODO: be less lazy about updating test cases. | ||||
2588 | if (AI.getIndirectByVal()) | ||||
2589 | Attrs.addAlignmentAttr(Align.getQuantity()); | ||||
2590 | |||||
2591 | // byval disables readnone and readonly. | ||||
2592 | AddPotentialArgAccess(); | ||||
2593 | break; | ||||
2594 | } | ||||
2595 | case ABIArgInfo::IndirectAliased: { | ||||
2596 | CharUnits Align = AI.getIndirectAlign(); | ||||
2597 | Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType)); | ||||
2598 | Attrs.addAlignmentAttr(Align.getQuantity()); | ||||
2599 | break; | ||||
2600 | } | ||||
2601 | case ABIArgInfo::Ignore: | ||||
2602 | case ABIArgInfo::Expand: | ||||
2603 | case ABIArgInfo::CoerceAndExpand: | ||||
2604 | break; | ||||
2605 | |||||
2606 | case ABIArgInfo::InAlloca: | ||||
2607 | // inalloca disables readnone and readonly. | ||||
2608 | AddPotentialArgAccess(); | ||||
2609 | continue; | ||||
2610 | } | ||||
2611 | |||||
2612 | if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { | ||||
2613 | QualType PTy = RefTy->getPointeeType(); | ||||
2614 | if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) | ||||
2615 | Attrs.addDereferenceableAttr( | ||||
2616 | getMinimumObjectSize(PTy).getQuantity()); | ||||
2617 | if (getTypes().getTargetAddressSpace(PTy) == 0 && | ||||
2618 | !CodeGenOpts.NullPointerIsValid) | ||||
2619 | Attrs.addAttribute(llvm::Attribute::NonNull); | ||||
2620 | if (PTy->isObjectType()) { | ||||
2621 | llvm::Align Alignment = | ||||
2622 | getNaturalPointeeTypeAlignment(ParamType).getAsAlign(); | ||||
2623 | Attrs.addAlignmentAttr(Alignment); | ||||
2624 | } | ||||
2625 | } | ||||
2626 | |||||
2627 | // From OpenCL spec v3.0.10 section 6.3.5 Alignment of Types: | ||||
2628 | // > For arguments to a __kernel function declared to be a pointer to a | ||||
2629 | // > data type, the OpenCL compiler can assume that the pointee is always | ||||
2630 | // > appropriately aligned as required by the data type. | ||||
2631 | if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>() && | ||||
2632 | ParamType->isPointerType()) { | ||||
2633 | QualType PTy = ParamType->getPointeeType(); | ||||
2634 | if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { | ||||
2635 | llvm::Align Alignment = | ||||
2636 | getNaturalPointeeTypeAlignment(ParamType).getAsAlign(); | ||||
2637 | Attrs.addAlignmentAttr(Alignment); | ||||
2638 | } | ||||
2639 | } | ||||
2640 | |||||
2641 | switch (FI.getExtParameterInfo(ArgNo).getABI()) { | ||||
2642 | case ParameterABI::Ordinary: | ||||
2643 | break; | ||||
2644 | |||||
2645 | case ParameterABI::SwiftIndirectResult: { | ||||
2646 | // Add 'sret' if we haven't already used it for something, but | ||||
2647 | // only if the result is void. | ||||
2648 | if (!hasUsedSRet && RetTy->isVoidType()) { | ||||
2649 | Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType)); | ||||
2650 | hasUsedSRet = true; | ||||
2651 | } | ||||
2652 | |||||
2653 | // Add 'noalias' in either case. | ||||
2654 | Attrs.addAttribute(llvm::Attribute::NoAlias); | ||||
2655 | |||||
2656 | // Add 'dereferenceable' and 'alignment'. | ||||
2657 | auto PTy = ParamType->getPointeeType(); | ||||
2658 | if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { | ||||
2659 | auto info = getContext().getTypeInfoInChars(PTy); | ||||
2660 | Attrs.addDereferenceableAttr(info.Width.getQuantity()); | ||||
2661 | Attrs.addAlignmentAttr(info.Align.getAsAlign()); | ||||
2662 | } | ||||
2663 | break; | ||||
2664 | } | ||||
2665 | |||||
2666 | case ParameterABI::SwiftErrorResult: | ||||
2667 | Attrs.addAttribute(llvm::Attribute::SwiftError); | ||||
2668 | break; | ||||
2669 | |||||
2670 | case ParameterABI::SwiftContext: | ||||
2671 | Attrs.addAttribute(llvm::Attribute::SwiftSelf); | ||||
2672 | break; | ||||
2673 | |||||
2674 | case ParameterABI::SwiftAsyncContext: | ||||
2675 | Attrs.addAttribute(llvm::Attribute::SwiftAsync); | ||||
2676 | break; | ||||
2677 | } | ||||
2678 | |||||
2679 | if (FI.getExtParameterInfo(ArgNo).isNoEscape()) | ||||
2680 | Attrs.addAttribute(llvm::Attribute::NoCapture); | ||||
2681 | |||||
2682 | if (Attrs.hasAttributes()) { | ||||
2683 | unsigned FirstIRArg, NumIRArgs; | ||||
2684 | std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); | ||||
2685 | for (unsigned i = 0; i < NumIRArgs; i++) | ||||
2686 | ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes( | ||||
2687 | getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), Attrs)); | ||||
2688 | } | ||||
2689 | } | ||||
2690 | assert(ArgNo == FI.arg_size()); | ||||
2691 | |||||
2692 | AttrList = llvm::AttributeList::get( | ||||
2693 | getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs), | ||||
2694 | llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs); | ||||
2695 | } | ||||
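// A hedged end-to-end sketch for a hypothetical declaration:
//
//   __attribute__((pure)) int parse(const char *s);
//
// The list assembled above would typically correspond to IR like
//
//   declare noundef i32 @parse(ptr noundef) memory(read) nounwind willreturn
//
// though the exact set depends on target defaults and codegen options.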
2696 | |||||
2697 | /// An argument came in as a promoted argument; demote it back to its | ||||
2698 | /// declared type. | ||||
2699 | static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, | ||||
2700 | const VarDecl *var, | ||||
2701 | llvm::Value *value) { | ||||
2702 | llvm::Type *varType = CGF.ConvertType(var->getType()); | ||||
2703 | |||||
2704 | // This can happen with promotions that actually don't change the | ||||
2705 | // underlying type, like the enum promotions. | ||||
2706 | if (value->getType() == varType) return value; | ||||
2707 | |||||
2708 | assert((varType->isIntegerTy() || varType->isFloatingPointTy()) | ||||
2709 | && "unexpected promotion type"); | ||||
2710 | |||||
2711 | if (isa<llvm::IntegerType>(varType)) | ||||
2712 | return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); | ||||
2713 | |||||
2714 | return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); | ||||
2715 | } | ||||
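// For illustration, a K&R-style definition such as
//
//   float scale(x) float x; { return x; }
//
// receives its argument promoted to double under the old C rules; the
// FPCast above emits an fptrunc named "arg.unpromote" back to float.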
2716 | |||||
2717 | /// Returns the attribute (either parameter attribute, or function | ||||
2718 | /// attribute), which declares argument ArgNo to be non-null. | ||||
2719 | static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, | ||||
2720 | QualType ArgType, unsigned ArgNo) { | ||||
2721 | // FIXME: __attribute__((nonnull)) can also be applied to: | ||||
2722 | // - references to pointers, where the pointee is known to be | ||||
2723 | // nonnull (apparently a Clang extension) | ||||
2724 | // - transparent unions containing pointers | ||||
2725 | // In the former case, LLVM IR cannot represent the constraint. In | ||||
2726 | // the latter case, we have no guarantee that the transparent union | ||||
2727 | // is in fact passed as a pointer. | ||||
2728 | if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) | ||||
2729 | return nullptr; | ||||
2730 | // First, check attribute on parameter itself. | ||||
2731 | if (PVD) { | ||||
2732 | if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) | ||||
2733 | return ParmNNAttr; | ||||
2734 | } | ||||
2735 | // Check function attributes. | ||||
2736 | if (!FD) | ||||
2737 | return nullptr; | ||||
2738 | for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) { | ||||
2739 | if (NNAttr->isNonNull(ArgNo)) | ||||
2740 | return NNAttr; | ||||
2741 | } | ||||
2742 | return nullptr; | ||||
2743 | } | ||||
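// Both spellings are recognized here (hypothetical declarations):
//
//   void f(int *p) __attribute__((nonnull(1)));  // function attribute
//   void g(__attribute__((nonnull)) int *p);     // parameter attribute
//
// For f, the specific_attrs loop matches the argument index; for g, the
// check on the ParmVarDecl itself returns first.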
2744 | |||||
2745 | namespace { | ||||
2746 | struct CopyBackSwiftError final : EHScopeStack::Cleanup { | ||||
2747 | Address Temp; | ||||
2748 | Address Arg; | ||||
2749 | CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {} | ||||
2750 | void Emit(CodeGenFunction &CGF, Flags flags) override { | ||||
2751 | llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp); | ||||
2752 | CGF.Builder.CreateStore(errorValue, Arg); | ||||
2753 | } | ||||
2754 | }; | ||||
2755 | } | ||||
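// A rough IR sketch of what this cleanup pairs with (names hypothetical):
//
//   %swifterror.temp = alloca ptr              ; prologue copy-in
//   %in = load ptr, ptr %arg
//   store ptr %in, ptr %swifterror.temp
//   ...                                        ; body uses the temporary
//   %out = load ptr, ptr %swifterror.temp      ; this cleanup's copy-back
//   store ptr %out, ptr %arg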
2756 | |||||
2757 | void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, | ||||
2758 | llvm::Function *Fn, | ||||
2759 | const FunctionArgList &Args) { | ||||
2760 | if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) | ||||
| |||||
2761 | // Naked functions don't have prologues. | ||||
2762 | return; | ||||
2763 | |||||
2764 | // If this is an implicit-return-zero function, go ahead and | ||||
2765 | // initialize the return value. TODO: it might be nice to have | ||||
2766 | // a more general mechanism for this that didn't require synthesized | ||||
2767 | // return statements. | ||||
2768 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) { | ||||
2769 | if (FD->hasImplicitReturnZero()) { | ||||
2770 | QualType RetTy = FD->getReturnType().getUnqualifiedType(); | ||||
2771 | llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy); | ||||
2772 | llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy); | ||||
2773 | Builder.CreateStore(Zero, ReturnValue); | ||||
2774 | } | ||||
2775 | } | ||||
2776 | |||||
2777 | // FIXME: We no longer need the types from FunctionArgList; lift up and | ||||
2778 | // simplify. | ||||
2779 | |||||
2780 | ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI); | ||||
2781 | assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs()); | ||||
2782 | |||||
2783 | // If we're using inalloca, all the memory arguments are GEPs off of the last | ||||
2784 | // parameter, which is a pointer to the complete memory area. | ||||
2785 | Address ArgStruct = Address::invalid(); | ||||
2786 | if (IRFunctionArgs.hasInallocaArg()) { | ||||
2787 | ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()), | ||||
2788 | FI.getArgStruct(), FI.getArgStructAlignment()); | ||||
2789 | |||||
2790 | assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo()); | ||||
2791 | } | ||||
2792 | |||||
2793 | // Name the struct return parameter. | ||||
2794 | if (IRFunctionArgs.hasSRetArg()) { | ||||
2795 | auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo()); | ||||
2796 | AI->setName("agg.result"); | ||||
2797 | AI->addAttr(llvm::Attribute::NoAlias); | ||||
2798 | } | ||||
2799 | |||||
2800 | // Track if we received the parameter as a pointer (indirect, byval, or | ||||
2801 | // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it | ||||
2802 | // into a local alloca for us. | ||||
2803 | SmallVector<ParamValue, 16> ArgVals; | ||||
2804 | ArgVals.reserve(Args.size()); | ||||
2805 | |||||
2806 | // Create a pointer value for every parameter declaration. This usually | ||||
2807 | // entails copying one or more LLVM IR arguments into an alloca. Don't push | ||||
2808 | // any cleanups or do anything that might unwind. We do that separately, so | ||||
2809 | // we can push the cleanups in the correct order for the ABI. | ||||
2810 | assert(FI.arg_size() == Args.size() && | ||||
2811 | "Mismatch between function signature & arguments."); | ||||
2812 | unsigned ArgNo = 0; | ||||
2813 | CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); | ||||
2814 | for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); | ||||
2815 | i != e; ++i, ++info_it, ++ArgNo) { | ||||
2816 | const VarDecl *Arg = *i; | ||||
2817 | const ABIArgInfo &ArgI = info_it->info; | ||||
2818 | |||||
2819 | bool isPromoted = | ||||
2820 | isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted(); | ||||
2821 | // We are converting from ABIArgInfo type to VarDecl type directly, unless | ||||
2822 | // the parameter is promoted. In this case we convert to | ||||
2823 | // CGFunctionInfo::ArgInfo type with subsequent argument demotion. | ||||
2824 | QualType Ty = isPromoted ? info_it->type : Arg->getType(); | ||||
2825 | assert(hasScalarEvaluationKind(Ty) == | ||||
2826 | hasScalarEvaluationKind(Arg->getType())); | ||||
2827 | |||||
2828 | unsigned FirstIRArg, NumIRArgs; | ||||
2829 | std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); | ||||
2830 | |||||
2831 | switch (ArgI.getKind()) { | ||||
2832 | case ABIArgInfo::InAlloca: { | ||||
2833 | assert(NumIRArgs == 0); | ||||
2834 | auto FieldIndex = ArgI.getInAllocaFieldIndex(); | ||||
2835 | Address V = | ||||
2836 | Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName()); | ||||
2837 | if (ArgI.getInAllocaIndirect()) | ||||
2838 | V = Address(Builder.CreateLoad(V), ConvertTypeForMem(Ty), | ||||
2839 | getContext().getTypeAlignInChars(Ty)); | ||||
2840 | ArgVals.push_back(ParamValue::forIndirect(V)); | ||||
2841 | break; | ||||
2842 | } | ||||
2843 | |||||
2844 | case ABIArgInfo::Indirect: | ||||
2845 | case ABIArgInfo::IndirectAliased: { | ||||
2846 | assert(NumIRArgs == 1); | ||||
2847 | Address ParamAddr = Address(Fn->getArg(FirstIRArg), ConvertTypeForMem(Ty), | ||||
2848 | ArgI.getIndirectAlign(), KnownNonNull); | ||||
2849 | |||||
2850 | if (!hasScalarEvaluationKind(Ty)) { | ||||
2851 | // Aggregates and complex variables are accessed by reference. All we | ||||
2852 | // need to do is realign the value, if requested. Also, if the address | ||||
2853 | // may be aliased, copy it to ensure that the parameter variable is | ||||
2854 | // mutable and has a unique address, as C requires. | ||||
2855 | Address V = ParamAddr; | ||||
2856 | if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) { | ||||
2857 | Address AlignedTemp = CreateMemTemp(Ty, "coerce"); | ||||
2858 | |||||
2859 | // Copy from the incoming argument pointer to the temporary with the | ||||
2860 | // appropriate alignment. | ||||
2861 | // | ||||
2862 | // FIXME: We should have a common utility for generating an aggregate | ||||
2863 | // copy. | ||||
2864 | CharUnits Size = getContext().getTypeSizeInChars(Ty); | ||||
2865 | Builder.CreateMemCpy( | ||||
2866 | AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(), | ||||
2867 | ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(), | ||||
2868 | llvm::ConstantInt::get(IntPtrTy, Size.getQuantity())); | ||||
2869 | V = AlignedTemp; | ||||
2870 | } | ||||
2871 | ArgVals.push_back(ParamValue::forIndirect(V)); | ||||
2872 | } else { | ||||
2873 | // Load scalar value from indirect argument. | ||||
2874 | llvm::Value *V = | ||||
2875 | EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc()); | ||||
2876 | |||||
2877 | if (isPromoted) | ||||
2878 | V = emitArgumentDemotion(*this, Arg, V); | ||||
2879 | ArgVals.push_back(ParamValue::forDirect(V)); | ||||
2880 | } | ||||
2881 | break; | ||||
2882 | } | ||||
2883 | |||||
2884 | case ABIArgInfo::Extend: | ||||
2885 | case ABIArgInfo::Direct: { | ||||
2886 | auto AI = Fn->getArg(FirstIRArg); | ||||
2887 | llvm::Type *LTy = ConvertType(Arg->getType()); | ||||
2888 | |||||
2889 | // Prepare parameter attributes. So far, only attributes for pointer | ||||
2890 | // parameters are prepared. See | ||||
2891 | // http://llvm.org/docs/LangRef.html#paramattrs. | ||||
2892 | if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() && | ||||
2893 | ArgI.getCoerceToType()->isPointerTy()) { | ||||
2894 | assert(NumIRArgs == 1); | ||||
2895 | |||||
2896 | if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) { | ||||
2897 | // Set `nonnull` attribute if any. | ||||
2898 | if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(), | ||||
2899 | PVD->getFunctionScopeIndex()) && | ||||
2900 | !CGM.getCodeGenOpts().NullPointerIsValid) | ||||
2901 | AI->addAttr(llvm::Attribute::NonNull); | ||||
2902 | |||||
2903 | QualType OTy = PVD->getOriginalType(); | ||||
2904 | if (const auto *ArrTy = | ||||
2905 | getContext().getAsConstantArrayType(OTy)) { | ||||
2906 | // A C99 array parameter declaration with the static keyword also | ||||
2907 | // indicates dereferenceability, and if the size is constant we can | ||||
2908 | // use the dereferenceable attribute (which requires the size in | ||||
2909 | // bytes). | ||||
2910 | if (ArrTy->getSizeModifier() == ArrayType::Static) { | ||||
2911 | QualType ETy = ArrTy->getElementType(); | ||||
2912 | llvm::Align Alignment = | ||||
2913 | CGM.getNaturalTypeAlignment(ETy).getAsAlign(); | ||||
2914 | AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment)); | ||||
2915 | uint64_t ArrSize = ArrTy->getSize().getZExtValue(); | ||||
2916 | if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && | ||||
2917 | ArrSize) { | ||||
2918 | llvm::AttrBuilder Attrs(getLLVMContext()); | ||||
2919 | Attrs.addDereferenceableAttr( | ||||
2920 | getContext().getTypeSizeInChars(ETy).getQuantity() * | ||||
2921 | ArrSize); | ||||
2922 | AI->addAttrs(Attrs); | ||||
2923 | } else if (getContext().getTargetInfo().getNullPointerValue( | ||||
2924 | ETy.getAddressSpace()) == 0 && | ||||
2925 | !CGM.getCodeGenOpts().NullPointerIsValid) { | ||||
2926 | AI->addAttr(llvm::Attribute::NonNull); | ||||
2927 | } | ||||
2928 | } | ||||
2929 | } else if (const auto *ArrTy = | ||||
2930 | getContext().getAsVariableArrayType(OTy)) { | ||||
2931 | // For C99 VLAs with the static keyword, we don't know the size so | ||||
2932 | // we can't use the dereferenceable attribute, but in addrspace(0) | ||||
2933 | // we know that it must be nonnull. | ||||
2934 | if (ArrTy->getSizeModifier() == VariableArrayType::Static) { | ||||
2935 | QualType ETy = ArrTy->getElementType(); | ||||
2936 | llvm::Align Alignment = | ||||
2937 | CGM.getNaturalTypeAlignment(ETy).getAsAlign(); | ||||
2938 | AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment)); | ||||
2939 | if (!getTypes().getTargetAddressSpace(ETy) && | ||||
2940 | !CGM.getCodeGenOpts().NullPointerIsValid) | ||||
2941 | AI->addAttr(llvm::Attribute::NonNull); | ||||
2942 | } | ||||
2943 | } | ||||
2944 | |||||
2945 | // Set `align` attribute if any. | ||||
2946 | const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); | ||||
2947 | if (!AVAttr) | ||||
2948 | if (const auto *TOTy = OTy->getAs<TypedefType>()) | ||||
2949 | AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); | ||||
2950 | if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) { | ||||
2951 | // If alignment-assumption sanitizer is enabled, we do *not* add | ||||
2952 | // alignment attribute here, but emit normal alignment assumption, | ||||
2953 | // so the UBSAN check could function. | ||||
2954 | llvm::ConstantInt *AlignmentCI = | ||||
2955 | cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment())); | ||||
2956 | uint64_t AlignmentInt = | ||||
2957 | AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment); | ||||
2958 | if (AI->getParamAlign().valueOrOne() < AlignmentInt) { | ||||
2959 | AI->removeAttr(llvm::Attribute::AttrKind::Alignment); | ||||
2960 | AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr( | ||||
2961 | llvm::Align(AlignmentInt))); | ||||
2962 | } | ||||
2963 | } | ||||
2964 | } | ||||
2965 | |||||
2966 | // Set 'noalias' if an argument type has the `restrict` qualifier. | ||||
2967 | if (Arg->getType().isRestrictQualified()) | ||||
2968 | AI->addAttr(llvm::Attribute::NoAlias); | ||||
2969 | } | ||||
2970 | |||||
2971 | // Prepare the argument value. If we have the trivial case, handle it | ||||
2972 | // with no muss and fuss. | ||||
2973 | if (!isa<llvm::StructType>(ArgI.getCoerceToType()) && | ||||
2974 | ArgI.getCoerceToType() == ConvertType(Ty) && | ||||
2975 | ArgI.getDirectOffset() == 0) { | ||||
2976 | assert(NumIRArgs == 1); | ||||
2977 | |||||
2978 | // LLVM expects swifterror parameters to be used in very restricted | ||||
2979 | // ways. Copy the value into a less-restricted temporary. | ||||
2980 | llvm::Value *V = AI; | ||||
2981 | if (FI.getExtParameterInfo(ArgNo).getABI() | ||||
2982 | == ParameterABI::SwiftErrorResult) { | ||||
2983 | QualType pointeeTy = Ty->getPointeeType(); | ||||
2984 | assert(pointeeTy->isPointerType()); | ||||
2985 | Address temp = | ||||
2986 | CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); | ||||
2987 | Address arg(V, ConvertTypeForMem(pointeeTy), | ||||
2988 | getContext().getTypeAlignInChars(pointeeTy)); | ||||
2989 | llvm::Value *incomingErrorValue = Builder.CreateLoad(arg); | ||||
2990 | Builder.CreateStore(incomingErrorValue, temp); | ||||
2991 | V = temp.getPointer(); | ||||
2992 | |||||
2993 | // Push a cleanup to copy the value back at the end of the function. | ||||
2994 | // The convention does not guarantee that the value will be written | ||||
2995 | // back if the function exits with an unwind exception. | ||||
2996 | EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg); | ||||
2997 | } | ||||
2998 | |||||
2999 | // Ensure the argument is the correct type. | ||||
3000 | if (V->getType() != ArgI.getCoerceToType()) | ||||
3001 | V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); | ||||
3002 | |||||
3003 | if (isPromoted) | ||||
3004 | V = emitArgumentDemotion(*this, Arg, V); | ||||
3005 | |||||
3006 | // Because of merging of function types from multiple decls it is | ||||
3007 | // possible for the type of an argument to not match the corresponding | ||||
3008 | // type in the function type. Since we are codegening the callee | ||||
3009 | // in here, add a cast to the argument type. | ||||
3010 | llvm::Type *LTy = ConvertType(Arg->getType()); | ||||
3011 | if (V->getType() != LTy) | ||||
3012 | V = Builder.CreateBitCast(V, LTy); | ||||
3013 | |||||
3014 | ArgVals.push_back(ParamValue::forDirect(V)); | ||||
3015 | break; | ||||
3016 | } | ||||
3017 | |||||
3018 | // VLST arguments are coerced to VLATs at the function boundary for | ||||
3019 | // ABI consistency. If this is a VLST that was coerced to | ||||
3020 | // a VLAT at the function boundary and the types match up, use | ||||
3021 | // llvm.vector.extract to convert back to the original VLST. | ||||
3022 | if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) { | ||||
3023 | llvm::Value *Coerced = Fn->getArg(FirstIRArg); | ||||
3024 | if (auto *VecTyFrom = | ||||
3025 | dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) { | ||||
3026 | // If we are casting a scalable 16 x i1 predicate vector to a fixed i8 | ||||
3027 | // vector, bitcast the source and use a vector extract. | ||||
3028 | auto PredType = | ||||
3029 | llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); | ||||
3030 | if (VecTyFrom == PredType && | ||||
3031 | VecTyTo->getElementType() == Builder.getInt8Ty()) { | ||||
3032 | VecTyFrom = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2); | ||||
3033 | Coerced = Builder.CreateBitCast(Coerced, VecTyFrom); | ||||
3034 | } | ||||
3035 | if (VecTyFrom->getElementType() == VecTyTo->getElementType()) { | ||||
3036 | llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty); | ||||
3037 | |||||
3038 | assert(NumIRArgs == 1); | ||||
3039 | Coerced->setName(Arg->getName() + ".coerce"); | ||||
3040 | ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector( | ||||
3041 | VecTyTo, Coerced, Zero, "castFixedSve"))); | ||||
3042 | break; | ||||
3043 | } | ||||
3044 | } | ||||
3045 | } | ||||
3046 | |||||
3047 | Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg), | ||||
3048 | Arg->getName()); | ||||
3049 | |||||
3050 | // Pointer to store into. | ||||
3051 | Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI); | ||||
3052 | |||||
3053 | // Fast-isel and the optimizer generally like scalar values better than | ||||
3054 | // FCAs, so we flatten them if this is safe to do for this argument. | ||||
3055 | llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); | ||||
3056 | if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && | ||||
3057 | STy->getNumElements() > 1) { | ||||
3058 | uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); | ||||
3059 | llvm::Type *DstTy = Ptr.getElementType(); | ||||
3060 | uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); | ||||
3061 | |||||
3062 | Address AddrToStoreInto = Address::invalid(); | ||||
3063 | if (SrcSize <= DstSize) { | ||||
3064 | AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy); | ||||
3065 | } else { | ||||
3066 | AddrToStoreInto = | ||||
3067 | CreateTempAlloca(STy, Alloca.getAlignment(), "coerce"); | ||||
3068 | } | ||||
3069 | |||||
3070 | assert(STy->getNumElements() == NumIRArgs); | ||||
3071 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { | ||||
3072 | auto AI = Fn->getArg(FirstIRArg + i); | ||||
3073 | AI->setName(Arg->getName() + ".coerce" + Twine(i)); | ||||
3074 | Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i); | ||||
3075 | Builder.CreateStore(AI, EltPtr); | ||||
3076 | } | ||||
3077 | |||||
3078 | if (SrcSize > DstSize) { | ||||
3079 | Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize); | ||||
3080 | } | ||||
3081 | |||||
3082 | } else { | ||||
3083 | // Simple case, just do a coerced store of the argument into the alloca. | ||||
3084 | assert(NumIRArgs == 1); | ||||
3085 | auto AI = Fn->getArg(FirstIRArg); | ||||
3086 | AI->setName(Arg->getName() + ".coerce"); | ||||
3087 | CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this); | ||||
3088 | } | ||||
3089 | |||||
3090 | // Match to what EmitParmDecl is expecting for this type. | ||||
3091 | if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { | ||||
3092 | llvm::Value *V = | ||||
3093 | EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc()); | ||||
3094 | if (isPromoted) | ||||
3095 | V = emitArgumentDemotion(*this, Arg, V); | ||||
3096 | ArgVals.push_back(ParamValue::forDirect(V)); | ||||
3097 | } else { | ||||
3098 | ArgVals.push_back(ParamValue::forIndirect(Alloca)); | ||||
3099 | } | ||||
3100 | break; | ||||
3101 | } | ||||
3102 | |||||
3103 | case ABIArgInfo::CoerceAndExpand: { | ||||
3104 | // Reconstruct into a temporary. | ||||
3105 | Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); | ||||
3106 | ArgVals.push_back(ParamValue::forIndirect(alloca)); | ||||
3107 | |||||
3108 | auto coercionType = ArgI.getCoerceAndExpandType(); | ||||
3109 | alloca = Builder.CreateElementBitCast(alloca, coercionType); | ||||
3110 | |||||
3111 | unsigned argIndex = FirstIRArg; | ||||
3112 | for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { | ||||
3113 | llvm::Type *eltType = coercionType->getElementType(i); | ||||
3114 | if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) | ||||
3115 | continue; | ||||
3116 | |||||
3117 | auto eltAddr = Builder.CreateStructGEP(alloca, i); | ||||
3118 | auto elt = Fn->getArg(argIndex++); | ||||
3119 | Builder.CreateStore(elt, eltAddr); | ||||
3120 | } | ||||
3121 | assert(argIndex == FirstIRArg + NumIRArgs); | ||||
3122 | break; | ||||
3123 | } | ||||
3124 | |||||
3125 | case ABIArgInfo::Expand: { | ||||
3126 | // If this structure was expanded into multiple arguments then | ||||
3127 | // we need to create a temporary and reconstruct it from the | ||||
3128 | // arguments. | ||||
3129 | Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); | ||||
3130 | LValue LV = MakeAddrLValue(Alloca, Ty); | ||||
3131 | ArgVals.push_back(ParamValue::forIndirect(Alloca)); | ||||
3132 | |||||
3133 | auto FnArgIter = Fn->arg_begin() + FirstIRArg; | ||||
3134 | ExpandTypeFromArgs(Ty, LV, FnArgIter); | ||||
3135 | assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs); | ||||
3136 | for (unsigned i = 0, e = NumIRArgs; i != e; ++i) { | ||||
3137 | auto AI = Fn->getArg(FirstIRArg + i); | ||||
3138 | AI->setName(Arg->getName() + "." + Twine(i)); | ||||
3139 | } | ||||
3140 | break; | ||||
3141 | } | ||||
3142 | |||||
3143 | case ABIArgInfo::Ignore: | ||||
3144 | assert(NumIRArgs == 0); | ||||
3145 | // Initialize the local variable appropriately. | ||||
3146 | if (!hasScalarEvaluationKind(Ty)) { | ||||
3147 | ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty))); | ||||
3148 | } else { | ||||
3149 | llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType())); | ||||
3150 | ArgVals.push_back(ParamValue::forDirect(U)); | ||||
3151 | } | ||||
3152 | break; | ||||
3153 | } | ||||
3154 | } | ||||
3155 | |||||
3156 | if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { | ||||
3157 | for (int I = Args.size() - 1; I >= 0; --I) | ||||
3158 | EmitParmDecl(*Args[I], ArgVals[I], I + 1); | ||||
3159 | } else { | ||||
3160 | for (unsigned I = 0, E = Args.size(); I != E; ++I) | ||||
3161 | EmitParmDecl(*Args[I], ArgVals[I], I + 1); | ||||
3162 | } | ||||
3163 | } | ||||
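// A hedged sketch of a typical (unoptimized) prologue produced above:
//
//   int square(int x) { return x * x; }
//
//   define i32 @square(i32 noundef %x) {
//   entry:
//     %x.addr = alloca i32
//     store i32 %x, ptr %x.addr   ; copy-in so EmitParmDecl sees an lvalue
//     ...
//
// Exact names, attributes, and address spaces vary by target and options.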
3164 | |||||
static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // The result must be the last instruction in the current block.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return nullptr;
  if (&BB->back() != result) return nullptr;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction *, 4> InstsToKill;

  // Look for:
  //   %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return nullptr;

    InstsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return nullptr;

  bool doRetainAutorelease;

  if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledOperand() ==
             CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // If we emitted an assembly marker for this call (and the
    // ARCEntrypoints field should have been set if so), go looking
    // for that call.  If we can't find it, we can't do this
    // optimization.  But it should always be the immediately previous
    // instruction, unless we needed bitcasts around the call.
    if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
      llvm::Instruction *prev = call->getPrevNode();
      assert(prev);
      if (isa<llvm::BitCastInst>(prev)) {
        prev = prev->getPrevNode();
        assert(prev);
      }
      assert(isa<llvm::CallInst>(prev));
      assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
             CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
      InstsToKill.push_back(prev);
    }
  } else {
    return nullptr;
  }

  result = call->getArgOperand(0);
  InstsToKill.push_back(call);

  // Keep killing bitcasts, for sanity.  Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    InstsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (auto *I : InstsToKill)
    I->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}
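
// Illustrative sketch (assumed IR shape, not from the original source): when
// the fusion above fires on a plain retain feeding the return, a tail like
//   %retained = call i8* @objc_retain(i8* %val)
//   ret i8* %retained
// is rewritten to use the combined runtime entry point, roughly
//   %fused = call i8* @objc_retainAutoreleaseReturnValue(i8* %val)
//   ret i8* %fused
// For the retainAutoreleasedReturnValue case, the pending autorelease and the
// matched retain simply cancel, and %val is returned unchanged.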

/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
  const ObjCMethodDecl *method =
    dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return nullptr;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return nullptr;

  // Look for a retain call.
  llvm::CallInst *retainCall =
    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall || retainCall->getCalledOperand() !=
                         CGF.CGM.getObjCEntrypoints().objc_retain)
    return nullptr;

  // Look for an ordinary load of 'self'.
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
    return nullptr;

  // Okay!  Burn it all down.  This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}

/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // If we're returning 'self', kill the initial retain.  This is a
  // heuristic attempt to "encourage correctness" in the really unfortunate
  // case where we have a return of self during a dealloc and we desperately
  // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}
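
// Motivating sketch (assumed ObjC source, for illustration):
//   - (id)currentObject {
//     return self;   // 'self' is immutable here
//   }
// The +1 retain emitted for the returned 'self' is removed by
// tryRemoveRetainOfSelf(); per the comment above, the case that really
// matters is a return of self during -dealloc, where autoreleasing the
// dying object must be avoided.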

/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // Check whether a User is a store whose pointer operand is the
  // ReturnValue.  We are looking for stores to the ReturnValue, not for
  // stores of the ReturnValue to some other location.
  auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
    auto *SI = dyn_cast<llvm::StoreInst>(U);
    if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer() ||
        SI->getValueOperand()->getType() != CGF.ReturnValue.getElementType())
      return nullptr;
    // These aren't actually possible for non-coerced returns, and we
    // only care about non-coerced returns on this code path.
    assert(!SI->isAtomic() && !SI->isVolatile());
    return SI;
  };
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP.  Sometimes this can
  // happen with how we generate implicit-returns; it can also happen
  // with noreturn cleanups.
  if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return nullptr;

    // Look at directly preceding instruction, skipping bitcasts and lifetime
    // markers.
    for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) {
      if (isa<llvm::BitCastInst>(&I))
        continue;
      if (auto *II = dyn_cast<llvm::IntrinsicInst>(&I))
        if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
          continue;

      return GetStoreIfValid(&I);
    }
    return nullptr;
  }

  llvm::StoreInst *store =
      GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
  if (!store) return nullptr;

  // Now do a quick-and-dirty dominance check: just walk up the
  // single-predecessors chain from the current insertion point.
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!(IP = IP->getSinglePredecessor()))
      return nullptr;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}
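
// Illustrative sketch (assumed IR, not from the original source): the
// heuristic above matches the common -O0 shape
//   entry:
//     %retval = alloca i32
//     ...
//     store i32 %x, i32* %retval     ; the store we hope to find
//     br label %return
//   return:
//     ; insertion point: walking single predecessors reaches 'entry',
//     ; so the store dominates the return and the load can be elided
// Any block along the walk with multiple predecessors makes the check fail
// conservatively, and the caller falls back to an ordinary load.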

// Helper functions for EmitCMSEClearRecord

// Set the bits corresponding to a field having width `BitWidth` and located at
// offset `BitOffset` (from the least significant bit) within a storage unit of
// `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
// Use little-endian layout, i.e. `Bits[0]` is the LSB.
static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset,
                        int BitWidth, int CharWidth) {
  assert(CharWidth <= 64);
  assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);

  int Pos = 0;
  if (BitOffset >= CharWidth) {
    Pos += BitOffset / CharWidth;
    BitOffset = BitOffset % CharWidth;
  }

  const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
  if (BitOffset + BitWidth >= CharWidth) {
    Bits[Pos++] |= (Used << BitOffset) & Used;
    BitWidth -= CharWidth - BitOffset;
    BitOffset = 0;
  }

  while (BitWidth >= CharWidth) {
    Bits[Pos++] = Used;
    BitWidth -= CharWidth;
  }

  if (BitWidth > 0)
    Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
}
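
// Worked example (an illustration, not part of the original source), assuming
// CharWidth == 8, BitOffset == 10, BitWidth == 13:
//   - Pos advances to 1 and BitOffset is reduced to 2;
//   - Bits[1] |= 0xFC  (the 6 high bits of byte 1), leaving BitWidth == 7;
//   - Bits[2] |= 0x7F  (the 7 low bits of byte 2).
// Altogether 6 + 7 = 13 bits are set, starting at absolute bit 10.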

// Set the bits corresponding to a field having width `BitWidth` and located at
// offset `BitOffset` (from the least significant bit) within a storage unit of
// `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of
// `Bits` corresponds to one target byte. Use target endian layout.
static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset,
                        int StorageSize, int BitOffset, int BitWidth,
                        int CharWidth, bool BigEndian) {

  SmallVector<uint64_t, 8> TmpBits(StorageSize);
  setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);

  if (BigEndian)
    std::reverse(TmpBits.begin(), TmpBits.end());

  for (uint64_t V : TmpBits)
    Bits[StorageOffset++] |= V;
}

static void setUsedBits(CodeGenModule &, QualType, int,
                        SmallVectorImpl<uint64_t> &);

// Set the bits in `Bits`, which correspond to the value representations of
// the actual members of the record type `RTy`. Note that this function does
// not handle base classes, virtual tables, etc., since they cannot appear in
// CMSE function arguments or return values. The bit mask corresponds to the
// target memory layout, i.e. it's endian dependent.
static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
                        SmallVectorImpl<uint64_t> &Bits) {
  ASTContext &Context = CGM.getContext();
  int CharWidth = Context.getCharWidth();
  const RecordDecl *RD = RTy->getDecl()->getDefinition();
  const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
  const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);

  int Idx = 0;
  for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
    const FieldDecl *F = *I;

    if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) ||
        F->getType()->isIncompleteArrayType())
      continue;

    if (F->isBitField()) {
      const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F);
      setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(),
                  BFI.StorageSize / CharWidth, BFI.Offset,
                  BFI.Size, CharWidth,
                  CGM.getDataLayout().isBigEndian());
      continue;
    }

    setUsedBits(CGM, F->getType(),
                Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits);
  }
}

// Set the bits in `Bits`, which correspond to the value representations of
// the elements of an array type `ATy`.
static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
                        int Offset, SmallVectorImpl<uint64_t> &Bits) {
  const ASTContext &Context = CGM.getContext();

  QualType ETy = Context.getBaseElementType(ATy);
  int Size = Context.getTypeSizeInChars(ETy).getQuantity();
  SmallVector<uint64_t, 4> TmpBits(Size);
  setUsedBits(CGM, ETy, 0, TmpBits);

  for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
    auto Src = TmpBits.begin();
    auto Dst = Bits.begin() + Offset + I * Size;
    for (int J = 0; J < Size; ++J)
      *Dst++ |= *Src++;
  }
}
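
// Illustration (assumed layout, not from the original source): for a member
//   struct { char c; /* 1 padding byte */ short s; } a[2];
// the element mask {0xFF, 0x00, 0xFF, 0xFF} is computed once for the base
// element and then OR-ed into `Bits` at offsets 0 and 4, so the padding byte
// of every element stays zero in the mask.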

// Set the bits in `Bits`, which correspond to the value representations of
// the type `QTy`.
static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
                        SmallVectorImpl<uint64_t> &Bits) {
  if (const auto *RTy = QTy->getAs<RecordType>())
    return setUsedBits(CGM, RTy, Offset, Bits);

  ASTContext &Context = CGM.getContext();
  if (const auto *ATy = Context.getAsConstantArrayType(QTy))
    return setUsedBits(CGM, ATy, Offset, Bits);

  int Size = Context.getTypeSizeInChars(QTy).getQuantity();
  if (Size <= 0)
    return;

  std::fill_n(Bits.begin() + Offset, Size,
              (uint64_t(1) << Context.getCharWidth()) - 1);
}

static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
                                   int Pos, int Size, int CharWidth,
                                   bool BigEndian) {
  assert(Size > 0);
  uint64_t Mask = 0;
  if (BigEndian) {
    for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
         ++P)
      Mask = (Mask << CharWidth) | *P;
  } else {
    auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
    do
      Mask = (Mask << CharWidth) | *--P;
    while (P != End);
  }
  return Mask;
}
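
// Worked example (illustration only): with CharWidth == 8 and the two byte
// masks Bits == {0x01, 0x02} at Pos == 0, Size == 2:
//   - big-endian:    Mask == 0x0102 (Bits[0] lands in the high byte);
//   - little-endian: Mask == 0x0201 (Bits[0] lands in the low byte).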

// Emit code to clear the bits in a record that aren't part of any
// user-declared member, when the record is returned from a function.
llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
                                                  llvm::IntegerType *ITy,
                                                  QualType QTy) {
  assert(Src->getType() == ITy);
  assert(ITy->getScalarSizeInBits() <= 64);

  const llvm::DataLayout &DataLayout = CGM.getDataLayout();
  int Size = DataLayout.getTypeStoreSize(ITy);
  SmallVector<uint64_t, 4> Bits(Size);
  setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);

  int CharWidth = CGM.getContext().getCharWidth();
  uint64_t Mask =
      buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());

  return Builder.CreateAnd(Src, Mask, "cmse.clear");
}
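
// Sketch of the effect (assumed source, layout and ABI, for illustration):
// for a CMSE entry function on little-endian ARM returning
//   struct S { char a; /* 1 padding byte */ short b; };  // returned as i32
// the padding byte must not leak secure-state data, so the value is masked
// with 0xFFFF00FF before crossing back to the non-secure state, roughly
//   %cmse.clear = and i32 %raw, 4294902015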

// Emit code to clear the bits in a record that aren't part of any
// user-declared member, when the record is passed as a function argument.
llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
                                                  llvm::ArrayType *ATy,
                                                  QualType QTy) {
  const llvm::DataLayout &DataLayout = CGM.getDataLayout();
  int Size = DataLayout.getTypeStoreSize(ATy);
  SmallVector<uint64_t, 16> Bits(Size);
  setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);

  // Clear each element of the LLVM array.
  int CharWidth = CGM.getContext().getCharWidth();
  int CharsPerElt =
      ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
  int MaskIndex = 0;
  llvm::Value *R = llvm::PoisonValue::get(ATy);
  for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
    uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
                                       DataLayout.isBigEndian());
    MaskIndex += CharsPerElt;
    llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
    llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
    R = Builder.CreateInsertValue(R, T1, I);
  }

  return R;
}
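
// Sketch of the array case (assumed layout and ABI, for illustration): a
// 6-byte struct { short a, b, c; } passed as [2 x i32] on little-endian ARM
// carries two bytes of tail padding in the second element, so the loop above
// emits roughly
//   %t0 = extractvalue [2 x i32] %arg, 0
//   %c0 = and i32 %t0, -1          ; all bytes used
//   %t1 = extractvalue [2 x i32] %arg, 1
//   %c1 = and i32 %t1, 65535       ; clear the two padding bytes
// and rebuilds the argument with insertvalue.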

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         bool EmitRetDbgLoc,
                                         SourceLocation EndLoc) {
  if (FI.isNoReturn()) {
    // Noreturn functions don't return.
    EmitUnreachable(EndLoc);
    return;
  }

  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
    // Naked functions don't have epilogues.
    Builder.CreateUnreachable();
    return;
  }

  // Functions with no result always return void.
  if (!ReturnValue.isValid()) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = nullptr;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::InAlloca:
    // Aggregates get evaluated directly into the destination.  Sometimes we
    // need to return the sret value in a register, though.
    assert(hasAggregateEvaluationKind(RetTy));
    if (RetAI.getInAllocaSRet()) {
      llvm::Function::arg_iterator EI = CurFn->arg_end();
      --EI;
      llvm::Value *ArgStruct = &*EI;
      llvm::Value *SRet = Builder.CreateStructGEP(
          FI.getArgStruct(), ArgStruct, RetAI.getInAllocaFieldIndex());
      llvm::Type *Ty =
          cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
      RV = Builder.CreateAlignedLoad(Ty, SRet, getPointerAlign(), "sret");
    }
    break;

  case ABIArgInfo::Indirect: {
    auto AI = CurFn->arg_begin();
    if (RetAI.isSRetAfterThis())
      ++AI;
    switch (getEvaluationKind(RetTy)) {
    case TEK_Complex: {
      ComplexPairTy RT =
          EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
      EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
                         /*isInit*/ true);
      break;
    }
    case TEK_Aggregate:
      // Do nothing; aggregates get evaluated directly into the destination.
      break;
    case TEK_Scalar: {
      LValueBaseInfo BaseInfo;
      TBAAAccessInfo TBAAInfo;
      CharUnits Alignment =
          CGM.getNaturalTypeAlignment(RetTy, &BaseInfo, &TBAAInfo);
      Address ArgAddr(&*AI, ConvertType(RetTy), Alignment);
      LValue ArgVal =
          LValue::MakeAddr(ArgAddr, RetTy, getContext(), BaseInfo, TBAAInfo);
      EmitStoreOfScalar(
          Builder.CreateLoad(ReturnValue), ArgVal, /*isInit*/ true);
      break;
    }
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp always will have pointer-to-return-type
      // type, just do a load.

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI =
              findDominatingStoreToReturnValue(*this)) {
        // Reuse the debug location from the store unless there is
        // cleanup code to be emitted between the store and return
        // instruction.
        if (EmitRetDbgLoc && !AutoreleaseResult)
          RetDbgLoc = SI->getDebugLoc();
        // Get the stored value and nuke the now-dead store.
        RV = SI->getValueOperand();
        SI->eraseFromParent();

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      // If the value is offset in memory, apply the offset now.
      Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
#ifndef NDEBUG
      // Type::isObjCRetainableType has to be called on a QualType that hasn't
      // been stripped of the typedefs, so we cannot use RetTy here.  Get the
      // original return type of the FunctionDecl, ObjCMethodDecl, or
      // BlockDecl from CurCodeDecl or BlockInfo.
      QualType RT;

      if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
        RT = FD->getReturnType();
      else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
        RT = MD->getReturnType();
      else if (isa<BlockDecl>(CurCodeDecl))
        RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
      else
        llvm_unreachable("Unexpected function/method type");

      assert(getLangOpts().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RT->isObjCRetainableType());
#endif
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::CoerceAndExpand: {
    auto coercionType = RetAI.getCoerceAndExpandType();

    // Load all of the coerced elements out into results.
    llvm::SmallVector<llvm::Value*, 4> results;
    Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
    for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
      auto coercedEltType = coercionType->getElementType(i);
      if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
        continue;

      auto eltAddr = Builder.CreateStructGEP(addr, i);
      auto elt = Builder.CreateLoad(eltAddr);
      results.push_back(elt);
    }

    // If we have one result, it's the single direct result type.
    if (results.size() == 1) {
      RV = results[0];

    // Otherwise, we need to make a first-class aggregate.
    } else {
      // Construct a return type that lacks padding elements.
      llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();

      RV = llvm::PoisonValue::get(returnType);
      for (unsigned i = 0, e = results.size(); i != e; ++i) {
        RV = Builder.CreateInsertValue(RV, results[i], i);
      }
    }
    break;
  }
  case ABIArgInfo::Expand:
  case ABIArgInfo::IndirectAliased:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret;
  if (RV) {
    if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
      // For certain return types, clear padding bits, as they may reveal
      // sensitive information.
      // Small struct/union types are passed as integers.
      auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
      if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType()))
        RV = EmitCMSEClearRecord(RV, ITy, RetTy);
    }
    EmitReturnValueCheck(RV);
    Ret = Builder.CreateRet(RV);
  } else {
    Ret = Builder.CreateRetVoid();
  }

  if (RetDbgLoc)
    Ret->setDebugLoc(std::move(RetDbgLoc));
}

void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
  // A current decl may not be available when emitting vtable thunks.
  if (!CurCodeDecl)
    return;

  // If the return block isn't reachable, neither is this check, so don't emit
  // it.
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty())
    return;

  ReturnsNonNullAttr *RetNNAttr = nullptr;
  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
    RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();

  if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
    return;

  // Prefer the returns_nonnull attribute if it's present.
  SourceLocation AttrLoc;
  SanitizerMask CheckKind;
  SanitizerHandler Handler;
  if (RetNNAttr) {
    assert(!requiresReturnValueNullabilityCheck() &&
           "Cannot check nullability and the nonnull attribute");
    AttrLoc = RetNNAttr->getLocation();
    CheckKind = SanitizerKind::ReturnsNonnullAttribute;
    Handler = SanitizerHandler::NonnullReturn;
  } else {
    if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
      if (auto *TSI = DD->getTypeSourceInfo())
        if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>())
          AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
    CheckKind = SanitizerKind::NullabilityReturn;
    Handler = SanitizerHandler::NullabilityReturn;
  }

  SanitizerScope SanScope(this);

  // Make sure the "return" source location is valid.  If we're checking a
  // nullability annotation, make sure the preconditions for the check are met.
  llvm::BasicBlock *Check = createBasicBlock("nullcheck");
  llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
  llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
  llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
  if (requiresReturnValueNullabilityCheck())
    CanNullCheck =
        Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
  Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
  EmitBlock(Check);

  // Now do the null check.
  llvm::Value *Cond = Builder.CreateIsNotNull(RV);
  llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
  llvm::Value *DynamicData[] = {SLocPtr};
  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);

  EmitBlock(NoCheck);

#ifndef NDEBUG
  // The return location should not be used after the check has been emitted.
  ReturnLocation = Address::invalid();
#endif
}

static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
}

static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
                                          QualType Ty) {
  // FIXME: Generate IR in one pass, rather than going back and fixing up these
  // placeholders.
  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
  llvm::Type *IRPtrTy = IRTy->getPointerTo();
  llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy->getPointerTo());

  // FIXME: When we generate this IR in one pass, we shouldn't need
  // this win32-specific alignment hack.
  CharUnits Align = CharUnits::fromQuantity(4);
  Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);

  return AggValueSlot::forAddr(Address(Placeholder, IRTy, Align),
                               Ty.getQualifiers(),
                               AggValueSlot::IsNotDestructed,
                               AggValueSlot::DoesNotNeedGCBarriers,
                               AggValueSlot::IsNotAliased,
                               AggValueSlot::DoesNotOverlap);
}

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param,
                                          SourceLocation loc) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  Address local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  if (isInAllocaArgument(CGM.getCXXABI(), type)) {
    CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
  }

  // GetAddrOfLocalVar returns a pointer-to-pointer for references,
  // but the argument needs to be the original pointer.
  if (type->isReferenceType()) {
    args.add(RValue::get(Builder.CreateLoad(local)), type);

  // In ARC, move out of consumed arguments so that the release cleanup
  // entered by StartFunction doesn't cause an over-release.  This isn't
  // optimal -O0 code generation, but it should get cleaned up when
  // optimization is enabled.  This also assumes that delegate calls are
  // performed exactly once for a set of arguments, but that should be safe.
  } else if (getLangOpts().ObjCAutoRefCount &&
             param->hasAttr<NSConsumedAttr>() &&
             type->isObjCRetainableType()) {
    llvm::Value *ptr = Builder.CreateLoad(local);
    auto null =
        llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
    Builder.CreateStore(null, local);
    args.add(RValue::get(ptr), type);

  // For the most part, we just need to load the alloca, except that
  // aggregate r-values are actually pointers to temporaries.
  } else {
    args.add(convertTempToRValue(local, type, loc), type);
  }

  // Deactivate the cleanup for the callee-destructed param that was pushed.
  if (type->isRecordType() && !CurFuncIsThunk &&
      type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
      param->needsDestruction(getContext())) {
    EHScopeStack::stable_iterator cleanup =
        CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
    assert(cleanup.isValid() &&
           "cleanup for callee-destructed param not recorded");
    // This unreachable is a temporary marker which will be removed later.
    llvm::Instruction *isActive = Builder.CreateUnreachable();
    args.addArgCleanupDeactivation(cleanup, isActive);
  }
}

static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  const LValue &srcLV = writeback.Source;
  Address srcAddr = srcLV.getAddress(CGF);
  assert(!isProvablyNull(srcAddr.getPointer()) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = nullptr;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
                                              CGF.CGM.getDataLayout());
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull =
        CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to writeback.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
                                    "icr.writeback-cast");

  // Perform the writeback.

  // If we have a "to use" value, it's something we need to emit a use
  // of.  This has to be carefully threaded in: if it's done after the
  // release it's potentially undefined behavior (and the optimizer
  // will ignore it), and if it happens before the retain then the
  // optimizer could move the release there.
  if (writeback.ToUse) {
    assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);

    // Retain the new value.  No need to block-copy here:  the block's
    // being passed up the stack.
    value = CGF.EmitARCRetainNonBlock(value);

    // Emit the intrinsic use here.
    CGF.EmitARCIntrinsicUse(writeback.ToUse);

    // Load the old value (primitively).
    llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());

    // Put the new value in place (primitively).
    CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);

    // Release the old value.
    CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());

  // Otherwise, we can just do a normal lvalue store.
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
  }

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}

static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (const auto &I : args.writebacks())
    emitWriteback(CGF, I);
}

static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
                                            const CallArgList &CallArgs) {
  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
      CallArgs.getCleanupsToDeactivate();
  // Iterate in reverse to increase the likelihood of popping the cleanup.
  for (const auto &I : llvm::reverse(Cleanups)) {
    CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
    I.IsActiveIP->eraseFromParent();
  }
}

static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
    if (uop->getOpcode() == UO_AddrOf)
      return uop->getSubExpr();
  return nullptr;
}

/// Emit an argument that's being passed call-by-writeback.  That is,
/// we are passing the address of an __autoreleased temporary; it
/// might be copy-initialized with the current value of the given
/// address, but it will definitely be copied out of after the call.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  LValue srcLV;

  // Make an optimistic effort to emit the address as an l-value.
  // This can fail if the argument expression is more complicated.
  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
    srcLV = CGF.EmitLValue(lvExpr);

  // Otherwise, just emit it as a scalar.
  } else {
    Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());

    QualType srcAddrType =
        CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
    srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
  }
  Address srcAddr = srcLV.getAddress(CGF);

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.

  llvm::PointerType *destType =
      cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
  llvm::Type *destElemType =
      CGF.ConvertTypeForMem(CRE->getType()->getPointeeType());

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr.getPointer())) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  // Create the temporary.
  Address temp =
      CGF.CreateTempAlloca(destElemType, CGF.getPointerAlign(), "icr.temp");
  // Loading an l-value can introduce a cleanup if the l-value is __weak,
  // and that cleanup will be conditional if we can't prove that the l-value
  // isn't null, so we need to register a dominating point so that the cleanups
  // system will make valid IR.
  CodeGenFunction::ConditionalEvaluation condEval(CGF);

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
        llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = nullptr;
  llvm::BasicBlock *originBB = nullptr;

  // If the address is *not* known to be non-null, we need to switch.
  llvm::Value *finalArgument;

  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
                                              CGF.CGM.getDataLayout());
  if (provablyNonNull) {
    finalArgument = temp.getPointer();
  } else {
    llvm::Value *isNull =
        CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(
        isNull, llvm::ConstantPointerNull::get(destType),
        temp.getPointer(), "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      originBB = CGF.Builder.GetInsertBlock();
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
      condEval.begin(CGF);
    }
  }

  llvm::Value *valueToUse = nullptr;

  // Perform a copy if necessary.
  if (shouldCopy) {
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destElemType, "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);

    // If optimization is enabled, and the value was held in a
    // __strong variable, we need to tell the optimizer that this
    // value has to stay alive until we're doing the store back.
    // This is because the temporary is effectively unretained,
    // and so otherwise we can violate the high-level semantics.
    if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
        srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
      valueToUse = src;
    }
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull) {
    llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
    CGF.EmitBlock(contBB);

    // Make a phi for the value to intrinsically use.
    if (valueToUse) {
      llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
                                                      "icr.to-use");
      phiToUse->addIncoming(valueToUse, copyBB);
      phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
                            originBB);
      valueToUse = phiToUse;
    }

    condEval.end(CGF);
  }

  args.addWriteback(srcLV, temp, valueToUse);
  args.add(RValue::get(finalArgument), CRE->getType());
}
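
// Typical source-level trigger (an assumed example, for illustration): under
// ARC, a call such as
//   NSError *err = nil;
//   [obj doSomethingReturningError:&err];  // parameter: NSError * __autoreleasing *
// passes the address of a fresh "icr.temp" instead of &err; the code emitted
// by emitWriteback() above then copies the temporary back into 'err' after
// the call, null-checking &err only when it isn't provably non-null.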

void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
  assert(!StackBase);

  // Save the stack.
  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
}

void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
  if (StackBase) {
    // Restore the stack after the call.
    llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
    CGF.Builder.CreateCall(F, StackBase);
  }
}

void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
                                          SourceLocation ArgLoc,
                                          AbstractCallee AC,
                                          unsigned ParmNum) {
  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
                         SanOpts.has(SanitizerKind::NullabilityArg)))
    return;

  // The param decl may be missing in a variadic function.
  auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;

  // Prefer the nonnull attribute if it's present.
  const NonNullAttr *NNAttr = nullptr;
  if (SanOpts.has(SanitizerKind::NonnullAttribute))
    NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);

  bool CanCheckNullability = false;
  if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
    auto Nullability = PVD->getType()->getNullability();
    CanCheckNullability = Nullability &&
                          *Nullability == NullabilityKind::NonNull &&
                          PVD->getTypeSourceInfo();
  }

  if (!NNAttr && !CanCheckNullability)
    return;

  SourceLocation AttrLoc;
  SanitizerMask CheckKind;
  SanitizerHandler Handler;
  if (NNAttr) {
    AttrLoc = NNAttr->getLocation();
    CheckKind = SanitizerKind::NonnullAttribute;
    Handler = SanitizerHandler::NonnullArg;
  } else {
    AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
    CheckKind = SanitizerKind::NullabilityArg;
    Handler = SanitizerHandler::NullabilityArg;
  }

  SanitizerScope SanScope(this);
  llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType);
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
      llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
  };
  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, std::nullopt);
}

// Check if the call is going to use the inalloca convention. This needs to
// agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged
// later, so we can't check it directly.
static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC,
                            ArrayRef<QualType> ArgTypes) {
  // The Swift calling conventions don't go through the target-specific
  // argument classification, so they never use inalloca.
  // TODO: Consider limiting inalloca use to only calling conventions supported
  // by MSVC.
  if (ExplicitCC == CC_Swift || ExplicitCC == CC_SwiftAsync)
    return false;
  if (!CGM.getTarget().getCXXABI().isMicrosoft())
    return false;
  return llvm::any_of(ArgTypes, [&](QualType Ty) {
    return isInAllocaArgument(CGM.getCXXABI(), Ty);
  });
}
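
// Illustration (an assumed example, not from the original source): on
// i686-pc-windows-msvc a type such as
//   struct S { S(const S &); int x; };   // non-trivial copy constructor
// is classified RAA_DirectInMemory, so a call like f(S()) must construct the
// argument directly in the argument area via the inalloca convention rather
// than copying it there.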

#ifndef NDEBUG
// Determine whether the given Objective-C method may have type parameters
// in its signature.
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
  const DeclContext *dc = method->getDeclContext();
  if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) {
    return classDecl->getTypeParamListAsWritten();
  }

  if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
    return catDecl->getTypeParamList();
  }

  return false;
}
#endif

4232 | /// EmitCallArgs - Emit call arguments for a function. | ||||
4233 | void CodeGenFunction::EmitCallArgs( | ||||
4234 | CallArgList &Args, PrototypeWrapper Prototype, | ||||
4235 | llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange, | ||||
4236 | AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { | ||||
4237 | SmallVector<QualType, 16> ArgTypes; | ||||
4238 | |||||
4239 | assert((ParamsToSkip == 0 || Prototype.P) && | ||||
4240 | "Can't skip parameters if type info is not provided"); | ||||
4241 | |||||
4242 | // This variable only captures *explicitly* written conventions, not those | ||||
4243 | // applied by default via command line flags or target defaults, such as | ||||
4244 | // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would | ||||
4245 | // require knowing if this is a C++ instance method or being able to see | ||||
4246 | // unprototyped FunctionTypes. | ||||
4247 | CallingConv ExplicitCC = CC_C; | ||||
4248 | |||||
4249 | // First, if a prototype was provided, use those argument types. | ||||
4250 | bool IsVariadic = false; | ||||
4251 | if (Prototype.P) { | ||||
4252 | const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>(); | ||||
4253 | if (MD) { | ||||
4254 | IsVariadic = MD->isVariadic(); | ||||
4255 | ExplicitCC = getCallingConventionForDecl( | ||||
4256 | MD, CGM.getTarget().getTriple().isOSWindows()); | ||||
4257 | ArgTypes.assign(MD->param_type_begin() + ParamsToSkip, | ||||
4258 | MD->param_type_end()); | ||||
4259 | } else { | ||||
4260 | const auto *FPT = Prototype.P.get<const FunctionProtoType *>(); | ||||
4261 | IsVariadic = FPT->isVariadic(); | ||||
4262 | ExplicitCC = FPT->getExtInfo().getCC(); | ||||
4263 | ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip, | ||||
4264 | FPT->param_type_end()); | ||||
4265 | } | ||||
4266 | |||||
4267 | #ifndef NDEBUG | ||||
4268 | // Check that the prototyped types match the argument expression types. | ||||
4269 | bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD); | ||||
4270 | CallExpr::const_arg_iterator Arg = ArgRange.begin(); | ||||
4271 | for (QualType Ty : ArgTypes) { | ||||
4272 | assert(Arg != ArgRange.end() && "Running over edge of argument list!"); | ||||
4273 | assert( | ||||
4274 | (isGenericMethod || Ty->isVariablyModifiedType() || | ||||
4275 | Ty.getNonReferenceType()->isObjCRetainableType() || | ||||
4276 | getContext() | ||||
4277 | .getCanonicalType(Ty.getNonReferenceType()) | ||||
4278 | .getTypePtr() == | ||||
4279 | getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) && | ||||
4280 | "type mismatch in call argument!"); | ||||
4281 | ++Arg; | ||||
4282 | } | ||||
4283 | |||||
4284 | // Either we've emitted all the call args, or we have a call to variadic | ||||
4285 | // function. | ||||
4286 | assert((Arg == ArgRange.end() || IsVariadic) && | ||||
4287 | "Extra arguments in non-variadic function!"); | ||||
4288 | #endif | ||||
4289 | } | ||||
4290 | |||||
4291 | // If we still have any arguments, emit them using the type of the argument. | ||||
4292 | for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size())) | ||||
4293 | ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType()); | ||||
4294 | assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); | ||||
4295 | |||||
4296 | // We must evaluate arguments from right to left in the MS C++ ABI, | ||||
4297 | // because arguments are destroyed left to right in the callee. As a special | ||||
4298 | // case, there are certain language constructs that require left-to-right | ||||
4299 | // evaluation, and in those cases we consider the evaluation order requirement | ||||
4300 | // to trump the "destruction order is reverse construction order" guarantee. | ||||
4301 | bool LeftToRight = | ||||
4302 | CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee() | ||||
4303 | ? Order == EvaluationOrder::ForceLeftToRight | ||||
4304 | : Order != EvaluationOrder::ForceRightToLeft; | ||||
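// Editor's note (illustrative, not part of the original file): under the MS
// C++ ABI, for a call such as
//
//   f(makeA(), makeB());
//
// makeB() is evaluated before makeA(), so the callee's left-to-right
// destruction of the parameters is the reverse of their construction order.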
4305 | |||||
4306 | auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg, | ||||
4307 | RValue EmittedArg) { | ||||
4308 | if (!AC.hasFunctionDecl() || I >= AC.getNumParams()) | ||||
4309 | return; | ||||
4310 | auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>(); | ||||
4311 | if (PS == nullptr) | ||||
4312 | return; | ||||
4313 | |||||
4314 | const auto &Context = getContext(); | ||||
4315 | auto SizeTy = Context.getSizeType(); | ||||
4316 | auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); | ||||
4317 | assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?"); | ||||
4318 | llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T, | ||||
4319 | EmittedArg.getScalarVal(), | ||||
4320 | PS->isDynamic()); | ||||
4321 | Args.add(RValue::get(V), SizeTy); | ||||
4322 | // If we're emitting args in reverse, be sure to do so with | ||||
4323 | // pass_object_size, as well. | ||||
4324 | if (!LeftToRight) | ||||
4325 | std::swap(Args.back(), *(&Args.back() - 1)); | ||||
4326 | }; | ||||
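// Editor's note (illustrative sketch, not part of the original file):
// pass_object_size adds a hidden size argument right after the annotated
// parameter, e.g.
//
//   void fill(char *buf __attribute__((pass_object_size(0))), char c);
//   char arr[8];
//   fill(arr, 'x');   // lowers roughly to fill(arr, /*size=*/8, 'x')
//
// which is what MaybeEmitImplicitObjectSize materializes above.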
4327 | |||||
4328 | // Insert a stack save if we're going to need any inalloca args. | ||||
4329 | if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) { | ||||
4330 | assert(getTarget().getTriple().getArch() == llvm::Triple::x86 && | ||||
4331 | "inalloca only supported on x86"); | ||||
4332 | Args.allocateArgumentMemory(*this); | ||||
4333 | } | ||||
4334 | |||||
4335 | // Evaluate each argument in the appropriate order. | ||||
4336 | size_t CallArgsStart = Args.size(); | ||||
4337 | for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { | ||||
4338 | unsigned Idx = LeftToRight ? I : E - I - 1; | ||||
4339 | CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx; | ||||
4340 | unsigned InitialArgSize = Args.size(); | ||||
4341 | // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of | ||||
4342 | // the argument and parameter match or the objc method is parameterized. | ||||
4343 | assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) || | ||||
4344 | getContext().hasSameUnqualifiedType((*Arg)->getType(), | ||||
4345 | ArgTypes[Idx]) || | ||||
4346 | (isa<ObjCMethodDecl>(AC.getDecl()) && | ||||
4347 | isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) && | ||||
4348 | "Argument and parameter types don't match"); | ||||
4349 | EmitCallArg(Args, *Arg, ArgTypes[Idx]); | ||||
4350 | // In particular, we depend on it being the last arg in Args, and the | ||||
4351 | // objectsize bits depend on there only being one arg if !LeftToRight. | ||||
4352 | assert(InitialArgSize + 1 == Args.size() && | ||||
4353 | "The code below depends on only adding one arg per EmitCallArg"); | ||||
4354 | (void)InitialArgSize; | ||||
4355 | // Since pointer arguments are never emitted as LValues, it is safe to | ||||
4356 | // emit the non-null argument check for r-values only. | ||||
4357 | if (!Args.back().hasLValue()) { | ||||
4358 | RValue RVArg = Args.back().getKnownRValue(); | ||||
4359 | EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC, | ||||
4360 | ParamsToSkip + Idx); | ||||
4361 | // @llvm.objectsize should never have side-effects and shouldn't need | ||||
4362 | // destruction/cleanups, so we can safely "emit" it after its arg, | ||||
4363 | // regardless of right-to-leftness. | ||||
4364 | MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg); | ||||
4365 | } | ||||
4366 | } | ||||
4367 | |||||
4368 | if (!LeftToRight) { | ||||
4369 | // Un-reverse the arguments we just evaluated so they match up with the LLVM | ||||
4370 | // IR function. | ||||
4371 | std::reverse(Args.begin() + CallArgsStart, Args.end()); | ||||
4372 | } | ||||
4373 | } | ||||
4374 | |||||
4375 | namespace { | ||||
4376 | |||||
4377 | struct DestroyUnpassedArg final : EHScopeStack::Cleanup { | ||||
4378 | DestroyUnpassedArg(Address Addr, QualType Ty) | ||||
4379 | : Addr(Addr), Ty(Ty) {} | ||||
4380 | |||||
4381 | Address Addr; | ||||
4382 | QualType Ty; | ||||
4383 | |||||
4384 | void Emit(CodeGenFunction &CGF, Flags flags) override { | ||||
4385 | QualType::DestructionKind DtorKind = Ty.isDestructedType(); | ||||
4386 | if (DtorKind == QualType::DK_cxx_destructor) { | ||||
4387 | const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); | ||||
4388 | assert(!Dtor->isTrivial()); | ||||
4389 | CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, | ||||
4390 | /*Delegating=*/false, Addr, Ty); | ||||
4391 | } else { | ||||
4392 | CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty)); | ||||
4393 | } | ||||
4394 | } | ||||
4395 | }; | ||||
4396 | |||||
4397 | struct DisableDebugLocationUpdates { | ||||
4398 | CodeGenFunction &CGF; | ||||
4399 | bool disabledDebugInfo; | ||||
4400 | DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { | ||||
4401 | if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo())) | ||||
4402 | CGF.disableDebugInfo(); | ||||
4403 | } | ||||
4404 | ~DisableDebugLocationUpdates() { | ||||
4405 | if (disabledDebugInfo) | ||||
4406 | CGF.enableDebugInfo(); | ||||
4407 | } | ||||
4408 | }; | ||||
4409 | |||||
4410 | } // end anonymous namespace | ||||
4411 | |||||
4412 | RValue CallArg::getRValue(CodeGenFunction &CGF) const { | ||||
4413 | if (!HasLV) | ||||
4414 | return RV; | ||||
4415 | LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty); | ||||
4416 | CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap, | ||||
4417 | LV.isVolatile()); | ||||
4418 | IsUsed = true; | ||||
4419 | return RValue::getAggregate(Copy.getAddress(CGF)); | ||||
4420 | } | ||||
4421 | |||||
4422 | void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const { | ||||
4423 | LValue Dst = CGF.MakeAddrLValue(Addr, Ty); | ||||
4424 | if (!HasLV && RV.isScalar()) | ||||
4425 | CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true); | ||||
4426 | else if (!HasLV && RV.isComplex()) | ||||
4427 | CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true); | ||||
4428 | else { | ||||
4429 | auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress(); | ||||
4430 | LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty); | ||||
4431 | // We assume that call args are never copied into subobjects. | ||||
4432 | CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap, | ||||
4433 | HasLV ? LV.isVolatileQualified() | ||||
4434 | : RV.isVolatileQualified()); | ||||
4435 | } | ||||
4436 | IsUsed = true; | ||||
4437 | } | ||||
4438 | |||||
4439 | void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, | ||||
4440 | QualType type) { | ||||
4441 | DisableDebugLocationUpdates Dis(*this, E); | ||||
4442 | if (const ObjCIndirectCopyRestoreExpr *CRE | ||||
4443 | = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { | ||||
4444 | assert(getLangOpts().ObjCAutoRefCount); | ||||
4445 | return emitWritebackArg(*this, args, CRE); | ||||
4446 | } | ||||
4447 | |||||
4448 | assert(type->isReferenceType() == E->isGLValue() && | ||||
4449 | "reference binding to unmaterialized r-value!"); | ||||
4450 | |||||
4451 | if (E->isGLValue()) { | ||||
4452 | assert(E->getObjectKind() == OK_Ordinary); | ||||
4453 | return args.add(EmitReferenceBindingToExpr(E), type); | ||||
4454 | } | ||||
4455 | |||||
4456 | bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); | ||||
4457 | |||||
4458 | // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. | ||||
4459 | // However, we still have to push an EH-only cleanup in case we unwind before | ||||
4460 | // we make it to the call. | ||||
4461 | if (type->isRecordType() && | ||||
4462 | type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) { | ||||
4463 | // If we're using inalloca, use the argument memory. Otherwise, use a | ||||
4464 | // temporary. | ||||
4465 | AggValueSlot Slot = args.isUsingInAlloca() | ||||
4466 | ? createPlaceholderSlot(*this, type) : CreateAggTemp(type, "agg.tmp"); | ||||
4467 | |||||
4468 | bool DestroyedInCallee = true, NeedsEHCleanup = true; | ||||
4469 | if (const auto *RD = type->getAsCXXRecordDecl()) | ||||
4470 | DestroyedInCallee = RD->hasNonTrivialDestructor(); | ||||
4471 | else | ||||
4472 | NeedsEHCleanup = needsEHCleanup(type.isDestructedType()); | ||||
4473 | |||||
4474 | if (DestroyedInCallee) | ||||
4475 | Slot.setExternallyDestructed(); | ||||
4476 | |||||
4477 | EmitAggExpr(E, Slot); | ||||
4478 | RValue RV = Slot.asRValue(); | ||||
4479 | args.add(RV, type); | ||||
4480 | |||||
4481 | if (DestroyedInCallee && NeedsEHCleanup) { | ||||
4482 | // Create a no-op GEP between the placeholder and the cleanup so we can | ||||
4483 | // RAUW it successfully. It also serves as a marker of the first | ||||
4484 | // instruction where the cleanup is active. | ||||
4485 | pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(), | ||||
4486 | type); | ||||
4487 | // This unreachable is a temporary marker which will be removed later. | ||||
4488 | llvm::Instruction *IsActive = Builder.CreateUnreachable(); | ||||
4489 | args.addArgCleanupDeactivation(EHStack.stable_begin(), IsActive); | ||||
4490 | } | ||||
4491 | return; | ||||
4492 | } | ||||
4493 | |||||
4494 | if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && | ||||
4495 | cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { | ||||
4496 | LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); | ||||
4497 | assert(L.isSimple()); | ||||
4498 | args.addUncopiedAggregate(L, type); | ||||
4499 | return; | ||||
4500 | } | ||||
4501 | |||||
4502 | args.add(EmitAnyExprToTemp(E), type); | ||||
4503 | } | ||||
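// Editor's note (illustrative sketch, not part of the original file): the
// isParamDestroyedInCallee() path above fires for code like
//
//   struct S { ~S(); };
//   void f(S);
//   void g() { f(S{}); }   // MS C++ ABI: f() destroys its copy of S
//
// where only an EH-only cleanup is pushed in the caller, covering an unwind
// that happens after constructing the argument but before the call.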
4504 | |||||
4505 | QualType CodeGenFunction::getVarArgType(const Expr *Arg) { | ||||
4506 | // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC | ||||
4507 | // implicitly widens null pointer constants that are arguments to varargs | ||||
4508 | // functions to pointer-sized ints. | ||||
4509 | if (!getTarget().getTriple().isOSWindows()) | ||||
4510 | return Arg->getType(); | ||||
4511 | |||||
4512 | if (Arg->getType()->isIntegerType() && | ||||
4513 | getContext().getTypeSize(Arg->getType()) < | ||||
4514 | getContext().getTargetInfo().getPointerWidth(LangAS::Default) && | ||||
4515 | Arg->isNullPointerConstant(getContext(), | ||||
4516 | Expr::NPC_ValueDependentIsNotNull)) { | ||||
4517 | return getContext().getIntPtrType(); | ||||
4518 | } | ||||
4519 | |||||
4520 | return Arg->getType(); | ||||
4521 | } | ||||
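// Editor's note (illustrative, not part of the original file): the widening
// above matters for, e.g.,
//
//   printf("%p", NULL);   // NULL is plain 0 in MSVC headers
//
// on a 64-bit Windows target, where the 32-bit null pointer constant is
// promoted to the pointer-sized intptr_t to match MSVC's varargs behavior.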
4522 | |||||
4523 | // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC | ||||
4524 | // optimizer it can aggressively ignore unwind edges. | ||||
4525 | void | ||||
4526 | CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { | ||||
4527 | if (CGM.getCodeGenOpts().OptimizationLevel != 0 && | ||||
4528 | !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) | ||||
4529 | Inst->setMetadata("clang.arc.no_objc_arc_exceptions", | ||||
4530 | CGM.getNoObjCARCExceptionsMetadata()); | ||||
4531 | } | ||||
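// Editor's note (illustrative, not part of the original file): in the
// emitted IR this tags the instruction roughly as
//
//   call void @foo(), !clang.arc.no_objc_arc_exceptions !0
//
// which licenses the ARC optimizer to ignore the call's unwind edges.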
4532 | |||||
4533 | /// Emits a call to the given no-arguments nounwind runtime function. | ||||
4534 | llvm::CallInst * | ||||
4535 | CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, | ||||
4536 | const llvm::Twine &name) { | ||||
4537 | return EmitNounwindRuntimeCall(callee, std::nullopt, name); | ||||
4538 | } | ||||
4539 | |||||
4540 | /// Emits a call to the given nounwind runtime function. | ||||
4541 | llvm::CallInst * | ||||
4542 | CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, | ||||
4543 | ArrayRef<llvm::Value *> args, | ||||
4544 | const llvm::Twine &name) { | ||||
4545 | llvm::CallInst *call = EmitRuntimeCall(callee, args, name); | ||||
4546 | call->setDoesNotThrow(); | ||||
4547 | return call; | ||||
4548 | } | ||||
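// Editor's note (illustrative sketch, not part of the original file; the
// helper name is hypothetical): a typical caller would look like
//
//   llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(
//       llvm::FunctionType::get(CGF.VoidTy, /*isVarArg=*/false),
//       "__some_runtime_helper");
//   CGF.EmitNounwindRuntimeCall(Fn);
//
// yielding a plain `call` marked nounwind with the runtime calling convention.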
4549 | |||||
4550 | /// Emits a simple call (never an invoke) to the given no-arguments | ||||
4551 | /// runtime function. | ||||
4552 | llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, | ||||
4553 | const llvm::Twine &name) { | ||||
4554 | return EmitRuntimeCall(callee, std::nullopt, name); | ||||
4555 | } | ||||
4556 | |||||
4557 | // Calls which may throw must have operand bundles indicating which funclet | ||||
4558 | // they are nested within. | ||||
4559 | SmallVector<llvm::OperandBundleDef, 1> | ||||
4560 | CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) { | ||||
4561 | // There is no need for a funclet operand bundle if we aren't inside a | ||||
4562 | // funclet. | ||||
4563 | if (!CurrentFuncletPad) | ||||
4564 | return (SmallVector<llvm::OperandBundleDef, 1>()); | ||||
4565 | |||||
4566 | // Skip intrinsics which cannot throw (as long as they don't lower into | ||||
4567 | // regular function calls in the course of IR transformations). | ||||
4568 | if (auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) { | ||||
4569 | if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) { | ||||
4570 | auto IID = CalleeFn->getIntrinsicID(); | ||||
4571 | if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID)) | ||||
4572 | return (SmallVector<llvm::OperandBundleDef, 1>()); | ||||
4573 | } | ||||
4574 | } | ||||
4575 | |||||
4576 | SmallVector<llvm::OperandBundleDef, 1> BundleList; | ||||
4577 | BundleList.emplace_back("funclet", CurrentFuncletPad); | ||||
4578 | return BundleList; | ||||
4579 | } | ||||
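// Editor's note (illustrative, not part of the original file): inside a
// funclet the bundle appears on the call site, e.g.
//
//   %cp = catchpad within %cs [ ... ]
//   call void @g() [ "funclet"(token %cp) ]
//
// Without it, WinEH preparation cannot tell which funclet the call belongs to.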
4580 | |||||
4581 | /// Emits a simple call (never an invoke) to the given runtime function. | ||||
4582 | llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, | ||||
4583 | ArrayRef<llvm::Value *> args, | ||||
4584 | const llvm::Twine &name) { | ||||
4585 | llvm::CallInst *call = Builder.CreateCall( | ||||
4586 | callee, args, getBundlesForFunclet(callee.getCallee()), name); | ||||
4587 | call->setCallingConv(getRuntimeCC()); | ||||
4588 | return call; | ||||
4589 | } | ||||
4590 | |||||
4591 | /// Emits a call or invoke to the given noreturn runtime function. | ||||
4592 | void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke( | ||||
4593 | llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) { | ||||
4594 | SmallVector<llvm::OperandBundleDef, 1> BundleList = | ||||
4595 | getBundlesForFunclet(callee.getCallee()); | ||||
4596 | |||||
4597 | if (getInvokeDest()) { | ||||
4598 | llvm::InvokeInst *invoke = | ||||
4599 | Builder.CreateInvoke(callee, | ||||
4600 | getUnreachableBlock(), | ||||
4601 | getInvokeDest(), | ||||
4602 | args, | ||||
4603 | BundleList); | ||||
4604 | invoke->setDoesNotReturn(); | ||||
4605 | invoke->setCallingConv(getRuntimeCC()); | ||||
4606 | } else { | ||||
4607 | llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList); | ||||
4608 | call->setDoesNotReturn(); | ||||
4609 | call->setCallingConv(getRuntimeCC()); | ||||
4610 | Builder.CreateUnreachable(); | ||||
4611 | } | ||||
4612 | } | ||||
4613 | |||||
4614 | /// Emits a call or invoke instruction to the given nullary runtime function. | ||||
4615 | llvm::CallBase * | ||||
4616 | CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, | ||||
4617 | const Twine &name) { | ||||
4618 | return EmitRuntimeCallOrInvoke(callee, std::nullopt, name); | ||||
4619 | } | ||||
4620 | |||||
4621 | /// Emits a call or invoke instruction to the given runtime function. | ||||
4622 | llvm::CallBase * | ||||
4623 | CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, | ||||
4624 | ArrayRef<llvm::Value *> args, | ||||
4625 | const Twine &name) { | ||||
4626 | llvm::CallBase *call = EmitCallOrInvoke(callee, args, name); | ||||
4627 | call->setCallingConv(getRuntimeCC()); | ||||
4628 | return call; | ||||
4629 | } | ||||
4630 | |||||
4631 | /// Emits a call or invoke instruction to the given function, depending | ||||
4632 | /// on the current state of the EH stack. | ||||
4633 | llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee, | ||||
4634 | ArrayRef<llvm::Value *> Args, | ||||
4635 | const Twine &Name) { | ||||
4636 | llvm::BasicBlock *InvokeDest = getInvokeDest(); | ||||
4637 | SmallVector<llvm::OperandBundleDef, 1> BundleList = | ||||
4638 | getBundlesForFunclet(Callee.getCallee()); | ||||
4639 | |||||
4640 | llvm::CallBase *Inst; | ||||
4641 | if (!InvokeDest) | ||||
4642 | Inst = Builder.CreateCall(Callee, Args, BundleList, Name); | ||||
4643 | else { | ||||
4644 | llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); | ||||
4645 | Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList, | ||||
4646 | Name); | ||||
4647 | EmitBlock(ContBB); | ||||
4648 | } | ||||
4649 | |||||
4650 | // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC | ||||
4651 | // optimizer it can aggressively ignore unwind edges. | ||||
4652 | if (CGM.getLangOpts().ObjCAutoRefCount) | ||||
4653 | AddObjCARCExceptionMetadata(Inst); | ||||
4654 | |||||
4655 | return Inst; | ||||
4656 | } | ||||
4657 | |||||
4658 | void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, | ||||
4659 | llvm::Value *New) { | ||||
4660 | DeferredReplacements.push_back( | ||||
4661 | std::make_pair(llvm::WeakTrackingVH(Old), New)); | ||||
4662 | } | ||||
4663 | |||||
4664 | namespace { | ||||
4665 | |||||
4666 | /// Specify the given \p NewAlign as the alignment of the return value attribute. | ||||
4667 | /// If such an attribute already exists, reset it to the larger of the two. | ||||
4668 | [[nodiscard]] llvm::AttributeList | ||||
4669 | maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx, | ||||
4670 | const llvm::AttributeList &Attrs, | ||||
4671 | llvm::Align NewAlign) { | ||||
4672 | llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne(); | ||||
4673 | if (CurAlign >= NewAlign) | ||||
4674 | return Attrs; | ||||
4675 | llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign); | ||||
4676 | return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment) | ||||
4677 | .addRetAttribute(Ctx, AlignAttr); | ||||
4678 | } | ||||
4679 | |||||
4680 | template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter { | ||||
4681 | protected: | ||||
4682 | CodeGenFunction &CGF; | ||||
4683 | |||||
4684 | /// We do nothing if this is, or becomes, nullptr. | ||||
4685 | const AlignedAttrTy *AA = nullptr; | ||||
4686 | |||||
4687 | llvm::Value *Alignment = nullptr; // May or may not be a constant. | ||||
4688 | llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero. | ||||
4689 | |||||
4690 | AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl) | ||||
4691 | : CGF(CGF_) { | ||||
4692 | if (!FuncDecl) | ||||
4693 | return; | ||||
4694 | AA = FuncDecl->getAttr<AlignedAttrTy>(); | ||||
4695 | } | ||||
4696 | |||||
4697 | public: | ||||
4698 | /// If we can, materialize the alignment as an attribute on return value. | ||||
4699 | [[nodiscard]] llvm::AttributeList | ||||
4700 | TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) { | ||||
4701 | if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment)) | ||||
4702 | return Attrs; | ||||
4703 | const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment); | ||||
4704 | if (!AlignmentCI) | ||||
4705 | return Attrs; | ||||
4706 | // We may legitimately have non-power-of-2 alignment here. | ||||
4707 | // If so, this is UB land; emit it via `@llvm.assume` instead. | ||||
4708 | if (!AlignmentCI->getValue().isPowerOf2()) | ||||
4709 | return Attrs; | ||||
4710 | llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute( | ||||
4711 | CGF.getLLVMContext(), Attrs, | ||||
4712 | llvm::Align( | ||||
4713 | AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment))); | ||||
4714 | AA = nullptr; // We're done. Disallow doing anything else. | ||||
4715 | return NewAttrs; | ||||
4716 | } | ||||
4717 | |||||
4718 | /// Emit alignment assumption. | ||||
4719 | /// This is a general fallback that we take if either there is an offset, | ||||
4720 | /// or the alignment is variable, or we are sanitizing for alignment. | ||||
4721 | void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) { | ||||
4722 | if (!AA) | ||||
4723 | return; | ||||
4724 | CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, | ||||
4725 | AA->getLocation(), Alignment, OffsetCI); | ||||
4726 | AA = nullptr; // We're done. Disallow doing anything else. | ||||
4727 | } | ||||
4728 | }; | ||||
4729 | |||||
4730 | /// Helper data structure to emit `AssumeAlignedAttr`. | ||||
4731 | class AssumeAlignedAttrEmitter final | ||||
4732 | : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> { | ||||
4733 | public: | ||||
4734 | AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl) | ||||
4735 | : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) { | ||||
4736 | if (!AA) | ||||
4737 | return; | ||||
4738 | // It is guaranteed that the alignment/offset are constants. | ||||
4739 | Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment())); | ||||
4740 | if (Expr *Offset = AA->getOffset()) { | ||||
4741 | OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset)); | ||||
4742 | if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset. | ||||
4743 | OffsetCI = nullptr; | ||||
4744 | } | ||||
4745 | } | ||||
4746 | }; | ||||
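// Editor's note (illustrative, not part of the original file): this emitter
// handles declarations such as
//
//   void *make_buffer(void) __attribute__((assume_aligned(64)));
//   void *make_offset(void) __attribute__((assume_aligned(64, 8)));
//
// The first can become an `align 64` return attribute at the call site; the
// second carries an offset, so it falls back to an @llvm.assume.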
4747 | |||||
4748 | /// Helper data structure to emit `AllocAlignAttr`. | ||||
4749 | class AllocAlignAttrEmitter final | ||||
4750 | : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> { | ||||
4751 | public: | ||||
4752 | AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl, | ||||
4753 | const CallArgList &CallArgs) | ||||
4754 | : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) { | ||||
4755 | if (!AA) | ||||
4756 | return; | ||||
4757 | // Alignment may or may not be a constant, and that is okay. | ||||
4758 | Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()] | ||||
4759 | .getRValue(CGF) | ||||
4760 | .getScalarVal(); | ||||
4761 | } | ||||
4762 | }; | ||||
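// Editor's note (illustrative, not part of the original file): alloc_align
// names the parameter that carries the alignment, which need not be constant:
//
//   void *my_aligned_alloc(size_t size, size_t align)
//       __attribute__((alloc_align(2)));
//
// Here Alignment is the emitted value of the second call argument.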
4763 | |||||
4764 | } // namespace | ||||
4765 | |||||
4766 | static unsigned getMaxVectorWidth(const llvm::Type *Ty) { | ||||
4767 | if (auto *VT = dyn_cast<llvm::VectorType>(Ty)) | ||||
4768 | return VT->getPrimitiveSizeInBits().getKnownMinValue(); | ||||
4769 | if (auto *AT = dyn_cast<llvm::ArrayType>(Ty)) | ||||
4770 | return getMaxVectorWidth(AT->getElementType()); | ||||
4771 | |||||
4772 | unsigned MaxVectorWidth = 0; | ||||
4773 | if (auto *ST = dyn_cast<llvm::StructType>(Ty)) | ||||
4774 | for (auto *I : ST->elements()) | ||||
4775 | MaxVectorWidth = std::max(MaxVectorWidth, getMaxVectorWidth(I)); | ||||
4776 | return MaxVectorWidth; | ||||
4777 | } | ||||
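// Editor's note (illustrative, not part of the original file): the recursion
// above reports, e.g., 256 for
//
//   { <4 x float>, [2 x <8 x i32>] }   ; 128-bit and 256-bit vectors
//
// i.e. the widest vector found anywhere in the aggregate, in bits.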
4778 | |||||
4779 | RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, | ||||
4780 | const CGCallee &Callee, | ||||
4781 | ReturnValueSlot ReturnValue, | ||||
4782 | const CallArgList &CallArgs, | ||||
4783 | llvm::CallBase **callOrInvoke, bool IsMustTail, | ||||
4784 | SourceLocation Loc) { | ||||
4785 | // FIXME: We no longer need the types from CallArgs; lift up and simplify. | ||||
4786 | |||||
4787 | assert(Callee.isOrdinary() || Callee.isVirtual()); | ||||
4788 | |||||
4789 | // Handle struct-return functions by passing a pointer to the | ||||
4790 | // location that we would like to return into. | ||||
4791 | QualType RetTy = CallInfo.getReturnType(); | ||||
4792 | const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); | ||||
4793 | |||||
4794 | llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo); | ||||
4795 | |||||
4796 | const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl(); | ||||
4797 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) { | ||||
4798 | // We can only guarantee that a function is called from the correct | ||||
4799 | // context/function based on the appropriate target attributes, | ||||
4800 | // so only check in the case where we have both always_inline and target | ||||
4801 | // since otherwise we could be making a conditional call after a check for | ||||
4802 | // the proper cpu features (and it won't cause code generation issues due to | ||||
4803 | // function based code generation). | ||||
4804 | if (TargetDecl->hasAttr<AlwaysInlineAttr>() && | ||||
4805 | (TargetDecl->hasAttr<TargetAttr>() || | ||||
4806 | (CurFuncDecl && CurFuncDecl->hasAttr<TargetAttr>()))) | ||||
4807 | checkTargetFeatures(Loc, FD); | ||||
4808 | |||||
4809 | // Some architectures (such as x86-64) have the ABI changed based on | ||||
4810 | // attribute-target/features. Give them a chance to diagnose. | ||||
4811 | CGM.getTargetCodeGenInfo().checkFunctionCallABI( | ||||
4812 | CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs); | ||||
4813 | } | ||||
4814 | |||||
4815 | #ifndef NDEBUG | ||||
4816 | if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) { | ||||
4817 | // For an inalloca varargs function, we don't expect CallInfo to match the | ||||
4818 | // function pointer's type, because the inalloca struct will have extra | ||||
4819 | // fields in it for the varargs parameters. Code later in this function | ||||
4820 | // bitcasts the function pointer to the type derived from CallInfo. | ||||
4821 | // | ||||
4822 | // In other cases, we assert that the types match up (until pointers stop | ||||
4823 | // having pointee types). | ||||
4824 | if (Callee.isVirtual()) | ||||
4825 | assert(IRFuncTy == Callee.getVirtualFunctionType()); | ||||
4826 | else { | ||||
4827 | llvm::PointerType *PtrTy = | ||||
4828 | llvm::cast<llvm::PointerType>(Callee.getFunctionPointer()->getType()); | ||||
4829 | assert(PtrTy->isOpaqueOrPointeeTypeMatches(IRFuncTy)); | ||||
4830 | } | ||||
4831 | } | ||||
4832 | #endif | ||||
4833 | |||||
4834 | // 1. Set up the arguments. | ||||
4835 | |||||
4836 | // If we're using inalloca, insert the allocation after the stack save. | ||||
4837 | // FIXME: Do this earlier rather than hacking it in here! | ||||
4838 | Address ArgMemory = Address::invalid(); | ||||
4839 | if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { | ||||
4840 | const llvm::DataLayout &DL = CGM.getDataLayout(); | ||||
4841 | llvm::Instruction *IP = CallArgs.getStackBase(); | ||||
4842 | llvm::AllocaInst *AI; | ||||
4843 | if (IP) { | ||||
4844 | IP = IP->getNextNode(); | ||||
4845 | AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(), | ||||
4846 | "argmem", IP); | ||||
4847 | } else { | ||||
4848 | AI = CreateTempAlloca(ArgStruct, "argmem"); | ||||
4849 | } | ||||
4850 | auto Align = CallInfo.getArgStructAlignment(); | ||||
4851 | AI->setAlignment(Align.getAsAlign()); | ||||
4852 | AI->setUsedWithInAlloca(true); | ||||
4853 | assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); | ||||
4854 | ArgMemory = Address(AI, ArgStruct, Align); | ||||
4855 | } | ||||
4856 | |||||
4857 | ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo); | ||||
4858 | SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs()); | ||||
4859 | |||||
4860 | // If the call returns a temporary with struct return, create a temporary | ||||
4861 | // alloca to hold the result, unless one is given to us. | ||||
4862 | Address SRetPtr = Address::invalid(); | ||||
4863 | Address SRetAlloca = Address::invalid(); | ||||
4864 | llvm::Value *UnusedReturnSizePtr = nullptr; | ||||
4865 | if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) { | ||||
4866 | if (!ReturnValue.isNull()) { | ||||
4867 | SRetPtr = ReturnValue.getValue(); | ||||
4868 | } else { | ||||
4869 | SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca); | ||||
4870 | if (HaveInsertPoint() && ReturnValue.isUnused()) { | ||||
4871 | llvm::TypeSize size = | ||||
4872 | CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy)); | ||||
4873 | UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer()); | ||||
4874 | } | ||||
4875 | } | ||||
4876 | if (IRFunctionArgs.hasSRetArg()) { | ||||
4877 | IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer(); | ||||
4878 | } else if (RetAI.isInAlloca()) { | ||||
4879 | Address Addr = | ||||
4880 | Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex()); | ||||
4881 | Builder.CreateStore(SRetPtr.getPointer(), Addr); | ||||
4882 | } | ||||
4883 | } | ||||
4884 | |||||
4885 | Address swiftErrorTemp = Address::invalid(); | ||||
4886 | Address swiftErrorArg = Address::invalid(); | ||||
4887 | |||||
4888 | // When passing arguments using temporary allocas, we need to add the | ||||
4889 | // appropriate lifetime markers. This vector keeps track of all the lifetime | ||||
4890 | // markers that need to be ended right after the call. | ||||
4891 | SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall; | ||||
4892 | |||||
4893 | // Translate all of the arguments as necessary to match the IR lowering. | ||||
4894 | assert(CallInfo.arg_size() == CallArgs.size() && | ||||
4895 | "Mismatch between function signature & arguments."); | ||||
4896 | unsigned ArgNo = 0; | ||||
4897 | CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); | ||||
4898 | for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); | ||||
4899 | I != E; ++I, ++info_it, ++ArgNo) { | ||||
4900 | const ABIArgInfo &ArgInfo = info_it->info; | ||||
4901 | |||||
4902 | // Insert a padding argument to ensure proper alignment. | ||||
4903 | if (IRFunctionArgs.hasPaddingArg(ArgNo)) | ||||
4904 | IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = | ||||
4905 | llvm::UndefValue::get(ArgInfo.getPaddingType()); | ||||
4906 | |||||
4907 | unsigned FirstIRArg, NumIRArgs; | ||||
4908 | std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); | ||||
4909 | |||||
4910 | bool ArgHasMaybeUndefAttr = | ||||
4911 | IsArgumentMaybeUndef(TargetDecl, CallInfo.getNumRequiredArgs(), ArgNo); | ||||
4912 | |||||
4913 | switch (ArgInfo.getKind()) { | ||||
4914 | case ABIArgInfo::InAlloca: { | ||||
4915 | assert(NumIRArgs == 0); | ||||
4916 | assert(getTarget().getTriple().getArch() == llvm::Triple::x86); | ||||
4917 | if (I->isAggregate()) { | ||||
4918 | Address Addr = I->hasLValue() | ||||
4919 | ? I->getKnownLValue().getAddress(*this) | ||||
4920 | : I->getKnownRValue().getAggregateAddress(); | ||||
4921 | llvm::Instruction *Placeholder = | ||||
4922 | cast<llvm::Instruction>(Addr.getPointer()); | ||||
4923 | |||||
4924 | if (!ArgInfo.getInAllocaIndirect()) { | ||||
4925 | // Replace the placeholder with the appropriate argument slot GEP. | ||||
4926 | CGBuilderTy::InsertPoint IP = Builder.saveIP(); | ||||
4927 | Builder.SetInsertPoint(Placeholder); | ||||
4928 | Addr = Builder.CreateStructGEP(ArgMemory, | ||||
4929 | ArgInfo.getInAllocaFieldIndex()); | ||||
4930 | Builder.restoreIP(IP); | ||||
4931 | } else { | ||||
4932 | // For indirect things such as overaligned structs, replace the | ||||
4933 | // placeholder with a regular aggregate temporary alloca. Store the | ||||
4934 | // address of this alloca into the struct. | ||||
4935 | Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp"); | ||||
4936 | Address ArgSlot = Builder.CreateStructGEP( | ||||
4937 | ArgMemory, ArgInfo.getInAllocaFieldIndex()); | ||||
4938 | Builder.CreateStore(Addr.getPointer(), ArgSlot); | ||||
4939 | } | ||||
4940 | deferPlaceholderReplacement(Placeholder, Addr.getPointer()); | ||||
4941 | } else if (ArgInfo.getInAllocaIndirect()) { | ||||
4942 | // Make a temporary alloca and store the address of it into the argument | ||||
4943 | // struct. | ||||
4944 | Address Addr = CreateMemTempWithoutCast( | ||||
4945 | I->Ty, getContext().getTypeAlignInChars(I->Ty), | ||||
4946 | "indirect-arg-temp"); | ||||
4947 | I->copyInto(*this, Addr); | ||||
4948 | Address ArgSlot = | ||||
4949 | Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex()); | ||||
4950 | Builder.CreateStore(Addr.getPointer(), ArgSlot); | ||||
4951 | } else { | ||||
4952 | // Store the RValue into the argument struct. | ||||
4953 | Address Addr = | ||||
4954 | Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex()); | ||||
4955 | // There are some cases where a trivial bitcast is not avoidable. The | ||||
4956 | // definition of a type later in a translation unit may change its type | ||||
4957 | // from {}* to (%struct.foo*)*. | ||||
4958 | Addr = Builder.CreateElementBitCast(Addr, ConvertTypeForMem(I->Ty)); | ||||
4959 | I->copyInto(*this, Addr); | ||||
4960 | } | ||||
4961 | break; | ||||
4962 | } | ||||
4963 | |||||
4964 | case ABIArgInfo::Indirect: | ||||
4965 | case ABIArgInfo::IndirectAliased: { | ||||
4966 | assert(NumIRArgs == 1); | ||||
4967 | if (!I->isAggregate()) { | ||||
4968 | // Make a temporary alloca to pass the argument. | ||||
4969 | Address Addr = CreateMemTempWithoutCast( | ||||
4970 | I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp"); | ||||
4971 | |||||
4972 | llvm::Value *Val = Addr.getPointer(); | ||||
4973 | if (ArgHasMaybeUndefAttr) | ||||
4974 | Val = Builder.CreateFreeze(Addr.getPointer()); | ||||
4975 | IRCallArgs[FirstIRArg] = Val; | ||||
4976 | |||||
4977 | I->copyInto(*this, Addr); | ||||
4978 | } else { | ||||
4979 | // We want to avoid creating an unnecessary temporary+copy here; | ||||
4980 | // however, we need one in three cases: | ||||
4981 | // 1. If the argument is not byval, and we are required to copy the | ||||
4982 | // source. (This case doesn't occur on any common architecture.) | ||||
4983 | // 2. If the argument is byval, RV is not sufficiently aligned, and | ||||
4984 | // we cannot force it to be sufficiently aligned. | ||||
4985 | // 3. If the argument is byval, but RV is not located in default | ||||
4986 | // or alloca address space. | ||||
4987 | Address Addr = I->hasLValue() | ||||
4988 | ? I->getKnownLValue().getAddress(*this) | ||||
4989 | : I->getKnownRValue().getAggregateAddress(); | ||||
4990 | llvm::Value *V = Addr.getPointer(); | ||||
4991 | CharUnits Align = ArgInfo.getIndirectAlign(); | ||||
4992 | const llvm::DataLayout *TD = &CGM.getDataLayout(); | ||||
4993 | |||||
4994 | assert((FirstIRArg >= IRFuncTy->getNumParams() || | ||||
4995 | IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() == | ||||
4996 | TD->getAllocaAddrSpace()) && | ||||
4997 | "indirect argument must be in alloca address space"); | ||||

        bool NeedCopy = false;

        if (Addr.getAlignment() < Align &&
            llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
                Align.getAsAlign()) {
          NeedCopy = true;
        } else if (I->hasLValue()) {
          auto LV = I->getKnownLValue();
          auto AS = LV.getAddressSpace();

          if (!ArgInfo.getIndirectByVal() ||
              (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
            NeedCopy = true;
          }
          if (!getLangOpts().OpenCL) {
            if ((ArgInfo.getIndirectByVal() &&
                 (AS != LangAS::Default &&
                  AS != CGM.getASTAllocaAddressSpace()))) {
              NeedCopy = true;
            }
          }
          // For OpenCL, even if RV is located in the default or alloca address
          // space, we don't want to perform an address space cast for it.
          else if ((ArgInfo.getIndirectByVal() &&
                    Addr.getType()->getAddressSpace() !=
                        IRFuncTy->getParamType(FirstIRArg)
                            ->getPointerAddressSpace())) {
            NeedCopy = true;
          }
        }

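        // Illustrative sketch (hypothetical C, assuming a byval-style ABI
        // such as x86-64 SysV) of one situation that forces NeedCopy:
        //
        //   struct S { char buf[64]; };
        //   void callee(struct S s);     // lowered to an indirect/byval arg
        //   #pragma pack(1)
        //   struct Holder { char c; struct S s; } h;
        //   callee(h.s);                 // h.s is only 1-byte aligned
        //
        // The argument slot requires more alignment than the lvalue can
        // provide and getOrEnforceKnownAlignment cannot force it, so the
        // argument is copied into the aligned temporary created below.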
        if (NeedCopy) {
          // Create an aligned temporary, and copy to it.
          Address AI = CreateMemTempWithoutCast(
              I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
          llvm::Value *Val = AI.getPointer();
          if (ArgHasMaybeUndefAttr)
            Val = Builder.CreateFreeze(AI.getPointer());
          IRCallArgs[FirstIRArg] = Val;

          // Emit lifetime markers for the temporary alloca.
          llvm::TypeSize ByvalTempElementSize =
              CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
          llvm::Value *LifetimeSize =
              EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());

          // Add cleanup code to emit the end lifetime marker after the call.
          if (LifetimeSize) // In case we disabled lifetime markers.
            CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);

          // Generate the copy.
          I->copyInto(*this, AI);
        } else {
          // Skip the extra memcpy call.
          auto *T = llvm::PointerType::getWithSamePointeeType(
              cast<llvm::PointerType>(V->getType()),
              CGM.getDataLayout().getAllocaAddrSpace());

          llvm::Value *Val = getTargetHooks().performAddrSpaceCast(
              *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
              true);
          if (ArgHasMaybeUndefAttr)
            Val = Builder.CreateFreeze(Val);
          IRCallArgs[FirstIRArg] = Val;
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (!I->isAggregate())
          V = I->getKnownRValue().getScalarVal();
        else
          V = Builder.CreateLoad(
              I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                             : I->getKnownRValue().getAggregateAddress());

        // Implement swifterror by copying into a new swifterror argument.
        // We'll write back in the normal path out of the call.
        if (CallInfo.getExtParameterInfo(ArgNo).getABI() ==
            ParameterABI::SwiftErrorResult) {
          assert(!swiftErrorTemp.isValid() && "multiple swifterror args");

          QualType pointeeTy = I->Ty->getPointeeType();
          swiftErrorArg = Address(V, ConvertTypeForMem(pointeeTy),
                                  getContext().getTypeAlignInChars(pointeeTy));

          swiftErrorTemp =
              CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
          V = swiftErrorTemp.getPointer();
          cast<llvm::AllocaInst>(V)->setSwiftError(true);

          llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
          Builder.CreateStore(errorValue, swiftErrorTemp);
        }

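        // Sketch of the copy-in half of the swifterror protocol emitted
        // above (illustrative IR, not emitted verbatim):
        //
        //   %swifterror.temp = alloca swifterror ptr
        //   %err = load ptr, ptr %error.arg
        //   store ptr %err, ptr %swifterror.temp
        //   call void @callee(ptr swifterror %swifterror.temp)
        //
        // The matching copy-out happens in the swifterror writeback after
        // the call, further down in this function.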
        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());

        // If the argument doesn't match, perform a bitcast to coerce it. This
        // can happen due to trivial type mismatches.
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));

        if (ArgHasMaybeUndefAttr)
          V = Builder.CreateFreeze(V);
        IRCallArgs[FirstIRArg] = V;
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      Address Src = Address::invalid();
      if (!I->isAggregate()) {
        Src = CreateMemTemp(I->Ty, "coerce");
        I->copyInto(*this, Src);
      } else {
        Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                             : I->getKnownRValue().getAggregateAddress();
      }

      // If the value is offset in memory, apply the offset now.
      Src = emitAddressAtOffset(*this, Src, ArgInfo);

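      // Sketch of the flattening performed below (illustrative, assuming an
      // ABI that coerces a small struct to { i64, i64 }, as x86-64 SysV does
      // for a 16-byte struct): rather than passing one first-class aggregate
      //
      //   call void @callee({ i64, i64 } %agg)
      //
      // we load the elements and pass two scalar arguments:
      //
      //   call void @callee(i64 %agg.0, i64 %agg.1)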
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy = Src.getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          Address TempAlloca = CreateTempAlloca(STy, Src.getAlignment(),
                                                Src.getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
          Src = TempAlloca;
        } else {
          Src = Builder.CreateElementBitCast(Src, STy);
        }

        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          Address EltPtr = Builder.CreateStructGEP(Src, i);
          llvm::Value *LI = Builder.CreateLoad(EltPtr);
          if (ArgHasMaybeUndefAttr)
            LI = Builder.CreateFreeze(LI);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        llvm::Value *Load =
            CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);

        if (CallInfo.isCmseNSCall()) {
          // For certain parameter types, clear padding bits, as they may
          // reveal sensitive information.
          // Small struct/union types are passed as integer arrays.
          auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
          if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
            Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
        }

        if (ArgHasMaybeUndefAttr)
          Load = Builder.CreateFreeze(Load);
        IRCallArgs[FirstIRArg] = Load;
      }

      break;
    }

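    // CoerceAndExpand (used notably by the Swift calling convention) views
    // the argument through a coercion struct and passes each non-padding
    // element as its own IR argument. Illustrative example: a coercion type
    // of { i64, [4 x i8], float } yields two IR arguments (an i64 and a
    // float); the [4 x i8] element is padding and is skipped.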
    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = ArgInfo.getCoerceAndExpandType();
      auto layout = CGM.getDataLayout().getStructLayout(coercionType);

      llvm::Value *tempSize = nullptr;
      Address addr = Address::invalid();
      Address AllocaAddr = Address::invalid();
      if (I->isAggregate()) {
        addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                              : I->getKnownRValue().getAggregateAddress();

      } else {
        RValue RV = I->getKnownRValue();
        assert(RV.isScalar()); // complex should always just be direct

        llvm::Type *scalarType = RV.getScalarVal()->getType();
        auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
        auto scalarAlign = CGM.getDataLayout().getPrefTypeAlign(scalarType);

        // Materialize to a temporary.
        addr = CreateTempAlloca(
            RV.getScalarVal()->getType(),
            CharUnits::fromQuantity(std::max(layout->getAlignment(), scalarAlign)),
            "tmp",
            /*ArraySize=*/nullptr, &AllocaAddr);
        tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());

        Builder.CreateStore(RV.getScalarVal(), addr);
      }

      addr = Builder.CreateElementBitCast(addr, coercionType);

      unsigned IRArgPos = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = Builder.CreateLoad(eltAddr);
        if (ArgHasMaybeUndefAttr)
          elt = Builder.CreateFreeze(elt);
        IRCallArgs[IRArgPos++] = elt;
      }
      assert(IRArgPos == FirstIRArg + NumIRArgs);

      if (tempSize) {
        EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
      }

      break;
    }

    case ABIArgInfo::Expand: {
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
    }
  }

  const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
  llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();

  // If we're using inalloca, set up that argument.
  if (ArgMemory.isValid()) {
    llvm::Value *Arg = ArgMemory.getPointer();
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we will
      // end up with a variadic prototype and an inalloca call site. In such
      // cases, we can't do any parameter mismatch checks. Give up and bitcast
      // the callee.
      unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
      CalleePtr =
          Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        if (!LastParamTy->isOpaquePointerTy()) {
          llvm::StructType *DeclaredTy = cast<llvm::StructType>(
              LastParamTy->getNonOpaquePointerElementType());
          assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
          for (auto DI = DeclaredTy->element_begin(),
                    DE = DeclaredTy->element_end(),
                    FI = FullTy->element_begin();
               DI != DE; ++DI, ++FI)
            assert(*DI == *FI);
        }
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }
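  // Sketch of an inalloca call (illustrative IR; this mirrors the 32-bit
  // Windows C++ ABI, where non-trivial arguments must be constructed in
  // place in the argument area rather than copied):
  //
  //   %argmem = alloca inalloca <{ %struct.A, i32 }>
  //   ; ...construct the fields of %argmem in place...
  //   call void @callee(ptr inalloca(<{ %struct.A, i32 }>) %argmem)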

  // 2. Prepare the function pointer.

  // If the callee is a bitcast of a non-variadic function to have a
  // variadic function pointer type, check to see if we can remove the
  // bitcast. This comes up with unprototyped functions.
  //
  // This makes the IR nicer, but more importantly it ensures that we
  // can inline the function at -O0 if it is marked always_inline.
  auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
                                   llvm::Value *Ptr) -> llvm::Function * {
    if (!CalleeFT->isVarArg())
      return nullptr;

    // Get the underlying value if it's a bitcast.
    if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
      if (CE->getOpcode() == llvm::Instruction::BitCast)
        Ptr = CE->getOperand(0);
    }

    llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
    if (!OrigFn)
      return nullptr;

    llvm::FunctionType *OrigFT = OrigFn->getFunctionType();

    // If the original type is variadic, or if any of the component types
    // disagree, we cannot remove the cast.
    if (OrigFT->isVarArg() ||
        OrigFT->getNumParams() != CalleeFT->getNumParams() ||
        OrigFT->getReturnType() != CalleeFT->getReturnType())
      return nullptr;

    for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
      if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
        return nullptr;

    return OrigFn;
  };

  if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
    CalleePtr = OrigFn;
    IRFuncTy = OrigFn->getFunctionType();
  }
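  // Typical source pattern this handles (hypothetical C):
  //
  //   void f();                 // unprototyped declaration
  //   void g(void) { f(); }    // call site uses a variadic-looking type
  //
  // If @f is actually defined with a matching non-variadic signature, the
  // bitcast is dropped here, so an always_inline f can still be inlined
  // at -O0.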

  // 3. Perform the actual call.

  // Deactivate any cleanups that we're supposed to do immediately before
  // the call.
  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // Assert that the arguments we computed match up. The IR verifier
  // will catch this, but this is a common enough source of problems
  // during IRGen changes that it's way better for debugging to catch
  // it ourselves here.
#ifndef NDEBUG
  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // The inalloca argument can have a different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }
#endif

  // Update the largest vector width if any arguments have vector types.
  for (unsigned i = 0; i < IRCallArgs.size(); ++i)
    LargestVectorWidth = std::max(LargestVectorWidth,
                                  getMaxVectorWidth(IRCallArgs[i]->getType()));
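  // LargestVectorWidth is accumulated across the whole function; it later
  // feeds the "min-legal-vector-width" function attribute so the backend
  // (notably x86) knows which vector widths it must treat as legal.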

  // Compute the calling convention and attributes.
  unsigned CallingConv;
  llvm::AttributeList Attrs;
  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
                             Callee.getAbstractInfo(), Attrs, CallingConv,
                             /*AttrOnCallSite=*/true,
                             /*IsThunk=*/false);

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
    if (FD->hasAttr<StrictFPAttr>())
      // All calls within a strictfp function are marked strictfp.
      Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);

  // Add the call-site nomerge attribute if present.
  if (InNoMergeAttributedStmt)
    Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoMerge);

  // Add the call-site noinline attribute if present.
  if (InNoInlineAttributedStmt)
    Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);

  // Add the call-site always_inline attribute if present.
  if (InAlwaysInlineAttributedStmt)
    Attrs =
        Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);

  // Apply some call-site-specific attributes.
  // TODO: work this into building the attribute set.

  // Apply always_inline to all calls within flatten functions.
  // FIXME: should this really take priority over __try, below?
  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !InNoInlineAttributedStmt &&
      !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
    Attrs =
        Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
  }

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope()) {
    Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
  }

  // Decide whether to use a call or an invoke.
  bool CannotThrow;
  if (currentFunctionUsesSEHTry()) {
    // SEH cares about asynchronous exceptions, so everything can "throw."
    CannotThrow = false;
  } else if (isCleanupPadScope() &&
             EHPersonality::get(*this).isMSVCXXPersonality()) {
    // The MSVC++ personality will implicitly terminate the program if an
    // exception is thrown during a cleanup outside of a try/catch.
    // We don't need to model anything in IR to get this behavior.
    CannotThrow = true;
  } else {
    // Otherwise, nounwind call sites will never throw.
    CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);

    if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
      if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
        CannotThrow = true;
  }
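  // Sketch of the two shapes chosen further below (illustrative IR):
  //
  //   ; CannotThrow, or no landing pad is required:
  //   %call = call i32 @callee(i32 %x)
  //
  //   ; otherwise, the unwind edge must be modeled:
  //   %call = invoke i32 @callee(i32 %x)
  //             to label %invoke.cont unwind label %lpad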

  // If we made a temporary, be sure to clean up after ourselves. Note that we
  // can't depend on being inside of an ExprWithCleanups, so we need to manually
  // pop this cleanup later on. Being eager about this is OK, since this
  // temporary is 'invisible' outside of the callee.
  if (UnusedReturnSizePtr)
    pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
                                         UnusedReturnSizePtr);

  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();

  SmallVector<llvm::OperandBundleDef, 1> BundleList =
      getBundlesForFunclet(CalleePtr);

  if (SanOpts.has(SanitizerKind::KCFI) &&
      !isa_and_nonnull<FunctionDecl>(TargetDecl))
    EmitKCFIOperandBundle(ConcreteCallee, BundleList);
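  // Under -fsanitize=kcfi the call carries a "kcfi" operand bundle holding
  // the expected callee type hash, e.g. (illustrative):
  //
  //   call void %fp(i32 %x) [ "kcfi"(i32 1234567) ]
  //
  // The backend lowers this to a check against the hash emitted in front of
  // the target function's entry point.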

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
    if (FD->hasAttr<StrictFPAttr>())
      // All calls within a strictfp function are marked strictfp.
      Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);

  AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
  Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
  Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  // Emit the actual call/invoke instruction.
  llvm::CallBase *CI;
  if (!InvokeDest) {
    CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
                              BundleList);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CI;

  // If this is within a function that has the guard(nocf) attribute and is an
  // indirect call, add the "guard_nocf" attribute to this call to indicate that
  // Control Flow Guard checks should not be added, even if the call is inlined.
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (const auto *A = FD->getAttr<CFGuardAttr>()) {
      if (A->getGuard() == CFGuardAttr::GuardArg::nocf &&
          !CI->getCalledFunction())
        Attrs = Attrs.addFnAttribute(getLLVMContext(), "guard_nocf");
    }
  }

  // Apply the attributes and calling convention.
  CI->setAttributes(Attrs);
  CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // Apply various metadata.

  if (!CI->getType()->isVoidTy())
    CI->setName("call");

  // Update the largest vector width from the return type.
  LargestVectorWidth =
      std::max(LargestVectorWidth, getMaxVectorWidth(CI->getType()));

  // Insert instrumentation or attach profile metadata at indirect call sites.
  // For more details, see the comment before the definition of
  // IPVK_IndirectCallTarget in InstrProfData.inc.
  if (!CI->getCalledFunction())
    PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
                     CI, CalleePtr);
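  // With profile instrumentation this records the target address of the
  // indirect call; when profiles are applied, the attached value-profile
  // metadata lets the optimizer promote hot targets to guarded direct calls
  // (indirect-call promotion). Direct calls need no such profiling.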

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CI);

  // Set tail call kind if necessary.
  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
    if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
      Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
    else if (IsMustTail)
      Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
  }

  // Add metadata for calls to MSAllocator functions.
  if (getDebugInfo() && TargetDecl &&
      TargetDecl->hasAttr<MSAllocatorAttr>())
    getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);

  // Add metadata if calling an __attribute__((error(""))) or warning function.
  if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) {
    llvm::ConstantInt *Line =
        llvm::ConstantInt::get(Int32Ty, Loc.getRawEncoding());
    llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(Line);
    llvm::MDTuple *MDT = llvm::MDNode::get(getLLVMContext(), {MD});
    CI->setMetadata("srcloc", MDT);
  }
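  // Source pattern this handles (hypothetical):
  //
  //   __attribute__((error("do not call"))) void bad(void);
  //
  // The raw location stored in !srcloc lets the backend point its error or
  // warning diagnostic at this call site if the call survives optimization.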

  // 4. Finish the call.

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRGen to discard
  // unreachable code.
  if (CI->doesNotReturn()) {
    if (UnusedReturnSizePtr)
      PopCleanupBlock();

    // Strip away the noreturn attribute to better diagnose unreachable UB.
    if (SanOpts.has(SanitizerKind::Unreachable)) {
      // Also remove it from the function, since CallBase::hasFnAttr
      // additionally checks attributes of the called function.
      if (auto *F = CI->getCalledFunction())
        F->removeFnAttr(llvm::Attribute::NoReturn);
      CI->removeFnAttr(llvm::Attribute::NoReturn);

      // Avoid incompatibility with ASan which relies on the `noreturn`
      // attribute to insert handler calls.
      if (SanOpts.hasOneOf(SanitizerKind::Address |
                           SanitizerKind::KernelAddress)) {
        SanitizerScope SanScope(this);
        llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
        Builder.SetInsertPoint(CI);
        auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
        llvm::FunctionCallee Fn =
            CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
        EmitNounwindRuntimeCall(Fn);
      }
    }

    EmitUnreachable(Loc);
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expression emitters
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  // If this is a musttail call, return immediately. We do not branch to the
  // epilogue in this case.
  if (IsMustTail) {
    for (auto it = EHStack.find(CurrentCleanupScopeDepth); it != EHStack.end();
         ++it) {
      EHCleanupScope *Cleanup = dyn_cast<EHCleanupScope>(&*it);
      if (!(Cleanup && Cleanup->getCleanup()->isRedundantBeforeReturn()))
        CGM.ErrorUnsupported(MustTailCall, "tail call skipping over cleanups");
    }
    if (CI->getType()->isVoidTy())
      Builder.CreateRetVoid();
    else
      Builder.CreateRet(CI);
    Builder.ClearInsertionPoint();
    EnsureInsertPoint();
    return GetUndefRValue(RetTy);
  }
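  // A musttail call must be followed immediately by a ret of the call's
  // result (or ret void), which is why the epilogue is bypassed above.
  // Required IR shape (illustrative):
  //
  //   %r = musttail call i32 %fp(i32 %x)
  //   ret i32 %r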

  // Perform the swifterror writeback.
  if (swiftErrorTemp.isValid()) {
    llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
    Builder.CreateStore(errorResult, swiftErrorArg);
  }

  // Emit any call-associated writebacks immediately. Arguably this
  // should happen after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);

  // Extract the return value.
  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = RetAI.getCoerceAndExpandType();

      Address addr = SRetPtr;
      addr = Builder.CreateElementBitCast(addr, coercionType);

      assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
      bool requiresExtract = isa<llvm::StructType>(CI->getType());

      unsigned unpaddedIndex = 0;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = CI;
        if (requiresExtract)
          elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
        else
          assert(unpaddedIndex == 0);
        Builder.CreateStore(elt, eltAddr);
      }
      [[fallthrough]];
    }

    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect: {
      RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
      if (UnusedReturnSizePtr)
        PopCleanupBlock();
      return ret;
    }

    case ABIArgInfo::Ignore:
      // Even if the result is being ignored, make sure we still construct
      // the appropriate (undef) return value for our caller.
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          Address DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();

          if (!DestPtr.isValid()) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }