1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliance.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "CGCall.h"
16 #include "ABIInfo.h"
17 #include "CGBlocks.h"
18 #include "CGCXXABI.h"
19 #include "CGCleanup.h"
20 #include "CodeGenFunction.h"
21 #include "CodeGenModule.h"
22 #include "TargetInfo.h"
23 #include "clang/AST/Decl.h"
24 #include "clang/AST/DeclCXX.h"
25 #include "clang/AST/DeclObjC.h"
27 #include "clang/Basic/TargetInfo.h"
28 #include "clang/CodeGen/CGFunctionInfo.h"
29 #include "clang/CodeGen/SwiftCallingConv.h"
30 #include "clang/Frontend/CodeGenOptions.h"
31 #include "llvm/ADT/StringExtras.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/IR/Attributes.h"
34 #include "llvm/IR/CallingConv.h"
35 #include "llvm/IR/CallSite.h"
36 #include "llvm/IR/DataLayout.h"
37 #include "llvm/IR/InlineAsm.h"
38 #include "llvm/IR/Intrinsics.h"
39 #include "llvm/IR/IntrinsicInst.h"
40 #include "llvm/Transforms/Utils/Local.h"
41 using namespace clang;
42 using namespace CodeGen;
43 
44 /***/
45 
46 unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
47  switch (CC) {
48  default: return llvm::CallingConv::C;
49  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
50  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
51  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
52  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
53  case CC_Win64: return llvm::CallingConv::Win64;
54  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
55  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
56  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
57  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
58  // TODO: Add support for __pascal to LLVM.
60  // TODO: Add support for __vectorcall to LLVM.
61  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
62  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
64  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
65  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
66  case CC_Swift: return llvm::CallingConv::Swift;
67  }
68 }
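// For example, a declaration such as
//   void f(int) __attribute__((fastcall));
// carries CC_X86FastCall and is lowered to llvm::CallingConv::X86_FastCall
// here, while conventions with no LLVM equivalent (e.g. CC_X86Pascal, per the
// TODO above) fall through to the default llvm::CallingConv::C.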
69 
70 /// Derives the 'this' type for codegen purposes, i.e. ignoring method
71 /// qualification.
72 /// FIXME: address space qualification?
73 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
74  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
75  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
76 }
77 
78 /// Returns the canonical formal type of the given C++ method.
79 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
80  return MD->getType()->getCanonicalTypeUnqualified()
81  .getAs<FunctionProtoType>();
82 }
83 
84 /// Returns the "extra-canonicalized" return type, which discards
85 /// qualifiers on the return type. Codegen doesn't care about them,
86 /// and it makes ABI code a little easier to be able to assume that
87 /// all parameter and return types are top-level unqualified.
88 static CanQualType GetReturnType(QualType RetTy) {
89  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
90 }
91 
92 /// Arrange the argument and result information for a value of the given
93 /// unprototyped freestanding function type.
94 const CGFunctionInfo &
95 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
96  // When translating an unprototyped function type, always use a
97  // variadic type.
98  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
99  /*instanceMethod=*/false,
100  /*chainCall=*/false, None,
101  FTNP->getExtInfo(), {}, RequiredArgs(0));
102 }
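// For example, a K&R-style C declaration such as
//   int f();
// has no prototype, so its type is arranged as if the function were variadic
// with zero required arguments (RequiredArgs(0) above).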
103 
104 static void addExtParameterInfosForCall(
105  llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
106  const FunctionProtoType *proto,
107  unsigned prefixArgs,
108  unsigned totalArgs) {
109  assert(proto->hasExtParameterInfos());
110  assert(paramInfos.size() <= prefixArgs);
111  assert(proto->getNumParams() + prefixArgs <= totalArgs);
112 
113  paramInfos.reserve(totalArgs);
114 
115  // Add default infos for any prefix args that don't already have infos.
116  paramInfos.resize(prefixArgs);
117 
118  // Add infos for the prototype.
119  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
120  paramInfos.push_back(ParamInfo);
121  // pass_object_size params have no parameter info.
122  if (ParamInfo.hasPassObjectSize())
123  paramInfos.emplace_back();
124  }
125 
126  assert(paramInfos.size() <= totalArgs &&
127  "Did we forget to insert pass_object_size args?");
128  // Add default infos for the variadic and/or suffix arguments.
129  paramInfos.resize(totalArgs);
130 }
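// For example, given
//   void g(void *p __attribute__((pass_object_size(0))));
// the single source-level parameter lowers to two ABI-level parameters (the
// pointer plus an implicit size), so a default-constructed ExtParameterInfo
// is appended for the synthesized size argument.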
131 
132 /// Adds the formal parameters in FPT to the given prefix. If any parameter in
133 /// FPT has pass_object_size attrs, then we'll add parameters for those, too.
134 static void appendParameterTypes(const CodeGenTypes &CGT,
135  SmallVectorImpl<CanQualType> &prefix,
136  SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
137  CanQual<FunctionProtoType> FPT) {
138  // Fast path: don't touch param info if we don't need to.
139  if (!FPT->hasExtParameterInfos()) {
140  assert(paramInfos.empty() &&
141  "We have paramInfos, but the prototype doesn't?");
142  prefix.append(FPT->param_type_begin(), FPT->param_type_end());
143  return;
144  }
145 
146  unsigned PrefixSize = prefix.size();
147  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
148  // parameters; the only thing that can change this is the presence of
149  // pass_object_size. So, we preallocate for the common case.
150  prefix.reserve(prefix.size() + FPT->getNumParams());
151 
152  auto ExtInfos = FPT->getExtParameterInfos();
153  assert(ExtInfos.size() == FPT->getNumParams());
154  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
155  prefix.push_back(FPT->getParamType(I));
156  if (ExtInfos[I].hasPassObjectSize())
157  prefix.push_back(CGT.getContext().getSizeType());
158  }
159 
160  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
161  prefix.size());
162 }
163 
164 /// Arrange the LLVM function layout for a value of the given function
165 /// type, on top of any implicit parameters already stored.
166 static const CGFunctionInfo &
167 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
168  SmallVectorImpl<CanQualType> &prefix,
169  CanQual<FunctionProtoType> FTP,
170  const FunctionDecl *FD) {
171  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
172  RequiredArgs Required =
173  RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
174  // FIXME: Kill copy.
175  appendParameterTypes(CGT, prefix, paramInfos, FTP);
176  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
177 
178  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
179  /*chainCall=*/false, prefix,
180  FTP->getExtInfo(), paramInfos,
181  Required);
182 }
183 
184 /// Arrange the argument and result information for a value of the
185 /// given freestanding function type.
186 const CGFunctionInfo &
187 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
188  const FunctionDecl *FD) {
189  SmallVector<CanQualType, 16> argTypes;
190  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
191  FTP, FD);
192 }
193 
194 static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
195  // Set the appropriate calling convention for the Function.
196  if (D->hasAttr<StdCallAttr>())
197  return CC_X86StdCall;
198 
199  if (D->hasAttr<FastCallAttr>())
200  return CC_X86FastCall;
201 
202  if (D->hasAttr<RegCallAttr>())
203  return CC_X86RegCall;
204 
205  if (D->hasAttr<ThisCallAttr>())
206  return CC_X86ThisCall;
207 
208  if (D->hasAttr<VectorCallAttr>())
209  return CC_X86VectorCall;
210 
211  if (D->hasAttr<PascalAttr>())
212  return CC_X86Pascal;
213 
214  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
215  return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
216 
217  if (D->hasAttr<IntelOclBiccAttr>())
218  return CC_IntelOclBicc;
219 
220  if (D->hasAttr<MSABIAttr>())
221  return IsWindows ? CC_C : CC_Win64;
222 
223  if (D->hasAttr<SysVABIAttr>())
224  return IsWindows ? CC_X86_64SysV : CC_C;
225 
226  if (D->hasAttr<PreserveMostAttr>())
227  return CC_PreserveMost;
228 
229  if (D->hasAttr<PreserveAllAttr>())
230  return CC_PreserveAll;
231 
232  return CC_C;
233 }
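// For example, on a non-Windows x86-64 target a declaration such as
//   void f(void) __attribute__((ms_abi));
// is not the default convention and returns CC_Win64 here, while the same
// attribute on a Windows target already matches the default and collapses to
// CC_C (and symmetrically for sysv_abi).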
234 
235 /// Arrange the argument and result information for a call to an
236 /// unknown C++ non-static member function of the given abstract type.
237 /// (Zero value of RD means we don't have any meaningful "this" argument type,
238 /// so fall back to a generic pointer type).
239 /// The member function must be an ordinary function, i.e. not a
240 /// constructor or destructor.
241 const CGFunctionInfo &
242 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
243  const FunctionProtoType *FTP,
244  const CXXMethodDecl *MD) {
245  SmallVector<CanQualType, 16> argTypes;
246 
247  // Add the 'this' pointer.
248  if (RD)
249  argTypes.push_back(GetThisType(Context, RD));
250  else
251  argTypes.push_back(Context.VoidPtrTy);
252 
253  return ::arrangeLLVMFunctionInfo(
254  *this, true, argTypes,
255  FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
256 }
257 
258 /// Arrange the argument and result information for a declaration or
259 /// definition of the given C++ non-static member function. The
260 /// member function must be an ordinary function, i.e. not a
261 /// constructor or destructor.
262 const CGFunctionInfo &
263 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
264  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
265  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
266 
267  CanQual<FunctionProtoType> prototype = GetFormalType(MD);
268 
269  if (MD->isInstance()) {
270  // The abstract case is perfectly fine.
271  const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
272  return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
273  }
274 
275  return arrangeFreeFunctionType(prototype, MD);
276 }
277 
278 bool CodeGenTypes::inheritingCtorHasParams(
279  const InheritedConstructor &Inherited, CXXCtorType Type) {
280  // Parameters are unnecessary if we're constructing a base class subobject
281  // and the inherited constructor lives in a virtual base.
282  return Type == Ctor_Complete ||
283  !Inherited.getShadowDecl()->constructsVirtualBase() ||
284  !Target.getCXXABI().hasConstructorVariants();
285  }
286 
287 const CGFunctionInfo &
288 CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
289  StructorType Type) {
290 
291  SmallVector<CanQualType, 16> argTypes;
292  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
293  argTypes.push_back(GetThisType(Context, MD->getParent()));
294 
295  bool PassParams = true;
296 
297  GlobalDecl GD;
298  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
299  GD = GlobalDecl(CD, toCXXCtorType(Type));
300 
301  // A base class inheriting constructor doesn't get forwarded arguments
302  // needed to construct a virtual base (or base class thereof).
303  if (auto Inherited = CD->getInheritedConstructor())
304  PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
305  } else {
306  auto *DD = dyn_cast<CXXDestructorDecl>(MD);
307  GD = GlobalDecl(DD, toCXXDtorType(Type));
308  }
309 
310  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
311 
312  // Add the formal parameters.
313  if (PassParams)
314  appendParameterTypes(*this, argTypes, paramInfos, FTP);
315 
316  CGCXXABI::AddedStructorArgs AddedArgs =
317  TheCXXABI.buildStructorSignature(MD, Type, argTypes);
318  if (!paramInfos.empty()) {
319  // Note: prefix implies after the first param.
320  if (AddedArgs.Prefix)
321  paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
322  FunctionProtoType::ExtParameterInfo{});
323  if (AddedArgs.Suffix)
324  paramInfos.append(AddedArgs.Suffix,
325  FunctionProtoType::ExtParameterInfo{});
326  }
327 
328  RequiredArgs required =
329  (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
330  : RequiredArgs::All);
331 
332  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
333  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
334  ? argTypes.front()
335  : TheCXXABI.hasMostDerivedReturn(GD)
336  ? CGM.getContext().VoidPtrTy
337  : Context.VoidTy;
338  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
339  /*chainCall=*/false, argTypes, extInfo,
340  paramInfos, required);
341 }
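// Note that on ABIs where constructors and destructors return 'this' (for
// example the ARM C++ ABI), HasThisReturn(GD) is true and the result type
// computed above is the 'this' pointer type rather than void.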
342 
343 static SmallVector<CanQualType, 16>
344 getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
345  SmallVector<CanQualType, 16> argTypes;
346  for (auto &arg : args)
347  argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
348  return argTypes;
349 }
350 
351 static SmallVector<CanQualType, 16>
352 getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
353  SmallVector<CanQualType, 16> argTypes;
354  for (auto &arg : args)
355  argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
356  return argTypes;
357 }
358 
359 static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
360 getExtParameterInfosForCall(const FunctionProtoType *proto,
361  unsigned prefixArgs, unsigned totalArgs) {
362  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
363  if (proto->hasExtParameterInfos()) {
364  addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
365  }
366  return result;
367 }
368 
369 /// Arrange a call to a C++ method, passing the given arguments.
370 ///
371 /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
372 /// parameter.
373 /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
374 /// args.
375 /// PassProtoArgs indicates whether `args` has args for the parameters in the
376 /// given CXXConstructorDecl.
377 const CGFunctionInfo &
378 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
379  const CXXConstructorDecl *D,
380  CXXCtorType CtorKind,
381  unsigned ExtraPrefixArgs,
382  unsigned ExtraSuffixArgs,
383  bool PassProtoArgs) {
384  // FIXME: Kill copy.
385  SmallVector<CanQualType, 16> ArgTypes;
386  for (const auto &Arg : args)
387  ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
388 
389  // +1 for implicit this, which should always be args[0].
390  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
391 
392  CanQual<FunctionProtoType> FPT = GetFormalType(D);
393  RequiredArgs Required =
394  RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
395  GlobalDecl GD(D, CtorKind);
396  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
397  ? ArgTypes.front()
398  : TheCXXABI.hasMostDerivedReturn(GD)
399  ? CGM.getContext().VoidPtrTy
400  : Context.VoidTy;
401 
402  FunctionType::ExtInfo Info = FPT->getExtInfo();
403  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
404  // If the prototype args are elided, we should only have ABI-specific args,
405  // which never have param info.
406  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
407  // ABI-specific suffix arguments are treated the same as variadic arguments.
408  addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
409  ArgTypes.size());
410  }
411  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
412  /*chainCall=*/false, ArgTypes, Info,
413  ParamInfos, Required);
414 }
415 
416 /// Arrange the argument and result information for the declaration or
417 /// definition of the given function.
418 const CGFunctionInfo &
419 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
420  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
421  if (MD->isInstance())
422  return arrangeCXXMethodDeclaration(MD);
423 
424  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
425 
426  assert(isa<FunctionType>(FTy));
427 
428  // When declaring a function without a prototype, always use a
429  // non-variadic type.
430  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
431  return arrangeLLVMFunctionInfo(
432  noProto->getReturnType(), /*instanceMethod=*/false,
433  /*chainCall=*/false, None, noProto->getExtInfo(), {}, RequiredArgs::All);
434  }
435 
436  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>(), FD);
437 }
438 
439 /// Arrange the argument and result information for the declaration or
440 /// definition of an Objective-C method.
441 const CGFunctionInfo &
442 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
443  // It happens that this is the same as a call with no optional
444  // arguments, except also using the formal 'self' type.
445  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
446 }
447 
448 /// Arrange the argument and result information for the function type
449 /// through which to perform a send to the given Objective-C method,
450 /// using the given receiver type. The receiver type is not always
451 /// the 'self' type of the method or even an Objective-C pointer type.
452 /// This is *not* the right method for actually performing such a
453 /// message send, due to the possibility of optional arguments.
454 const CGFunctionInfo &
455 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
456  QualType receiverType) {
457  SmallVector<CanQualType, 16> argTys;
458  argTys.push_back(Context.getCanonicalParamType(receiverType));
460  // FIXME: Kill copy?
461  for (const auto *I : MD->parameters()) {
462  argTys.push_back(Context.getCanonicalParamType(I->getType()));
463  }
464 
465  FunctionType::ExtInfo einfo;
466  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
467  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
468 
469  if (getContext().getLangOpts().ObjCAutoRefCount &&
470  MD->hasAttr<NSReturnsRetainedAttr>())
471  einfo = einfo.withProducesResult(true);
472 
473  RequiredArgs required =
474  (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
475 
476  return arrangeLLVMFunctionInfo(
477  GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
478  /*chainCall=*/false, argTys, einfo, {}, required);
479 }
480 
481 const CGFunctionInfo &
482 CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
483  const CallArgList &args) {
484  auto argTypes = getArgTypesForCall(Context, args);
485  FunctionType::ExtInfo einfo;
486 
487  return arrangeLLVMFunctionInfo(
488  GetReturnType(returnType), /*instanceMethod=*/false,
489  /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
490 }
491 
492 const CGFunctionInfo &
493 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
494  // FIXME: Do we need to handle ObjCMethodDecl?
495  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
496 
497  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
498  return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));
499 
500  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
501  return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));
502 
503  return arrangeFunctionDeclaration(FD);
504 }
505 
506 /// Arrange a thunk that takes 'this' as the first parameter followed by
507 /// varargs. Return a void pointer, regardless of the actual return type.
508 /// The body of the thunk will end in a musttail call to a function of the
509 /// correct type, and the caller will bitcast the function to the correct
510 /// prototype.
511 const CGFunctionInfo &
512 CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
513  assert(MD->isVirtual() && "only virtual memptrs have thunks");
514  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
515  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
516  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
517  /*chainCall=*/false, ArgTys,
518  FTP->getExtInfo(), {}, RequiredArgs(1));
519 }
520 
521 const CGFunctionInfo &
522 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
523  CXXCtorType CT) {
524  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
525 
526  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
527  SmallVector<CanQualType, 2> ArgTys;
528  const CXXRecordDecl *RD = CD->getParent();
529  ArgTys.push_back(GetThisType(Context, RD));
530  if (CT == Ctor_CopyingClosure)
531  ArgTys.push_back(*FTP->param_type_begin());
532  if (RD->getNumVBases() > 0)
533  ArgTys.push_back(Context.IntTy);
534  CallingConv CC = Context.getDefaultCallingConvention(
535  /*IsVariadic=*/false, /*IsCXXMethod=*/true);
536  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
537  /*chainCall=*/false, ArgTys,
538  FunctionType::ExtInfo(CC), {},
539  RequiredArgs::All);
540 }
541 
542 /// Arrange a call as unto a free function, except possibly with an
543 /// additional number of formal parameters considered required.
544 static const CGFunctionInfo &
545 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
546  CodeGenModule &CGM,
547  const CallArgList &args,
548  const FunctionType *fnType,
549  unsigned numExtraRequiredArgs,
550  bool chainCall) {
551  assert(args.size() >= numExtraRequiredArgs);
552 
552 
553  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
554 
555  // In most cases, there are no optional arguments.
556  RequiredArgs required = RequiredArgs::All;
557 
558  // If we have a variadic prototype, the required arguments are the
559  // extra prefix plus the arguments in the prototype.
560  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
561  if (proto->isVariadic())
562  required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
563 
564  if (proto->hasExtParameterInfos())
565  addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
566  args.size());
567 
568  // If we don't have a prototype at all, but we're supposed to
569  // explicitly use the variadic convention for unprototyped calls,
570  // treat all of the arguments as required but preserve the nominal
571  // possibility of variadics.
572  } else if (CGM.getTargetCodeGenInfo()
573  .isNoProtoCallVariadic(args,
574  cast<FunctionNoProtoType>(fnType))) {
575  required = RequiredArgs(args.size());
576  }
577 
578  // FIXME: Kill copy.
579  SmallVector<CanQualType, 16> argTypes;
580  for (const auto &arg : args)
581  argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
582  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
583  /*instanceMethod=*/false, chainCall,
584  argTypes, fnType->getExtInfo(), paramInfos,
585  required);
586 }
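// For example, a call through an unprototyped function pointer in C,
//   int (*fp)();  ...  fp(1, 2.0);
// hits the FunctionNoProtoType branch above: on targets whose convention for
// unprototyped calls is effectively variadic, every argument actually passed
// is marked required while the nominal variadic-ness is preserved.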
587 
588 /// Figure out the rules for calling a function with the given formal
589 /// type using the given arguments. The arguments are necessary
590 /// because the function might be unprototyped, in which case it's
591 /// target-dependent in crazy ways.
592 const CGFunctionInfo &
593 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
594  const FunctionType *fnType,
595  bool chainCall) {
596  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
597  chainCall ? 1 : 0, chainCall);
598 }
599 
600 /// A block function is essentially a free function with an
601 /// extra implicit argument.
602 const CGFunctionInfo &
603 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
604  const FunctionType *fnType) {
605  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
606  /*chainCall=*/false);
607 }
608 
609 const CGFunctionInfo &
610 CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
611  const FunctionArgList &params) {
612  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
613  auto argTypes = getArgTypesForDeclaration(Context, params);
614 
615  return arrangeLLVMFunctionInfo(
616  GetReturnType(proto->getReturnType()),
617  /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
618  proto->getExtInfo(), paramInfos,
619  RequiredArgs::forPrototypePlus(proto, 1, nullptr));
620 }
621 
622 const CGFunctionInfo &
623 CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
624  const CallArgList &args) {
625  // FIXME: Kill copy.
626  SmallVector<CanQualType, 16> argTypes;
627  for (const auto &Arg : args)
628  argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
629  return arrangeLLVMFunctionInfo(
630  GetReturnType(resultType), /*instanceMethod=*/false,
631  /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
632  /*paramInfos=*/ {}, RequiredArgs::All);
633 }
634 
635 const CGFunctionInfo &
636 CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
637  const FunctionArgList &args) {
638  auto argTypes = getArgTypesForDeclaration(Context, args);
639 
640  return arrangeLLVMFunctionInfo(
641  GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
642  argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
643 }
644 
645 const CGFunctionInfo &
646 CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
647  ArrayRef<CanQualType> argTypes) {
648  return arrangeLLVMFunctionInfo(
649  resultType, /*instanceMethod=*/false, /*chainCall=*/false,
650  argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
651 }
652 
653 /// Arrange a call to a C++ method, passing the given arguments.
654 ///
655 /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
656 /// does not count `this`.
657 const CGFunctionInfo &
658 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
659  const FunctionProtoType *proto,
660  RequiredArgs required,
661  unsigned numPrefixArgs) {
662  assert(numPrefixArgs + 1 <= args.size() &&
663  "Emitting a call with less args than the required prefix?");
664  // Add one to account for `this`. It's a bit awkward here, but we don't count
665  // `this` in similar places elsewhere.
666  auto paramInfos =
667  getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
668 
669  // FIXME: Kill copy.
670  auto argTypes = getArgTypesForCall(Context, args);
671 
672  FunctionType::ExtInfo info = proto->getExtInfo();
673  return arrangeLLVMFunctionInfo(
674  GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
675  /*chainCall=*/false, argTypes, info, paramInfos, required);
676 }
677 
678 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
679  return arrangeLLVMFunctionInfo(
680  getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
681  None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
682 }
683 
684 const CGFunctionInfo &
685 CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
686  const CallArgList &args) {
687  assert(signature.arg_size() <= args.size());
688  if (signature.arg_size() == args.size())
689  return signature;
690 
691  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
692  auto sigParamInfos = signature.getExtParameterInfos();
693  if (!sigParamInfos.empty()) {
694  paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
695  paramInfos.resize(args.size());
696  }
697 
698  auto argTypes = getArgTypesForCall(Context, args);
699 
700  assert(signature.getRequiredArgs().allowsOptionalArgs());
701  return arrangeLLVMFunctionInfo(signature.getReturnType(),
702  signature.isInstanceMethod(),
703  signature.isChainCall(),
704  argTypes,
705  signature.getExtInfo(),
706  paramInfos,
707  signature.getRequiredArgs());
708 }
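// arrangeCall handles the case where a call site passes more arguments than
// the signature has formal parameters (i.e. a variadic call): the extra
// argument types are appended so the lowering sees the full call, while the
// signature's RequiredArgs is carried over unchanged.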
709 
710 namespace clang {
711 namespace CodeGen {
712 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
713 }
714 }
715 
716 /// Arrange the argument and result information for an abstract value
717 /// of a given function type. This is the method which all of the
718 /// above functions ultimately defer to.
719 const CGFunctionInfo &
720 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
721  bool instanceMethod,
722  bool chainCall,
723  ArrayRef<CanQualType> argTypes,
724  FunctionType::ExtInfo info,
725  ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
726  RequiredArgs required) {
727  assert(std::all_of(argTypes.begin(), argTypes.end(),
728  [](CanQualType T) { return T.isCanonicalAsParam(); }));
729 
730  // Lookup or create unique function info.
731  llvm::FoldingSetNodeID ID;
732  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
733  required, resultType, argTypes);
734 
735  void *insertPos = nullptr;
736  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
737  if (FI)
738  return *FI;
739 
740  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
741 
742  // Construct the function info. We co-allocate the ArgInfos.
743  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
744  paramInfos, resultType, argTypes, required);
745  FunctionInfos.InsertNode(FI, insertPos);
746 
747  bool inserted = FunctionsBeingProcessed.insert(FI).second;
748  (void)inserted;
749  assert(inserted && "Recursively being processed?");
750 
751  // Compute ABI information.
752  if (CC == llvm::CallingConv::SPIR_KERNEL) {
753  // Force target independent argument handling for the host visible
754  // kernel functions.
755  computeSPIRKernelABIInfo(CGM, *FI);
756  } else if (info.getCC() == CC_Swift) {
757  swiftcall::computeABIInfo(CGM, *FI);
758  } else {
759  getABIInfo().computeInfo(*FI);
760  }
761 
762  // Loop over all of the computed argument and return value info. If any of
763  // them are direct or extend without a specified coerce type, specify the
764  // default now.
765  ABIArgInfo &retInfo = FI->getReturnInfo();
766  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
767  retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
768 
769  for (auto &I : FI->arguments())
770  if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
771  I.info.setCoerceToType(ConvertType(I.type));
772 
773  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
774  assert(erased && "Not in set?");
775 
776  return *FI;
777 }
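// All of the arrange* entry points above ultimately funnel into this
// function; the FoldingSet lookup means the ABI lowering for a given
// (result type, argument types, convention, ...) signature is computed once
// and the cached CGFunctionInfo is reused for every later request.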
778 
779 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
780  bool instanceMethod,
781  bool chainCall,
782  const FunctionType::ExtInfo &info,
783  ArrayRef<ExtParameterInfo> paramInfos,
784  CanQualType resultType,
785  ArrayRef<CanQualType> argTypes,
786  RequiredArgs required) {
787  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
788 
789  void *buffer =
790  operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
791  argTypes.size() + 1, paramInfos.size()));
792 
793  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
794  FI->CallingConvention = llvmCC;
795  FI->EffectiveCallingConvention = llvmCC;
796  FI->ASTCallingConvention = info.getCC();
797  FI->InstanceMethod = instanceMethod;
798  FI->ChainCall = chainCall;
799  FI->NoReturn = info.getNoReturn();
800  FI->ReturnsRetained = info.getProducesResult();
801  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
802  FI->Required = required;
803  FI->HasRegParm = info.getHasRegParm();
804  FI->RegParm = info.getRegParm();
805  FI->ArgStruct = nullptr;
806  FI->ArgStructAlign = 0;
807  FI->NumArgs = argTypes.size();
808  FI->HasExtParameterInfos = !paramInfos.empty();
809  FI->getArgsBuffer()[0].type = resultType;
810  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
811  FI->getArgsBuffer()[i + 1].type = argTypes[i];
812  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
813  FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
814  return FI;
815 }
816 
817 /***/
818 
819 namespace {
820 // ABIArgInfo::Expand implementation.
821 
822 // Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
823 struct TypeExpansion {
824  enum TypeExpansionKind {
825  // Elements of constant arrays are expanded recursively.
826  TEK_ConstantArray,
827  // Record fields are expanded recursively (but if record is a union, only
828  // the field with the largest size is expanded).
829  TEK_Record,
830  // For complex types, real and imaginary parts are expanded recursively.
831  TEK_Complex,
832  // All other types are not expandable.
833  TEK_None
834  };
835 
836  const TypeExpansionKind Kind;
837 
838  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
839  virtual ~TypeExpansion() {}
840 };
841 
842 struct ConstantArrayExpansion : TypeExpansion {
843  QualType EltTy;
844  uint64_t NumElts;
845 
846  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
847  : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
848  static bool classof(const TypeExpansion *TE) {
849  return TE->Kind == TEK_ConstantArray;
850  }
851 };
852 
853 struct RecordExpansion : TypeExpansion {
854  SmallVector<const CXXBaseSpecifier *, 1> Bases;
855 
856  SmallVector<const FieldDecl *, 1> Fields;
857 
858  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
859  SmallVector<const FieldDecl *, 1> &&Fields)
860  : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
861  Fields(std::move(Fields)) {}
862  static bool classof(const TypeExpansion *TE) {
863  return TE->Kind == TEK_Record;
864  }
865 };
866 
867 struct ComplexExpansion : TypeExpansion {
868  QualType EltTy;
869 
870  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
871  static bool classof(const TypeExpansion *TE) {
872  return TE->Kind == TEK_Complex;
873  }
874 };
875 
876 struct NoExpansion : TypeExpansion {
877  NoExpansion() : TypeExpansion(TEK_None) {}
878  static bool classof(const TypeExpansion *TE) {
879  return TE->Kind == TEK_None;
880  }
881 };
882 } // namespace
883 
884 static std::unique_ptr<TypeExpansion>
885 getTypeExpansion(QualType Ty, const ASTContext &Context) {
886  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
887  return llvm::make_unique<ConstantArrayExpansion>(
888  AT->getElementType(), AT->getSize().getZExtValue());
889  }
890  if (const RecordType *RT = Ty->getAs<RecordType>()) {
891  SmallVector<const CXXBaseSpecifier *, 1> Bases;
892  SmallVector<const FieldDecl *, 1> Fields;
893  const RecordDecl *RD = RT->getDecl();
894  assert(!RD->hasFlexibleArrayMember() &&
895  "Cannot expand structure with flexible array.");
896  if (RD->isUnion()) {
897  // Unions can be here only in degenerate cases - all the fields are the
898  // same after flattening. Thus we have to use the "largest" field.
899  const FieldDecl *LargestFD = nullptr;
900  CharUnits UnionSize = CharUnits::Zero();
901 
902  for (const auto *FD : RD->fields()) {
903  // Skip zero length bitfields.
904  if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
905  continue;
906  assert(!FD->isBitField() &&
907  "Cannot expand structure with bit-field members.");
908  CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
909  if (UnionSize < FieldSize) {
910  UnionSize = FieldSize;
911  LargestFD = FD;
912  }
913  }
914  if (LargestFD)
915  Fields.push_back(LargestFD);
916  } else {
917  if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
918  assert(!CXXRD->isDynamicClass() &&
919  "cannot expand vtable pointers in dynamic classes");
920  for (const CXXBaseSpecifier &BS : CXXRD->bases())
921  Bases.push_back(&BS);
922  }
923 
924  for (const auto *FD : RD->fields()) {
925  // Skip zero length bitfields.
926  if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
927  continue;
928  assert(!FD->isBitField() &&
929  "Cannot expand structure with bit-field members.");
930  Fields.push_back(FD);
931  }
932  }
933  return llvm::make_unique<RecordExpansion>(std::move(Bases),
934  std::move(Fields));
935  }
936  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
937  return llvm::make_unique<ComplexExpansion>(CT->getElementType());
938  }
939  return llvm::make_unique<NoExpansion>();
940 }
941 
942 static int getExpansionSize(QualType Ty, const ASTContext &Context) {
943  auto Exp = getTypeExpansion(Ty, Context);
944  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
945  return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
946  }
947  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
948  int Res = 0;
949  for (auto BS : RExp->Bases)
950  Res += getExpansionSize(BS->getType(), Context);
951  for (auto FD : RExp->Fields)
952  Res += getExpansionSize(FD->getType(), Context);
953  return Res;
954  }
955  if (isa<ComplexExpansion>(Exp.get()))
956  return 2;
957  assert(isa<NoExpansion>(Exp.get()));
958  return 1;
959 }
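// For example, a type such as
//   struct P { int x; int y; _Complex float z; };
// has an expansion size of 4: one IR slot for each int field plus two for the
// real and imaginary parts of the complex member.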
960 
961 void
962 CodeGenTypes::getExpandedTypes(QualType Ty,
963  SmallVectorImpl<llvm::Type *>::iterator &TI) {
964  auto Exp = getTypeExpansion(Ty, Context);
965  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
966  for (int i = 0, n = CAExp->NumElts; i < n; i++) {
967  getExpandedTypes(CAExp->EltTy, TI);
968  }
969  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
970  for (auto BS : RExp->Bases)
971  getExpandedTypes(BS->getType(), TI);
972  for (auto FD : RExp->Fields)
973  getExpandedTypes(FD->getType(), TI);
974  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
975  llvm::Type *EltTy = ConvertType(CExp->EltTy);
976  *TI++ = EltTy;
977  *TI++ = EltTy;
978  } else {
979  assert(isa<NoExpansion>(Exp.get()));
980  *TI++ = ConvertType(Ty);
981  }
982 }
983 
984 static void forConstantArrayExpansion(CodeGenFunction &CGF,
985  ConstantArrayExpansion *CAE,
986  Address BaseAddr,
987  llvm::function_ref<void(Address)> Fn) {
988  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
989  CharUnits EltAlign =
990  BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
991 
992  for (int i = 0, n = CAE->NumElts; i < n; i++) {
993  llvm::Value *EltAddr =
994  CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
995  Fn(Address(EltAddr, EltAlign));
996  }
997 }
998 
999 void CodeGenFunction::ExpandTypeFromArgs(
1000  QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
1001  assert(LV.isSimple() &&
1002  "Unexpected non-simple lvalue during struct expansion.");
1003 
1004  auto Exp = getTypeExpansion(Ty, getContext());
1005  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1006  forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
1007  [&](Address EltAddr) {
1008  LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1009  ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1010  });
1011  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1012  Address This = LV.getAddress();
1013  for (const CXXBaseSpecifier *BS : RExp->Bases) {
1014  // Perform a single step derived-to-base conversion.
1015  Address Base =
1016  GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1017  /*NullCheckValue=*/false, SourceLocation());
1018  LValue SubLV = MakeAddrLValue(Base, BS->getType());
1019 
1020  // Recurse onto bases.
1021  ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1022  }
1023  for (auto FD : RExp->Fields) {
1024  // FIXME: What are the right qualifiers here?
1025  LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1026  ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1027  }
1028  } else if (isa<ComplexExpansion>(Exp.get())) {
1029  auto realValue = *AI++;
1030  auto imagValue = *AI++;
1031  EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1032  } else {
1033  assert(isa<NoExpansion>(Exp.get()));
1034  EmitStoreThroughLValue(RValue::get(*AI++), LV);
1035  }
1036 }
1037 
1038 void CodeGenFunction::ExpandTypeToArgs(
1039  QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
1040  SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1041  auto Exp = getTypeExpansion(Ty, getContext());
1042  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1043  forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
1044  [&](Address EltAddr) {
1045  RValue EltRV =
1046  convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
1047  ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
1048  });
1049  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1050  Address This = RV.getAggregateAddress();
1051  for (const CXXBaseSpecifier *BS : RExp->Bases) {
1052  // Perform a single step derived-to-base conversion.
1053  Address Base =
1054  GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1055  /*NullCheckValue=*/false, SourceLocation());
1056  RValue BaseRV = RValue::getAggregate(Base);
1057 
1058  // Recurse onto bases.
1059  ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
1060  IRCallArgPos);
1061  }
1062 
1063  LValue LV = MakeAddrLValue(This, Ty);
1064  for (auto FD : RExp->Fields) {
1065  RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
1066  ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
1067  IRCallArgPos);
1068  }
1069  } else if (isa<ComplexExpansion>(Exp.get())) {
1070  ComplexPairTy CV = RV.getComplexVal();
1071  IRCallArgs[IRCallArgPos++] = CV.first;
1072  IRCallArgs[IRCallArgPos++] = CV.second;
1073  } else {
1074  assert(isa<NoExpansion>(Exp.get()));
1075  assert(RV.isScalar() &&
1076  "Unexpected non-scalar rvalue during struct expansion.");
1077 
1078  // Insert a bitcast as needed.
1079  llvm::Value *V = RV.getScalarVal();
1080  if (IRCallArgPos < IRFuncTy->getNumParams() &&
1081  V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1082  V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1083 
1084  IRCallArgs[IRCallArgPos++] = V;
1085  }
1086 }
1087 
1088 /// Create a temporary allocation for the purposes of coercion.
1089 static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1090  CharUnits MinAlign) {
1091  // Don't use an alignment that's worse than what LLVM would prefer.
1092  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1093  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1094 
1095  return CGF.CreateTempAlloca(Ty, Align);
1096 }
1097 
1098 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1099 /// accessing some number of bytes out of it, try to gep into the struct to get
1100 /// at its inner goodness. Dive as deep as possible without entering an element
1101 /// with an in-memory size smaller than DstSize.
1102 static Address
1103 EnterStructPointerForCoercedAccess(Address SrcPtr,
1104  llvm::StructType *SrcSTy,
1105  uint64_t DstSize, CodeGenFunction &CGF) {
1106  // We can't dive into a zero-element struct.
1107  if (SrcSTy->getNumElements() == 0) return SrcPtr;
1108 
1109  llvm::Type *FirstElt = SrcSTy->getElementType(0);
1110 
1111  // If the first elt is at least as large as what we're looking for, or if the
1112  // first element is the same size as the whole struct, we can enter it. The
1113  // comparison must be made on the store size and not the alloca size. Using
1114  // the alloca size may overstate the size of the load.
1115  uint64_t FirstEltSize =
1116  CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1117  if (FirstEltSize < DstSize &&
1118  FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1119  return SrcPtr;
1120 
1121  // GEP into the first element.
1122  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");
1123 
1124  // If the first element is a struct, recurse.
1125  llvm::Type *SrcTy = SrcPtr.getElementType();
1126  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1127  return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1128 
1129  return SrcPtr;
1130 }
1131 
1132 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1133 /// are either integers or pointers. This does a truncation of the value if it
1134 /// is too large or a zero extension if it is too small.
1135 ///
1136 /// This behaves as if the value were coerced through memory, so on big-endian
1137 /// targets the high bits are preserved in a truncation, while little-endian
1138 /// targets preserve the low bits.
1139 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1140  llvm::Type *Ty,
1141  CodeGenFunction &CGF) {
1142  if (Val->getType() == Ty)
1143  return Val;
1144 
1145  if (isa<llvm::PointerType>(Val->getType())) {
1146  // If this is Pointer->Pointer avoid conversion to and from int.
1147  if (isa<llvm::PointerType>(Ty))
1148  return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1149 
1150  // Convert the pointer to an integer so we can play with its width.
1151  Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1152  }
1153 
1154  llvm::Type *DestIntTy = Ty;
1155  if (isa<llvm::PointerType>(DestIntTy))
1156  DestIntTy = CGF.IntPtrTy;
1157 
1158  if (Val->getType() != DestIntTy) {
1159  const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1160  if (DL.isBigEndian()) {
1161  // Preserve the high bits on big-endian targets.
1162  // That is what memory coercion does.
1163  uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1164  uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1165 
1166  if (SrcSize > DstSize) {
1167  Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1168  Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1169  } else {
1170  Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1171  Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1172  }
1173  } else {
1174  // Little-endian targets preserve the low bits. No shifts required.
1175  Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1176  }
1177  }
1178 
1179  if (isa<llvm::PointerType>(Ty))
1180  Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1181  return Val;
1182 }
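// For example, coercing an i64 value to i32 keeps the low 32 bits on a
// little-endian target, but on a big-endian target the high 32 bits are
// shifted down first, matching what a store followed by a narrower load
// through memory would produce.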
1183 
1184 
1185 
1186 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1187 /// a pointer to an object of type \arg Ty, known to be aligned to
1188 /// \arg SrcAlign bytes.
1189 ///
1190 /// This safely handles the case when the src type is smaller than the
1191 /// destination type; in this situation the values of bits which are not
1192 /// present in the src are undefined.
1193 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1194  CodeGenFunction &CGF) {
1195  llvm::Type *SrcTy = Src.getElementType();
1196 
1197  // If SrcTy and Ty are the same, just do a load.
1198  if (SrcTy == Ty)
1199  return CGF.Builder.CreateLoad(Src);
1200 
1201  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1202 
1203  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1204  Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
1205  SrcTy = Src.getType()->getElementType();
1206  }
1207 
1208  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1209 
1210  // If the source and destination are integer or pointer types, just do an
1211  // extension or truncation to the desired type.
1212  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1213  (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1214  llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1215  return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1216  }
1217 
1218  // If load is legal, just bitcast the src pointer.
1219  if (SrcSize >= DstSize) {
1220  // Generally SrcSize is never greater than DstSize, since this means we are
1221  // losing bits. However, this can happen in cases where the structure has
1222  // additional padding, for example due to a user specified alignment.
1223  //
1224  // FIXME: Assert that we aren't truncating non-padding bits when we have
1225  // access to that information.
1226  Src = CGF.Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(Ty));
1227  return CGF.Builder.CreateLoad(Src);
1228  }
1229 
1230  // Otherwise do coercion through memory. This is stupid, but simple.
1231  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
1232  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
1233  Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
1234  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
1235  llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
1236  false);
1237  return CGF.Builder.CreateLoad(Tmp);
1238 }
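// For example, when a struct { int a, b; } argument has been coerced to a
// single i64 (as on a typical 64-bit target such as x86-64 SysV), loading it
// back from the { i32, i32 } alloca takes the SrcSize >= DstSize path above
// and simply bitcasts the pointer before issuing one i64 load.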
1239 
1240 // Function to store a first-class aggregate into memory. We prefer to
1241 // store the elements rather than the aggregate to be more friendly to
1242 // fast-isel.
1243 // FIXME: Do we need to recurse here?
1244 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
1245  Address Dest, bool DestIsVolatile) {
1246  // Prefer scalar stores to first-class aggregate stores.
1247  if (llvm::StructType *STy =
1248  dyn_cast<llvm::StructType>(Val->getType())) {
1249  const llvm::StructLayout *Layout =
1250  CGF.CGM.getDataLayout().getStructLayout(STy);
1251 
1252  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1253  auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
1254  Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
1255  llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
1256  CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1257  }
1258  } else {
1259  CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
1260  }
1261 }
1262 
1263 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1264 /// where the source and destination may have different types. The
1265 /// destination is known to be aligned to \arg DstAlign bytes.
1266 ///
1267 /// This safely handles the case when the src type is larger than the
1268 /// destination type; the upper bits of the src will be lost.
1269 static void CreateCoercedStore(llvm::Value *Src,
1270  Address Dst,
1271  bool DstIsVolatile,
1272  CodeGenFunction &CGF) {
1273  llvm::Type *SrcTy = Src->getType();
1274  llvm::Type *DstTy = Dst.getType()->getElementType();
1275  if (SrcTy == DstTy) {
1276  CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1277  return;
1278  }
1279 
1280  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1281 
1282  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1283  Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
1284  DstTy = Dst.getType()->getElementType();
1285  }
1286 
1287  // If the source and destination are integer or pointer types, just do an
1288  // extension or truncation to the desired type.
1289  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1290  (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1291  Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1292  CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1293  return;
1294  }
1295 
1296  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1297 
1298  // If store is legal, just bitcast the src pointer.
1299  if (SrcSize <= DstSize) {
1300  Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1301  BuildAggStore(CGF, Src, Dst, DstIsVolatile);
1302  } else {
1303  // Otherwise do coercion through memory. This is stupid, but
1304  // simple.
1305 
1306  // Generally SrcSize is never greater than DstSize, since this means we are
1307  // losing bits. However, this can happen in cases where the structure has
1308  // additional padding, for example due to a user specified alignment.
1309  //
1310  // FIXME: Assert that we aren't truncating non-padding bits when we have
1311  // access to that information.
1312  Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1313  CGF.Builder.CreateStore(Src, Tmp);
1314  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
1315  Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
1316  CGF.Builder.CreateMemCpy(DstCasted, Casted,
1317  llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
1318  false);
1319  }
1320 }
1321 
1322 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1323  const ABIArgInfo &info) {
1324  if (unsigned offset = info.getDirectOffset()) {
1325  addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1326  addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1327  CharUnits::fromQuantity(offset));
1328  addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1329  }
1330  return addr;
1331 }
1332 
1333 namespace {
1334 
1335 /// Encapsulates information about the way function arguments from
1336 /// CGFunctionInfo should be passed to actual LLVM IR function.
1337 class ClangToLLVMArgMapping {
1338  static const unsigned InvalidIndex = ~0U;
1339  unsigned InallocaArgNo;
1340  unsigned SRetArgNo;
1341  unsigned TotalIRArgs;
1342 
1343  /// Arguments of LLVM IR function corresponding to single Clang argument.
1344  struct IRArgs {
1345  unsigned PaddingArgIndex;
1346  // Argument is expanded to IR arguments at positions
1347  // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1348  unsigned FirstArgIndex;
1349  unsigned NumberOfArgs;
1350 
1351  IRArgs()
1352  : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1353  NumberOfArgs(0) {}
1354  };
1355 
1356  SmallVector<IRArgs, 8> ArgInfo;
1357 
1358 public:
1359  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1360  bool OnlyRequiredArgs = false)
1361  : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1362  ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1363  construct(Context, FI, OnlyRequiredArgs);
1364  }
1365 
1366  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1367  unsigned getInallocaArgNo() const {
1368  assert(hasInallocaArg());
1369  return InallocaArgNo;
1370  }
1371 
1372  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1373  unsigned getSRetArgNo() const {
1374  assert(hasSRetArg());
1375  return SRetArgNo;
1376  }
1377 
1378  unsigned totalIRArgs() const { return TotalIRArgs; }
1379 
1380  bool hasPaddingArg(unsigned ArgNo) const {
1381  assert(ArgNo < ArgInfo.size());
1382  return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1383  }
1384  unsigned getPaddingArgNo(unsigned ArgNo) const {
1385  assert(hasPaddingArg(ArgNo));
1386  return ArgInfo[ArgNo].PaddingArgIndex;
1387  }
1388 
1389  /// Returns index of first IR argument corresponding to ArgNo, and their
1390  /// quantity.
1391  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1392  assert(ArgNo < ArgInfo.size());
1393  return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1394  ArgInfo[ArgNo].NumberOfArgs);
1395  }
1396 
1397 private:
1398  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1399  bool OnlyRequiredArgs);
1400 };
1401 
1402 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1403  const CGFunctionInfo &FI,
1404  bool OnlyRequiredArgs) {
1405  unsigned IRArgNo = 0;
1406  bool SwapThisWithSRet = false;
1407  const ABIArgInfo &RetAI = FI.getReturnInfo();
1408 
1409  if (RetAI.getKind() == ABIArgInfo::Indirect) {
1410  SwapThisWithSRet = RetAI.isSRetAfterThis();
1411  SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1412  }
1413 
1414  unsigned ArgNo = 0;
1415  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1416  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1417  ++I, ++ArgNo) {
1418  assert(I != FI.arg_end());
1419  QualType ArgType = I->type;
1420  const ABIArgInfo &AI = I->info;
1421  // Collect data about IR arguments corresponding to Clang argument ArgNo.
1422  auto &IRArgs = ArgInfo[ArgNo];
1423 
1424  if (AI.getPaddingType())
1425  IRArgs.PaddingArgIndex = IRArgNo++;
1426 
1427  switch (AI.getKind()) {
1428  case ABIArgInfo::Extend:
1429  case ABIArgInfo::Direct: {
1430  // FIXME: handle sseregparm someday...
1431  llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1432  if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1433  IRArgs.NumberOfArgs = STy->getNumElements();
1434  } else {
1435  IRArgs.NumberOfArgs = 1;
1436  }
1437  break;
1438  }
1439  case ABIArgInfo::Indirect:
1440  IRArgs.NumberOfArgs = 1;
1441  break;
1442  case ABIArgInfo::Ignore:
1443  case ABIArgInfo::InAlloca:
1444  // ignore and inalloca don't have matching LLVM parameters.
1445  IRArgs.NumberOfArgs = 0;
1446  break;
1447  case ABIArgInfo::CoerceAndExpand:
1448  IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1449  break;
1450  case ABIArgInfo::Expand:
1451  IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1452  break;
1453  }
1454 
1455  if (IRArgs.NumberOfArgs > 0) {
1456  IRArgs.FirstArgIndex = IRArgNo;
1457  IRArgNo += IRArgs.NumberOfArgs;
1458  }
1459 
1460  // Skip over the sret parameter when it comes second. We already handled it
1461  // above.
1462  if (IRArgNo == 1 && SwapThisWithSRet)
1463  IRArgNo++;
1464  }
1465  assert(ArgNo == ArgInfo.size());
1466 
1467  if (FI.usesInAlloca())
1468  InallocaArgNo = IRArgNo++;
1469 
1470  TotalIRArgs = IRArgNo;
1471 }
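// As a concrete illustration: for a function whose return is Indirect (sret)
// and which has two Direct scalar parameters, the mapping becomes
//   IR arg 0 = sret pointer, IR arg 1 = param 0, IR arg 2 = param 1,
// and TotalIRArgs is 3; Ignore and InAlloca parameters contribute no IR
// arguments of their own.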
1472 } // namespace
1473 
1474 /***/
1475 
1476 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1477  return FI.getReturnInfo().isIndirect();
1478 }
1479 
1480 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1481  return ReturnTypeUsesSRet(FI) &&
1482  getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1483 }
1484 
1485 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1486  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1487  switch (BT->getKind()) {
1488  default:
1489  return false;
1490  case BuiltinType::Float:
1491  return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1492  case BuiltinType::Double:
1493  return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1494  case BuiltinType::LongDouble:
1495  return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1496  }
1497  }
1498 
1499  return false;
1500 }
1501 
1502 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1503  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1504  if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1505  if (BT->getKind() == BuiltinType::LongDouble)
1506  return getTarget().useObjCFP2RetForComplexLongDouble();
1507  }
1508  }
1509 
1510  return false;
1511 }
1512 
1513 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1514  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1515  return GetFunctionType(FI);
1516 }
1517 
1518 llvm::FunctionType *
1519 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1520 
1521  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1522  (void)Inserted;
1523  assert(Inserted && "Recursively being processed?");
1524 
1525  llvm::Type *resultType = nullptr;
1526  const ABIArgInfo &retAI = FI.getReturnInfo();
1527  switch (retAI.getKind()) {
1528  case ABIArgInfo::Expand:
1529  llvm_unreachable("Invalid ABI kind for return argument");
1530 
1531  case ABIArgInfo::Extend:
1532  case ABIArgInfo::Direct:
1533  resultType = retAI.getCoerceToType();
1534  break;
1535 
1536  case ABIArgInfo::InAlloca:
1537  if (retAI.getInAllocaSRet()) {
1538  // sret things on win32 aren't void, they return the sret pointer.
1539  QualType ret = FI.getReturnType();
1540  llvm::Type *ty = ConvertType(ret);
1541  unsigned addressSpace = Context.getTargetAddressSpace(ret);
1542  resultType = llvm::PointerType::get(ty, addressSpace);
1543  } else {
1544  resultType = llvm::Type::getVoidTy(getLLVMContext());
1545  }
1546  break;
1547 
1548  case ABIArgInfo::Indirect:
1549  case ABIArgInfo::Ignore:
1550  resultType = llvm::Type::getVoidTy(getLLVMContext());
1551  break;
1552 
1553  case ABIArgInfo::CoerceAndExpand:
1554  resultType = retAI.getUnpaddedCoerceAndExpandType();
1555  break;
1556  }
1557 
1558  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1559  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1560 
1561  // Add type for sret argument.
1562  if (IRFunctionArgs.hasSRetArg()) {
1563  QualType Ret = FI.getReturnType();
1564  llvm::Type *Ty = ConvertType(Ret);
1565  unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1566  ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1567  llvm::PointerType::get(Ty, AddressSpace);
1568  }
1569 
1570  // Add type for inalloca argument.
1571  if (IRFunctionArgs.hasInallocaArg()) {
1572  auto ArgStruct = FI.getArgStruct();
1573  assert(ArgStruct);
1574  ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1575  }
1576 
1577  // Add in all of the required arguments.
1578  unsigned ArgNo = 0;
1579  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1580  ie = it + FI.getNumRequiredArgs();
1581  for (; it != ie; ++it, ++ArgNo) {
1582  const ABIArgInfo &ArgInfo = it->info;
1583 
1584  // Insert a padding type to ensure proper alignment.
1585  if (IRFunctionArgs.hasPaddingArg(ArgNo))
1586  ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1587  ArgInfo.getPaddingType();
1588 
1589  unsigned FirstIRArg, NumIRArgs;
1590  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1591 
1592  switch (ArgInfo.getKind()) {
1593  case ABIArgInfo::Ignore:
1594  case ABIArgInfo::InAlloca:
1595  assert(NumIRArgs == 0);
1596  break;
1597 
1598  case ABIArgInfo::Indirect: {
1599  assert(NumIRArgs == 1);
1600  // indirect arguments are always on the stack, which is alloca addr space.
1601  llvm::Type *LTy = ConvertTypeForMem(it->type);
1602  ArgTypes[FirstIRArg] = LTy->getPointerTo(
1603  CGM.getDataLayout().getAllocaAddrSpace());
1604  break;
1605  }
1606 
1607  case ABIArgInfo::Extend:
1608  case ABIArgInfo::Direct: {
1609  // Fast-isel and the optimizer generally like scalar values better than
1610  // FCAs, so we flatten them if this is safe to do for this argument.
1611  llvm::Type *argType = ArgInfo.getCoerceToType();
1612  llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1613  if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1614  assert(NumIRArgs == st->getNumElements());
1615  for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1616  ArgTypes[FirstIRArg + i] = st->getElementType(i);
1617  } else {
1618  assert(NumIRArgs == 1);
1619  ArgTypes[FirstIRArg] = argType;
1620  }
1621  break;
1622  }
1623 
1624  case ABIArgInfo::CoerceAndExpand: {
1625  auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1626  for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1627  *ArgTypesIter++ = EltTy;
1628  }
1629  assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1630  break;
1631  }
1632 
1633  case ABIArgInfo::Expand:
1634  auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1635  getExpandedTypes(it->type, ArgTypesIter);
1636  assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1637  break;
1638  }
1639  }
1640 
1641  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1642  assert(Erased && "Not in set?");
1643 
1644  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1645 }
1646 
1647 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1648  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1649  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1650 
1651  if (!isFuncTypeConvertible(FPT))
1652  return llvm::StructType::get(getLLVMContext());
1653 
1654  const CGFunctionInfo *Info;
1655  if (isa<CXXDestructorDecl>(MD))
1656  Info =
1657  &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
1658  else
1659  Info = &arrangeCXXMethodDeclaration(MD);
1660  return GetFunctionType(*Info);
1661 }
1662 
1663 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1664  llvm::AttrBuilder &FuncAttrs,
1665  const FunctionProtoType *FPT) {
1666  if (!FPT)
1667  return;
1668 
1669  if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1670  FPT->isNothrow(Ctx))
1671  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1672 }
1673 
1674 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1675  bool AttrOnCallSite,
1676  llvm::AttrBuilder &FuncAttrs) {
1677  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1678  if (!HasOptnone) {
1679  if (CodeGenOpts.OptimizeSize)
1680  FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1681  if (CodeGenOpts.OptimizeSize == 2)
1682  FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1683  }
1684 
1685  if (CodeGenOpts.DisableRedZone)
1686  FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1687  if (CodeGenOpts.NoImplicitFloat)
1688  FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1689 
1690  if (AttrOnCallSite) {
1691  // Attributes that should go on the call site only.
1692  if (!CodeGenOpts.SimplifyLibCalls ||
1693  CodeGenOpts.isNoBuiltinFunc(Name.data()))
1694  FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1695  if (!CodeGenOpts.TrapFuncName.empty())
1696  FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1697  } else {
1698  // Attributes that should go on the function, but not the call site.
1699  if (!CodeGenOpts.DisableFPElim) {
1700  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1701  } else if (CodeGenOpts.OmitLeafFramePointer) {
1702  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1703  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1704  } else {
1705  FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1706  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1707  }
1708 
1709  FuncAttrs.addAttribute("less-precise-fpmad",
1710  llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1711 
1712  if (!CodeGenOpts.FPDenormalMode.empty())
1713  FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
1714 
1715  FuncAttrs.addAttribute("no-trapping-math",
1716  llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1717 
1718  // TODO: Are these all needed?
1719  // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1720  FuncAttrs.addAttribute("no-infs-fp-math",
1721  llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1722  FuncAttrs.addAttribute("no-nans-fp-math",
1723  llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1724  FuncAttrs.addAttribute("unsafe-fp-math",
1725  llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1726  FuncAttrs.addAttribute("use-soft-float",
1727  llvm::toStringRef(CodeGenOpts.SoftFloat));
1728  FuncAttrs.addAttribute("stack-protector-buffer-size",
1729  llvm::utostr(CodeGenOpts.SSPBufferSize));
1730  FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1731  llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1732  FuncAttrs.addAttribute(
1733  "correctly-rounded-divide-sqrt-fp-math",
1734  llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1735 
1736  // TODO: Reciprocal estimate codegen options should apply to instructions?
1737  std::vector<std::string> &Recips = getTarget().getTargetOpts().Reciprocals;
1738  if (!Recips.empty())
1739  FuncAttrs.addAttribute("reciprocal-estimates",
1740  llvm::join(Recips.begin(), Recips.end(), ","));
1741 
1742  if (CodeGenOpts.StackRealignment)
1743  FuncAttrs.addAttribute("stackrealign");
1744  if (CodeGenOpts.Backchain)
1745  FuncAttrs.addAttribute("backchain");
1746  }
1747 
1748  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1749  // Conservatively, mark all functions and calls in CUDA as convergent
1750  // (meaning, they may call an intrinsically convergent op, such as
1751  // __syncthreads(), and so can't have certain optimizations applied around
1752  // them). LLVM will remove this attribute where it safely can.
1753  FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1754 
1755  // Exceptions aren't supported in CUDA device code.
1756  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1757 
1758  // Respect -fcuda-flush-denormals-to-zero.
1759  if (getLangOpts().CUDADeviceFlushDenormalsToZero)
1760  FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1761  }
1762 }
1763 
1764 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1765  llvm::AttrBuilder FuncAttrs;
1766  ConstructDefaultFnAttrList(F.getName(),
1767  F.hasFnAttribute(llvm::Attribute::OptimizeNone),
1768  /* AttrOnCallsite = */ false, FuncAttrs);
1769  F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1770 }
1771 
1772 void CodeGenModule::ConstructAttributeList(
1773  StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1774  llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1775  llvm::AttrBuilder FuncAttrs;
1776  llvm::AttrBuilder RetAttrs;
1777 
1778  CallingConv = FI.getEffectiveCallingConvention();
1779  if (FI.isNoReturn())
1780  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1781 
1782  // If we have information about the function prototype, we can learn
1783  // attributes from there.
1784  AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1785  CalleeInfo.getCalleeFunctionProtoType());
1786 
1787  const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
1788 
1789  bool HasOptnone = false;
1790  // FIXME: handle sseregparm someday...
1791  if (TargetDecl) {
1792  if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1793  FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1794  if (TargetDecl->hasAttr<NoThrowAttr>())
1795  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1796  if (TargetDecl->hasAttr<NoReturnAttr>())
1797  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1798  if (TargetDecl->hasAttr<ColdAttr>())
1799  FuncAttrs.addAttribute(llvm::Attribute::Cold);
1800  if (TargetDecl->hasAttr<NoDuplicateAttr>())
1801  FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1802  if (TargetDecl->hasAttr<ConvergentAttr>())
1803  FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1804 
1805  if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1806  AddAttributesFromFunctionProtoType(
1807  getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1808  // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1809  // These attributes are not inherited by overloads.
1810  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1811  if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1812  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1813  }
1814 
1815  // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1816  if (TargetDecl->hasAttr<ConstAttr>()) {
1817  FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1818  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1819  } else if (TargetDecl->hasAttr<PureAttr>()) {
1820  FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1821  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1822  } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1823  FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1824  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1825  }
1826  if (TargetDecl->hasAttr<RestrictAttr>())
1827  RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1828  if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
1829  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1830  if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1831  FuncAttrs.addAttribute("no_caller_saved_registers");
1832 
1833  HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1834  if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1835  Optional<unsigned> NumElemsParam;
1836  // alloc_size args are base-1, 0 means not present.
1837  if (unsigned N = AllocSize->getNumElemsParam())
1838  NumElemsParam = N - 1;
1839  FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam() - 1,
1840  NumElemsParam);
1841  }
1842  }
1843 
1844  ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1845 
1846  if (CodeGenOpts.EnableSegmentedStacks &&
1847  !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1848  FuncAttrs.addAttribute("split-stack");
1849 
1850  if (!AttrOnCallSite) {
1851  bool DisableTailCalls =
1852  CodeGenOpts.DisableTailCalls ||
1853  (TargetDecl && (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1854  TargetDecl->hasAttr<AnyX86InterruptAttr>()));
1855  FuncAttrs.addAttribute("disable-tail-calls",
1856  llvm::toStringRef(DisableTailCalls));
1857 
1858  // Add target-cpu and target-features attributes to functions. If
1859  // we have a decl for the function and it has a target attribute then
1860  // parse that and add it to the feature set.
1861  StringRef TargetCPU = getTarget().getTargetOpts().CPU;
1862  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
1863  if (FD && FD->hasAttr<TargetAttr>()) {
1864  llvm::StringMap<bool> FeatureMap;
1865  getFunctionFeatureMap(FeatureMap, FD);
1866 
1867  // Produce the canonical string for this set of features.
1868  std::vector<std::string> Features;
1869  for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
1870  ie = FeatureMap.end();
1871  it != ie; ++it)
1872  Features.push_back((it->second ? "+" : "-") + it->first().str());
1873 
1874  // Now add the target-cpu and target-features to the function.
1875  // While we populated the feature map above, we still need to
1876  // get and parse the target attribute so we can get the cpu for
1877  // the function.
1878  const auto *TD = FD->getAttr<TargetAttr>();
1879  TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
1880  if (ParsedAttr.Architecture != "")
1881  TargetCPU = ParsedAttr.Architecture;
1882  if (TargetCPU != "")
1883  FuncAttrs.addAttribute("target-cpu", TargetCPU);
1884  if (!Features.empty()) {
1885  std::sort(Features.begin(), Features.end());
1886  FuncAttrs.addAttribute(
1887  "target-features",
1888  llvm::join(Features.begin(), Features.end(), ","));
1889  }
1890  } else {
1891  // Otherwise just add the existing target cpu and target features to the
1892  // function.
1893  std::vector<std::string> &Features = getTarget().getTargetOpts().Features;
1894  if (TargetCPU != "")
1895  FuncAttrs.addAttribute("target-cpu", TargetCPU);
1896  if (!Features.empty()) {
1897  std::sort(Features.begin(), Features.end());
1898  FuncAttrs.addAttribute(
1899  "target-features",
1900  llvm::join(Features.begin(), Features.end(), ","));
1901  }
1902  }
1903  }
1904 
1905  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1906 
1907  QualType RetTy = FI.getReturnType();
1908  const ABIArgInfo &RetAI = FI.getReturnInfo();
1909  switch (RetAI.getKind()) {
1910  case ABIArgInfo::Extend:
1911  if (RetTy->hasSignedIntegerRepresentation())
1912  RetAttrs.addAttribute(llvm::Attribute::SExt);
1913  else if (RetTy->hasUnsignedIntegerRepresentation())
1914  RetAttrs.addAttribute(llvm::Attribute::ZExt);
1915  // FALL THROUGH
1916  case ABIArgInfo::Direct:
1917  if (RetAI.getInReg())
1918  RetAttrs.addAttribute(llvm::Attribute::InReg);
1919  break;
1920  case ABIArgInfo::Ignore:
1921  break;
1922 
1923  case ABIArgInfo::InAlloca:
1924  case ABIArgInfo::Indirect: {
1925  // inalloca and sret disable readnone and readonly
1926  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1927  .removeAttribute(llvm::Attribute::ReadNone);
1928  break;
1929  }
1930 
1931  case ABIArgInfo::CoerceAndExpand:
1932  break;
1933 
1934  case ABIArgInfo::Expand:
1935  llvm_unreachable("Invalid ABI kind for return argument");
1936  }
1937 
1938  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1939  QualType PTy = RefTy->getPointeeType();
1940  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1941  RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1942  .getQuantity());
1943  else if (getContext().getTargetAddressSpace(PTy) == 0)
1944  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1945  }
1946 
1947  bool hasUsedSRet = false;
1948  SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
1949 
1950  // Attach attributes to sret.
1951  if (IRFunctionArgs.hasSRetArg()) {
1952  llvm::AttrBuilder SRETAttrs;
1953  SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1954  hasUsedSRet = true;
1955  if (RetAI.getInReg())
1956  SRETAttrs.addAttribute(llvm::Attribute::InReg);
1957  ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
1958  llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
1959  }
1960 
1961  // Attach attributes to inalloca argument.
1962  if (IRFunctionArgs.hasInallocaArg()) {
1963  llvm::AttrBuilder Attrs;
1964  Attrs.addAttribute(llvm::Attribute::InAlloca);
1965  ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
1966  llvm::AttributeSet::get(getLLVMContext(), Attrs);
1967  }
1968 
1969  unsigned ArgNo = 0;
1970  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
1971  E = FI.arg_end();
1972  I != E; ++I, ++ArgNo) {
1973  QualType ParamType = I->type;
1974  const ABIArgInfo &AI = I->info;
1975  llvm::AttrBuilder Attrs;
1976 
1977  // Add attribute for padding argument, if necessary.
1978  if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
1979  if (AI.getPaddingInReg()) {
1980  ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1981  llvm::AttributeSet::get(
1982  getLLVMContext(),
1983  llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
1984  }
1985  }
1986 
1987  // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
1988  // have the corresponding parameter variable. It doesn't make
1989  // sense to do it here because parameters are so messed up.
1990  switch (AI.getKind()) {
1991  case ABIArgInfo::Extend:
1992  if (ParamType->isSignedIntegerOrEnumerationType())
1993  Attrs.addAttribute(llvm::Attribute::SExt);
1994  else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
1995  if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
1996  Attrs.addAttribute(llvm::Attribute::SExt);
1997  else
1998  Attrs.addAttribute(llvm::Attribute::ZExt);
1999  }
2000  // FALL THROUGH
2001  case ABIArgInfo::Direct:
2002  if (ArgNo == 0 && FI.isChainCall())
2003  Attrs.addAttribute(llvm::Attribute::Nest);
2004  else if (AI.getInReg())
2005  Attrs.addAttribute(llvm::Attribute::InReg);
2006  break;
2007 
2008  case ABIArgInfo::Indirect: {
2009  if (AI.getInReg())
2010  Attrs.addAttribute(llvm::Attribute::InReg);
2011 
2012  if (AI.getIndirectByVal())
2013  Attrs.addAttribute(llvm::Attribute::ByVal);
2014 
2015  CharUnits Align = AI.getIndirectAlign();
2016 
2017  // In a byval argument, it is important that the required
2018  // alignment of the type is honored, as LLVM might be creating a
2019  // *new* stack object, and needs to know what alignment to give
2020  // it. (Sometimes it can deduce a sensible alignment on its own,
2021  // but not if clang decides it must emit a packed struct, or the
2022  // user specifies increased alignment requirements.)
2023  //
2024  // This is different from indirect *not* byval, where the object
2025  // exists already, and the align attribute is purely
2026  // informative.
2027  assert(!Align.isZero());
2028 
2029  // For now, only add this when we have a byval argument.
2030  // TODO: be less lazy about updating test cases.
2031  if (AI.getIndirectByVal())
2032  Attrs.addAlignmentAttr(Align.getQuantity());
2033 
2034  // byval disables readnone and readonly.
2035  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2036  .removeAttribute(llvm::Attribute::ReadNone);
2037  break;
2038  }
2039  case ABIArgInfo::Ignore:
2040  case ABIArgInfo::Expand:
2041  case ABIArgInfo::CoerceAndExpand:
2042  break;
2043 
2044  case ABIArgInfo::InAlloca:
2045  // inalloca disables readnone and readonly.
2046  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2047  .removeAttribute(llvm::Attribute::ReadNone);
2048  continue;
2049  }
2050 
2051  if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2052  QualType PTy = RefTy->getPointeeType();
2053  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2054  Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2055  .getQuantity());
2056  else if (getContext().getTargetAddressSpace(PTy) == 0)
2057  Attrs.addAttribute(llvm::Attribute::NonNull);
2058  }
2059 
2060  switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2061  case ParameterABI::Ordinary:
2062  break;
2063 
2064  case ParameterABI::SwiftIndirectResult: {
2065  // Add 'sret' if we haven't already used it for something, but
2066  // only if the result is void.
2067  if (!hasUsedSRet && RetTy->isVoidType()) {
2068  Attrs.addAttribute(llvm::Attribute::StructRet);
2069  hasUsedSRet = true;
2070  }
2071 
2072  // Add 'noalias' in either case.
2073  Attrs.addAttribute(llvm::Attribute::NoAlias);
2074 
2075  // Add 'dereferenceable' and 'alignment'.
2076  auto PTy = ParamType->getPointeeType();
2077  if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2078  auto info = getContext().getTypeInfoInChars(PTy);
2079  Attrs.addDereferenceableAttr(info.first.getQuantity());
2080  Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
2081  info.second.getQuantity()));
2082  }
2083  break;
2084  }
2085 
2086  case ParameterABI::SwiftErrorResult:
2087  Attrs.addAttribute(llvm::Attribute::SwiftError);
2088  break;
2089 
2090  case ParameterABI::SwiftContext:
2091  Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2092  break;
2093  }
2094 
2095  if (Attrs.hasAttributes()) {
2096  unsigned FirstIRArg, NumIRArgs;
2097  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2098  for (unsigned i = 0; i < NumIRArgs; i++)
2099  ArgAttrs[FirstIRArg + i] =
2100  llvm::AttributeSet::get(getLLVMContext(), Attrs);
2101  }
2102  }
2103  assert(ArgNo == FI.arg_size());
2104 
2105  AttrList = llvm::AttributeList::get(
2106  getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2107  llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2108 }
2109 
2110 /// An argument came in as a promoted argument; demote it back to its
2111 /// declared type.
2112 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2113  const VarDecl *var,
2114  llvm::Value *value) {
2115  llvm::Type *varType = CGF.ConvertType(var->getType());
2116 
2117  // This can happen with promotions that actually don't change the
2118  // underlying type, like the enum promotions.
2119  if (value->getType() == varType) return value;
2120 
2121  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2122  && "unexpected promotion type");
2123 
2124  if (isa<llvm::IntegerType>(varType))
2125  return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2126 
2127  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2128 }
2129 
2130 /// Returns the attribute (either parameter attribute, or function
2131 /// attribute), which declares argument ArgNo to be non-null.
2132 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2133  QualType ArgType, unsigned ArgNo) {
2134  // FIXME: __attribute__((nonnull)) can also be applied to:
2135  // - references to pointers, where the pointee is known to be
2136  // nonnull (apparently a Clang extension)
2137  // - transparent unions containing pointers
2138  // In the former case, LLVM IR cannot represent the constraint. In
2139  // the latter case, we have no guarantee that the transparent union
2140  // is in fact passed as a pointer.
2141  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2142  return nullptr;
2143  // First, check attribute on parameter itself.
2144  if (PVD) {
2145  if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2146  return ParmNNAttr;
2147  }
2148  // Check function attributes.
2149  if (!FD)
2150  return nullptr;
2151  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2152  if (NNAttr->isNonNull(ArgNo))
2153  return NNAttr;
2154  }
2155  return nullptr;
2156 }
2157 
2158 namespace {
2159  struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2160  Address Temp;
2161  Address Arg;
2162  CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2163  void Emit(CodeGenFunction &CGF, Flags flags) override {
2164  llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2165  CGF.Builder.CreateStore(errorValue, Arg);
2166  }
2167  };
2168 }
2169 
2170 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2171  llvm::Function *Fn,
2172  const FunctionArgList &Args) {
2173  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2174  // Naked functions don't have prologues.
2175  return;
2176 
2177  // If this is an implicit-return-zero function, go ahead and
2178  // initialize the return value. TODO: it might be nice to have
2179  // a more general mechanism for this that didn't require synthesized
2180  // return statements.
2181  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2182  if (FD->hasImplicitReturnZero()) {
2183  QualType RetTy = FD->getReturnType().getUnqualifiedType();
2184  llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2185  llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2186  Builder.CreateStore(Zero, ReturnValue);
2187  }
2188  }
2189 
2190  // FIXME: We no longer need the types from FunctionArgList; lift up and
2191  // simplify.
2192 
2193  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2194  // Flattened function arguments.
2195  SmallVector<llvm::Value *, 16> FnArgs;
2196  FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2197  for (auto &Arg : Fn->args()) {
2198  FnArgs.push_back(&Arg);
2199  }
2200  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2201 
2202  // If we're using inalloca, all the memory arguments are GEPs off of the last
2203  // parameter, which is a pointer to the complete memory area.
2204  Address ArgStruct = Address::invalid();
2205  const llvm::StructLayout *ArgStructLayout = nullptr;
2206  if (IRFunctionArgs.hasInallocaArg()) {
2207  ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2208  ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2209  FI.getArgStructAlignment());
2210 
2211  assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2212  }
2213 
2214  // Name the struct return parameter.
2215  if (IRFunctionArgs.hasSRetArg()) {
2216  auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2217  AI->setName("agg.result");
2218  AI->addAttr(llvm::Attribute::NoAlias);
2219  }
2220 
2221  // Track if we received the parameter as a pointer (indirect, byval, or
2222  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
2223  // into a local alloca for us.
2224  SmallVector<ParamValue, 16> ArgVals;
2225  ArgVals.reserve(Args.size());
2226 
2227  // Create a pointer value for every parameter declaration. This usually
2228  // entails copying one or more LLVM IR arguments into an alloca. Don't push
2229  // any cleanups or do anything that might unwind. We do that separately, so
2230  // we can push the cleanups in the correct order for the ABI.
2231  assert(FI.arg_size() == Args.size() &&
2232  "Mismatch between function signature & arguments.");
2233  unsigned ArgNo = 0;
2234  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2235  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2236  i != e; ++i, ++info_it, ++ArgNo) {
2237  const VarDecl *Arg = *i;
2238  QualType Ty = info_it->type;
2239  const ABIArgInfo &ArgI = info_it->info;
2240 
2241  bool isPromoted =
2242  isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2243 
2244  unsigned FirstIRArg, NumIRArgs;
2245  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2246 
2247  switch (ArgI.getKind()) {
2248  case ABIArgInfo::InAlloca: {
2249  assert(NumIRArgs == 0);
2250  auto FieldIndex = ArgI.getInAllocaFieldIndex();
2251  CharUnits FieldOffset =
2252  CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
2253  Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
2254  Arg->getName());
2255  ArgVals.push_back(ParamValue::forIndirect(V));
2256  break;
2257  }
2258 
2259  case ABIArgInfo::Indirect: {
2260  assert(NumIRArgs == 1);
2261  Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2262 
2263  if (!hasScalarEvaluationKind(Ty)) {
2264  // Aggregates and complex variables are accessed by reference. All we
2265  // need to do is realign the value, if requested.
2266  Address V = ParamAddr;
2267  if (ArgI.getIndirectRealign()) {
2268  Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2269 
2270  // Copy from the incoming argument pointer to the temporary with the
2271  // appropriate alignment.
2272  //
2273  // FIXME: We should have a common utility for generating an aggregate
2274  // copy.
2275  CharUnits Size = getContext().getTypeSizeInChars(Ty);
2276  auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2277  Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2278  Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2279  Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2280  V = AlignedTemp;
2281  }
2282  ArgVals.push_back(ParamValue::forIndirect(V));
2283  } else {
2284  // Load scalar value from indirect argument.
2285  llvm::Value *V =
2286  EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());
2287 
2288  if (isPromoted)
2289  V = emitArgumentDemotion(*this, Arg, V);
2290  ArgVals.push_back(ParamValue::forDirect(V));
2291  }
2292  break;
2293  }
2294 
2295  case ABIArgInfo::Extend:
2296  case ABIArgInfo::Direct: {
2297 
2298  // If we have the trivial case, handle it with no muss and fuss.
2299  if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2300  ArgI.getCoerceToType() == ConvertType(Ty) &&
2301  ArgI.getDirectOffset() == 0) {
2302  assert(NumIRArgs == 1);
2303  llvm::Value *V = FnArgs[FirstIRArg];
2304  auto AI = cast<llvm::Argument>(V);
2305 
2306  if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2307  if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2308  PVD->getFunctionScopeIndex()))
2309  AI->addAttr(llvm::Attribute::NonNull);
2310 
2311  QualType OTy = PVD->getOriginalType();
2312  if (const auto *ArrTy =
2313  getContext().getAsConstantArrayType(OTy)) {
2314  // A C99 array parameter declaration with the static keyword also
2315  // indicates dereferenceability, and if the size is constant we can
2316  // use the dereferenceable attribute (which requires the size in
2317  // bytes).
2318  if (ArrTy->getSizeModifier() == ArrayType::Static) {
2319  QualType ETy = ArrTy->getElementType();
2320  uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2321  if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2322  ArrSize) {
2323  llvm::AttrBuilder Attrs;
2324  Attrs.addDereferenceableAttr(
2325  getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2326  AI->addAttrs(Attrs);
2327  } else if (getContext().getTargetAddressSpace(ETy) == 0) {
2328  AI->addAttr(llvm::Attribute::NonNull);
2329  }
2330  }
2331  } else if (const auto *ArrTy =
2332  getContext().getAsVariableArrayType(OTy)) {
2333  // For C99 VLAs with the static keyword, we don't know the size so
2334  // we can't use the dereferenceable attribute, but in addrspace(0)
2335  // we know that it must be nonnull.
2336  if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2337  !getContext().getTargetAddressSpace(ArrTy->getElementType()))
2338  AI->addAttr(llvm::Attribute::NonNull);
2339  }
2340 
2341  const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2342  if (!AVAttr)
2343  if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2344  AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2345  if (AVAttr) {
2346  llvm::Value *AlignmentValue =
2347  EmitScalarExpr(AVAttr->getAlignment());
2348  llvm::ConstantInt *AlignmentCI =
2349  cast<llvm::ConstantInt>(AlignmentValue);
2350  unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2351  +llvm::Value::MaximumAlignment);
2352  AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2353  }
2354  }
2355 
2356  if (Arg->getType().isRestrictQualified())
2357  AI->addAttr(llvm::Attribute::NoAlias);
2358 
2359  // LLVM expects swifterror parameters to be used in very restricted
2360  // ways. Copy the value into a less-restricted temporary.
2361  if (FI.getExtParameterInfo(ArgNo).getABI()
2362  == ParameterABI::SwiftErrorResult) {
2363  QualType pointeeTy = Ty->getPointeeType();
2364  assert(pointeeTy->isPointerType());
2365  Address temp =
2366  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2367  Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2368  llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2369  Builder.CreateStore(incomingErrorValue, temp);
2370  V = temp.getPointer();
2371 
2372  // Push a cleanup to copy the value back at the end of the function.
2373  // The convention does not guarantee that the value will be written
2374  // back if the function exits with an unwind exception.
2375  EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2376  }
2377 
2378  // Ensure the argument is the correct type.
2379  if (V->getType() != ArgI.getCoerceToType())
2380  V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2381 
2382  if (isPromoted)
2383  V = emitArgumentDemotion(*this, Arg, V);
2384 
2385  // Because of merging of function types from multiple decls it is
2386  // possible for the type of an argument to not match the corresponding
2387  // type in the function type. Since we are codegening the callee
2388  // in here, add a cast to the argument type.
2389  llvm::Type *LTy = ConvertType(Arg->getType());
2390  if (V->getType() != LTy)
2391  V = Builder.CreateBitCast(V, LTy);
2392 
2393  ArgVals.push_back(ParamValue::forDirect(V));
2394  break;
2395  }
2396 
2397  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2398  Arg->getName());
2399 
2400  // Pointer to store into.
2401  Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2402 
2403  // Fast-isel and the optimizer generally like scalar values better than
2404  // FCAs, so we flatten them if this is safe to do for this argument.
2405  llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2406  if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2407  STy->getNumElements() > 1) {
2408  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2409  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2410  llvm::Type *DstTy = Ptr.getElementType();
2411  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2412 
2413  Address AddrToStoreInto = Address::invalid();
2414  if (SrcSize <= DstSize) {
2415  AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2416  } else {
2417  AddrToStoreInto =
2418  CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2419  }
2420 
2421  assert(STy->getNumElements() == NumIRArgs);
2422  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2423  auto AI = FnArgs[FirstIRArg + i];
2424  AI->setName(Arg->getName() + ".coerce" + Twine(i));
2425  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2426  Address EltPtr =
2427  Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2428  Builder.CreateStore(AI, EltPtr);
2429  }
2430 
2431  if (SrcSize > DstSize) {
2432  Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2433  }
2434 
2435  } else {
2436  // Simple case, just do a coerced store of the argument into the alloca.
2437  assert(NumIRArgs == 1);
2438  auto AI = FnArgs[FirstIRArg];
2439  AI->setName(Arg->getName() + ".coerce");
2440  CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2441  }
2442 
2443  // Match to what EmitParmDecl is expecting for this type.
2444  if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2445  llvm::Value *V =
2446  EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
2447  if (isPromoted)
2448  V = emitArgumentDemotion(*this, Arg, V);
2449  ArgVals.push_back(ParamValue::forDirect(V));
2450  } else {
2451  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2452  }
2453  break;
2454  }
2455 
2456  case ABIArgInfo::CoerceAndExpand: {
2457  // Reconstruct into a temporary.
2458  Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2459  ArgVals.push_back(ParamValue::forIndirect(alloca));
2460 
2461  auto coercionType = ArgI.getCoerceAndExpandType();
2462  alloca = Builder.CreateElementBitCast(alloca, coercionType);
2463  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2464 
2465  unsigned argIndex = FirstIRArg;
2466  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2467  llvm::Type *eltType = coercionType->getElementType(i);
2468  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2469  continue;
2470 
2471  auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2472  auto elt = FnArgs[argIndex++];
2473  Builder.CreateStore(elt, eltAddr);
2474  }
2475  assert(argIndex == FirstIRArg + NumIRArgs);
2476  break;
2477  }
2478 
2479  case ABIArgInfo::Expand: {
2480  // If this structure was expanded into multiple arguments then
2481  // we need to create a temporary and reconstruct it from the
2482  // arguments.
2483  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2484  LValue LV = MakeAddrLValue(Alloca, Ty);
2485  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2486 
2487  auto FnArgIter = FnArgs.begin() + FirstIRArg;
2488  ExpandTypeFromArgs(Ty, LV, FnArgIter);
2489  assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2490  for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2491  auto AI = FnArgs[FirstIRArg + i];
2492  AI->setName(Arg->getName() + "." + Twine(i));
2493  }
2494  break;
2495  }
2496 
2497  case ABIArgInfo::Ignore:
2498  assert(NumIRArgs == 0);
2499  // Initialize the local variable appropriately.
2500  if (!hasScalarEvaluationKind(Ty)) {
2501  ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2502  } else {
2503  llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2504  ArgVals.push_back(ParamValue::forDirect(U));
2505  }
2506  break;
2507  }
2508  }
2509 
2510  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2511  for (int I = Args.size() - 1; I >= 0; --I)
2512  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2513  } else {
2514  for (unsigned I = 0, E = Args.size(); I != E; ++I)
2515  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2516  }
2517 }
2518 
2519 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2520  while (insn->use_empty()) {
2521  llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2522  if (!bitcast) return;
2523 
2524  // This is "safe" because we would have used a ConstantExpr otherwise.
2525  insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2526  bitcast->eraseFromParent();
2527  }
2528 }
2529 
2530 /// Try to emit a fused autorelease of a return result.
2531 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2532  llvm::Value *result) {
2533  // We must be immediately following the cast.
2534  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2535  if (BB->empty()) return nullptr;
2536  if (&BB->back() != result) return nullptr;
2537 
2538  llvm::Type *resultType = result->getType();
2539 
2540  // result is in a BasicBlock and is therefore an Instruction.
2541  llvm::Instruction *generator = cast<llvm::Instruction>(result);
2542 
2543  SmallVector<llvm::Instruction *, 4> InstsToKill;
2544 
2545  // Look for:
2546  // %generator = bitcast %type1* %generator2 to %type2*
2547  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2548  // We would have emitted this as a constant if the operand weren't
2549  // an Instruction.
2550  generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2551 
2552  // Require the generator to be immediately followed by the cast.
2553  if (generator->getNextNode() != bitcast)
2554  return nullptr;
2555 
2556  InstsToKill.push_back(bitcast);
2557  }
2558 
2559  // Look for:
2560  // %generator = call i8* @objc_retain(i8* %originalResult)
2561  // or
2562  // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2563  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2564  if (!call) return nullptr;
2565 
2566  bool doRetainAutorelease;
2567 
2568  if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2569  doRetainAutorelease = true;
2570  } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2571  .objc_retainAutoreleasedReturnValue) {
2572  doRetainAutorelease = false;
2573 
2574  // If we emitted an assembly marker for this call (and the
2575  // ARCEntrypoints field should have been set if so), go looking
2576  // for that call. If we can't find it, we can't do this
2577  // optimization. But it should always be the immediately previous
2578  // instruction, unless we needed bitcasts around the call.
2579  if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2580  llvm::Instruction *prev = call->getPrevNode();
2581  assert(prev);
2582  if (isa<llvm::BitCastInst>(prev)) {
2583  prev = prev->getPrevNode();
2584  assert(prev);
2585  }
2586  assert(isa<llvm::CallInst>(prev));
2587  assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2588  CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2589  InstsToKill.push_back(prev);
2590  }
2591  } else {
2592  return nullptr;
2593  }
2594 
2595  result = call->getArgOperand(0);
2596  InstsToKill.push_back(call);
2597 
2598  // Keep killing bitcasts, for sanity. Note that we no longer care
2599  // about precise ordering as long as there's exactly one use.
2600  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2601  if (!bitcast->hasOneUse()) break;
2602  InstsToKill.push_back(bitcast);
2603  result = bitcast->getOperand(0);
2604  }
2605 
2606  // Delete all the unnecessary instructions, from latest to earliest.
2607  for (auto *I : InstsToKill)
2608  I->eraseFromParent();
2609 
2610  // Do the fused retain/autorelease if we were asked to.
2611  if (doRetainAutorelease)
2612  result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2613 
2614  // Cast back to the result type.
2615  return CGF.Builder.CreateBitCast(result, resultType);
2616 }
2617 
2618 /// If this is a +1 of the value of an immutable 'self', remove it.
2619 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2620  llvm::Value *result) {
2621  // This is only applicable to a method with an immutable 'self'.
2622  const ObjCMethodDecl *method =
2623  dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2624  if (!method) return nullptr;
2625  const VarDecl *self = method->getSelfDecl();
2626  if (!self->getType().isConstQualified()) return nullptr;
2627 
2628  // Look for a retain call.
2629  llvm::CallInst *retainCall =
2630  dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2631  if (!retainCall ||
2632  retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2633  return nullptr;
2634 
2635  // Look for an ordinary load of 'self'.
2636  llvm::Value *retainedValue = retainCall->getArgOperand(0);
2637  llvm::LoadInst *load =
2638  dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2639  if (!load || load->isAtomic() || load->isVolatile() ||
2640  load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2641  return nullptr;
2642 
2643  // Okay! Burn it all down. This relies for correctness on the
2644  // assumption that the retain is emitted as part of the return and
2645  // that thereafter everything is used "linearly".
2646  llvm::Type *resultType = result->getType();
2647  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2648  assert(retainCall->use_empty());
2649  retainCall->eraseFromParent();
2650  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2651 
2652  return CGF.Builder.CreateBitCast(load, resultType);
2653 }
2654 
2655 /// Emit an ARC autorelease of the result of a function.
2656 ///
2657 /// \return the value to actually return from the function
2658 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2659  llvm::Value *result) {
2660  // If we're returning 'self', kill the initial retain. This is a
2661  // heuristic attempt to "encourage correctness" in the really unfortunate
2662  // case where we have a return of self during a dealloc and we desperately
2663  // need to avoid the possible autorelease.
2664  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2665  return self;
2666 
2667  // At -O0, try to emit a fused retain/autorelease.
2668  if (CGF.shouldUseFusedARCCalls())
2669  if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2670  return fused;
2671 
2672  return CGF.EmitARCAutoreleaseReturnValue(result);
2673 }
2674 
2675 /// Heuristically search for a dominating store to the return-value slot.
2676 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2677  // Check if a User is a store whose pointer operand is the ReturnValue.
2678  // We are looking for stores to the ReturnValue, not for stores of the
2679  // ReturnValue to some other location.
2680  auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2681  auto *SI = dyn_cast<llvm::StoreInst>(U);
2682  if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2683  return nullptr;
2684  // These aren't actually possible for non-coerced returns, and we
2685  // only care about non-coerced returns on this code path.
2686  assert(!SI->isAtomic() && !SI->isVolatile());
2687  return SI;
2688  };
2689  // If there are multiple uses of the return-value slot, just check
2690  // for something immediately preceding the IP. Sometimes this can
2691  // happen with how we generate implicit-returns; it can also happen
2692  // with noreturn cleanups.
2693  if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2694  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2695  if (IP->empty()) return nullptr;
2696  llvm::Instruction *I = &IP->back();
2697 
2698  // Skip lifetime markers
2699  for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2700  IE = IP->rend();
2701  II != IE; ++II) {
2702  if (llvm::IntrinsicInst *Intrinsic =
2703  dyn_cast<llvm::IntrinsicInst>(&*II)) {
2704  if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2705  const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2706  ++II;
2707  if (II == IE)
2708  break;
2709  if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2710  continue;
2711  }
2712  }
2713  I = &*II;
2714  break;
2715  }
2716 
2717  return GetStoreIfValid(I);
2718  }
2719 
2720  llvm::StoreInst *store =
2721  GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2722  if (!store) return nullptr;
2723 
2724  // Now do a quick-and-dirty dominance check: just walk up the
2725  // single-predecessors chain from the current insertion point.
2726  llvm::BasicBlock *StoreBB = store->getParent();
2727  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2728  while (IP != StoreBB) {
2729  if (!(IP = IP->getSinglePredecessor()))
2730  return nullptr;
2731  }
2732 
2733  // Okay, the store's basic block dominates the insertion point; we
2734  // can do our thing.
2735  return store;
2736 }
2737 
2738 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2739  bool EmitRetDbgLoc,
2740  SourceLocation EndLoc) {
2741  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2742  // Naked functions don't have epilogues.
2743  Builder.CreateUnreachable();
2744  return;
2745  }
2746 
2747  // Functions with no result always return void.
2748  if (!ReturnValue.isValid()) {
2749  Builder.CreateRetVoid();
2750  return;
2751  }
2752 
2753  llvm::DebugLoc RetDbgLoc;
2754  llvm::Value *RV = nullptr;
2755  QualType RetTy = FI.getReturnType();
2756  const ABIArgInfo &RetAI = FI.getReturnInfo();
2757 
2758  switch (RetAI.getKind()) {
2759  case ABIArgInfo::InAlloca:
2760  // Aggregates get evaluated directly into the destination. Sometimes we
2761  // need to return the sret value in a register, though.
2762  assert(hasAggregateEvaluationKind(RetTy));
2763  if (RetAI.getInAllocaSRet()) {
2764  llvm::Function::arg_iterator EI = CurFn->arg_end();
2765  --EI;
2766  llvm::Value *ArgStruct = &*EI;
2767  llvm::Value *SRet = Builder.CreateStructGEP(
2768  nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2769  RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2770  }
2771  break;
2772 
2773  case ABIArgInfo::Indirect: {
2774  auto AI = CurFn->arg_begin();
2775  if (RetAI.isSRetAfterThis())
2776  ++AI;
2777  switch (getEvaluationKind(RetTy)) {
2778  case TEK_Complex: {
2779  ComplexPairTy RT =
2780  EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2781  EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2782  /*isInit*/ true);
2783  break;
2784  }
2785  case TEK_Aggregate:
2786  // Do nothing; aggregates get evaluated directly into the destination.
2787  break;
2788  case TEK_Scalar:
2789  EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2790  MakeNaturalAlignAddrLValue(&*AI, RetTy),
2791  /*isInit*/ true);
2792  break;
2793  }
2794  break;
2795  }
2796 
2797  case ABIArgInfo::Extend:
2798  case ABIArgInfo::Direct:
2799  if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2800  RetAI.getDirectOffset() == 0) {
2801  // The internal return value temp always will have pointer-to-return-type
2802  // type, just do a load.
2803 
2804  // If there is a dominating store to ReturnValue, we can elide
2805  // the load, zap the store, and usually zap the alloca.
2806  if (llvm::StoreInst *SI =
2807  findDominatingStoreToReturnValue(*this)) {
2808  // Reuse the debug location from the store unless there is
2809  // cleanup code to be emitted between the store and return
2810  // instruction.
2811  if (EmitRetDbgLoc && !AutoreleaseResult)
2812  RetDbgLoc = SI->getDebugLoc();
2813  // Get the stored value and nuke the now-dead store.
2814  RV = SI->getValueOperand();
2815  SI->eraseFromParent();
2816 
2817  // If that was the only use of the return value, nuke it as well now.
2818  auto returnValueInst = ReturnValue.getPointer();
2819  if (returnValueInst->use_empty()) {
2820  if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2821  alloca->eraseFromParent();
2822  ReturnValue = Address::invalid();
2823  }
2824  }
2825 
2826  // Otherwise, we have to do a simple load.
2827  } else {
2828  RV = Builder.CreateLoad(ReturnValue);
2829  }
2830  } else {
2831  // If the value is offset in memory, apply the offset now.
2832  Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2833 
2834  RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2835  }
2836 
2837  // In ARC, end functions that return a retainable type with a call
2838  // to objc_autoreleaseReturnValue.
2839  if (AutoreleaseResult) {
2840 #ifndef NDEBUG
2841  // Type::isObjCRetainabletype has to be called on a QualType that hasn't
2842  // been stripped of the typedefs, so we cannot use RetTy here. Get the
2843  // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
2844  // CurCodeDecl or BlockInfo.
2845  QualType RT;
2846 
2847  if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2848  RT = FD->getReturnType();
2849  else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2850  RT = MD->getReturnType();
2851  else if (isa<BlockDecl>(CurCodeDecl))
2852  RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2853  else
2854  llvm_unreachable("Unexpected function/method type");
2855 
2856  assert(getLangOpts().ObjCAutoRefCount &&
2857  !FI.isReturnsRetained() &&
2858  RT->isObjCRetainableType());
2859 #endif
2860  RV = emitAutoreleaseOfResult(*this, RV);
2861  }
2862 
2863  break;
2864 
2865  case ABIArgInfo::Ignore:
2866  break;
2867 
2868  case ABIArgInfo::CoerceAndExpand: {
2869  auto coercionType = RetAI.getCoerceAndExpandType();
2870  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2871 
2872  // Load all of the coerced elements out into results.
2873  llvm::SmallVector<llvm::Value *, 4> results;
2874  Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2875  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2876  auto coercedEltType = coercionType->getElementType(i);
2877  if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2878  continue;
2879 
2880  auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
2881  auto elt = Builder.CreateLoad(eltAddr);
2882  results.push_back(elt);
2883  }
2884 
2885  // If we have one result, it's the single direct result type.
2886  if (results.size() == 1) {
2887  RV = results[0];
2888 
2889  // Otherwise, we need to make a first-class aggregate.
2890  } else {
2891  // Construct a return type that lacks padding elements.
2892  llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2893 
2894  RV = llvm::UndefValue::get(returnType);
2895  for (unsigned i = 0, e = results.size(); i != e; ++i) {
2896  RV = Builder.CreateInsertValue(RV, results[i], i);
2897  }
2898  }
2899  break;
2900  }
2901 
2902  case ABIArgInfo::Expand:
2903  llvm_unreachable("Invalid ABI kind for return argument");
2904  }
2905 
2906  llvm::Instruction *Ret;
2907  if (RV) {
2908  EmitReturnValueCheck(RV);
2909  Ret = Builder.CreateRet(RV);
2910  } else {
2911  Ret = Builder.CreateRetVoid();
2912  }
2913 
2914  if (RetDbgLoc)
2915  Ret->setDebugLoc(std::move(RetDbgLoc));
2916 }
2917 
2918 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
2919  // A current decl may not be available when emitting vtable thunks.
2920  if (!CurCodeDecl)
2921  return;
2922 
2923  ReturnsNonNullAttr *RetNNAttr = nullptr;
2924  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
2925  RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
2926 
2927  if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
2928  return;
2929 
2930  // Prefer the returns_nonnull attribute if it's present.
2931  SourceLocation AttrLoc;
2932  SanitizerMask CheckKind;
2933  SanitizerHandler Handler;
2934  if (RetNNAttr) {
2935  assert(!requiresReturnValueNullabilityCheck() &&
2936  "Cannot check nullability and the nonnull attribute");
2937  AttrLoc = RetNNAttr->getLocation();
2938  CheckKind = SanitizerKind::ReturnsNonnullAttribute;
2939  Handler = SanitizerHandler::NonnullReturn;
2940  } else {
2941  if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
2942  if (auto *TSI = DD->getTypeSourceInfo())
2943  if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
2944  AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
2945  CheckKind = SanitizerKind::NullabilityReturn;
2946  Handler = SanitizerHandler::NullabilityReturn;
2947  }
2948 
2949  SanitizerScope SanScope(this);
2950 
2951  // Make sure the "return" source location is valid. If we're checking a
2952  // nullability annotation, make sure the preconditions for the check are met.
2953  llvm::BasicBlock *Check = createBasicBlock("nullcheck");
2954  llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
2955  llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
2956  llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
2957  if (requiresReturnValueNullabilityCheck())
2958  CanNullCheck =
2959  Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
2960  Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
2961  EmitBlock(Check);
2962 
2963  // Now do the null check.
2964  llvm::Value *Cond = Builder.CreateIsNotNull(RV);
2965  llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
2966  llvm::Value *DynamicData[] = {SLocPtr};
2967  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
2968 
2969  EmitBlock(NoCheck);
2970 
2971 #ifndef NDEBUG
2972  // The return location should not be used after the check has been emitted.
2973  ReturnLocation = Address::invalid();
2974 #endif
2975 }
2976 
2977 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
2978  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2979  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
2980 }
2981 
2982 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
2983  QualType Ty) {
2984  // FIXME: Generate IR in one pass, rather than going back and fixing up these
2985  // placeholders.
2986  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
2987  llvm::Type *IRPtrTy = IRTy->getPointerTo();
2988  llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
2989 
2990  // FIXME: When we generate this IR in one pass, we shouldn't need
2991  // this win32-specific alignment hack.
2992  CharUnits Align = CharUnits::fromQuantity(4);
2993  Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
2994 
2995  return AggValueSlot::forAddr(Address(Placeholder, Align),
2996  Ty.getQualifiers(),
2997  AggValueSlot::IsNotDestructed,
2998  AggValueSlot::DoesNotNeedGCBarriers,
2999  AggValueSlot::IsNotAliased);
3000 }
3001 
3002 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3003  const VarDecl *param,
3004  SourceLocation loc) {
3005  // StartFunction converted the ABI-lowered parameter(s) into a
3006  // local alloca. We need to turn that into an r-value suitable
3007  // for EmitCall.
3008  Address local = GetAddrOfLocalVar(param);
3009 
3010  QualType type = param->getType();
3011 
3012  assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
3013  "cannot emit delegate call arguments for inalloca arguments!");
3014 
3015  // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3016  // but the argument needs to be the original pointer.
3017  if (type->isReferenceType()) {
3018  args.add(RValue::get(Builder.CreateLoad(local)), type);
3019 
3020  // In ARC, move out of consumed arguments so that the release cleanup
3021  // entered by StartFunction doesn't cause an over-release. This isn't
3022  // optimal -O0 code generation, but it should get cleaned up when
3023  // optimization is enabled. This also assumes that delegate calls are
3024  // performed exactly once for a set of arguments, but that should be safe.
3025  } else if (getLangOpts().ObjCAutoRefCount &&
3026  param->hasAttr<NSConsumedAttr>() &&
3027  type->isObjCRetainableType()) {
3028  llvm::Value *ptr = Builder.CreateLoad(local);
3029  auto null =
3030  llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3031  Builder.CreateStore(null, local);
3032  args.add(RValue::get(ptr), type);
3033 
3034  // For the most part, we just need to load the alloca, except that
3035  // aggregate r-values are actually pointers to temporaries.
3036  } else {
3037  args.add(convertTempToRValue(local, type, loc), type);
3038  }
3039 }
3040 
3041 static bool isProvablyNull(llvm::Value *addr) {
3042  return isa<llvm::ConstantPointerNull>(addr);
3043 }
3044 
3045 /// Emit the actual writing-back of a writeback.
3046 static void emitWriteback(CodeGenFunction &CGF,
3047  const CallArgList::Writeback &writeback) {
3048  const LValue &srcLV = writeback.Source;
3049  Address srcAddr = srcLV.getAddress();
3050  assert(!isProvablyNull(srcAddr.getPointer()) &&
3051  "shouldn't have writeback for provably null argument");
3052 
3053  llvm::BasicBlock *contBB = nullptr;
3054 
3055  // If the argument wasn't provably non-null, we need to null check
3056  // before doing the store.
3057  bool provablyNonNull = llvm::isKnownNonNull(srcAddr.getPointer());
3058  if (!provablyNonNull) {
3059  llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3060  contBB = CGF.createBasicBlock("icr.done");
3061 
3062  llvm::Value *isNull =
3063  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3064  CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3065  CGF.EmitBlock(writebackBB);
3066  }
3067 
3068  // Load the value to writeback.
3069  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3070 
3071  // Cast it back, in case we're writing an id to a Foo* or something.
3072  value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3073  "icr.writeback-cast");
3074 
3075  // Perform the writeback.
3076 
3077  // If we have a "to use" value, it's something we need to emit a use
3078  // of. This has to be carefully threaded in: if it's done after the
3079  // release it's potentially undefined behavior (and the optimizer
3080  // will ignore it), and if it happens before the retain then the
3081  // optimizer could move the release there.
3082  if (writeback.ToUse) {
3083  assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3084 
3085  // Retain the new value. No need to block-copy here: the block's
3086  // being passed up the stack.
3087  value = CGF.EmitARCRetainNonBlock(value);
3088 
3089  // Emit the intrinsic use here.
3090  CGF.EmitARCIntrinsicUse(writeback.ToUse);
3091 
3092  // Load the old value (primitively).
3093  llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3094 
3095  // Put the new value in place (primitively).
3096  CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3097 
3098  // Release the old value.
3099  CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3100 
3101  // Otherwise, we can just do a normal lvalue store.
3102  } else {
3103  CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3104  }
3105 
3106  // Jump to the continuation block.
3107  if (!provablyNonNull)
3108  CGF.EmitBlock(contBB);
3109 }
3110 
3111 static void emitWritebacks(CodeGenFunction &CGF,
3112  const CallArgList &args) {
3113  for (const auto &I : args.writebacks())
3114  emitWriteback(CGF, I);
3115 }
3116 
3117 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3118  const CallArgList &CallArgs) {
3120  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3121  CallArgs.getCleanupsToDeactivate();
3122  // Iterate in reverse to increase the likelihood of popping the cleanup.
3123  for (const auto &I : llvm::reverse(Cleanups)) {
3124  CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3125  I.IsActiveIP->eraseFromParent();
3126  }
3127 }
3128 
3129 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3130  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3131  if (uop->getOpcode() == UO_AddrOf)
3132  return uop->getSubExpr();
3133  return nullptr;
3134 }
3135 
3136 /// Emit an argument that's being passed call-by-writeback. That is,
3137 /// we are passing the address of an __autoreleased temporary; it
3138 /// might be copy-initialized with the current value of the given
3139 /// address, but it will definitely be copied out of after the call.
3140 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3141  const ObjCIndirectCopyRestoreExpr *CRE) {
3142  LValue srcLV;
3143 
3144  // Make an optimistic effort to emit the address as an l-value.
3145  // This can fail if the argument expression is more complicated.
3146  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3147  srcLV = CGF.EmitLValue(lvExpr);
3148 
3149  // Otherwise, just emit it as a scalar.
3150  } else {
3151  Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3152 
3153  QualType srcAddrType =
3154  CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3155  srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3156  }
3157  Address srcAddr = srcLV.getAddress();
3158 
3159  // The dest and src types don't necessarily match in LLVM terms
3160  // because of the crazy ObjC compatibility rules.
3161 
3162  llvm::PointerType *destType =
3163  cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3164 
3165  // If the address is a constant null, just pass the appropriate null.
3166  if (isProvablyNull(srcAddr.getPointer())) {
3167  args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3168  CRE->getType());
3169  return;
3170  }
3171 
3172  // Create the temporary.
3173  Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3174  CGF.getPointerAlign(),
3175  "icr.temp");
3176  // Loading an l-value can introduce a cleanup if the l-value is __weak,
3177  // and that cleanup will be conditional if we can't prove that the l-value
3178  // isn't null, so we need to register a dominating point so that the cleanups
3179  // system will make valid IR.
3180  CodeGenFunction::ConditionalEvaluation condEval(CGF);
3181 
3182  // Zero-initialize it if we're not doing a copy-initialization.
3183  bool shouldCopy = CRE->shouldCopy();
3184  if (!shouldCopy) {
3185  llvm::Value *null =
3186  llvm::ConstantPointerNull::get(
3187  cast<llvm::PointerType>(destType->getElementType()));
3188  CGF.Builder.CreateStore(null, temp);
3189  }
3190 
3191  llvm::BasicBlock *contBB = nullptr;
3192  llvm::BasicBlock *originBB = nullptr;
3193 
3194  // If the address is *not* known to be non-null, we need to switch.
3195  llvm::Value *finalArgument;
3196 
3197  bool provablyNonNull = llvm::isKnownNonNull(srcAddr.getPointer());
3198  if (provablyNonNull) {
3199  finalArgument = temp.getPointer();
3200  } else {
3201  llvm::Value *isNull =
3202  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3203 
3204  finalArgument = CGF.Builder.CreateSelect(isNull,
3205  llvm::ConstantPointerNull::get(destType),
3206  temp.getPointer(), "icr.argument");
3207 
3208  // If we need to copy, then the load has to be conditional, which
3209  // means we need control flow.
3210  if (shouldCopy) {
3211  originBB = CGF.Builder.GetInsertBlock();
3212  contBB = CGF.createBasicBlock("icr.cont");
3213  llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3214  CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3215  CGF.EmitBlock(copyBB);
3216  condEval.begin(CGF);
3217  }
3218  }
3219 
3220  llvm::Value *valueToUse = nullptr;
3221 
3222  // Perform a copy if necessary.
3223  if (shouldCopy) {
3224  RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3225  assert(srcRV.isScalar());
3226 
3227  llvm::Value *src = srcRV.getScalarVal();
3228  src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3229  "icr.cast");
3230 
3231  // Use an ordinary store, not a store-to-lvalue.
3232  CGF.Builder.CreateStore(src, temp);
3233 
3234  // If optimization is enabled, and the value was held in a
3235  // __strong variable, we need to tell the optimizer that this
3236  // value has to stay alive until we're doing the store back.
3237  // This is because the temporary is effectively unretained,
3238  // and so otherwise we can violate the high-level semantics.
3239  if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3240  srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3241  valueToUse = src;
3242  }
3243  }
3244 
3245  // Finish the control flow if we needed it.
3246  if (shouldCopy && !provablyNonNull) {
3247  llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3248  CGF.EmitBlock(contBB);
3249 
3250  // Make a phi for the value to intrinsically use.
3251  if (valueToUse) {
3252  llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3253  "icr.to-use");
3254  phiToUse->addIncoming(valueToUse, copyBB);
3255  phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3256  originBB);
3257  valueToUse = phiToUse;
3258  }
3259 
3260  condEval.end(CGF);
3261  }
3262 
3263  args.addWriteback(srcLV, temp, valueToUse);
3264  args.add(RValue::get(finalArgument), CRE->getType());
3265 }
3266 
3267 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3268  assert(!StackBase);
3269 
3270  // Save the stack.
3271  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3272  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3273 }
3274 
3275 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3276  if (StackBase) {
3277  // Restore the stack after the call.
3278  llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3279  CGF.Builder.CreateCall(F, StackBase);
3280  }
3281 }
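A sketch of the kind of call that needs this stack save/restore bracketing (hypothetical names, targeting i686-windows-msvc): a by-value argument that is not trivially copyable is constructed directly in inalloca argument memory, which is dynamically allocated and must be released after the call.

  struct NonTrivial {
    NonTrivial();
    NonTrivial(const NonTrivial &);
    ~NonTrivial();
    int data;
  };
  void consume(NonTrivial arg);   // 32-bit MSVC: 'arg' is passed inalloca
  void caller() {
    consume(NonTrivial());        // llvm.stacksave before building the argument,
  }                               // llvm.stackrestore after the call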
3282 
3283 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3284  SourceLocation ArgLoc,
3285  AbstractCallee AC,
3286  unsigned ParmNum) {
3287  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3288  SanOpts.has(SanitizerKind::NullabilityArg)))
3289  return;
3290 
3291  // The param decl may be missing in a variadic function.
3292  auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3293  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3294 
3295  // Prefer the nonnull attribute if it's present.
3296  const NonNullAttr *NNAttr = nullptr;
3297  if (SanOpts.has(SanitizerKind::NonnullAttribute))
3298  NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3299 
3300  bool CanCheckNullability = false;
3301  if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3302  auto Nullability = PVD->getType()->getNullability(getContext());
3303  CanCheckNullability = Nullability &&
3304  *Nullability == NullabilityKind::NonNull &&
3305  PVD->getTypeSourceInfo();
3306  }
3307 
3308  if (!NNAttr && !CanCheckNullability)
3309  return;
3310 
3311  SourceLocation AttrLoc;
3312  SanitizerMask CheckKind;
3313  SanitizerHandler Handler;
3314  if (NNAttr) {
3315  AttrLoc = NNAttr->getLocation();
3316  CheckKind = SanitizerKind::NonnullAttribute;
3317  Handler = SanitizerHandler::NonnullArg;
3318  } else {
3319  AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3320  CheckKind = SanitizerKind::NullabilityArg;
3321  Handler = SanitizerHandler::NullabilityArg;
3322  }
3323 
3324  SanitizerScope SanScope(this);
3325  assert(RV.isScalar());
3326  llvm::Value *V = RV.getScalarVal();
3327  llvm::Value *Cond =
3328  Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3329  llvm::Constant *StaticData[] = {
3330  EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3331  llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3332  };
3333  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3334 }
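A minimal sketch of a call site this check instruments (hypothetical names; requires -fsanitize=nonnull-attribute or -fsanitize=nullability-arg): the scalar argument is compared against null and the nonnull-argument diagnostic handler is invoked when the comparison fails.

  void use(int *p) __attribute__((nonnull(1)));
  void caller(int *maybeNull) {
    use(maybeNull);   // IRGen emits 'icmp ne %maybeNull, null' plus a branch to
                      // the __ubsan_handle_nonnull_arg diagnostic on failure
  }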
3335 
3336 void CodeGenFunction::EmitCallArgs(
3337  CallArgList &Args, ArrayRef<QualType> ArgTypes,
3338  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3339  AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3340  assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3341 
3342  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3343  // because arguments are destroyed left to right in the callee. As a special
3344  // case, there are certain language constructs that require left-to-right
3345  // evaluation, and in those cases we consider the evaluation order requirement
3346  // to trump the "destruction order is reverse construction order" guarantee.
3347  bool LeftToRight =
3348  CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3349  ? Order == EvaluationOrder::ForceLeftToRight
3350  : Order != EvaluationOrder::ForceRightToLeft;
3351 
3352  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3353  RValue EmittedArg) {
3354  if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3355  return;
3356  auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3357  if (PS == nullptr)
3358  return;
3359 
3360  const auto &Context = getContext();
3361  auto SizeTy = Context.getSizeType();
3362  auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3363  assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3364  llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3365  EmittedArg.getScalarVal());
3366  Args.add(RValue::get(V), SizeTy);
3367  // If we're emitting args in reverse, be sure to do so with
3368  // pass_object_size, as well.
3369  if (!LeftToRight)
3370  std::swap(Args.back(), *(&Args.back() - 1));
3371  };
3372 
3373  // Insert a stack save if we're going to need any inalloca args.
3374  bool HasInAllocaArgs = false;
3375  if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3376  for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3377  I != E && !HasInAllocaArgs; ++I)
3378  HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3379  if (HasInAllocaArgs) {
3380  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3381  Args.allocateArgumentMemory(*this);
3382  }
3383  }
3384 
3385  // Evaluate each argument in the appropriate order.
3386  size_t CallArgsStart = Args.size();
3387  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3388  unsigned Idx = LeftToRight ? I : E - I - 1;
3389  CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3390  unsigned InitialArgSize = Args.size();
3391  // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3392  // the argument and parameter match or the objc method is parameterized.
3393  assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3394  getContext().hasSameUnqualifiedType((*Arg)->getType(),
3395  ArgTypes[Idx]) ||
3396  (isa<ObjCMethodDecl>(AC.getDecl()) &&
3397  isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3398  "Argument and parameter types don't match");
3399  EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3400  // In particular, we depend on it being the last arg in Args, and the
3401  // objectsize bits depend on there only being one arg if !LeftToRight.
3402  assert(InitialArgSize + 1 == Args.size() &&
3403  "The code below depends on only adding one arg per EmitCallArg");
3404  (void)InitialArgSize;
3405  RValue RVArg = Args.back().RV;
3406  EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3407  ParamsToSkip + Idx);
3408  // @llvm.objectsize should never have side-effects and shouldn't need
3409  // destruction/cleanups, so we can safely "emit" it after its arg,
3410  // regardless of right-to-leftness.
3411  MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3412  }
3413 
3414  if (!LeftToRight) {
3415  // Un-reverse the arguments we just evaluated so they match up with the LLVM
3416  // IR function.
3417  std::reverse(Args.begin() + CallArgsStart, Args.end());
3418  }
3419 }
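A small illustration of the evaluation-order rule above (hypothetical code): because the Microsoft C++ ABI destroys by-value arguments left to right in the callee, the caller evaluates them right to left so that destruction order stays the reverse of construction order.

  struct Tracked { Tracked(int); ~Tracked(); };
  void callee(Tracked a, Tracked b);
  void caller() {
    callee(Tracked(1), Tracked(2));   // MS C++ ABI: Tracked(2) is constructed
  }                                   // first, since the callee destroys 'a'
                                      // before 'b'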
3420 
3421 namespace {
3422 
3423 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3424  DestroyUnpassedArg(Address Addr, QualType Ty)
3425  : Addr(Addr), Ty(Ty) {}
3426 
3427  Address Addr;
3428  QualType Ty;
3429 
3430  void Emit(CodeGenFunction &CGF, Flags flags) override {
3431  const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3432  assert(!Dtor->isTrivial());
3433  CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3434  /*Delegating=*/false, Addr);
3435  }
3436 };
3437 
3438 struct DisableDebugLocationUpdates {
3439  CodeGenFunction &CGF;
3440  bool disabledDebugInfo;
3441  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3442  if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3443  CGF.disableDebugInfo();
3444  }
3445  ~DisableDebugLocationUpdates() {
3446  if (disabledDebugInfo)
3447  CGF.enableDebugInfo();
3448  }
3449 };
3450 
3451 } // end anonymous namespace
3452 
3453 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3454  QualType type) {
3455  DisableDebugLocationUpdates Dis(*this, E);
3456  if (const ObjCIndirectCopyRestoreExpr *CRE
3457  = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3458  assert(getLangOpts().ObjCAutoRefCount);
3459  return emitWritebackArg(*this, args, CRE);
3460  }
3461 
3462  assert(type->isReferenceType() == E->isGLValue() &&
3463  "reference binding to unmaterialized r-value!");
3464 
3465  if (E->isGLValue()) {
3466  assert(E->getObjectKind() == OK_Ordinary);
3467  return args.add(EmitReferenceBindingToExpr(E), type);
3468  }
3469 
3470  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3471 
3472  // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3473  // However, we still have to push an EH-only cleanup in case we unwind before
3474  // we make it to the call.
3475  if (HasAggregateEvalKind &&
3476  CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3477  // If we're using inalloca, use the argument memory. Otherwise, use a
3478  // temporary.
3479  AggValueSlot Slot;
3480  if (args.isUsingInAlloca())
3481  Slot = createPlaceholderSlot(*this, type);
3482  else
3483  Slot = CreateAggTemp(type, "agg.tmp");
3484 
3485  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3486  bool DestroyedInCallee =
3487  RD && RD->hasNonTrivialDestructor() &&
3488  CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
3489  if (DestroyedInCallee)
3490  Slot.setExternallyDestructed();
3491 
3492  EmitAggExpr(E, Slot);
3493  RValue RV = Slot.asRValue();
3494  args.add(RV, type);
3495 
3496  if (DestroyedInCallee) {
3497  // Create a no-op GEP between the placeholder and the cleanup so we can
3498  // RAUW it successfully. It also serves as a marker of the first
3499  // instruction where the cleanup is active.
3500  pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3501  type);
3502  // This unreachable is a temporary marker which will be removed later.
3503  llvm::Instruction *IsActive = Builder.CreateUnreachable();
3504  args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3505  }
3506  return;
3507  }
3508 
3509  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3510  cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3511  LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3512  assert(L.isSimple());
3513  if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
3514  args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
3515  } else {
3516  // We can't represent a misaligned lvalue in the CallArgList, so copy
3517  // to an aligned temporary now.
3518  Address tmp = CreateMemTemp(type);
3519  EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile());
3520  args.add(RValue::getAggregate(tmp), type);
3521  }
3522  return;
3523  }
3524 
3525  args.add(EmitAnyExprToTemp(E), type);
3526 }
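A sketch of the EH-only cleanup described above for the Microsoft C++ ABI (hypothetical code): the callee destroys the by-value temporary on the normal path, but if a later-evaluated argument throws before the call is made, the caller's cleanup must destroy the temporary it already built.

  struct Heavy { Heavy(); Heavy(const Heavy &); ~Heavy(); int payload[8]; };
  int mayThrow();
  void callee(int a, Heavy b);
  void caller() {
    callee(mayThrow(), Heavy());   // Heavy() is built into its argument slot
  }                                // first (right-to-left); if mayThrow() then
                                   // throws, the EH cleanup destroys it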
3527 
3528 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3529  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3530  // implicitly widens null pointer constants that are arguments to varargs
3531  // functions to pointer-sized ints.
3532  if (!getTarget().getTriple().isOSWindows())
3533  return Arg->getType();
3534 
3535  if (Arg->getType()->isIntegerType() &&
3536  getContext().getTypeSize(Arg->getType()) <
3537  getContext().getTargetInfo().getPointerWidth(0) &&
3538  Arg->isNullPointerConstant(getContext(),
3539  Expr::NPC_ValueDependentIsNotNull)) {
3540  return getContext().getIntPtrType();
3541  }
3542 
3543  return Arg->getType();
3544 }
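A sketch of the Win64 case handled above (hypothetical usage): NULL expands to the 32-bit integer literal 0 in Microsoft headers, so without this widening it would fill only half of the 64-bit variadic slot that a %p conversion reads.

  #include <cstdio>
  void demo(void *p) {
    std::printf("%p %p\n", p, NULL);   // the int 0 is widened to a pointer-sized
  }                                    // integer before being passed through '...'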
3545 
3546 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3547 // optimizer it can aggressively ignore unwind edges.
3548 void
3549 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3550  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3551  !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3552  Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3553  CGM.getNoObjCARCExceptionsMetadata());
3554 }
3555 
3556 /// Emits a call to the given no-arguments nounwind runtime function.
3557 llvm::CallInst *
3558 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3559  const llvm::Twine &name) {
3560  return EmitNounwindRuntimeCall(callee, None, name);
3561 }
3562 
3563 /// Emits a call to the given nounwind runtime function.
3564 llvm::CallInst *
3565 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3566  ArrayRef<llvm::Value*> args,
3567  const llvm::Twine &name) {
3568  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3569  call->setDoesNotThrow();
3570  return call;
3571 }
3572 
3573 /// Emits a simple call (never an invoke) to the given no-arguments
3574 /// runtime function.
3575 llvm::CallInst *
3576 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3577  const llvm::Twine &name) {
3578  return EmitRuntimeCall(callee, None, name);
3579 }
3580 
3581 // Calls which may throw must have operand bundles indicating which funclet
3582 // they are nested within.
3583 static void
3584 getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad,
3585  SmallVectorImpl<llvm::OperandBundleDef> &BundleList) {
3586  // There is no need for a funclet operand bundle if we aren't inside a
3587  // funclet.
3588  if (!CurrentFuncletPad)
3589  return;
3590 
3591  // Skip intrinsics which cannot throw.
3592  auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3593  if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3594  return;
3595 
3596  BundleList.emplace_back("funclet", CurrentFuncletPad);
3597 }
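A sketch of when the "funclet" bundle matters (hypothetical code, MSVC-style EH): a call emitted inside a catch handler runs inside a funclet, so the call site must carry an operand bundle naming the enclosing pad.

  void mightThrow();
  void log();
  void demo() {
    try {
      mightThrow();
    } catch (...) {
      log();   // emitted inside a catchpad funclet; the call carries a
    }          // "funclet" operand bundle referencing that pad
  }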
3598 
3599 /// Emits a simple call (never an invoke) to the given runtime function.
3600 llvm::CallInst *
3601 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3602  ArrayRef<llvm::Value*> args,
3603  const llvm::Twine &name) {
3604  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3605  getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
3606 
3607  llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList, name);
3608  call->setCallingConv(getRuntimeCC());
3609  return call;
3610 }
3611 
3612 /// Emits a call or invoke to the given noreturn runtime function.
3613 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3614  ArrayRef<llvm::Value*> args) {
3615  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3616  getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
3617 
3618  if (getInvokeDest()) {
3619  llvm::InvokeInst *invoke =
3620  Builder.CreateInvoke(callee,
3621  getUnreachableBlock(),
3622  getInvokeDest(),
3623  args,
3624  BundleList);
3625  invoke->setDoesNotReturn();
3626  invoke->setCallingConv(getRuntimeCC());
3627  } else {
3628  llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3629  call->setDoesNotReturn();
3630  call->setCallingConv(getRuntimeCC());
3631  Builder.CreateUnreachable();
3632  }
3633 }
3634 
3635 /// Emits a call or invoke instruction to the given nullary runtime function.
3636 llvm::CallSite
3637 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3638  const Twine &name) {
3639  return EmitRuntimeCallOrInvoke(callee, None, name);
3640 }
3641 
3642 /// Emits a call or invoke instruction to the given runtime function.
3643 llvm::CallSite
3644 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3645  ArrayRef<llvm::Value*> args,
3646  const Twine &name) {
3647  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3648  callSite.setCallingConv(getRuntimeCC());
3649  return callSite;
3650 }
3651 
3652 /// Emits a call or invoke instruction to the given function, depending
3653 /// on the current state of the EH stack.
3654 llvm::CallSite
3655 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3656  ArrayRef<llvm::Value *> Args,
3657  const Twine &Name) {
3658  llvm::BasicBlock *InvokeDest = getInvokeDest();
3659  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3660  getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);
3661 
3662  llvm::Instruction *Inst;
3663  if (!InvokeDest)
3664  Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3665  else {
3666  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3667  Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3668  Name);
3669  EmitBlock(ContBB);
3670  }
3671 
3672  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3673  // optimizer it can aggressively ignore unwind edges.
3674  if (CGM.getLangOpts().ObjCAutoRefCount)
3675  AddObjCARCExceptionMetadata(Inst);
3676 
3677  return llvm::CallSite(Inst);
3678 }
3679 
3680 /// \brief Store a non-aggregate value to an address to initialize it. For
3681 /// initialization, a non-atomic store will be used.
3682 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
3683  LValue Dst) {
3684  if (Src.isScalar())
3685  CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
3686  else
3687  CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
3688 }
3689 
3690 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3691  llvm::Value *New) {
3692  DeferredReplacements.push_back(std::make_pair(Old, New));
3693 }
3694 
3695 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3696  const CGCallee &Callee,
3697  ReturnValueSlot ReturnValue,
3698  const CallArgList &CallArgs,
3699  llvm::Instruction **callOrInvoke) {
3700  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3701 
3702  assert(Callee.isOrdinary());
3703 
3704  // Handle struct-return functions by passing a pointer to the
3705  // location that we would like to return into.
3706  QualType RetTy = CallInfo.getReturnType();
3707  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3708 
3709  llvm::FunctionType *IRFuncTy = Callee.getFunctionType();
3710 
3711  // 1. Set up the arguments.
3712 
3713  // If we're using inalloca, insert the allocation after the stack save.
3714  // FIXME: Do this earlier rather than hacking it in here!
3715  Address ArgMemory = Address::invalid();
3716  const llvm::StructLayout *ArgMemoryLayout = nullptr;
3717  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3718  const llvm::DataLayout &DL = CGM.getDataLayout();
3719  ArgMemoryLayout = DL.getStructLayout(ArgStruct);
3720  llvm::Instruction *IP = CallArgs.getStackBase();
3721  llvm::AllocaInst *AI;
3722  if (IP) {
3723  IP = IP->getNextNode();
3724  AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3725  "argmem", IP);
3726  } else {
3727  AI = CreateTempAlloca(ArgStruct, "argmem");
3728  }
3729  auto Align = CallInfo.getArgStructAlignment();
3730  AI->setAlignment(Align.getQuantity());
3731  AI->setUsedWithInAlloca(true);
3732  assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3733  ArgMemory = Address(AI, Align);
3734  }
3735 
3736  // Helper function to drill into the inalloca allocation.
3737  auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3738  auto FieldOffset =
3739  CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
3740  return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3741  };
3742 
3743  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3744  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3745 
3746  // If the call returns a temporary with struct return, create a temporary
3747  // alloca to hold the result, unless one is given to us.
3748  Address SRetPtr = Address::invalid();
3749  size_t UnusedReturnSize = 0;
3750  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3751  if (!ReturnValue.isNull()) {
3752  SRetPtr = ReturnValue.getValue();
3753  } else {
3754  SRetPtr = CreateMemTemp(RetTy);
3755  if (HaveInsertPoint() && ReturnValue.isUnused()) {
3756  uint64_t size =
3757  CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3758  if (EmitLifetimeStart(size, SRetPtr.getPointer()))
3759  UnusedReturnSize = size;
3760  }
3761  }
3762  if (IRFunctionArgs.hasSRetArg()) {
3763  IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3764  } else if (RetAI.isInAlloca()) {
3765  Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3766  Builder.CreateStore(SRetPtr.getPointer(), Addr);
3767  }
3768  }
3769 
3770  Address swiftErrorTemp = Address::invalid();
3771  Address swiftErrorArg = Address::invalid();
3772 
3773  // Translate all of the arguments as necessary to match the IR lowering.
3774  assert(CallInfo.arg_size() == CallArgs.size() &&
3775  "Mismatch between function signature & arguments.");
3776  unsigned ArgNo = 0;
3777  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3778  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3779  I != E; ++I, ++info_it, ++ArgNo) {
3780  const ABIArgInfo &ArgInfo = info_it->info;
3781  RValue RV = I->RV;
3782 
3783  // Insert a padding argument to ensure proper alignment.
3784  if (IRFunctionArgs.hasPaddingArg(ArgNo))
3785  IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3786  llvm::UndefValue::get(ArgInfo.getPaddingType());
3787 
3788  unsigned FirstIRArg, NumIRArgs;
3789  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3790 
3791  switch (ArgInfo.getKind()) {
3792  case ABIArgInfo::InAlloca: {
3793  assert(NumIRArgs == 0);
3794  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3795  if (RV.isAggregate()) {
3796  // Replace the placeholder with the appropriate argument slot GEP.
3797  llvm::Instruction *Placeholder =
3798  cast<llvm::Instruction>(RV.getAggregatePointer());
3799  CGBuilderTy::InsertPoint IP = Builder.saveIP();
3800  Builder.SetInsertPoint(Placeholder);
3801  Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3802  Builder.restoreIP(IP);
3803  deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3804  } else {
3805  // Store the RValue into the argument struct.
3806  Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3807  unsigned AS = Addr.getType()->getPointerAddressSpace();
3808  llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3809  // There are some cases where a trivial bitcast is not avoidable. The
3810  // definition of a type later in a translation unit may change its type
3811  // from {}* to (%struct.foo*)*.
3812  if (Addr.getType() != MemType)
3813  Addr = Builder.CreateBitCast(Addr, MemType);
3814  LValue argLV = MakeAddrLValue(Addr, I->Ty);
3815  EmitInitStoreOfNonAggregate(*this, RV, argLV);
3816  }
3817  break;
3818  }
3819 
3820  case ABIArgInfo::Indirect: {
3821  assert(NumIRArgs == 1);
3822  if (RV.isScalar() || RV.isComplex()) {
3823  // Make a temporary alloca to pass the argument.
3824  Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign(),
3825  "indirect-arg-temp", false);
3826  IRCallArgs[FirstIRArg] = Addr.getPointer();
3827 
3828  LValue argLV = MakeAddrLValue(Addr, I->Ty);
3829  EmitInitStoreOfNonAggregate(*this, RV, argLV);
3830  } else {
3831  // We want to avoid creating an unnecessary temporary+copy here;
3832  // however, we need one in three cases:
3833  // 1. If the argument is not byval, and we are required to copy the
3834  // source. (This case doesn't occur on any common architecture.)
3835  // 2. If the argument is byval, RV is not sufficiently aligned, and
3836  // we cannot force it to be sufficiently aligned.
3837  // 3. If the argument is byval, but RV is located in an address space
3838  // different than that of the argument (0).
3839  Address Addr = RV.getAggregateAddress();
3840  CharUnits Align = ArgInfo.getIndirectAlign();
3841  const llvm::DataLayout *TD = &CGM.getDataLayout();
3842  const unsigned RVAddrSpace = Addr.getType()->getAddressSpace();
3843  const unsigned ArgAddrSpace =
3844  (FirstIRArg < IRFuncTy->getNumParams()
3845  ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
3846  : 0);
3847  if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
3848  (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align &&
3849  llvm::getOrEnforceKnownAlignment(Addr.getPointer(),
3850  Align.getQuantity(), *TD)
3851  < Align.getQuantity()) ||
3852  (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
3853  // Create an aligned temporary, and copy to it.
3854  Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign(),
3855  "byval-temp", false);
3856  IRCallArgs[FirstIRArg] = AI.getPointer();
3857  EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
3858  } else {
3859  // Skip the extra memcpy call.
3860  IRCallArgs[FirstIRArg] = Addr.getPointer();
3861  }
3862  }
3863  break;
3864  }
3865 
3866  case ABIArgInfo::Ignore:
3867  assert(NumIRArgs == 0);
3868  break;
3869 
3870  case ABIArgInfo::Extend:
3871  case ABIArgInfo::Direct: {
3872  if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
3873  ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
3874  ArgInfo.getDirectOffset() == 0) {
3875  assert(NumIRArgs == 1);
3876  llvm::Value *V;
3877  if (RV.isScalar())
3878  V = RV.getScalarVal();
3879  else
3880  V = Builder.CreateLoad(RV.getAggregateAddress());
3881 
3882  // Implement swifterror by copying into a new swifterror argument.
3883  // We'll write back in the normal path out of the call.
3884  if (CallInfo.getExtParameterInfo(ArgNo).getABI()
3885  == ParameterABI::SwiftErrorResult) {
3886  assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
3887 
3888  QualType pointeeTy = I->Ty->getPointeeType();
3889  swiftErrorArg =
3890  Address(V, getContext().getTypeAlignInChars(pointeeTy));
3891 
3892  swiftErrorTemp =
3893  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
3894  V = swiftErrorTemp.getPointer();
3895  cast<llvm::AllocaInst>(V)->setSwiftError(true);
3896 
3897  llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
3898  Builder.CreateStore(errorValue, swiftErrorTemp);
3899  }
3900 
3901  // We might have to widen integers, but we should never truncate.
3902  if (ArgInfo.getCoerceToType() != V->getType() &&
3903  V->getType()->isIntegerTy())
3904  V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
3905 
3906  // If the argument doesn't match, perform a bitcast to coerce it. This
3907  // can happen due to trivial type mismatches.
3908  if (FirstIRArg < IRFuncTy->getNumParams() &&
3909  V->getType() != IRFuncTy->getParamType(FirstIRArg))
3910  V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
3911 
3912  IRCallArgs[FirstIRArg] = V;
3913  break;
3914  }
3915 
3916  // FIXME: Avoid the conversion through memory if possible.
3917  Address Src = Address::invalid();
3918  if (RV.isScalar() || RV.isComplex()) {
3919  Src = CreateMemTemp(I->Ty, "coerce");
3920  LValue SrcLV = MakeAddrLValue(Src, I->Ty);
3921  EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
3922  } else {
3923  Src = RV.getAggregateAddress();
3924  }
3925 
3926  // If the value is offset in memory, apply the offset now.
3927  Src = emitAddressAtOffset(*this, Src, ArgInfo);
3928 
3929  // Fast-isel and the optimizer generally like scalar values better than
3930  // FCAs, so we flatten them if this is safe to do for this argument.
3931  llvm::StructType *STy =
3932  dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
3933  if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
3934  llvm::Type *SrcTy = Src.getType()->getElementType();
3935  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
3936  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
3937 
3938  // If the source type is smaller than the destination type of the
3939  // coerce-to logic, copy the source value into a temp alloca the size
3940  // of the destination type to allow loading all of it. The bits past
3941  // the source value are left undef.
3942  if (SrcSize < DstSize) {
3943  Address TempAlloca
3944  = CreateTempAlloca(STy, Src.getAlignment(),
3945  Src.getName() + ".coerce");
3946  Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
3947  Src = TempAlloca;
3948  } else {
3949  Src = Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(STy));
3950  }
3951 
3952  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
3953  assert(NumIRArgs == STy->getNumElements());
3954  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3955  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
3956  Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
3957  llvm::Value *LI = Builder.CreateLoad(EltPtr);
3958  IRCallArgs[FirstIRArg + i] = LI;
3959  }
3960  } else {
3961  // In the simple case, just pass the coerced loaded value.
3962  assert(NumIRArgs == 1);
3963  IRCallArgs[FirstIRArg] =
3964  CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
3965  }
3966 
3967  break;
3968  }
3969 
3970  case ABIArgInfo::CoerceAndExpand: {
3971  auto coercionType = ArgInfo.getCoerceAndExpandType();
3972  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
3973 
3974  llvm::Value *tempSize = nullptr;
3975  Address addr = Address::invalid();
3976  if (RV.isAggregate()) {
3977  addr = RV.getAggregateAddress();
3978  } else {
3979  assert(RV.isScalar()); // complex should always just be direct
3980 
3981  llvm::Type *scalarType = RV.getScalarVal()->getType();
3982  auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
3983  auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
3984 
3985  tempSize = llvm::ConstantInt::get(CGM.Int64Ty, scalarSize);
3986 
3987  // Materialize to a temporary.
3988  addr = CreateTempAlloca(RV.getScalarVal()->getType(),
3989  CharUnits::fromQuantity(std::max(layout->getAlignment(),
3990  scalarAlign)));
3991  EmitLifetimeStart(scalarSize, addr.getPointer());
3992 
3993  Builder.CreateStore(RV.getScalarVal(), addr);
3994  }
3995 
3996  addr = Builder.CreateElementBitCast(addr, coercionType);
3997 
3998  unsigned IRArgPos = FirstIRArg;
3999  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4000  llvm::Type *eltType = coercionType->getElementType(i);
4001  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4002  Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4003  llvm::Value *elt = Builder.CreateLoad(eltAddr);
4004  IRCallArgs[IRArgPos++] = elt;
4005  }
4006  assert(IRArgPos == FirstIRArg + NumIRArgs);
4007 
4008  if (tempSize) {
4009  EmitLifetimeEnd(tempSize, addr.getPointer());
4010  }
4011 
4012  break;
4013  }
4014 
4015  case ABIArgInfo::Expand:
4016  unsigned IRArgPos = FirstIRArg;
4017  ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
4018  assert(IRArgPos == FirstIRArg + NumIRArgs);
4019  break;
4020  }
4021  }
4022 
4023  llvm::Value *CalleePtr = Callee.getFunctionPointer();
4024 
4025  // If we're using inalloca, set up that argument.
4026  if (ArgMemory.isValid()) {
4027  llvm::Value *Arg = ArgMemory.getPointer();
4028  if (CallInfo.isVariadic()) {
4029  // When passing non-POD arguments by value to variadic functions, we will
4030  // end up with a variadic prototype and an inalloca call site. In such
4031  // cases, we can't do any parameter mismatch checks. Give up and bitcast
4032  // the callee.
4033  unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4034  auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
4035  CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
4036  } else {
4037  llvm::Type *LastParamTy =
4038  IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4039  if (Arg->getType() != LastParamTy) {
4040 #ifndef NDEBUG
4041  // Assert that these structs have equivalent element types.
4042  llvm::StructType *FullTy = CallInfo.getArgStruct();
4043  llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4044  cast<llvm::PointerType>(LastParamTy)->getElementType());
4045  assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4046  for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4047  DE = DeclaredTy->element_end(),
4048  FI = FullTy->element_begin();
4049  DI != DE; ++DI, ++FI)
4050  assert(*DI == *FI);
4051 #endif
4052  Arg = Builder.CreateBitCast(Arg, LastParamTy);
4053  }
4054  }
4055  assert(IRFunctionArgs.hasInallocaArg());
4056  IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4057  }
4058 
4059  // 2. Prepare the function pointer.
4060 
4061  // If the callee is a bitcast of a non-variadic function to have a
4062  // variadic function pointer type, check to see if we can remove the
4063  // bitcast. This comes up with unprototyped functions.
4064  //
4065  // This makes the IR nicer, but more importantly it ensures that we
4066  // can inline the function at -O0 if it is marked always_inline.
4067  auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
4068  llvm::FunctionType *CalleeFT =
4069  cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
4070  if (!CalleeFT->isVarArg())
4071  return Ptr;
4072 
4073  llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
4074  if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
4075  return Ptr;
4076 
4077  llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
4078  if (!OrigFn)
4079  return Ptr;
4080 
4081  llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4082 
4083  // If the original type is variadic, or if any of the component types
4084  // disagree, we cannot remove the cast.
4085  if (OrigFT->isVarArg() ||
4086  OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4087  OrigFT->getReturnType() != CalleeFT->getReturnType())
4088  return Ptr;
4089 
4090  for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4091  if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4092  return Ptr;
4093 
4094  return OrigFn;
4095  };
4096  CalleePtr = simplifyVariadicCallee(CalleePtr);
4097 
4098  // 3. Perform the actual call.
4099 
4100  // Deactivate any cleanups that we're supposed to do immediately before
4101  // the call.
4102  if (!CallArgs.getCleanupsToDeactivate().empty())
4103  deactivateArgCleanupsBeforeCall(*this, CallArgs);
4104 
4105  // Assert that the arguments we computed match up. The IR verifier
4106  // will catch this, but this is a common enough source of problems
4107  // during IRGen changes that it's way better for debugging to catch
4108  // it ourselves here.
4109 #ifndef NDEBUG
4110  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4111  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4112  // Inalloca argument can have different type.
4113  if (IRFunctionArgs.hasInallocaArg() &&
4114  i == IRFunctionArgs.getInallocaArgNo())
4115  continue;
4116  if (i < IRFuncTy->getNumParams())
4117  assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4118  }
4119 #endif
4120 
4121  // Compute the calling convention and attributes.
4122  unsigned CallingConv;
4123  llvm::AttributeList Attrs;
4124  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4125  Callee.getAbstractInfo(), Attrs, CallingConv,
4126  /*AttrOnCallSite=*/true);
4127 
4128  // Apply some call-site-specific attributes.
4129  // TODO: work this into building the attribute set.
4130 
4131  // Apply always_inline to all calls within flatten functions.
4132  // FIXME: should this really take priority over __try, below?
4133  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4134  !(Callee.getAbstractInfo().getCalleeDecl() &&
4135  Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) {
4136  Attrs =
4137  Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4138  llvm::Attribute::AlwaysInline);
4139  }
4140 
4141  // Disable inlining inside SEH __try blocks.
4142  if (isSEHTryScope()) {
4143  Attrs =
4144  Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4145  llvm::Attribute::NoInline);
4146  }
4147 
4148  // Decide whether to use a call or an invoke.
4149  bool CannotThrow;
4150  if (currentFunctionUsesSEHTry()) {
4151  // SEH cares about asynchronous exceptions, so everything can "throw."
4152  CannotThrow = false;
4153  } else if (isCleanupPadScope() &&
4154  EHPersonality::get(*this).isMSVCXXPersonality()) {
4155  // The MSVC++ personality will implicitly terminate the program if an
4156  // exception is thrown during a cleanup outside of a try/catch.
4157  // We don't need to model anything in IR to get this behavior.
4158  CannotThrow = true;
4159  } else {
4160  // Otherwise, nounwind call sites will never throw.
4161  CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4162  llvm::Attribute::NoUnwind);
4163  }
4164  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4165 
4166  SmallVector<llvm::OperandBundleDef, 1> BundleList;
4167  getBundlesForFunclet(CalleePtr, CurrentFuncletPad, BundleList);
4168 
4169  // Emit the actual call/invoke instruction.
4170  llvm::CallSite CS;
4171  if (!InvokeDest) {
4172  CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
4173  } else {
4174  llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4175  CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
4176  BundleList);
4177  EmitBlock(Cont);
4178  }
4179  llvm::Instruction *CI = CS.getInstruction();
4180  if (callOrInvoke)
4181  *callOrInvoke = CI;
4182 
4183  // Apply the attributes and calling convention.
4184  CS.setAttributes(Attrs);
4185  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4186 
4187  // Apply various metadata.
4188 
4189  if (!CI->getType()->isVoidTy())
4190  CI->setName("call");
4191 
4192  // Insert instrumentation or attach profile metadata at indirect call sites.
4193  // For more details, see the comment before the definition of
4194  // IPVK_IndirectCallTarget in InstrProfData.inc.
4195  if (!CS.getCalledFunction())
4196  PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4197  CI, CalleePtr);
4198 
4199  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4200  // optimizer it can aggressively ignore unwind edges.
4201  if (CGM.getLangOpts().ObjCAutoRefCount)
4202  AddObjCARCExceptionMetadata(CI);
4203 
4204  // Suppress tail calls if requested.
4205  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4206  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4207  if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4208  Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4209  }
4210 
4211  // 4. Finish the call.
4212 
4213  // If the call doesn't return, finish the basic block and clear the
4214  // insertion point; this allows the rest of IRGen to discard
4215  // unreachable code.
4216  if (CS.doesNotReturn()) {
4217  if (UnusedReturnSize)
4218  EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
4219  SRetPtr.getPointer());
4220 
4221  Builder.CreateUnreachable();
4222  Builder.ClearInsertionPoint();
4223 
4224  // FIXME: For now, emit a dummy basic block because expr emitters in
4225  // general are not ready to handle emitting expressions at unreachable
4226  // points.
4227  EnsureInsertPoint();
4228 
4229  // Return a reasonable RValue.
4230  return GetUndefRValue(RetTy);
4231  }
4232 
4233  // Perform the swifterror writeback.
4234  if (swiftErrorTemp.isValid()) {
4235  llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4236  Builder.CreateStore(errorResult, swiftErrorArg);
4237  }
4238 
4239  // Emit any call-associated writebacks immediately. Arguably this
4240  // should happen after any return-value munging.
4241  if (CallArgs.hasWritebacks())
4242  emitWritebacks(*this, CallArgs);
4243 
4244  // The stack cleanup for inalloca arguments has to run out of the normal
4245  // lexical order, so deactivate it and run it manually here.
4246  CallArgs.freeArgumentMemory(*this);
4247 
4248  // Extract the return value.
4249  RValue Ret = [&] {
4250  switch (RetAI.getKind()) {
4251  case ABIArgInfo::CoerceAndExpand: {
4252  auto coercionType = RetAI.getCoerceAndExpandType();
4253  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4254 
4255  Address addr = SRetPtr;
4256  addr = Builder.CreateElementBitCast(addr, coercionType);
4257 
4258  assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4259  bool requiresExtract = isa<llvm::StructType>(CI->getType());
4260 
4261  unsigned unpaddedIndex = 0;
4262  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4263  llvm::Type *eltType = coercionType->getElementType(i);
4264  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4265  Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4266  llvm::Value *elt = CI;
4267  if (requiresExtract)
4268  elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4269  else
4270  assert(unpaddedIndex == 0);
4271  Builder.CreateStore(elt, eltAddr);
4272  }
4273  // FALLTHROUGH
4274  LLVM_FALLTHROUGH;
4275  }
4276 
4277  case ABIArgInfo::InAlloca:
4278  case ABIArgInfo::Indirect: {
4279  RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4280  if (UnusedReturnSize)
4281  EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
4282  SRetPtr.getPointer());
4283  return ret;
4284  }
4285 
4286  case ABIArgInfo::Ignore:
4287  // If we are ignoring an argument that had a result, make sure to
4288  // construct the appropriate return value for our caller.
4289  return GetUndefRValue(RetTy);
4290 
4291  case ABIArgInfo::Extend:
4292  case ABIArgInfo::Direct: {
4293  llvm::Type *RetIRTy = ConvertType(RetTy);
4294  if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4295  switch (getEvaluationKind(RetTy)) {
4296  case TEK_Complex: {
4297  llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4298  llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4299  return RValue::getComplex(std::make_pair(Real, Imag));
4300  }
4301  case TEK_Aggregate: {
4302  Address DestPtr = ReturnValue.getValue();
4303  bool DestIsVolatile = ReturnValue.isVolatile();
4304 
4305  if (!DestPtr.isValid()) {
4306  DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4307  DestIsVolatile = false;
4308  }
4309  BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4310  return RValue::getAggregate(DestPtr);
4311  }
4312  case TEK_Scalar: {
4313  // If the argument doesn't match, perform a bitcast to coerce it. This
4314  // can happen due to trivial type mismatches.
4315  llvm::Value *V = CI;
4316  if (V->getType() != RetIRTy)
4317  V = Builder.CreateBitCast(V, RetIRTy);
4318  return RValue::get(V);
4319  }
4320  }
4321  llvm_unreachable("bad evaluation kind");
4322  }
4323 
4324  Address DestPtr = ReturnValue.getValue();
4325  bool DestIsVolatile = ReturnValue.isVolatile();
4326 
4327  if (!DestPtr.isValid()) {
4328  DestPtr = CreateMemTemp(RetTy, "coerce");
4329  DestIsVolatile = false;
4330  }
4331 
4332  // If the value is offset in memory, apply the offset now.
4333  Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4334  CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4335 
4336  return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4337  }
4338 
4339  case ABIArgInfo::Expand:
4340  llvm_unreachable("Invalid ABI kind for return argument");
4341  }
4342 
4343  llvm_unreachable("Unhandled ABIArgInfo::Kind");
4344  } ();
4345 
4346  // Emit the assume_aligned check on the return value.
4347  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4348  if (Ret.isScalar() && TargetDecl) {
4349  if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4350  llvm::Value *OffsetValue = nullptr;
4351  if (const auto *Offset = AA->getOffset())
4352  OffsetValue = EmitScalarExpr(Offset);
4353 
4354  llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4355  llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4356  EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
4357  OffsetValue);
4358  } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4359  llvm::Value *ParamVal =
4360  CallArgs[AA->getParamIndex() - 1].RV.getScalarVal();
4361  EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal);
4362  }
4363  }
4364 
4365  return Ret;
4366 }
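A sketch of declarations that trigger the return-value alignment assumptions emitted at the end of EmitCall (hypothetical names): assume_aligned supplies a constant alignment, while alloc_align takes the alignment from one of the call's own arguments.

  #include <cstddef>
  void *pool_alloc(std::size_t n) __attribute__((assume_aligned(64)));
  void *aligned_pool_alloc(std::size_t align, std::size_t n)
      __attribute__((alloc_align(1)));
  void demo() {
    void *a = pool_alloc(128);             // llvm.assume: 'a' is 64-byte aligned
    void *b = aligned_pool_alloc(32, 128); // llvm.assume: 'b' aligned to argument 1
    (void)a;
    (void)b;
  }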
4367 
4368 /* VarArg handling */
4369 
4370 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4371  VAListAddr = VE->isMicrosoftABI()
4372  ? EmitMSVAListRef(VE->getSubExpr())
4373  : EmitVAListRef(VE->getSubExpr());
4374  QualType Ty = VE->getType();
4375  if (VE->isMicrosoftABI())
4376  return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4377  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4378 }
static CanQual< Type > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
Definition: ExprObjC.h:1464
ReturnValueSlot - Contains the address where the return value of a function can be stored...
Definition: CGCall.h:281
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
Definition: Expr.h:409
FunctionDecl - An instance of this class is created to represent a function declaration or definition...
Definition: Decl.h:1618
StringRef getName() const
getName - Get the name of identifier for this declaration as a StringRef.
Definition: Decl.h:237
ObjCEntrypoints & getObjCEntrypoints() const
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:2224
Complete object ctor.
Definition: ABI.h:26
CanQualType VoidPtrTy
Definition: ASTContext.h:978
A (possibly-)qualified type.
Definition: Type.h:616
llvm::Type * ConvertTypeForMem(QualType T)
CanQualType getReturnType() const
ExtInfo withCallingConv(CallingConv cc) const
Definition: Type.h:3042
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
Definition: CGCall.cpp:79
CXXCtorType getCtorType() const
Definition: GlobalDecl.h:64
unsigned getInAllocaFieldIndex() const
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
Definition: CGCall.cpp:3046
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign)
Create a temporary allocation for the purposes of coercion.
Definition: CGCall.cpp:1089
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
Definition: CGCall.cpp:2658
static const CGFunctionInfo & arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM, const CallArgList &args, const FunctionType *fnType, unsigned numExtraRequiredArgs, bool chainCall)
Arrange a call as unto a free function, except possibly with an additional number of formal parameter...
Definition: CGCall.cpp:545
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition: Type.h:2923
const TargetInfo & getTarget() const
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty, const FunctionDecl *FD)
Arrange the argument and result information for a value of the given freestanding function type...
Definition: CGCall.cpp:187
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:65
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:179
static bool isProvablyNull(llvm::Value *addr)
Definition: CGCall.cpp:3041
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:81
Address getAddress() const
Definition: CGValue.h:346
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
Definition: CGCall.cpp:242
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
bool hasNonTrivialDestructor() const
Determine whether this class has a non-trivial destructor (C++ [class.dtor]p3)
Definition: DeclCXX.h:1356
bool hasFlexibleArrayMember() const
Definition: Decl.h:3406
ASTContext & getContext() const
Definition: CodeGenTypes.h:176
const llvm::DataLayout & getDataLayout() const
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type...
Definition: Type.h:3492
The base class of the type hierarchy.
Definition: Type.h:1303
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition: CGExpr.cpp:1749
RValue asAggregateRValue() const
Definition: CGValue.h:450
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument Ar...
Definition: CGCall.cpp:2132
static int getExpansionSize(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:942
bool isBlockPointerType() const
Definition: Type.h:5718
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
Retain the given object, with normal retain semantics.
Definition: CGObjC.cpp:1983
CanProxy< U > castAs() const
static llvm::SmallVector< FunctionProtoType::ExtParameterInfo, 16 > getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:360
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2329
virtual AddedStructorArgs buildStructorSignature(const CXXMethodDecl *MD, StructorType T, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters...
bool hasUnsignedIntegerRepresentation() const
Determine whether this type has an unsigned integer representation of some sort, e.g., it is an unsigned integer type or a vector.
Definition: Type.cpp:1814
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
Definition: CGCall.cpp:2112
void freeArgumentMemory(CodeGenFunction &CGF) const
Definition: CGCall.cpp:3275
Default closure variant of a ctor.
Definition: ABI.h:30
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped function under the given calling convention should use t...
Definition: TargetInfo.cpp:385
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
VarDecl - An instance of this class is created to represent a variable declaration or definition...
Definition: Decl.h:758
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:52
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:104
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
bool areArgsDestroyedLeftToRightInCallee() const
Are arguments to a call destroyed left to right in the callee? This is a fundamental language change...
Definition: TargetCXXABI.h:216
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from the given address, coercing the loaded value to the given type.
Definition: CGCall.cpp:1193
void setCoerceToType(llvm::Type *T)
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:1924
ObjCMethodDecl - Represents an instance or class method declaration.
Definition: DeclObjC.h:113
static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src, LValue Dst)
Store a non-aggregate value to an address to initialize it.
Definition: CGCall.cpp:3682
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:227
const CGFunctionInfo & arrangeCXXStructorDeclaration(const CXXMethodDecl *MD, StructorType Type)
Definition: CGCall.cpp:288
ParmVarDecl - Represents a parameter to a function.
Definition: Decl.h:1434
bool isObjCRetainableType() const
Definition: Type.cpp:3751
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant...
Definition: Expr.cpp:3209
bool isVoidType() const
Definition: Type.h:5906
unsigned getNumParams() const
Definition: Type.h:3338
RecordDecl - Represents a struct/union/class.
Definition: Decl.h:3354
const_arg_iterator arg_end() const
An object to manage conditionally-evaluated expressions.
Description of a constructor that was inherited from a base class.
Definition: DeclCXX.h:2303
ExtInfo withProducesResult(bool producesResult) const
Definition: Type.h:3022
static void emitWritebacks(CodeGenFunction &CGF, const CallArgList &args)
Definition: CGCall.cpp:3111
ParameterABI getABI() const
Return the ABI treatment of this parameter.
Definition: Type.h:3162
bool hasAttr() const
Definition: DeclBase.h:521
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:128
llvm::Constant * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
bool isReferenceType() const
Definition: Type.h:5721
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
Do a fused retain/autorelease of the given object.
Definition: CGObjC.cpp:2216
FieldDecl - An instance of this class is created by Sema::ActOnField to represent a member of a struc...
Definition: Decl.h:2366
bool isAnyPointerType() const
Definition: Type.h:5715
StructorType getFromDtorType(CXXDtorType T)
Definition: CodeGenTypes.h:104
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
Definition: TargetInfo.cpp:404
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
Interesting information about a specific parameter that can't simply be reflected in parameter's type...
Definition: Type.h:3150
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
Definition: Type.h:3493
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:81
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
Autorelease the given object.
Definition: CGObjC.cpp:2206
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
Definition: CGCall.cpp:3140
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
llvm::Type * getCoerceToType() const
unsigned getRegParm() const
Definition: Type.h:2997
const Decl * getDecl() const
Definition: GlobalDecl.h:62
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Definition: ExprObjC.h:1494
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
Definition: CGCall.h:241
T * getAttr() const
Definition: DeclBase.h:518
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, LValueBaseInfo BaseInfo=LValueBaseInfo(AlignmentSource::Type), llvm::MDNode *TBAAInfo=nullptr, QualType TBAABaseTy=QualType(), uint64_t TBAAOffset=0, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
Definition: CGExpr.cpp:1437
CharUnits getArgStructAlignment() const
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment...
const CGFunctionInfo & arrangeFreeFunctionType(CodeGenModule &CGM, CanQual< FunctionProtoType > Ty, const FunctionDecl *FD)
CharUnits getAlignment() const
Definition: CGValue.h:335
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
Definition: CGCall.cpp:2531
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
Definition: CGBuilder.h:150
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
uint32_t Offset
Definition: CacheTokens.cpp:43
Qualifiers::ObjCLifetime getObjCLifetime() const
Definition: CGValue.h:279
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed...
QualType getReturnType() const
Definition: Type.h:3065
Wrapper for source info for functions.
Definition: TypeLoc.h:1357
const CXXRecordDecl * getParent() const
Returns the parent of this method declaration, which is the class in which this method is defined...
Definition: DeclCXX.h:2018
field_range fields() const
Definition: Decl.h:3483
const Expr * getSubExpr() const
Definition: Expr.h:3772
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:252
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
static void appendParameterTypes(const CodeGenTypes &CGT, SmallVectorImpl< CanQualType > &prefix, SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, CanQual< FunctionProtoType > FPT)
Adds the formal parameters in FPT to the given prefix.
Definition: CGCall.cpp:134
Values of this type can never be null.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
Definition: EHScopeStack.h:85
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types...
Definition: Type.cpp:1930
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
Definition: CGCall.cpp:263
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
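For reference, the bit-size and CharUnits-size queries on ASTContext differ only by the target's character width. A minimal sketch (illustrative only; Ctx and T are an assumed ASTContext and complete QualType, not names from this file):
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include <cassert>
#include <cstdint>

// Relates getTypeSize (bits) to getTypeSizeInChars (character units).
void checkSizeQueries(clang::ASTContext &Ctx, clang::QualType T) {
  uint64_t Bits = Ctx.getTypeSize(T);                 // size in bits
  clang::CharUnits Chars = Ctx.getTypeSizeInChars(T); // size in characters
  assert((uint64_t)Chars.getQuantity() * Ctx.getCharWidth() == Bits);
}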
An ordinary object is located at an address in memory.
Definition: Specifiers.h:122
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition: CGExpr.cpp:90
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, bool instanceMethod, bool chainCall, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
Definition: CGCall.cpp:720
bool isValid() const
Definition: Address.h:36
detail::InMemoryDirectory::const_iterator I
llvm::StructType * getCoerceAndExpandType() const
QualType getCanonicalTypeInternal() const
Definition: Type.h:2045
QualType getType() const
Definition: Decl.h:589
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD)
Derives the 'this' type for codegen purposes, i.e.
Definition: CGCall.cpp:73
const CGFunctionInfo & arrangeObjCMessageSendSignature(CodeGenModule &CGM, const ObjCMethodDecl *MD, QualType receiverType)
Represents a K&R-style 'int foo()' function, which has no information available about its arguments...
Definition: Type.h:3095
unsigned getNumRequiredArgs() const
bool isUnion() const
Definition: Decl.h:3028
llvm::Type * getUnpaddedCoerceAndExpandType() const
Const iterator for iterating over Stmt * arrays that contain only Expr *.
Definition: Stmt.h:329
ExtInfo getExtInfo() const
Definition: Type.h:3074
CanQualType getCanonicalTypeUnqualified() const
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Represents a prototype with parameter type info, e.g.
Definition: Type.h:3129
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
Definition: Type.h:3371
const TargetCodeGenInfo & getTargetCodeGenInfo()
const Decl * getCalleeDecl() const
Definition: CGCall.h:62
writeback_const_range writebacks() const
Definition: CGCall.h:237
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Definition: CGValue.h:38
ArrayRef< CallArgCleanup > getCleanupsToDeactivate() const
Definition: CGCall.h:249
bool hasConstructorVariants() const
Does this ABI have different entrypoints for complete-object and base-subobject constructors?
Definition: TargetCXXABI.h:222
ASTContext * Context
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse)
Definition: CGCall.h:226
const FunctionProtoType * getCalleeFunctionProtoType() const
Definition: CGCall.h:59
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:3754
bool getNoCallerSavedRegs() const
Definition: Type.h:2995
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee...
Definition: Type.cpp:414
Address Temporary
The temporary alloca.
Definition: CGCall.h:193
llvm::Value * ToUse
A value to "use" after the writeback, or null.
Definition: CGCall.h:196
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration type whose underlying ty...
Definition: Type.cpp:1760
llvm::Value * getPointer() const
Definition: Address.h:38
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
Definition: CGCall.cpp:2982
Expr - This represents one expression.
Definition: Expr.h:105
CXXDtorType getDtorType() const
Definition: GlobalDecl.h:69
bool isInstance() const
Definition: DeclCXX.h:1930
bool isAggregate() const
Definition: CGValue.h:53
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
Definition: CGCall.cpp:2977
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type...
Definition: CGCall.cpp:88
const CGCalleeInfo & getAbstractInfo() const
Definition: CGCall.h:153
bool isVirtual() const
Definition: DeclCXX.h:1947
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2551
llvm::Constant * objc_retain
id objc_retain(id);
CharUnits getIndirectAlign() const
RValue asRValue() const
Definition: CGValue.h:593
bool getNoReturn() const
Definition: Type.h:2993
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
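A minimal usage sketch of these CharUnits helpers (illustrative only, not part of CGCall.cpp):
#include "clang/AST/CharUnits.h"
#include <cassert>

// CharUnits quantities round-trip through fromQuantity/getQuantity and
// compare against Zero().
void charUnitsSketch() {
  clang::CharUnits Sixteen = clang::CharUnits::fromQuantity(16);
  assert(Sixteen.getQuantity() == 16);
  assert(!Sixteen.isZero());
  assert(clang::CharUnits::Zero().isZero());
}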
ImplicitParamDecl * getSelfDecl() const
Definition: DeclObjC.h:408
void add(RValue rvalue, QualType type, bool needscopy=false)
Definition: CGCall.h:207
static SmallVector< CanQualType, 16 > getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
Definition: CGCall.cpp:344
llvm::Value * getFunctionPointer() const
Definition: CGCall.h:157
static void eraseUnusedBitCasts(llvm::Instruction *insn)
Definition: CGCall.cpp:2519
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, LValueBaseInfo BaseInfo=LValueBaseInfo(AlignmentSource::Type), llvm::MDNode *TBAAInfo=nullptr, bool isInit=false, QualType TBAABaseTy=QualType(), uint64_t TBAAOffset=0, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
Definition: CGExpr.cpp:1527
char __ovld __cnfn min(char x, char y)
Returns y if y < x, otherwise it returns x.
A class for recording the number of arguments that a function signature requires. ...
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
Definition: CGCall.cpp:1322
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
Definition: CGCall.cpp:2676
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition: CGExpr.cpp:896
UnaryOperator - This represents the unary-expression's (except sizeof and alignof), the postinc/postdec operators from postfix-expression, and various extensions.
Definition: Expr.h:1714
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys=None)
bool isGLValue() const
Definition: Expr.h:251
llvm::Type * getPaddingType() const
ARCPreciseLifetime_t isARCPreciseLifetime() const
Definition: CGValue.h:298
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration type whose underlying ...
Definition: Type.cpp:1800
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
Definition: CGCall.cpp:1139
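The coercion described here can be sketched with the LLVM IRBuilder API (a simplified illustration, not the CGCall.cpp implementation; B and DL are an assumed IRBuilder and the module's DataLayout):
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"

// Coerce Val between integer and pointer representations of possibly
// different widths, going through the target's pointer-sized integer.
llvm::Value *coerceIntOrPtr(llvm::IRBuilder<> &B, const llvm::DataLayout &DL,
                            llvm::Value *Val, llvm::Type *DstTy) {
  llvm::Type *SrcTy = Val->getType();
  if (SrcTy == DstTy)
    return Val;
  if (SrcTy->isPointerTy())
    Val = B.CreatePtrToInt(Val, DL.getIntPtrType(SrcTy));
  if (DstTy->isPointerTy()) {
    Val = B.CreateZExtOrTrunc(Val, DL.getIntPtrType(DstTy));
    return B.CreateIntToPtr(Val, DstTy);
  }
  return B.CreateZExtOrTrunc(Val, DstTy);
}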
CallingConv
CallingConv - Specifies the calling convention that a function uses.
Definition: Specifiers.h:232
bool hasWritebacks() const
Definition: CGCall.h:232
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:29
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val, Address Dest, bool DestIsVolatile)
Definition: CGCall.cpp:1244
bool isVolatile() const
Definition: CGValue.h:314
ArrayRef< ParmVarDecl * > parameters() const
Definition: DeclObjC.h:371
static void CreateCoercedStore(llvm::Value *Src, Address Dst, bool DstIsVolatile, CodeGenFunction &CGF)
CreateCoercedStore - Create a store of the given value to the destination address, coercing between the source and destination types as needed.
Definition: CGCall.cpp:1269
Enumerates target-specific builtins in their own namespaces within namespace clang.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:142
Assigning into this object requires the old value to be released and the new value to be retained...
Definition: Type.h:146
Kind
Address getValue() const
Definition: CGCall.h:301
bool isSimple() const
Definition: CGValue.h:265
FunctionType::ExtInfo getExtInfo() const
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
Definition: CGCall.cpp:3129
ASTContext & getContext() const
RequiredArgs getRequiredArgs() const
Encodes a location in the source.
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
Definition: CGObjC.cpp:1805
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
Release the given object.
Definition: CGObjC.cpp:2090
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Definition: CGCall.cpp:278
const std::string ID
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6.7.5p3.
Definition: Type.cpp:1920
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
Definition: CGCleanup.cpp:1230
An aggregate value slot.
Definition: CGValue.h:456
bool isVariadic() const
Definition: DeclObjC.h:418
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:1903
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one...
const CGFunctionInfo & arrangeFreeFunctionCall(CodeGenModule &CGM, CanQualType returnType, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, RequiredArgs args)
void allocateArgumentMemory(CodeGenFunction &CGF)
Definition: CGCall.cpp:3267
const ConstantArrayType * getAsConstantArrayType(QualType T) const
Definition: ASTContext.h:2235
bool isNothrow(const ASTContext &Ctx, bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
Definition: Type.h:3437
Specifies that a value-dependent expression should be considered to never be a null pointer constant...
Definition: Expr.h:703
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:197
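For example, with a first-element alignment of 16 and an element size of 12, elements land at offsets 0, 12, 24, ..., so an arbitrary element is only guaranteed 4-byte alignment. A minimal sketch with those illustrative values:
#include "clang/AST/CharUnits.h"
#include <cassert>

// The minimum element alignment is the largest power of two dividing the
// element size, capped at the first element's alignment.
void arrayElementAlignmentSketch() {
  clang::CharUnits FirstElemAlign = clang::CharUnits::fromQuantity(16);
  clang::CharUnits ElemSize = clang::CharUnits::fromQuantity(12);
  assert(FirstElemAlign.alignmentOfArrayElement(ElemSize).getQuantity() == 4);
}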
CanQualType VoidTy
Definition: ASTContext.h:963
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retain...
const CodeGenOptions & getCodeGenOpts() const
An aligned address.
Definition: Address.h:25
QualType getReturnType() const
Definition: DeclObjC.h:330
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:6105
All available information about a concrete callee.
Definition: CGCall.h:66
static SmallVector< CanQualType, 16 > getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
Definition: CGCall.cpp:352
Complete object dtor.
Definition: ABI.h:36
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
Definition: CGCall.cpp:1663
ConstructorUsingShadowDecl * getShadowDecl() const
Definition: DeclCXX.h:2315
llvm::StringRef getName() const
Return the IR name of the pointer value.
Definition: Address.h:62
CXXCtorType
C++ constructor types.
Definition: ABI.h:25
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:551
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C 'SEL' type.
Definition: ASTContext.h:1734
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
Definition: CGCall.cpp:1103
FunctionArgList - Type for representing both the decl and type of parameters to a function...
Definition: CGCall.h:276
QualType getType() const
Definition: Expr.h:127
CGFunctionInfo - Class to encapsulate the information about a function definition.
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:67
This class organizes the cross-function state that is used while generating LLVM code.
bool canHaveCoerceToType() const
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This)
Definition: CGClass.cpp:2302
uint64_t SanitizerMask
Definition: Sanitizers.h:24
llvm::Value * getAggregatePointer() const
Definition: CGValue.h:75
bool isScalar() const
Definition: CGValue.h:51
StringRef Name
Definition: USRFinder.cpp:123
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:116
unsigned getDirectOffset() const
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
Definition: DeclCXX.cpp:1437
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
Definition: ASTMatchers.h:2126
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:58
CodeGenFunction::ComplexPairTy ComplexPairTy
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
Definition: CGBuilder.h:165
CXXDtorType toCXXDtorType(StructorType T)
Definition: CodeGenTypes.h:92
LValue Source
The original argument.
Definition: CGCall.h:190
bool getProducesResult() const
Definition: Type.h:2994
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:91
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
Definition: CGCall.cpp:984
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP, const FunctionDecl *FD)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
Definition: CGCall.cpp:167
std::unique_ptr< DiagnosticConsumer > create(StringRef OutputFile, DiagnosticOptions *Diags, bool MergeChildRecords=false)
Returns a DiagnosticConsumer that serializes diagnostics to a bitcode file.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:70
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CallingConv getCC() const
Definition: Type.h:3003
SourceLocation getLocStart() const LLVM_READONLY
Definition: Decl.h:683
detail::InMemoryDirectory::const_iterator E
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:108
static void getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad, SmallVectorImpl< llvm::OperandBundleDef > &BundleList)
Definition: CGCall.cpp:3584
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:44
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:3784
Complex values, per C99 6.2.5p11.
Definition: Type.h:2164
StructorType getFromCtorType(CXXCtorType T)
Definition: CodeGenTypes.h:77
static bool classof(const OMPClause *T)
const T * getAs() const
Member-template getAs<specific type>.
Definition: Type.h:6042
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
Definition: CGCall.cpp:2619
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:44
This class organizes the cross-module state that is used while lowering AST types to LLVM types...
Definition: CodeGenTypes.h:120
Address getAddress() const
Definition: CGValue.h:577
Base for LValueReferenceType and RValueReferenceType.
Definition: Type.h:2360
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1548
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Definition: Decl.h:1909
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:885
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
Definition: Type.h:5553
bool isComplex() const
Definition: CGValue.h:52
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:436
const ParmVarDecl * getParamDecl(unsigned I) const
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl...
Represents a base class of a C++ class.
Definition: DeclCXX.h:158
char __ovld __cnfn max(char x, char y)
Returns y if x < y, otherwise it returns x.
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate.h) and friends (in DeclFriend.h).
Represents a C++ struct/union/class.
Definition: DeclCXX.h:267
BoundNodesTreeBuilder *const Builder
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Definition: TargetInfo.h:861
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(const CXXMethodDecl *MD)
Get the type of the implicit "this" parameter used by a method.
Definition: CGCXXABI.h:329
bool isUsingInAlloca() const
Returns whether we're using an inalloca struct to pass arguments in memory.
Definition: CGCall.h:259
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition: DeclBase.h:505
llvm::Type * ConvertType(QualType T)
LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo=LValueBaseInfo(AlignmentSource::Type))
CallingConv getDefaultCallingConvention(bool isVariadic, bool IsCXXMethod) const
Retrieves the default calling convention for the current target.
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
LValue EmitLValue(const Expr *E)
EmitLValue - Emit code to compute a designator that specifies the location of the expression...
Definition: CGExpr.cpp:1082
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
llvm::Instruction * getStackBase() const
Definition: CGCall.h:254
This class is used for builtin types like 'int'.
Definition: Type.h:2084
Address getAggregateAddress() const
getAggregateAddress() - Return the address of the aggregate.
Definition: CGValue.h:70
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
Definition: CanonicalType.h:70
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition: CGExpr.cpp:1595
Copying closure variant of a ctor.
Definition: ABI.h:29
Defines the clang::TargetInfo interface.
const_arg_iterator arg_begin() const
static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows)
Definition: CGCall.cpp:194
bool getHasRegParm() const
Definition: Type.h:2996
CanQualType IntTy
Definition: ASTContext.h:971
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
Definition: Expr.h:3777
llvm::FunctionType * getFunctionType() const
Definition: CGCall.h:161
unsigned getTargetAddressSpace(QualType T) const
Definition: ASTContext.h:2321
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
Definition: CGCall.cpp:3117
bool constructsVirtualBase() const
Returns true if the constructed base class is a virtual base class subobject of this declaration's cl...
Definition: DeclCXX.h:3160
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional, const FunctionDecl *FD)
Compute the arguments required by the given formal prototype, given that there may be some additional...
bool isVolatileQualified() const
Definition: CGValue.h:55
CXXCtorType toCXXCtorType(StructorType T)
Definition: CodeGenTypes.h:65
bool getIndirectRealign() const
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
LValue - This represents an lvalue reference.
Definition: CGValue.h:171
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
Definition: DeclCXX.h:752
An abstract representation of regular/ObjC call/message targets.
Information for lazily generating a cleanup.
Definition: EHScopeStack.h:147
Notes how many arguments were added to the beginning (Prefix) and ending (Suffix) of an arg list...
Definition: CGCXXABI.h:299
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const
bool hasSignedIntegerRepresentation() const
Determine whether this type has a signed integer representation of some sort, e.g., it is a signed integer type or a vector.
Definition: Type.cpp:1774
bool isOrdinary() const
Definition: CGCall.h:150
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:182
Represents the canonical version of C arrays with a specified constant size.
Definition: Type.h:2553
Abstract information about a function or function prototype.
Definition: CGCall.h:44
A class which abstracts out some details necessary for making a call.
Definition: Type.h:2948
This parameter (which must have pointer type) is a Swift indirect result parameter.
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:5928
Expr * IgnoreParens() LLVM_READONLY
IgnoreParens - Ignore parentheses.
Definition: Expr.cpp:2368
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:5516
static CharUnits getDeclAlign(Expr *E, CharUnits TypeAlign, ASTContext &Context)
A helper function to get the alignment of a Decl referred to by DeclRefExpr or MemberExpr.
bool isPointerType() const
Definition: Type.h:5712