Bug Summary

File: tools/clang/lib/CodeGen/CGObjC.cpp
Warning: line 2901, column 3 — undefined or garbage value returned to caller

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGObjC.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -relaxed-aliasing -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D CLANG_VENDOR="Debian " -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-8~svn350071/tools/clang/include -I /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn350071/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem 
/usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/tools/clang/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-8~svn350071=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fobjc-runtime=gcc -fno-common -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-12-27-042839-1215-1 -x c++ /build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp -faddrsig
1//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This contains code to emit Objective-C code as LLVM code.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CGDebugInfo.h"
15#include "CGObjCRuntime.h"
16#include "CodeGenFunction.h"
17#include "CodeGenModule.h"
18#include "TargetInfo.h"
19#include "clang/AST/ASTContext.h"
20#include "clang/AST/DeclObjC.h"
21#include "clang/AST/StmtObjC.h"
22#include "clang/Basic/Diagnostic.h"
23#include "clang/CodeGen/CGFunctionInfo.h"
24#include "llvm/ADT/STLExtras.h"
25#include "llvm/IR/CallSite.h"
26#include "llvm/IR/DataLayout.h"
27#include "llvm/IR/InlineAsm.h"
28using namespace clang;
29using namespace CodeGen;
30
// Forward declarations for helpers defined later in this file.  The bool in
// TryEmitResult records that the emitted value was already retained (its use
// in EmitObjCMessageExpr clears retainSelf when getInt() is true).
31typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
32static TryEmitResult
33tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
34static RValue AdjustObjCObjectType(CodeGenFunction &CGF,
35 QualType ET,
36 RValue Result);
37
38/// Given the address of a variable of pointer type, find the correct
39/// null to store into it.
40static llvm::Constant *getNullForVariable(Address addr) {
41 llvm::Type *type = addr.getElementType();
42 return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
43}
44
45/// Emits an instance of NSConstantString representing the object.
46llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
47{
48 llvm::Constant *C =
49 CGM.getObjCRuntime().GenerateConstantString(E->getString()).getPointer();
50 // FIXME: This bitcast should just be made an invariant on the Runtime.
51 return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
52}
53
54/// EmitObjCBoxedExpr - This routine generates code to call
55/// the appropriate expression boxing method. This will either be
56/// one of +[NSNumber numberWith<Type>:], or +[NSString stringWithUTF8String:],
57/// or [NSValue valueWithBytes:objCType:].
58///
59llvm::Value *
60CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
61 // Generate the correct selector for this literal's concrete type.
62 // Get the method.
63 const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
64 const Expr *SubExpr = E->getSubExpr();
65 assert(BoxingMethod && "BoxingMethod is null")((BoxingMethod && "BoxingMethod is null") ? static_cast
<void> (0) : __assert_fail ("BoxingMethod && \"BoxingMethod is null\""
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 65, __PRETTY_FUNCTION__))
;
66 assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method")((BoxingMethod->isClassMethod() && "BoxingMethod must be a class method"
) ? static_cast<void> (0) : __assert_fail ("BoxingMethod->isClassMethod() && \"BoxingMethod must be a class method\""
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 66, __PRETTY_FUNCTION__))
;
67 Selector Sel = BoxingMethod->getSelector();
68
69 // Generate a reference to the class pointer, which will be the receiver.
70 // Assumes that the method was introduced in the class that should be
71 // messaged (avoids pulling it out of the result type).
72 CGObjCRuntime &Runtime = CGM.getObjCRuntime();
73 const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
74 llvm::Value *Receiver = Runtime.GetClass(*this, ClassDecl);
75
76 CallArgList Args;
77 const ParmVarDecl *ArgDecl = *BoxingMethod->param_begin();
78 QualType ArgQT = ArgDecl->getType().getUnqualifiedType();
79
80 // ObjCBoxedExpr supports boxing of structs and unions
81 // via [NSValue valueWithBytes:objCType:]
82 const QualType ValueType(SubExpr->getType().getCanonicalType());
83 if (ValueType->isObjCBoxableRecordType()) {
84 // Emit CodeGen for first parameter
85 // and cast value to correct type
86 Address Temporary = CreateMemTemp(SubExpr->getType());
87 EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
88 Address BitCast = Builder.CreateBitCast(Temporary, ConvertType(ArgQT));
89 Args.add(RValue::get(BitCast.getPointer()), ArgQT);
90
91 // Create char array to store type encoding
92 std::string Str;
93 getContext().getObjCEncodingForType(ValueType, Str);
94 llvm::Constant *GV = CGM.GetAddrOfConstantCString(Str).getPointer();
95
96 // Cast type encoding to correct type
97 const ParmVarDecl *EncodingDecl = BoxingMethod->parameters()[1];
98 QualType EncodingQT = EncodingDecl->getType().getUnqualifiedType();
99 llvm::Value *Cast = Builder.CreateBitCast(GV, ConvertType(EncodingQT));
100
101 Args.add(RValue::get(Cast), EncodingQT);
102 } else {
103 Args.add(EmitAnyExpr(SubExpr), ArgQT);
104 }
105
106 RValue result = Runtime.GenerateMessageSend(
107 *this, ReturnValueSlot(), BoxingMethod->getReturnType(), Sel, Receiver,
108 Args, ClassDecl, BoxingMethod);
109 return Builder.CreateBitCast(result.getScalarVal(),
110 ConvertType(E->getType()));
111}
112
113llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
114 const ObjCMethodDecl *MethodWithObjects) {
115 ASTContext &Context = CGM.getContext();
116 const ObjCDictionaryLiteral *DLE = nullptr;
117 const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
118 if (!ALE)
119 DLE = cast<ObjCDictionaryLiteral>(E);
120
121 // Optimize empty collections by referencing constants, when available.
122 uint64_t NumElements =
123 ALE ? ALE->getNumElements() : DLE->getNumElements();
124 if (NumElements == 0 && CGM.getLangOpts().ObjCRuntime.hasEmptyCollections()) {
125 StringRef ConstantName = ALE ? "__NSArray0__" : "__NSDictionary0__";
126 QualType IdTy(CGM.getContext().getObjCIdType());
127 llvm::Constant *Constant =
128 CGM.CreateRuntimeVariable(ConvertType(IdTy), ConstantName);
129 LValue LV = MakeNaturalAlignAddrLValue(Constant, IdTy);
130 llvm::Value *Ptr = EmitLoadOfScalar(LV, E->getBeginLoc());
131 cast<llvm::LoadInst>(Ptr)->setMetadata(
132 CGM.getModule().getMDKindID("invariant.load"),
133 llvm::MDNode::get(getLLVMContext(), None));
134 return Builder.CreateBitCast(Ptr, ConvertType(E->getType()));
135 }
136
137 // Compute the type of the array we're initializing.
138 llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
139 NumElements);
140 QualType ElementType = Context.getObjCIdType().withConst();
141 QualType ElementArrayType
142 = Context.getConstantArrayType(ElementType, APNumElements,
143 ArrayType::Normal, /*IndexTypeQuals=*/0);
144
145 // Allocate the temporary array(s).
146 Address Objects = CreateMemTemp(ElementArrayType, "objects");
147 Address Keys = Address::invalid();
148 if (DLE)
149 Keys = CreateMemTemp(ElementArrayType, "keys");
150
151 // In ARC, we may need to do extra work to keep all the keys and
152 // values alive until after the call.
153 SmallVector<llvm::Value *, 16> NeededObjects;
154 bool TrackNeededObjects =
155 (getLangOpts().ObjCAutoRefCount &&
156 CGM.getCodeGenOpts().OptimizationLevel != 0);
157
158 // Perform the actual initialialization of the array(s).
159 for (uint64_t i = 0; i < NumElements; i++) {
160 if (ALE) {
161 // Emit the element and store it to the appropriate array slot.
162 const Expr *Rhs = ALE->getElement(i);
163 LValue LV = MakeAddrLValue(
164 Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
165 ElementType, AlignmentSource::Decl);
166
167 llvm::Value *value = EmitScalarExpr(Rhs);
168 EmitStoreThroughLValue(RValue::get(value), LV, true);
169 if (TrackNeededObjects) {
170 NeededObjects.push_back(value);
171 }
172 } else {
173 // Emit the key and store it to the appropriate array slot.
174 const Expr *Key = DLE->getKeyValueElement(i).Key;
175 LValue KeyLV = MakeAddrLValue(
176 Builder.CreateConstArrayGEP(Keys, i, getPointerSize()),
177 ElementType, AlignmentSource::Decl);
178 llvm::Value *keyValue = EmitScalarExpr(Key);
179 EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true);
180
181 // Emit the value and store it to the appropriate array slot.
182 const Expr *Value = DLE->getKeyValueElement(i).Value;
183 LValue ValueLV = MakeAddrLValue(
184 Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
185 ElementType, AlignmentSource::Decl);
186 llvm::Value *valueValue = EmitScalarExpr(Value);
187 EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true);
188 if (TrackNeededObjects) {
189 NeededObjects.push_back(keyValue);
190 NeededObjects.push_back(valueValue);
191 }
192 }
193 }
194
195 // Generate the argument list.
196 CallArgList Args;
197 ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
198 const ParmVarDecl *argDecl = *PI++;
199 QualType ArgQT = argDecl->getType().getUnqualifiedType();
200 Args.add(RValue::get(Objects.getPointer()), ArgQT);
201 if (DLE) {
202 argDecl = *PI++;
203 ArgQT = argDecl->getType().getUnqualifiedType();
204 Args.add(RValue::get(Keys.getPointer()), ArgQT);
205 }
206 argDecl = *PI;
207 ArgQT = argDecl->getType().getUnqualifiedType();
208 llvm::Value *Count =
209 llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements);
210 Args.add(RValue::get(Count), ArgQT);
211
212 // Generate a reference to the class pointer, which will be the receiver.
213 Selector Sel = MethodWithObjects->getSelector();
214 QualType ResultType = E->getType();
215 const ObjCObjectPointerType *InterfacePointerType
216 = ResultType->getAsObjCInterfacePointerType();
217 ObjCInterfaceDecl *Class
218 = InterfacePointerType->getObjectType()->getInterface();
219 CGObjCRuntime &Runtime = CGM.getObjCRuntime();
220 llvm::Value *Receiver = Runtime.GetClass(*this, Class);
221
222 // Generate the message send.
223 RValue result = Runtime.GenerateMessageSend(
224 *this, ReturnValueSlot(), MethodWithObjects->getReturnType(), Sel,
225 Receiver, Args, Class, MethodWithObjects);
226
227 // The above message send needs these objects, but in ARC they are
228 // passed in a buffer that is essentially __unsafe_unretained.
229 // Therefore we must prevent the optimizer from releasing them until
230 // after the call.
231 if (TrackNeededObjects) {
232 EmitARCIntrinsicUse(NeededObjects);
233 }
234
235 return Builder.CreateBitCast(result.getScalarVal(),
236 ConvertType(E->getType()));
237}
238
239llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
240 return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod());
241}
242
243llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
244 const ObjCDictionaryLiteral *E) {
245 return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod());
246}
247
248/// Emit a selector.
249llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
250 // Untyped selector.
251 // Note that this implementation allows for non-constant strings to be passed
252 // as arguments to @selector(). Currently, the only thing preventing this
253 // behaviour is the type checking in the front end.
254 return CGM.getObjCRuntime().GetSelector(*this, E->getSelector());
255}
256
257llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
258 // FIXME: This should pass the Decl not the name.
259 return CGM.getObjCRuntime().GenerateProtocolRef(*this, E->getProtocol());
260}
261
262/// Adjust the type of an Objective-C object that doesn't match up due
263/// to type erasure at various points, e.g., related result types or the use
264/// of parameterized classes.
265static RValue AdjustObjCObjectType(CodeGenFunction &CGF, QualType ExpT,
266 RValue Result) {
267 if (!ExpT->isObjCRetainableType())
268 return Result;
269
270 // If the converted types are the same, we're done.
271 llvm::Type *ExpLLVMTy = CGF.ConvertType(ExpT);
272 if (ExpLLVMTy == Result.getScalarVal()->getType())
273 return Result;
274
275 // We have applied a substitution. Cast the rvalue appropriately.
276 return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
277 ExpLLVMTy));
278}
279
280/// Decide whether to extend the lifetime of the receiver of a
281/// returns-inner-pointer message.
282static bool
283shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
284 switch (message->getReceiverKind()) {
285
286 // For a normal instance message, we should extend unless the
287 // receiver is loaded from a variable with precise lifetime.
288 case ObjCMessageExpr::Instance: {
289 const Expr *receiver = message->getInstanceReceiver();
290
291 // Look through OVEs.
292 if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
293 if (opaque->getSourceExpr())
294 receiver = opaque->getSourceExpr()->IgnoreParens();
295 }
296
297 const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
298 if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
299 receiver = ice->getSubExpr()->IgnoreParens();
300
301 // Look through OVEs.
302 if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
303 if (opaque->getSourceExpr())
304 receiver = opaque->getSourceExpr()->IgnoreParens();
305 }
306
307 // Only __strong variables.
308 if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
309 return true;
310
311 // All ivars and fields have precise lifetime.
312 if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver))
313 return false;
314
315 // Otherwise, check for variables.
316 const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr());
317 if (!declRef) return true;
318 const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
319 if (!var) return true;
320
321 // All variables have precise lifetime except local variables with
322 // automatic storage duration that aren't specially marked.
323 return (var->hasLocalStorage() &&
324 !var->hasAttr<ObjCPreciseLifetimeAttr>());
325 }
326
327 case ObjCMessageExpr::Class:
328 case ObjCMessageExpr::SuperClass:
329 // It's never necessary for class objects.
330 return false;
331
332 case ObjCMessageExpr::SuperInstance:
333 // We generally assume that 'self' lives throughout a method call.
334 return false;
335 }
336
337 llvm_unreachable("invalid receiver kind")::llvm::llvm_unreachable_internal("invalid receiver kind", "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 337)
;
338}
339
340/// Given an expression of ObjC pointer type, check whether it was
341/// immediately loaded from an ARC __weak l-value.
342static const Expr *findWeakLValue(const Expr *E) {
343 assert(E->getType()->isObjCRetainableType())((E->getType()->isObjCRetainableType()) ? static_cast<
void> (0) : __assert_fail ("E->getType()->isObjCRetainableType()"
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 343, __PRETTY_FUNCTION__))
;
344 E = E->IgnoreParens();
345 if (auto CE = dyn_cast<CastExpr>(E)) {
346 if (CE->getCastKind() == CK_LValueToRValue) {
347 if (CE->getSubExpr()->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
348 return CE->getSubExpr();
349 }
350 }
351
352 return nullptr;
353}
354
355/// The ObjC runtime may provide entrypoints that are likely to be faster
356/// than an ordinary message send of the appropriate selector.
357///
358/// The entrypoints are guaranteed to be equivalent to just sending the
359/// corresponding message. If the entrypoint is implemented naively as just a
360/// message send, using it is a trade-off: it sacrifices a few cycles of
361/// overhead to save a small amount of code. However, it's possible for
362/// runtimes to detect and special-case classes that use "standard"
363/// behavior; if that's dynamically a large proportion of all objects, using
364/// the entrypoint will also be faster than using a message send.
365///
366/// If the runtime does support a required entrypoint, then this method will
367/// generate a call and return the resulting value. Otherwise it will return
368/// None and the caller can generate a msgSend instead.
369static Optional<llvm::Value *>
370tryGenerateSpecializedMessageSend(CodeGenFunction &CGF, QualType ResultType,
371 llvm::Value *Receiver,
372 const CallArgList& Args, Selector Sel,
373 const ObjCMethodDecl *method) {
374 auto &CGM = CGF.CGM;
375 if (!CGM.getCodeGenOpts().ObjCConvertMessagesToRuntimeCalls)
376 return None;
377
378 auto &Runtime = CGM.getLangOpts().ObjCRuntime;
379 switch (Sel.getMethodFamily()) {
380 case OMF_alloc:
381 if (Runtime.shouldUseRuntimeFunctionsForAlloc() &&
382 ResultType->isObjCObjectPointerType()) {
383 // [Foo alloc] -> objc_alloc(Foo)
384 if (Sel.isUnarySelector() && Sel.getNameForSlot(0) == "alloc")
385 return CGF.EmitObjCAlloc(Receiver, CGF.ConvertType(ResultType));
386 // [Foo allocWithZone:nil] -> objc_allocWithZone(Foo)
387 if (Sel.isKeywordSelector() && Sel.getNumArgs() == 1 &&
388 Args.size() == 1 && Args.front().getType()->isPointerType() &&
389 Sel.getNameForSlot(0) == "allocWithZone") {
390 const llvm::Value* arg = Args.front().getKnownRValue().getScalarVal();
391 if (isa<llvm::ConstantPointerNull>(arg))
392 return CGF.EmitObjCAllocWithZone(Receiver,
393 CGF.ConvertType(ResultType));
394 return None;
395 }
396 }
397 break;
398
399 case OMF_autorelease:
400 if (ResultType->isObjCObjectPointerType() &&
401 CGM.getLangOpts().getGC() == LangOptions::NonGC &&
402 Runtime.shouldUseARCFunctionsForRetainRelease())
403 return CGF.EmitObjCAutorelease(Receiver, CGF.ConvertType(ResultType));
404 break;
405
406 case OMF_retain:
407 if (ResultType->isObjCObjectPointerType() &&
408 CGM.getLangOpts().getGC() == LangOptions::NonGC &&
409 Runtime.shouldUseARCFunctionsForRetainRelease())
410 return CGF.EmitObjCRetainNonBlock(Receiver, CGF.ConvertType(ResultType));
411 break;
412
413 case OMF_release:
414 if (ResultType->isVoidType() &&
415 CGM.getLangOpts().getGC() == LangOptions::NonGC &&
416 Runtime.shouldUseARCFunctionsForRetainRelease()) {
417 CGF.EmitObjCRelease(Receiver, ARCPreciseLifetime);
418 return nullptr;
419 }
420 break;
421
422 default:
423 break;
424 }
425 return None;
426}
427
428RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
429 ReturnValueSlot Return) {
430 // Only the lookup mechanism and first two arguments of the method
431 // implementation vary between runtimes. We can get the receiver and
432 // arguments in generic code.
433
434 bool isDelegateInit = E->isDelegateInitCall();
435
436 const ObjCMethodDecl *method = E->getMethodDecl();
437
438 // If the method is -retain, and the receiver's being loaded from
439 // a __weak variable, peephole the entire operation to objc_loadWeakRetained.
440 if (method && E->getReceiverKind() == ObjCMessageExpr::Instance &&
441 method->getMethodFamily() == OMF_retain) {
442 if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) {
443 LValue lvalue = EmitLValue(lvalueExpr);
444 llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress());
445 return AdjustObjCObjectType(*this, E->getType(), RValue::get(result));
446 }
447 }
448
449 // We don't retain the receiver in delegate init calls, and this is
450 // safe because the receiver value is always loaded from 'self',
451 // which we zero out. We don't want to Block_copy block receivers,
452 // though.
453 bool retainSelf =
454 (!isDelegateInit &&
455 CGM.getLangOpts().ObjCAutoRefCount &&
456 method &&
457 method->hasAttr<NSConsumesSelfAttr>());
458
459 CGObjCRuntime &Runtime = CGM.getObjCRuntime();
460 bool isSuperMessage = false;
461 bool isClassMessage = false;
462 ObjCInterfaceDecl *OID = nullptr;
463 // Find the receiver
464 QualType ReceiverType;
465 llvm::Value *Receiver = nullptr;
466 switch (E->getReceiverKind()) {
467 case ObjCMessageExpr::Instance:
468 ReceiverType = E->getInstanceReceiver()->getType();
469 if (retainSelf) {
470 TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
471 E->getInstanceReceiver());
472 Receiver = ter.getPointer();
473 if (ter.getInt()) retainSelf = false;
474 } else
475 Receiver = EmitScalarExpr(E->getInstanceReceiver());
476 break;
477
478 case ObjCMessageExpr::Class: {
479 ReceiverType = E->getClassReceiver();
480 const ObjCObjectType *ObjTy = ReceiverType->getAs<ObjCObjectType>();
481 assert(ObjTy && "Invalid Objective-C class message send")((ObjTy && "Invalid Objective-C class message send") ?
static_cast<void> (0) : __assert_fail ("ObjTy && \"Invalid Objective-C class message send\""
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 481, __PRETTY_FUNCTION__))
;
482 OID = ObjTy->getInterface();
483 assert(OID && "Invalid Objective-C class message send")((OID && "Invalid Objective-C class message send") ? static_cast
<void> (0) : __assert_fail ("OID && \"Invalid Objective-C class message send\""
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 483, __PRETTY_FUNCTION__))
;
484 Receiver = Runtime.GetClass(*this, OID);
485 isClassMessage = true;
486 break;
487 }
488
489 case ObjCMessageExpr::SuperInstance:
490 ReceiverType = E->getSuperType();
491 Receiver = LoadObjCSelf();
492 isSuperMessage = true;
493 break;
494
495 case ObjCMessageExpr::SuperClass:
496 ReceiverType = E->getSuperType();
497 Receiver = LoadObjCSelf();
498 isSuperMessage = true;
499 isClassMessage = true;
500 break;
501 }
502
503 if (retainSelf)
504 Receiver = EmitARCRetainNonBlock(Receiver);
505
506 // In ARC, we sometimes want to "extend the lifetime"
507 // (i.e. retain+autorelease) of receivers of returns-inner-pointer
508 // messages.
509 if (getLangOpts().ObjCAutoRefCount && method &&
510 method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
511 shouldExtendReceiverForInnerPointerMessage(E))
512 Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);
513
514 QualType ResultType = method ? method->getReturnType() : E->getType();
515
516 CallArgList Args;
517 EmitCallArgs(Args, method, E->arguments(), /*AC*/AbstractCallee(method));
518
519 // For delegate init calls in ARC, do an unsafe store of null into
520 // self. This represents the call taking direct ownership of that
521 // value. We have to do this after emitting the other call
522 // arguments because they might also reference self, but we don't
523 // have to worry about any of them modifying self because that would
524 // be an undefined read and write of an object in unordered
525 // expressions.
526 if (isDelegateInit) {
527 assert(getLangOpts().ObjCAutoRefCount &&((getLangOpts().ObjCAutoRefCount && "delegate init calls should only be marked in ARC"
) ? static_cast<void> (0) : __assert_fail ("getLangOpts().ObjCAutoRefCount && \"delegate init calls should only be marked in ARC\""
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 528, __PRETTY_FUNCTION__))
528 "delegate init calls should only be marked in ARC")((getLangOpts().ObjCAutoRefCount && "delegate init calls should only be marked in ARC"
) ? static_cast<void> (0) : __assert_fail ("getLangOpts().ObjCAutoRefCount && \"delegate init calls should only be marked in ARC\""
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 528, __PRETTY_FUNCTION__))
;
529
530 // Do an unsafe store of null into self.
531 Address selfAddr =
532 GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
533 Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
534 }
535
536 RValue result;
537 if (isSuperMessage) {
538 // super is only valid in an Objective-C method
539 const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
540 bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
541 result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
542 E->getSelector(),
543 OMD->getClassInterface(),
544 isCategoryImpl,
545 Receiver,
546 isClassMessage,
547 Args,
548 method);
549 } else {
550 // Call runtime methods directly if we can.
551 if (Optional<llvm::Value *> SpecializedResult =
552 tryGenerateSpecializedMessageSend(*this, ResultType, Receiver, Args,
553 E->getSelector(), method)) {
554 result = RValue::get(SpecializedResult.getValue());
555 } else {
556 result = Runtime.GenerateMessageSend(*this, Return, ResultType,
557 E->getSelector(), Receiver, Args,
558 OID, method);
559 }
560 }
561
562 // For delegate init calls in ARC, implicitly store the result of
563 // the call back into self. This takes ownership of the value.
564 if (isDelegateInit) {
565 Address selfAddr =
566 GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
567 llvm::Value *newSelf = result.getScalarVal();
568
569 // The delegate return type isn't necessarily a matching type; in
570 // fact, it's quite likely to be 'id'.
571 llvm::Type *selfTy = selfAddr.getElementType();
572 newSelf = Builder.CreateBitCast(newSelf, selfTy);
573
574 Builder.CreateStore(newSelf, selfAddr);
575 }
576
577 return AdjustObjCObjectType(*this, E->getType(), result);
578}
579
580namespace {
581struct FinishARCDealloc final : EHScopeStack::Cleanup {
582 void Emit(CodeGenFunction &CGF, Flags flags) override {
583 const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);
584
585 const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
586 const ObjCInterfaceDecl *iface = impl->getClassInterface();
587 if (!iface->getSuperClass()) return;
588
589 bool isCategory = isa<ObjCCategoryImplDecl>(impl);
590
591 // Call [super dealloc] if we have a superclass.
592 llvm::Value *self = CGF.LoadObjCSelf();
593
594 CallArgList args;
595 CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
596 CGF.getContext().VoidTy,
597 method->getSelector(),
598 iface,
599 isCategory,
600 self,
601 /*is class msg*/ false,
602 args,
603 method);
604 }
605};
606}
607
608/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
609/// the LLVM function and sets the other context used by
610/// CodeGenFunction.
611void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
612 const ObjCContainerDecl *CD) {
613 SourceLocation StartLoc = OMD->getBeginLoc();
614 FunctionArgList args;
615 // Check if we should generate debug info for this method.
616 if (OMD->hasAttr<NoDebugAttr>())
617 DebugInfo = nullptr; // disable debug info indefinitely for this function
618
619 llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);
620
621 const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
622 CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
623
624 args.push_back(OMD->getSelfDecl());
625 args.push_back(OMD->getCmdDecl());
626
627 args.append(OMD->param_begin(), OMD->param_end());
628
629 CurGD = OMD;
630 CurEHLocation = OMD->getEndLoc();
631
632 StartFunction(OMD, OMD->getReturnType(), Fn, FI, args,
633 OMD->getLocation(), StartLoc);
634
635 // In ARC, certain methods get an extra cleanup.
636 if (CGM.getLangOpts().ObjCAutoRefCount &&
637 OMD->isInstanceMethod() &&
638 OMD->getSelector().isUnarySelector()) {
639 const IdentifierInfo *ident =
640 OMD->getSelector().getIdentifierInfoForSlot(0);
641 if (ident->isStr("dealloc"))
642 EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
643 }
644}
645
// Forward declaration: defined later in this file.  Presumably loads the
// scalar at `lvalue` and retains it under ARC — confirm at its definition.
646static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
647 LValue lvalue, QualType type);
648
649/// Generate an Objective-C method. An Objective-C method is a C function with
650/// its pointer, name, and types registered in the class structure.
651void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
652 StartObjCMethod(OMD, OMD->getClassInterface());
653 PGO.assignRegionCounters(GlobalDecl(OMD), CurFn);
654 assert(isa<CompoundStmt>(OMD->getBody()))((isa<CompoundStmt>(OMD->getBody())) ? static_cast<
void> (0) : __assert_fail ("isa<CompoundStmt>(OMD->getBody())"
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 654, __PRETTY_FUNCTION__))
;
655 incrementProfileCounter(OMD->getBody());
656 EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody()));
657 FinishFunction(OMD->getBodyRBrace());
658}
659
660/// emitStructGetterCall - Call the runtime function to load a property
661/// into the return value slot.
662static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
663 bool isAtomic, bool hasStrong) {
664 ASTContext &Context = CGF.getContext();
665
666 Address src =
667 CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
668 .getAddress();
669
670 // objc_copyStruct (ReturnValue, &structIvar,
671 // sizeof (Type of Ivar), isAtomic, false);
672 CallArgList args;
673
674 Address dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
675 args.add(RValue::get(dest.getPointer()), Context.VoidPtrTy);
676
677 src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
678 args.add(RValue::get(src.getPointer()), Context.VoidPtrTy);
679
680 CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
681 args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
682 args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
683 args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);
684
685 llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
686 CGCallee callee = CGCallee::forDirect(fn);
687 CGF.EmitCall(CGF.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, args),
688 callee, ReturnValueSlot(), args);
689}
690
691/// Determine whether the given architecture supports unaligned atomic
692/// accesses. They don't have to be fast, just faster than a function
693/// call and a mutex.
694static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
695 // FIXME: Allow unaligned atomic load/store on x86. (It is not
696 // currently supported by the backend.)
697 return 0;
698}
699
700/// Return the maximum size that permits atomic accesses for the given
701/// architecture.
702static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
703 llvm::Triple::ArchType arch) {
704 // ARM has 8-byte atomic accesses, but it's not clear whether we
705 // want to rely on them here.
706
707 // In the default case, just assume that any size up to a pointer is
708 // fine given adequate alignment.
709 return CharUnits::fromQuantity(CGM.PointerSizeInBytes);
710}
711
712namespace {
713 class PropertyImplStrategy {
714 public:
715 enum StrategyKind {
716 /// The 'native' strategy is to use the architecture's provided
717 /// reads and writes.
718 Native,
719
720 /// Use objc_setProperty and objc_getProperty.
721 GetSetProperty,
722
723 /// Use objc_setProperty for the setter, but use expression
724 /// evaluation for the getter.
725 SetPropertyAndExpressionGet,
726
727 /// Use objc_copyStruct.
728 CopyStruct,
729
730 /// The 'expression' strategy is to emit normal assignment or
731 /// lvalue-to-rvalue expressions.
732 Expression
733 };
734
735 StrategyKind getKind() const { return StrategyKind(Kind); }
736
737 bool hasStrongMember() const { return HasStrong; }
738 bool isAtomic() const { return IsAtomic; }
739 bool isCopy() const { return IsCopy; }
740
741 CharUnits getIvarSize() const { return IvarSize; }
742 CharUnits getIvarAlignment() const { return IvarAlignment; }
743
744 PropertyImplStrategy(CodeGenModule &CGM,
745 const ObjCPropertyImplDecl *propImpl);
746
747 private:
748 unsigned Kind : 8;
749 unsigned IsAtomic : 1;
750 unsigned IsCopy : 1;
751 unsigned HasStrong : 1;
752
753 CharUnits IvarSize;
754 CharUnits IvarAlignment;
755 };
756}
757
758/// Pick an implementation strategy for the given property synthesis.
759PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
760 const ObjCPropertyImplDecl *propImpl) {
761 const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
762 ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();
763
764 IsCopy = (setterKind == ObjCPropertyDecl::Copy);
765 IsAtomic = prop->isAtomic();
766 HasStrong = false; // doesn't matter here.
767
768 // Evaluate the ivar's size and alignment.
769 ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
770 QualType ivarType = ivar->getType();
771 std::tie(IvarSize, IvarAlignment) =
772 CGM.getContext().getTypeInfoInChars(ivarType);
773
774 // If we have a copy property, we always have to use getProperty/setProperty.
775 // TODO: we could actually use setProperty and an expression for non-atomics.
776 if (IsCopy) {
777 Kind = GetSetProperty;
778 return;
779 }
780
781 // Handle retain.
782 if (setterKind == ObjCPropertyDecl::Retain) {
783 // In GC-only, there's nothing special that needs to be done.
784 if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
785 // fallthrough
786
787 // In ARC, if the property is non-atomic, use expression emission,
788 // which translates to objc_storeStrong. This isn't required, but
789 // it's slightly nicer.
790 } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
791 // Using standard expression emission for the setter is only
792 // acceptable if the ivar is __strong, which won't be true if
793 // the property is annotated with __attribute__((NSObject)).
794 // TODO: falling all the way back to objc_setProperty here is
795 // just laziness, though; we could still use objc_storeStrong
796 // if we hacked it right.
797 if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong)
798 Kind = Expression;
799 else
800 Kind = SetPropertyAndExpressionGet;
801 return;
802
803 // Otherwise, we need to at least use setProperty. However, if
804 // the property isn't atomic, we can use normal expression
805 // emission for the getter.
806 } else if (!IsAtomic) {
807 Kind = SetPropertyAndExpressionGet;
808 return;
809
810 // Otherwise, we have to use both setProperty and getProperty.
811 } else {
812 Kind = GetSetProperty;
813 return;
814 }
815 }
816
817 // If we're not atomic, just use expression accesses.
818 if (!IsAtomic) {
819 Kind = Expression;
820 return;
821 }
822
823 // Properties on bitfield ivars need to be emitted using expression
824 // accesses even if they're nominally atomic.
825 if (ivar->isBitField()) {
826 Kind = Expression;
827 return;
828 }
829
830 // GC-qualified or ARC-qualified ivars need to be emitted as
831 // expressions. This actually works out to being atomic anyway,
832 // except for ARC __strong, but that should trigger the above code.
833 if (ivarType.hasNonTrivialObjCLifetime() ||
834 (CGM.getLangOpts().getGC() &&
835 CGM.getContext().getObjCGCAttrKind(ivarType))) {
836 Kind = Expression;
837 return;
838 }
839
840 // Compute whether the ivar has strong members.
841 if (CGM.getLangOpts().getGC())
842 if (const RecordType *recordType = ivarType->getAs<RecordType>())
843 HasStrong = recordType->getDecl()->hasObjectMember();
844
845 // We can never access structs with object members with a native
846 // access, because we need to use write barriers. This is what
847 // objc_copyStruct is for.
848 if (HasStrong) {
849 Kind = CopyStruct;
850 return;
851 }
852
853 // Otherwise, this is target-dependent and based on the size and
854 // alignment of the ivar.
855
856 // If the size of the ivar is not a power of two, give up. We don't
857 // want to get into the business of doing compare-and-swaps.
858 if (!IvarSize.isPowerOfTwo()) {
859 Kind = CopyStruct;
860 return;
861 }
862
863 llvm::Triple::ArchType arch =
864 CGM.getTarget().getTriple().getArch();
865
866 // Most architectures require memory to fit within a single cache
867 // line, so the alignment has to be at least the size of the access.
868 // Otherwise we have to grab a lock.
869 if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
870 Kind = CopyStruct;
871 return;
872 }
873
874 // If the ivar's size exceeds the architecture's maximum atomic
875 // access size, we have to use CopyStruct.
876 if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
877 Kind = CopyStruct;
878 return;
879 }
880
881 // Otherwise, we can use native loads and stores.
882 Kind = Native;
883}
884
885/// Generate an Objective-C property getter function.
886///
887/// The given Decl must be an ObjCImplementationDecl. \@synthesize
888/// is illegal within a category.
889void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
890 const ObjCPropertyImplDecl *PID) {
891 llvm::Constant *AtomicHelperFn =
892 CodeGenFunction(CGM).GenerateObjCAtomicGetterCopyHelperFunction(PID);
893 const ObjCPropertyDecl *PD = PID->getPropertyDecl();
894 ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
895 assert(OMD && "Invalid call to generate getter (empty method)")((OMD && "Invalid call to generate getter (empty method)"
) ? static_cast<void> (0) : __assert_fail ("OMD && \"Invalid call to generate getter (empty method)\""
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 895, __PRETTY_FUNCTION__))
;
896 StartObjCMethod(OMD, IMP->getClassInterface());
897
898 generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn);
899
900 FinishFunction();
901}
902
903static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
904 const Expr *getter = propImpl->getGetterCXXConstructor();
905 if (!getter) return true;
906
907 // Sema only makes only of these when the ivar has a C++ class type,
908 // so the form is pretty constrained.
909
910 // If the property has a reference type, we might just be binding a
911 // reference, in which case the result will be a gl-value. We should
912 // treat this as a non-trivial operation.
913 if (getter->isGLValue())
914 return false;
915
916 // If we selected a trivial copy-constructor, we're okay.
917 if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter))
918 return (construct->getConstructor()->isTrivial());
919
920 // The constructor might require cleanups (in which case it's never
921 // trivial).
922 assert(isa<ExprWithCleanups>(getter))((isa<ExprWithCleanups>(getter)) ? static_cast<void>
(0) : __assert_fail ("isa<ExprWithCleanups>(getter)", "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 922, __PRETTY_FUNCTION__))
;
923 return false;
924}
925
926/// emitCPPObjectAtomicGetterCall - Call the runtime function to
927/// copy the ivar into the resturn slot.
928static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
929 llvm::Value *returnAddr,
930 ObjCIvarDecl *ivar,
931 llvm::Constant *AtomicHelperFn) {
932 // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
933 // AtomicHelperFn);
934 CallArgList args;
935
936 // The 1st argument is the return Slot.
937 args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);
938
939 // The 2nd argument is the address of the ivar.
940 llvm::Value *ivarAddr =
941 CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
942 CGF.LoadObjCSelf(), ivar, 0).getPointer();
943 ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
944 args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
945
946 // Third argument is the helper function.
947 args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
948
949 llvm::Constant *copyCppAtomicObjectFn =
950 CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
951 CGCallee callee = CGCallee::forDirect(copyCppAtomicObjectFn);
952 CGF.EmitCall(
953 CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
954 callee, ReturnValueSlot(), args);
955}
956
957void
958CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
959 const ObjCPropertyImplDecl *propImpl,
960 const ObjCMethodDecl *GetterMethodDecl,
961 llvm::Constant *AtomicHelperFn) {
962 // If there's a non-trivial 'get' expression, we just have to emit that.
963 if (!hasTrivialGetExpr(propImpl)) {
964 if (!AtomicHelperFn) {
965 auto *ret = ReturnStmt::Create(getContext(), SourceLocation(),
966 propImpl->getGetterCXXConstructor(),
967 /* NRVOCandidate=*/nullptr);
968 EmitReturnStmt(*ret);
969 }
970 else {
971 ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
972 emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(),
973 ivar, AtomicHelperFn);
974 }
975 return;
976 }
977
978 const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
979 QualType propType = prop->getType();
980 ObjCMethodDecl *getterMethod = prop->getGetterMethodDecl();
981
982 ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
983
984 // Pick an implementation strategy.
985 PropertyImplStrategy strategy(CGM, propImpl);
986 switch (strategy.getKind()) {
987 case PropertyImplStrategy::Native: {
988 // We don't need to do anything for a zero-size struct.
989 if (strategy.getIvarSize().isZero())
990 return;
991
992 LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
993
994 // Currently, all atomic accesses have to be through integer
995 // types, so there's no point in trying to pick a prettier type.
996 uint64_t ivarSize = getContext().toBits(strategy.getIvarSize());
997 llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize);
998 bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay
999
1000 // Perform an atomic load. This does not impose ordering constraints.
1001 Address ivarAddr = LV.getAddress();
1002 ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
1003 llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
1004 load->setAtomic(llvm::AtomicOrdering::Unordered);
1005
1006 // Store that value into the return address. Doing this with a
1007 // bitcast is likely to produce some pretty ugly IR, but it's not
1008 // the *most* terrible thing in the world.
1009 llvm::Type *retTy = ConvertType(getterMethod->getReturnType());
1010 uint64_t retTySize = CGM.getDataLayout().getTypeSizeInBits(retTy);
1011 llvm::Value *ivarVal = load;
1012 if (ivarSize > retTySize) {
1013 llvm::Type *newTy = llvm::Type::getIntNTy(getLLVMContext(), retTySize);
1014 ivarVal = Builder.CreateTrunc(load, newTy);
1015 bitcastType = newTy->getPointerTo();
1016 }
1017 Builder.CreateStore(ivarVal,
1018 Builder.CreateBitCast(ReturnValue, bitcastType));
1019
1020 // Make sure we don't do an autorelease.
1021 AutoreleaseResult = false;
1022 return;
1023 }
1024
1025 case PropertyImplStrategy::GetSetProperty: {
1026 llvm::Constant *getPropertyFn =
1027 CGM.getObjCRuntime().GetPropertyGetFunction();
1028 if (!getPropertyFn) {
1029 CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
1030 return;
1031 }
1032 CGCallee callee = CGCallee::forDirect(getPropertyFn);
1033
1034 // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
1035 // FIXME: Can't this be simpler? This might even be worse than the
1036 // corresponding gcc code.
1037 llvm::Value *cmd =
1038 Builder.CreateLoad(GetAddrOfLocalVar(getterMethod->getCmdDecl()), "cmd");
1039 llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
1040 llvm::Value *ivarOffset =
1041 EmitIvarOffset(classImpl->getClassInterface(), ivar);
1042
1043 CallArgList args;
1044 args.add(RValue::get(self), getContext().getObjCIdType());
1045 args.add(RValue::get(cmd), getContext().getObjCSelType());
1046 args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
1047 args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
1048 getContext().BoolTy);
1049
1050 // FIXME: We shouldn't need to get the function info here, the
1051 // runtime already should have computed it to build the function.
1052 llvm::Instruction *CallInstruction;
1053 RValue RV = EmitCall(
1054 getTypes().arrangeBuiltinFunctionCall(propType, args),
1055 callee, ReturnValueSlot(), args, &CallInstruction);
1056 if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
1057 call->setTailCall();
1058
1059 // We need to fix the type here. Ivars with copy & retain are
1060 // always objects so we don't need to worry about complex or
1061 // aggregates.
1062 RV = RValue::get(Builder.CreateBitCast(
1063 RV.getScalarVal(),
1064 getTypes().ConvertType(getterMethod->getReturnType())));
1065
1066 EmitReturnOfRValue(RV, propType);
1067
1068 // objc_getProperty does an autorelease, so we should suppress ours.
1069 AutoreleaseResult = false;
1070
1071 return;
1072 }
1073
1074 case PropertyImplStrategy::CopyStruct:
1075 emitStructGetterCall(*this, ivar, strategy.isAtomic(),
1076 strategy.hasStrongMember());
1077 return;
1078
1079 case PropertyImplStrategy::Expression:
1080 case PropertyImplStrategy::SetPropertyAndExpressionGet: {
1081 LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
1082
1083 QualType ivarType = ivar->getType();
1084 switch (getEvaluationKind(ivarType)) {
1085 case TEK_Complex: {
1086 ComplexPairTy pair = EmitLoadOfComplex(LV, SourceLocation());
1087 EmitStoreOfComplex(pair, MakeAddrLValue(ReturnValue, ivarType),
1088 /*init*/ true);
1089 return;
1090 }
1091 case TEK_Aggregate: {
1092 // The return value slot is guaranteed to not be aliased, but
1093 // that's not necessarily the same as "on the stack", so
1094 // we still potentially need objc_memmove_collectable.
1095 EmitAggregateCopy(/* Dest= */ MakeAddrLValue(ReturnValue, ivarType),
1096 /* Src= */ LV, ivarType, overlapForReturnValue());
1097 return;
1098 }
1099 case TEK_Scalar: {
1100 llvm::Value *value;
1101 if (propType->isReferenceType()) {
1102 value = LV.getAddress().getPointer();
1103 } else {
1104 // We want to load and autoreleaseReturnValue ARC __weak ivars.
1105 if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
1106 if (getLangOpts().ObjCAutoRefCount) {
1107 value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
1108 } else {
1109 value = EmitARCLoadWeak(LV.getAddress());
1110 }
1111
1112 // Otherwise we want to do a simple load, suppressing the
1113 // final autorelease.
1114 } else {
1115 value = EmitLoadOfLValue(LV, SourceLocation()).getScalarVal();
1116 AutoreleaseResult = false;
1117 }
1118
1119 value = Builder.CreateBitCast(
1120 value, ConvertType(GetterMethodDecl->getReturnType()));
1121 }
1122
1123 EmitReturnOfRValue(RValue::get(value), propType);
1124 return;
1125 }
1126 }
1127 llvm_unreachable("bad evaluation kind")::llvm::llvm_unreachable_internal("bad evaluation kind", "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 1127)
;
1128 }
1129
1130 }
1131 llvm_unreachable("bad @property implementation strategy!")::llvm::llvm_unreachable_internal("bad @property implementation strategy!"
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 1131)
;
1132}
1133
1134/// emitStructSetterCall - Call the runtime function to store the value
1135/// from the first formal parameter into the given ivar.
1136static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
1137 ObjCIvarDecl *ivar) {
1138 // objc_copyStruct (&structIvar, &Arg,
1139 // sizeof (struct something), true, false);
1140 CallArgList args;
1141
1142 // The first argument is the address of the ivar.
1143 llvm::Value *ivarAddr = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
1144 CGF.LoadObjCSelf(), ivar, 0)
1145 .getPointer();
1146 ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
1147 args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
1148
1149 // The second argument is the address of the parameter variable.
1150 ParmVarDecl *argVar = *OMD->param_begin();
1151 DeclRefExpr argRef(CGF.getContext(), argVar, false,
1152 argVar->getType().getNonReferenceType(), VK_LValue,
1153 SourceLocation());
1154 llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
1155 argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
1156 args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
1157
1158 // The third argument is the sizeof the type.
1159 llvm::Value *size =
1160 CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType()));
1161 args.add(RValue::get(size), CGF.getContext().getSizeType());
1162
1163 // The fourth argument is the 'isAtomic' flag.
1164 args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy);
1165
1166 // The fifth argument is the 'hasStrong' flag.
1167 // FIXME: should this really always be false?
1168 args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);
1169
1170 llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
1171 CGCallee callee = CGCallee::forDirect(fn);
1172 CGF.EmitCall(
1173 CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
1174 callee, ReturnValueSlot(), args);
1175}
1176
1177/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
1178/// the value from the first formal parameter into the given ivar, using
1179/// the Cpp API for atomic Cpp objects with non-trivial copy assignment.
1180static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
1181 ObjCMethodDecl *OMD,
1182 ObjCIvarDecl *ivar,
1183 llvm::Constant *AtomicHelperFn) {
1184 // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
1185 // AtomicHelperFn);
1186 CallArgList args;
1187
1188 // The first argument is the address of the ivar.
1189 llvm::Value *ivarAddr =
1190 CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
1191 CGF.LoadObjCSelf(), ivar, 0).getPointer();
1192 ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
1193 args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
1194
1195 // The second argument is the address of the parameter variable.
1196 ParmVarDecl *argVar = *OMD->param_begin();
1197 DeclRefExpr argRef(CGF.getContext(), argVar, false,
1198 argVar->getType().getNonReferenceType(), VK_LValue,
1199 SourceLocation());
1200 llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
1201 argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
1202 args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
1203
1204 // Third argument is the helper function.
1205 args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
1206
1207 llvm::Constant *fn =
1208 CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
1209 CGCallee callee = CGCallee::forDirect(fn);
1210 CGF.EmitCall(
1211 CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
1212 callee, ReturnValueSlot(), args);
1213}
1214
1215
1216static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
1217 Expr *setter = PID->getSetterCXXAssignment();
1218 if (!setter) return true;
1219
1220 // Sema only makes only of these when the ivar has a C++ class type,
1221 // so the form is pretty constrained.
1222
1223 // An operator call is trivial if the function it calls is trivial.
1224 // This also implies that there's nothing non-trivial going on with
1225 // the arguments, because operator= can only be trivial if it's a
1226 // synthesized assignment operator and therefore both parameters are
1227 // references.
1228 if (CallExpr *call = dyn_cast<CallExpr>(setter)) {
1229 if (const FunctionDecl *callee
1230 = dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl()))
1231 if (callee->isTrivial())
1232 return true;
1233 return false;
1234 }
1235
1236 assert(isa<ExprWithCleanups>(setter))((isa<ExprWithCleanups>(setter)) ? static_cast<void>
(0) : __assert_fail ("isa<ExprWithCleanups>(setter)", "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 1236, __PRETTY_FUNCTION__))
;
1237 return false;
1238}
1239
1240static bool UseOptimizedSetter(CodeGenModule &CGM) {
1241 if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
1242 return false;
1243 return CGM.getLangOpts().ObjCRuntime.hasOptimizedSetter();
1244}
1245
1246void
1247CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
1248 const ObjCPropertyImplDecl *propImpl,
1249 llvm::Constant *AtomicHelperFn) {
1250 const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
1251 ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
1252 ObjCMethodDecl *setterMethod = prop->getSetterMethodDecl();
1253
1254 // Just use the setter expression if Sema gave us one and it's
1255 // non-trivial.
1256 if (!hasTrivialSetExpr(propImpl)) {
1257 if (!AtomicHelperFn)
1258 // If non-atomic, assignment is called directly.
1259 EmitStmt(propImpl->getSetterCXXAssignment());
1260 else
1261 // If atomic, assignment is called via a locking api.
1262 emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar,
1263 AtomicHelperFn);
1264 return;
1265 }
1266
1267 PropertyImplStrategy strategy(CGM, propImpl);
1268 switch (strategy.getKind()) {
1269 case PropertyImplStrategy::Native: {
1270 // We don't need to do anything for a zero-size struct.
1271 if (strategy.getIvarSize().isZero())
1272 return;
1273
1274 Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
1275
1276 LValue ivarLValue =
1277 EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
1278 Address ivarAddr = ivarLValue.getAddress();
1279
1280 // Currently, all atomic accesses have to be through integer
1281 // types, so there's no point in trying to pick a prettier type.
1282 llvm::Type *bitcastType =
1283 llvm::Type::getIntNTy(getLLVMContext(),
1284 getContext().toBits(strategy.getIvarSize()));
1285
1286 // Cast both arguments to the chosen operation type.
1287 argAddr = Builder.CreateElementBitCast(argAddr, bitcastType);
1288 ivarAddr = Builder.CreateElementBitCast(ivarAddr, bitcastType);
1289
1290 // This bitcast load is likely to cause some nasty IR.
1291 llvm::Value *load = Builder.CreateLoad(argAddr);
1292
1293 // Perform an atomic store. There are no memory ordering requirements.
1294 llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
1295 store->setAtomic(llvm::AtomicOrdering::Unordered);
1296 return;
1297 }
1298
1299 case PropertyImplStrategy::GetSetProperty:
1300 case PropertyImplStrategy::SetPropertyAndExpressionGet: {
1301
1302 llvm::Constant *setOptimizedPropertyFn = nullptr;
1303 llvm::Constant *setPropertyFn = nullptr;
1304 if (UseOptimizedSetter(CGM)) {
1305 // 10.8 and iOS 6.0 code and GC is off
1306 setOptimizedPropertyFn =
1307 CGM.getObjCRuntime()
1308 .GetOptimizedPropertySetFunction(strategy.isAtomic(),
1309 strategy.isCopy());
1310 if (!setOptimizedPropertyFn) {
1311 CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI");
1312 return;
1313 }
1314 }
1315 else {
1316 setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction();
1317 if (!setPropertyFn) {
1318 CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
1319 return;
1320 }
1321 }
1322
1323 // Emit objc_setProperty((id) self, _cmd, offset, arg,
1324 // <is-atomic>, <is-copy>).
1325 llvm::Value *cmd =
1326 Builder.CreateLoad(GetAddrOfLocalVar(setterMethod->getCmdDecl()));
1327 llvm::Value *self =
1328 Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
1329 llvm::Value *ivarOffset =
1330 EmitIvarOffset(classImpl->getClassInterface(), ivar);
1331 Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
1332 llvm::Value *arg = Builder.CreateLoad(argAddr, "arg");
1333 arg = Builder.CreateBitCast(arg, VoidPtrTy);
1334
1335 CallArgList args;
1336 args.add(RValue::get(self), getContext().getObjCIdType());
1337 args.add(RValue::get(cmd), getContext().getObjCSelType());
1338 if (setOptimizedPropertyFn) {
1339 args.add(RValue::get(arg), getContext().getObjCIdType());
1340 args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
1341 CGCallee callee = CGCallee::forDirect(setOptimizedPropertyFn);
1342 EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
1343 callee, ReturnValueSlot(), args);
1344 } else {
1345 args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
1346 args.add(RValue::get(arg), getContext().getObjCIdType());
1347 args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
1348 getContext().BoolTy);
1349 args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
1350 getContext().BoolTy);
1351 // FIXME: We shouldn't need to get the function info here, the runtime
1352 // already should have computed it to build the function.
1353 CGCallee callee = CGCallee::forDirect(setPropertyFn);
1354 EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
1355 callee, ReturnValueSlot(), args);
1356 }
1357
1358 return;
1359 }
1360
1361 case PropertyImplStrategy::CopyStruct:
1362 emitStructSetterCall(*this, setterMethod, ivar);
1363 return;
1364
1365 case PropertyImplStrategy::Expression:
1366 break;
1367 }
1368
1369 // Otherwise, fake up some ASTs and emit a normal assignment.
1370 ValueDecl *selfDecl = setterMethod->getSelfDecl();
1371 DeclRefExpr self(getContext(), selfDecl, false, selfDecl->getType(),
1372 VK_LValue, SourceLocation());
1373 ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack,
1374 selfDecl->getType(), CK_LValueToRValue, &self,
1375 VK_RValue);
1376 ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
1377 SourceLocation(), SourceLocation(),
1378 &selfLoad, true, true);
1379
1380 ParmVarDecl *argDecl = *setterMethod->param_begin();
1381 QualType argType = argDecl->getType().getNonReferenceType();
1382 DeclRefExpr arg(getContext(), argDecl, false, argType, VK_LValue,
1383 SourceLocation());
1384 ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
1385 argType.getUnqualifiedType(), CK_LValueToRValue,
1386 &arg, VK_RValue);
1387
1388 // The property type can differ from the ivar type in some situations with
1389 // Objective-C pointer types, we can always bit cast the RHS in these cases.
1390 // The following absurdity is just to ensure well-formed IR.
1391 CastKind argCK = CK_NoOp;
1392 if (ivarRef.getType()->isObjCObjectPointerType()) {
1393 if (argLoad.getType()->isObjCObjectPointerType())
1394 argCK = CK_BitCast;
1395 else if (argLoad.getType()->isBlockPointerType())
1396 argCK = CK_BlockPointerToObjCPointerCast;
1397 else
1398 argCK = CK_CPointerToObjCPointerCast;
1399 } else if (ivarRef.getType()->isBlockPointerType()) {
1400 if (argLoad.getType()->isBlockPointerType())
1401 argCK = CK_BitCast;
1402 else
1403 argCK = CK_AnyPointerToBlockPointerCast;
1404 } else if (ivarRef.getType()->isPointerType()) {
1405 argCK = CK_BitCast;
1406 }
1407 ImplicitCastExpr argCast(ImplicitCastExpr::OnStack,
1408 ivarRef.getType(), argCK, &argLoad,
1409 VK_RValue);
1410 Expr *finalArg = &argLoad;
1411 if (!getContext().hasSameUnqualifiedType(ivarRef.getType(),
1412 argLoad.getType()))
1413 finalArg = &argCast;
1414
1415
1416 BinaryOperator assign(&ivarRef, finalArg, BO_Assign,
1417 ivarRef.getType(), VK_RValue, OK_Ordinary,
1418 SourceLocation(), FPOptions());
1419 EmitStmt(&assign);
1420}
1421
1422/// Generate an Objective-C property setter function.
1423///
1424/// The given Decl must be an ObjCImplementationDecl. \@synthesize
1425/// is illegal within a category.
1426void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
1427 const ObjCPropertyImplDecl *PID) {
1428 llvm::Constant *AtomicHelperFn =
1429 CodeGenFunction(CGM).GenerateObjCAtomicSetterCopyHelperFunction(PID);
1430 const ObjCPropertyDecl *PD = PID->getPropertyDecl();
1431 ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
1432 assert(OMD && "Invalid call to generate setter (empty method)")((OMD && "Invalid call to generate setter (empty method)"
) ? static_cast<void> (0) : __assert_fail ("OMD && \"Invalid call to generate setter (empty method)\""
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 1432, __PRETTY_FUNCTION__))
;
1433 StartObjCMethod(OMD, IMP->getClassInterface());
1434
1435 generateObjCSetterBody(IMP, PID, AtomicHelperFn);
1436
1437 FinishFunction();
1438}
1439
1440namespace {
1441 struct DestroyIvar final : EHScopeStack::Cleanup {
1442 private:
1443 llvm::Value *addr;
1444 const ObjCIvarDecl *ivar;
1445 CodeGenFunction::Destroyer *destroyer;
1446 bool useEHCleanupForArray;
1447 public:
1448 DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
1449 CodeGenFunction::Destroyer *destroyer,
1450 bool useEHCleanupForArray)
1451 : addr(addr), ivar(ivar), destroyer(destroyer),
1452 useEHCleanupForArray(useEHCleanupForArray) {}
1453
1454 void Emit(CodeGenFunction &CGF, Flags flags) override {
1455 LValue lvalue
1456 = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
1457 CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
1458 flags.isForNormalCleanup() && useEHCleanupForArray);
1459 }
1460 };
1461}
1462
1463/// Like CodeGenFunction::destroyARCStrong, but do it with a call.
1464static void destroyARCStrongWithStore(CodeGenFunction &CGF,
1465 Address addr,
1466 QualType type) {
1467 llvm::Value *null = getNullForVariable(addr);
1468 CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
1469}
1470
1471static void emitCXXDestructMethod(CodeGenFunction &CGF,
1472 ObjCImplementationDecl *impl) {
1473 CodeGenFunction::RunCleanupsScope scope(CGF);
1474
1475 llvm::Value *self = CGF.LoadObjCSelf();
1476
1477 const ObjCInterfaceDecl *iface = impl->getClassInterface();
1478 for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
1479 ivar; ivar = ivar->getNextIvar()) {
1480 QualType type = ivar->getType();
1481
1482 // Check whether the ivar is a destructible type.
1483 QualType::DestructionKind dtorKind = type.isDestructedType();
1484 if (!dtorKind) continue;
1485
1486 CodeGenFunction::Destroyer *destroyer = nullptr;
1487
1488 // Use a call to objc_storeStrong to destroy strong ivars, for the
1489 // general benefit of the tools.
1490 if (dtorKind == QualType::DK_objc_strong_lifetime) {
1491 destroyer = destroyARCStrongWithStore;
1492
1493 // Otherwise use the default for the destruction kind.
1494 } else {
1495 destroyer = CGF.getDestroyer(dtorKind);
1496 }
1497
1498 CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind);
1499
1500 CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer,
1501 cleanupKind & EHCleanup);
1502 }
1503
1504 assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?")((scope.requiresCleanups() && "nothing to do in .cxx_destruct?"
) ? static_cast<void> (0) : __assert_fail ("scope.requiresCleanups() && \"nothing to do in .cxx_destruct?\""
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 1504, __PRETTY_FUNCTION__))
;
1505}
1506
1507void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1508 ObjCMethodDecl *MD,
1509 bool ctor) {
1510 MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
1511 StartObjCMethod(MD, IMP->getClassInterface());
1512
1513 // Emit .cxx_construct.
1514 if (ctor) {
1515 // Suppress the final autorelease in ARC.
1516 AutoreleaseResult = false;
1517
1518 for (const auto *IvarInit : IMP->inits()) {
1519 FieldDecl *Field = IvarInit->getAnyMember();
1520 ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
1521 LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
1522 LoadObjCSelf(), Ivar, 0);
1523 EmitAggExpr(IvarInit->getInit(),
1524 AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
1525 AggValueSlot::DoesNotNeedGCBarriers,
1526 AggValueSlot::IsNotAliased,
1527 AggValueSlot::DoesNotOverlap));
1528 }
1529 // constructor returns 'self'.
1530 CodeGenTypes &Types = CGM.getTypes();
1531 QualType IdTy(CGM.getContext().getObjCIdType());
1532 llvm::Value *SelfAsId =
1533 Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
1534 EmitReturnOfRValue(RValue::get(SelfAsId), IdTy);
1535
1536 // Emit .cxx_destruct.
1537 } else {
1538 emitCXXDestructMethod(*this, IMP);
1539 }
1540 FinishFunction();
1541}
1542
1543llvm::Value *CodeGenFunction::LoadObjCSelf() {
1544 VarDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
1545 DeclRefExpr DRE(getContext(), Self,
1546 /*is enclosing local*/ (CurFuncDecl != CurCodeDecl),
1547 Self->getType(), VK_LValue, SourceLocation());
1548 return EmitLoadOfScalar(EmitDeclRefLValue(&DRE), SourceLocation());
1549}
1550
1551QualType CodeGenFunction::TypeOfSelfObject() {
1552 const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
1553 ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
1554 const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>(
1555 getContext().getCanonicalType(selfDecl->getType()));
1556 return PTy->getPointeeType();
1557}
1558
1559void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
1560 llvm::Constant *EnumerationMutationFnPtr =
1561 CGM.getObjCRuntime().EnumerationMutationFunction();
1562 if (!EnumerationMutationFnPtr) {
1563 CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
1564 return;
1565 }
1566 CGCallee EnumerationMutationFn =
1567 CGCallee::forDirect(EnumerationMutationFnPtr);
1568
1569 CGDebugInfo *DI = getDebugInfo();
1570 if (DI)
1571 DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
1572
1573 RunCleanupsScope ForScope(*this);
1574
1575 // The local variable comes into scope immediately.
1576 AutoVarEmission variable = AutoVarEmission::invalid();
1577 if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement()))
1578 variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl()));
1579
1580 JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end");
1581
1582 // Fast enumeration state.
1583 QualType StateTy = CGM.getObjCFastEnumerationStateType();
1584 Address StatePtr = CreateMemTemp(StateTy, "state.ptr");
1585 EmitNullInitialization(StatePtr, StateTy);
1586
1587 // Number of elements in the items array.
1588 static const unsigned NumItems = 16;
1589
1590 // Fetch the countByEnumeratingWithState:objects:count: selector.
1591 IdentifierInfo *II[] = {
1592 &CGM.getContext().Idents.get("countByEnumeratingWithState"),
1593 &CGM.getContext().Idents.get("objects"),
1594 &CGM.getContext().Idents.get("count")
1595 };
1596 Selector FastEnumSel =
1597 CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]);
1598
1599 QualType ItemsTy =
1600 getContext().getConstantArrayType(getContext().getObjCIdType(),
1601 llvm::APInt(32, NumItems),
1602 ArrayType::Normal, 0);
1603 Address ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
1604
1605 // Emit the collection pointer. In ARC, we do a retain.
1606 llvm::Value *Collection;
1607 if (getLangOpts().ObjCAutoRefCount) {
1608 Collection = EmitARCRetainScalarExpr(S.getCollection());
1609
1610 // Enter a cleanup to do the release.
1611 EmitObjCConsumeObject(S.getCollection()->getType(), Collection);
1612 } else {
1613 Collection = EmitScalarExpr(S.getCollection());
1614 }
1615
1616 // The 'continue' label needs to appear within the cleanup for the
1617 // collection object.
1618 JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next");
1619
1620 // Send it our message:
1621 CallArgList Args;
1622
1623 // The first argument is a temporary of the enumeration-state type.
1624 Args.add(RValue::get(StatePtr.getPointer()),
1625 getContext().getPointerType(StateTy));
1626
1627 // The second argument is a temporary array with space for NumItems
1628 // pointers. We'll actually be loading elements from the array
1629 // pointer written into the control state; this buffer is so that
1630 // collections that *aren't* backed by arrays can still queue up
1631 // batches of elements.
1632 Args.add(RValue::get(ItemsPtr.getPointer()),
1633 getContext().getPointerType(ItemsTy));
1634
1635 // The third argument is the capacity of that temporary array.
1636 llvm::Type *NSUIntegerTy = ConvertType(getContext().getNSUIntegerType());
1637 llvm::Constant *Count = llvm::ConstantInt::get(NSUIntegerTy, NumItems);
1638 Args.add(RValue::get(Count), getContext().getNSUIntegerType());
1639
1640 // Start the enumeration.
1641 RValue CountRV =
1642 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
1643 getContext().getNSUIntegerType(),
1644 FastEnumSel, Collection, Args);
1645
1646 // The initial number of objects that were returned in the buffer.
1647 llvm::Value *initialBufferLimit = CountRV.getScalarVal();
1648
1649 llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty");
1650 llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit");
1651
1652 llvm::Value *zero = llvm::Constant::getNullValue(NSUIntegerTy);
1653
1654 // If the limit pointer was zero to begin with, the collection is
1655 // empty; skip all this. Set the branch weight assuming this has the same
1656 // probability of exiting the loop as any other loop exit.
1657 uint64_t EntryCount = getCurrentProfileCount();
1658 Builder.CreateCondBr(
1659 Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"), EmptyBB,
1660 LoopInitBB,
1661 createProfileWeights(EntryCount, getProfileCount(S.getBody())));
1662
1663 // Otherwise, initialize the loop.
1664 EmitBlock(LoopInitBB);
1665
1666 // Save the initial mutations value. This is the value at an
1667 // address that was written into the state object by
1668 // countByEnumeratingWithState:objects:count:.
1669 Address StateMutationsPtrPtr = Builder.CreateStructGEP(
1670 StatePtr, 2, 2 * getPointerSize(), "mutationsptr.ptr");
1671 llvm::Value *StateMutationsPtr
1672 = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
1673
1674 llvm::Value *initialMutations =
1675 Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
1676 "forcoll.initial-mutations");
1677
1678 // Start looping. This is the point we return to whenever we have a
1679 // fresh, non-empty batch of objects.
1680 llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody");
1681 EmitBlock(LoopBodyBB);
1682
1683 // The current index into the buffer.
1684 llvm::PHINode *index = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.index");
1685 index->addIncoming(zero, LoopInitBB);
1686
1687 // The current buffer size.
1688 llvm::PHINode *count = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.count");
1689 count->addIncoming(initialBufferLimit, LoopInitBB);
1690
1691 incrementProfileCounter(&S);
1692
1693 // Check whether the mutations value has changed from where it was
1694 // at start. StateMutationsPtr should actually be invariant between
1695 // refreshes.
1696 StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
1697 llvm::Value *currentMutations
1698 = Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
1699 "statemutations");
1700
1701 llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated");
1702 llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated");
1703
1704 Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations),
1705 WasNotMutatedBB, WasMutatedBB);
1706
1707 // If so, call the enumeration-mutation function.
1708 EmitBlock(WasMutatedBB);
1709 llvm::Value *V =
1710 Builder.CreateBitCast(Collection,
1711 ConvertType(getContext().getObjCIdType()));
1712 CallArgList Args2;
1713 Args2.add(RValue::get(V), getContext().getObjCIdType());
1714 // FIXME: We shouldn't need to get the function info here, the runtime already
1715 // should have computed it to build the function.
1716 EmitCall(
1717 CGM.getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, Args2),
1718 EnumerationMutationFn, ReturnValueSlot(), Args2);
1719
1720 // Otherwise, or if the mutation function returns, just continue.
1721 EmitBlock(WasNotMutatedBB);
1722
1723 // Initialize the element variable.
1724 RunCleanupsScope elementVariableScope(*this);
1725 bool elementIsVariable;
1726 LValue elementLValue;
1727 QualType elementType;
1728 if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
1729 // Initialize the variable, in case it's a __block variable or something.
1730 EmitAutoVarInit(variable);
1731
1732 const VarDecl *D = cast<VarDecl>(SD->getSingleDecl());
1733 DeclRefExpr tempDRE(getContext(), const_cast<VarDecl *>(D), false,
1734 D->getType(), VK_LValue, SourceLocation());
1735 elementLValue = EmitLValue(&tempDRE);
1736 elementType = D->getType();
1737 elementIsVariable = true;
1738
1739 if (D->isARCPseudoStrong())
1740 elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone);
1741 } else {
1742 elementLValue = LValue(); // suppress warning
1743 elementType = cast<Expr>(S.getElement())->getType();
1744 elementIsVariable = false;
1745 }
1746 llvm::Type *convertedElementType = ConvertType(elementType);
1747
1748 // Fetch the buffer out of the enumeration state.
1749 // TODO: this pointer should actually be invariant between
1750 // refreshes, which would help us do certain loop optimizations.
1751 Address StateItemsPtr = Builder.CreateStructGEP(
1752 StatePtr, 1, getPointerSize(), "stateitems.ptr");
1753 llvm::Value *EnumStateItems =
1754 Builder.CreateLoad(StateItemsPtr, "stateitems");
1755
1756 // Fetch the value at the current index from the buffer.
1757 llvm::Value *CurrentItemPtr =
1758 Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr");
1759 llvm::Value *CurrentItem =
1760 Builder.CreateAlignedLoad(CurrentItemPtr, getPointerAlign());
1761
1762 // Cast that value to the right type.
1763 CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType,
1764 "currentitem");
1765
1766 // Make sure we have an l-value. Yes, this gets evaluated every
1767 // time through the loop.
1768 if (!elementIsVariable) {
1769 elementLValue = EmitLValue(cast<Expr>(S.getElement()));
1770 EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue);
1771 } else {
1772 EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue,
1773 /*isInit*/ true);
1774 }
1775
1776 // If we do have an element variable, this assignment is the end of
1777 // its initialization.
1778 if (elementIsVariable)
1779 EmitAutoVarCleanups(variable);
1780
1781 // Perform the loop body, setting up break and continue labels.
1782 BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
1783 {
1784 RunCleanupsScope Scope(*this);
1785 EmitStmt(S.getBody());
1786 }
1787 BreakContinueStack.pop_back();
1788
1789 // Destroy the element variable now.
1790 elementVariableScope.ForceCleanup();
1791
1792 // Check whether there are more elements.
1793 EmitBlock(AfterBody.getBlock());
1794
1795 llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch");
1796
1797 // First we check in the local buffer.
1798 llvm::Value *indexPlusOne =
1799 Builder.CreateAdd(index, llvm::ConstantInt::get(NSUIntegerTy, 1));
1800
1801 // If we haven't overrun the buffer yet, we can continue.
1802 // Set the branch weights based on the simplifying assumption that this is
1803 // like a while-loop, i.e., ignoring that the false branch fetches more
1804 // elements and then returns to the loop.
1805 Builder.CreateCondBr(
1806 Builder.CreateICmpULT(indexPlusOne, count), LoopBodyBB, FetchMoreBB,
1807 createProfileWeights(getProfileCount(S.getBody()), EntryCount));
1808
1809 index->addIncoming(indexPlusOne, AfterBody.getBlock());
1810 count->addIncoming(count, AfterBody.getBlock());
1811
1812 // Otherwise, we have to fetch more elements.
1813 EmitBlock(FetchMoreBB);
1814
1815 CountRV =
1816 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
1817 getContext().getNSUIntegerType(),
1818 FastEnumSel, Collection, Args);
1819
1820 // If we got a zero count, we're done.
1821 llvm::Value *refetchCount = CountRV.getScalarVal();
1822
1823 // (note that the message send might split FetchMoreBB)
1824 index->addIncoming(zero, Builder.GetInsertBlock());
1825 count->addIncoming(refetchCount, Builder.GetInsertBlock());
1826
1827 Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero),
1828 EmptyBB, LoopBodyBB);
1829
1830 // No more elements.
1831 EmitBlock(EmptyBB);
1832
1833 if (!elementIsVariable) {
1834 // If the element was not a declaration, set it to be null.
1835
1836 llvm::Value *null = llvm::Constant::getNullValue(convertedElementType);
1837 elementLValue = EmitLValue(cast<Expr>(S.getElement()));
1838 EmitStoreThroughLValue(RValue::get(null), elementLValue);
1839 }
1840
1841 if (DI)
1842 DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
1843
1844 ForScope.ForceCleanup();
1845 EmitBlock(LoopEnd.getBlock());
1846}
1847
1848void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
1849 CGM.getObjCRuntime().EmitTryStmt(*this, S);
1850}
1851
1852void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
1853 CGM.getObjCRuntime().EmitThrowStmt(*this, S);
1854}
1855
1856void CodeGenFunction::EmitObjCAtSynchronizedStmt(
1857 const ObjCAtSynchronizedStmt &S) {
1858 CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
1859}
1860
1861namespace {
1862 struct CallObjCRelease final : EHScopeStack::Cleanup {
1863 CallObjCRelease(llvm::Value *object) : object(object) {}
1864 llvm::Value *object;
1865
1866 void Emit(CodeGenFunction &CGF, Flags flags) override {
1867 // Releases at the end of the full-expression are imprecise.
1868 CGF.EmitARCRelease(object, ARCImpreciseLifetime);
1869 }
1870 };
1871}
1872
1873/// Produce the code for a CK_ARCConsumeObject. Does a primitive
1874/// release at the end of the full-expression.
1875llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type,
1876 llvm::Value *object) {
1877 // If we're in a conditional branch, we need to make the cleanup
1878 // conditional.
1879 pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object);
1880 return object;
1881}
1882
1883llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
1884 llvm::Value *value) {
1885 return EmitARCRetainAutorelease(type, value);
1886}
1887
1888/// Given a number of pointers, inform the optimizer that they're
1889/// being intrinsically used up until this point in the program.
1890void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
1891 llvm::Constant *&fn = CGM.getObjCEntrypoints().clang_arc_use;
1892 if (!fn)
1893 fn = CGM.getIntrinsic(llvm::Intrinsic::objc_clang_arc_use);
1894
1895 // This isn't really a "runtime" function, but as an intrinsic it
1896 // doesn't really matter as long as we align things up.
1897 EmitNounwindRuntimeCall(fn, values);
1898}
1899
1900static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM,
1901 llvm::Constant *RTF) {
1902 if (auto *F = dyn_cast<llvm::Function>(RTF)) {
1903 // If the target runtime doesn't naturally support ARC, emit weak
1904 // references to the runtime support library. We don't really
1905 // permit this to fail, but we need a particular relocation style.
1906 if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC() &&
1907 !CGM.getTriple().isOSBinFormatCOFF()) {
1908 F->setLinkage(llvm::Function::ExternalWeakLinkage);
1909 }
1910 }
1911}
1912
1913/// Perform an operation having the signature
1914/// i8* (i8*)
1915/// where a null input causes a no-op and returns null.
1916static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
1917 llvm::Value *value,
1918 llvm::Type *returnType,
1919 llvm::Constant *&fn,
1920 llvm::Intrinsic::ID IntID,
1921 bool isTailCall = false) {
1922 if (isa<llvm::ConstantPointerNull>(value))
1923 return value;
1924
1925 if (!fn) {
1926 fn = CGF.CGM.getIntrinsic(IntID);
1927 setARCRuntimeFunctionLinkage(CGF.CGM, fn);
1928 }
1929
1930 // Cast the argument to 'id'.
1931 llvm::Type *origType = returnType ? returnType : value->getType();
1932 value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
1933
1934 // Call the function.
1935 llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value);
1936 if (isTailCall)
1937 call->setTailCall();
1938
1939 // Cast the result back to the original type.
1940 return CGF.Builder.CreateBitCast(call, origType);
1941}
1942
1943/// Perform an operation having the following signature:
1944/// i8* (i8**)
1945static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
1946 Address addr,
1947 llvm::Constant *&fn,
1948 llvm::Intrinsic::ID IntID) {
1949 if (!fn) {
1950 fn = CGF.CGM.getIntrinsic(IntID);
1951 setARCRuntimeFunctionLinkage(CGF.CGM, fn);
1952 }
1953
1954 // Cast the argument to 'id*'.
1955 llvm::Type *origType = addr.getElementType();
1956 addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
1957
1958 // Call the function.
1959 llvm::Value *result = CGF.EmitNounwindRuntimeCall(fn, addr.getPointer());
1960
1961 // Cast the result back to a dereference of the original type.
1962 if (origType != CGF.Int8PtrTy)
1963 result = CGF.Builder.CreateBitCast(result, origType);
1964
1965 return result;
1966}
1967
1968/// Perform an operation having the following signature:
1969/// i8* (i8**, i8*)
1970static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
1971 Address addr,
1972 llvm::Value *value,
1973 llvm::Constant *&fn,
1974 llvm::Intrinsic::ID IntID,
1975 bool ignored) {
1976 assert(addr.getElementType() == value->getType())((addr.getElementType() == value->getType()) ? static_cast
<void> (0) : __assert_fail ("addr.getElementType() == value->getType()"
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 1976, __PRETTY_FUNCTION__))
;
1977
1978 if (!fn) {
1979 fn = CGF.CGM.getIntrinsic(IntID);
1980 setARCRuntimeFunctionLinkage(CGF.CGM, fn);
1981 }
1982
1983 llvm::Type *origType = value->getType();
1984
1985 llvm::Value *args[] = {
1986 CGF.Builder.CreateBitCast(addr.getPointer(), CGF.Int8PtrPtrTy),
1987 CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy)
1988 };
1989 llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args);
1990
1991 if (ignored) return nullptr;
1992
1993 return CGF.Builder.CreateBitCast(result, origType);
1994}
1995
1996/// Perform an operation having the following signature:
1997/// void (i8**, i8**)
1998static void emitARCCopyOperation(CodeGenFunction &CGF,
1999 Address dst,
2000 Address src,
2001 llvm::Constant *&fn,
2002 llvm::Intrinsic::ID IntID) {
2003 assert(dst.getType() == src.getType())((dst.getType() == src.getType()) ? static_cast<void> (
0) : __assert_fail ("dst.getType() == src.getType()", "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 2003, __PRETTY_FUNCTION__))
;
2004
2005 if (!fn) {
2006 fn = CGF.CGM.getIntrinsic(IntID);
2007 setARCRuntimeFunctionLinkage(CGF.CGM, fn);
2008 }
2009
2010 llvm::Value *args[] = {
2011 CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy),
2012 CGF.Builder.CreateBitCast(src.getPointer(), CGF.Int8PtrPtrTy)
2013 };
2014 CGF.EmitNounwindRuntimeCall(fn, args);
2015}
2016
2017/// Perform an operation having the signature
2018/// i8* (i8*)
2019/// where a null input causes a no-op and returns null.
2020static llvm::Value *emitObjCValueOperation(CodeGenFunction &CGF,
2021 llvm::Value *value,
2022 llvm::Type *returnType,
2023 llvm::Constant *&fn,
2024 StringRef fnName) {
2025 if (isa<llvm::ConstantPointerNull>(value))
2026 return value;
2027
2028 if (!fn) {
2029 llvm::FunctionType *fnType =
2030 llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, false);
2031 fn = CGF.CGM.CreateRuntimeFunction(fnType, fnName);
2032
2033 // We have Native ARC, so set nonlazybind attribute for performance
2034 if (llvm::Function *f = dyn_cast<llvm::Function>(fn))
2035 if (fnName == "objc_retain")
2036 f->addFnAttr(llvm::Attribute::NonLazyBind);
2037 }
2038
2039 // Cast the argument to 'id'.
2040 llvm::Type *origType = returnType ? returnType : value->getType();
2041 value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
2042
2043 // Call the function.
2044 llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value);
2045
2046 // Cast the result back to the original type.
2047 return CGF.Builder.CreateBitCast(call, origType);
2048}
2049
2050/// Produce the code to do a retain. Based on the type, calls one of:
2051/// call i8* \@objc_retain(i8* %value)
2052/// call i8* \@objc_retainBlock(i8* %value)
2053llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
2054 if (type->isBlockPointerType())
2055 return EmitARCRetainBlock(value, /*mandatory*/ false);
2056 else
2057 return EmitARCRetainNonBlock(value);
2058}
2059
2060/// Retain the given object, with normal retain semantics.
2061/// call i8* \@objc_retain(i8* %value)
2062llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
2063 return emitARCValueOperation(*this, value, nullptr,
2064 CGM.getObjCEntrypoints().objc_retain,
2065 llvm::Intrinsic::objc_retain);
2066}
2067
2068/// Retain the given block, with _Block_copy semantics.
2069/// call i8* \@objc_retainBlock(i8* %value)
2070///
2071/// \param mandatory - If false, emit the call with metadata
2072/// indicating that it's okay for the optimizer to eliminate this call
2073/// if it can prove that the block never escapes except down the stack.
2074llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
2075 bool mandatory) {
2076 llvm::Value *result
2077 = emitARCValueOperation(*this, value, nullptr,
2078 CGM.getObjCEntrypoints().objc_retainBlock,
2079 llvm::Intrinsic::objc_retainBlock);
2080
2081 // If the copy isn't mandatory, add !clang.arc.copy_on_escape to
2082 // tell the optimizer that it doesn't need to do this copy if the
2083 // block doesn't escape, where being passed as an argument doesn't
2084 // count as escaping.
2085 if (!mandatory && isa<llvm::Instruction>(result)) {
2086 llvm::CallInst *call
2087 = cast<llvm::CallInst>(result->stripPointerCasts());
2088 assert(call->getCalledValue() == CGM.getObjCEntrypoints().objc_retainBlock)((call->getCalledValue() == CGM.getObjCEntrypoints().objc_retainBlock
) ? static_cast<void> (0) : __assert_fail ("call->getCalledValue() == CGM.getObjCEntrypoints().objc_retainBlock"
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 2088, __PRETTY_FUNCTION__))
;
2089
2090 call->setMetadata("clang.arc.copy_on_escape",
2091 llvm::MDNode::get(Builder.getContext(), None));
2092 }
2093
2094 return result;
2095}
2096
2097static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
2098 // Fetch the void(void) inline asm which marks that we're going to
2099 // do something with the autoreleased return value.
2100 llvm::InlineAsm *&marker
2101 = CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker;
2102 if (!marker) {
2103 StringRef assembly
2104 = CGF.CGM.getTargetCodeGenInfo()
2105 .getARCRetainAutoreleasedReturnValueMarker();
2106
2107 // If we have an empty assembly string, there's nothing to do.
2108 if (assembly.empty()) {
2109
2110 // Otherwise, at -O0, build an inline asm that we're going to call
2111 // in a moment.
2112 } else if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) {
2113 llvm::FunctionType *type =
2114 llvm::FunctionType::get(CGF.VoidTy, /*variadic*/false);
2115
2116 marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true);
2117
2118 // If we're at -O1 and above, we don't want to litter the code
2119 // with this marker yet, so leave a breadcrumb for the ARC
2120 // optimizer to pick up.
2121 } else {
2122 llvm::NamedMDNode *metadata =
2123 CGF.CGM.getModule().getOrInsertNamedMetadata(
2124 "clang.arc.retainAutoreleasedReturnValueMarker");
2125 assert(metadata->getNumOperands() <= 1)((metadata->getNumOperands() <= 1) ? static_cast<void
> (0) : __assert_fail ("metadata->getNumOperands() <= 1"
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 2125, __PRETTY_FUNCTION__))
;
2126 if (metadata->getNumOperands() == 0) {
2127 auto &ctx = CGF.getLLVMContext();
2128 metadata->addOperand(llvm::MDNode::get(ctx,
2129 llvm::MDString::get(ctx, assembly)));
2130 }
2131 }
2132 }
2133
2134 // Call the marker asm if we made one, which we do only at -O0.
2135 if (marker)
2136 CGF.Builder.CreateCall(marker, None, CGF.getBundlesForFunclet(marker));
2137}
2138
2139/// Retain the given object which is the result of a function call.
2140/// call i8* \@objc_retainAutoreleasedReturnValue(i8* %value)
2141///
2142/// Yes, this function name is one character away from a different
2143/// call with completely different semantics.
2144llvm::Value *
2145CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
2146 emitAutoreleasedReturnValueMarker(*this);
2147 return emitARCValueOperation(*this, value, nullptr,
2148 CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue,
2149 llvm::Intrinsic::objc_retainAutoreleasedReturnValue);
2150}
2151
2152/// Claim a possibly-autoreleased return value at +0. This is only
2153/// valid to do in contexts which do not rely on the retain to keep
2154/// the object valid for all of its uses; for example, when
2155/// the value is ignored, or when it is being assigned to an
2156/// __unsafe_unretained variable.
2157///
2158/// call i8* \@objc_unsafeClaimAutoreleasedReturnValue(i8* %value)
2159llvm::Value *
2160CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) {
2161 emitAutoreleasedReturnValueMarker(*this);
2162 return emitARCValueOperation(*this, value, nullptr,
2163 CGM.getObjCEntrypoints().objc_unsafeClaimAutoreleasedReturnValue,
2164 llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue);
2165}
2166
2167/// Release the given object.
2168/// call void \@objc_release(i8* %value)
2169void CodeGenFunction::EmitARCRelease(llvm::Value *value,
2170 ARCPreciseLifetime_t precise) {
2171 if (isa<llvm::ConstantPointerNull>(value)) return;
2172
2173 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_release;
2174 if (!fn) {
2175 fn = CGM.getIntrinsic(llvm::Intrinsic::objc_release);
2176 setARCRuntimeFunctionLinkage(CGM, fn);
2177 }
2178
2179 // Cast the argument to 'id'.
2180 value = Builder.CreateBitCast(value, Int8PtrTy);
2181
2182 // Call objc_release.
2183 llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value);
2184
2185 if (precise == ARCImpreciseLifetime) {
2186 call->setMetadata("clang.imprecise_release",
2187 llvm::MDNode::get(Builder.getContext(), None));
2188 }
2189}
2190
2191/// Destroy a __strong variable.
2192///
2193/// At -O0, emit a call to store 'null' into the address;
2194/// instrumenting tools prefer this because the address is exposed,
2195/// but it's relatively cumbersome to optimize.
2196///
2197/// At -O1 and above, just load and call objc_release.
2198///
2199/// call void \@objc_storeStrong(i8** %addr, i8* null)
2200void CodeGenFunction::EmitARCDestroyStrong(Address addr,
2201 ARCPreciseLifetime_t precise) {
2202 if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
2203 llvm::Value *null = getNullForVariable(addr);
2204 EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
2205 return;
2206 }
2207
2208 llvm::Value *value = Builder.CreateLoad(addr);
2209 EmitARCRelease(value, precise);
2210}
2211
2212/// Store into a strong object. Always calls this:
2213/// call void \@objc_storeStrong(i8** %addr, i8* %value)
2214llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr,
2215 llvm::Value *value,
2216 bool ignored) {
2217 assert(addr.getElementType() == value->getType())((addr.getElementType() == value->getType()) ? static_cast
<void> (0) : __assert_fail ("addr.getElementType() == value->getType()"
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 2217, __PRETTY_FUNCTION__))
;
2218
2219 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_storeStrong;
2220 if (!fn) {
2221 fn = CGM.getIntrinsic(llvm::Intrinsic::objc_storeStrong);
2222 setARCRuntimeFunctionLinkage(CGM, fn);
2223 }
2224
2225 llvm::Value *args[] = {
2226 Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy),
2227 Builder.CreateBitCast(value, Int8PtrTy)
2228 };
2229 EmitNounwindRuntimeCall(fn, args);
2230
2231 if (ignored) return nullptr;
2232 return value;
2233}
2234
2235/// Store into a strong object. Sometimes calls this:
2236/// call void \@objc_storeStrong(i8** %addr, i8* %value)
2237/// Other times, breaks it down into components.
2238llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
2239 llvm::Value *newValue,
2240 bool ignored) {
2241 QualType type = dst.getType();
2242 bool isBlock = type->isBlockPointerType();
2243
2244 // Use a store barrier at -O0 unless this is a block type or the
2245 // lvalue is inadequately aligned.
2246 if (shouldUseFusedARCCalls() &&
2247 !isBlock &&
2248 (dst.getAlignment().isZero() ||
2249 dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) {
2250 return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
2251 }
2252
2253 // Otherwise, split it out.
2254
2255 // Retain the new value.
2256 newValue = EmitARCRetain(type, newValue);
2257
2258 // Read the old value.
2259 llvm::Value *oldValue = EmitLoadOfScalar(dst, SourceLocation());
2260
2261 // Store. We do this before the release so that any deallocs won't
2262 // see the old value.
2263 EmitStoreOfScalar(newValue, dst);
2264
2265 // Finally, release the old value.
2266 EmitARCRelease(oldValue, dst.isARCPreciseLifetime());
2267
2268 return newValue;
2269}
2270
/// Autorelease the given object.
///   call i8* \@objc_autorelease(i8* %value)
/// \returns the autoreleased value (same pointer as \p value).
llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
  return emitARCValueOperation(*this, value, nullptr,
                               CGM.getObjCEntrypoints().objc_autorelease,
                               llvm::Intrinsic::objc_autorelease);
}

/// Autorelease the given object.
///   call i8* \@objc_autoreleaseReturnValue(i8* %value)
/// Emitted as a tail call so the runtime's return-value optimization
/// can recognize the pattern.
llvm::Value *
CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
  return emitARCValueOperation(*this, value, nullptr,
                               CGM.getObjCEntrypoints().objc_autoreleaseReturnValue,
                               llvm::Intrinsic::objc_autoreleaseReturnValue,
                               /*isTailCall*/ true);
}

/// Do a fused retain/autorelease of the given object.
///   call i8* \@objc_retainAutoreleaseReturnValue(i8* %value)
/// Emitted as a tail call (see EmitARCAutoreleaseReturnValue).
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
  return emitARCValueOperation(*this, value, nullptr,
                               CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue,
                               llvm::Intrinsic::objc_retainAutoreleaseReturnValue,
                               /*isTailCall*/ true);
}

/// Do a fused retain/autorelease of the given object.
///   call i8* \@objc_retainAutorelease(i8* %value)
/// or
///   %retain = call i8* \@objc_retainBlock(i8* %value)
///   call i8* \@objc_autorelease(i8* %retain)
llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
                                                       llvm::Value *value) {
  if (!type->isBlockPointerType())
    return EmitARCRetainAutoreleaseNonBlock(value);

  // Retain/autorelease of a null block constant is a no-op.
  if (isa<llvm::ConstantPointerNull>(value)) return value;

  // Blocks have no fused entrypoint (they must be copied via
  // objc_retainBlock), so emit the two calls through i8*.
  llvm::Type *origType = value->getType();
  value = Builder.CreateBitCast(value, Int8PtrTy);
  value = EmitARCRetainBlock(value, /*mandatory*/ true);
  value = EmitARCAutorelease(value);
  return Builder.CreateBitCast(value, origType);
}

/// Do a fused retain/autorelease of the given object.
///   call i8* \@objc_retainAutorelease(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
  return emitARCValueOperation(*this, value, nullptr,
                               CGM.getObjCEntrypoints().objc_retainAutorelease,
                               llvm::Intrinsic::objc_retainAutorelease);
}

/// i8* \@objc_loadWeak(i8** %addr)
/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) {
  return emitARCLoadOperation(*this, addr,
                              CGM.getObjCEntrypoints().objc_loadWeak,
                              llvm::Intrinsic::objc_loadWeak);
}

/// i8* \@objc_loadWeakRetained(i8** %addr)
/// The result comes back at +1; the caller owns the retain.
llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) {
  return emitARCLoadOperation(*this, addr,
                              CGM.getObjCEntrypoints().objc_loadWeakRetained,
                              llvm::Intrinsic::objc_loadWeakRetained);
}

/// i8* \@objc_storeWeak(i8** %addr, i8* %value)
/// Returns %value.
/// \param ignored whether the caller discards the result (lets the
///        helper skip materializing the return value).
llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr,
                                               llvm::Value *value,
                                               bool ignored) {
  return emitARCStoreOperation(*this, addr, value,
                               CGM.getObjCEntrypoints().objc_storeWeak,
                               llvm::Intrinsic::objc_storeWeak, ignored);
}
2326
2327/// i8* \@objc_loadWeak(i8** %addr)
2328/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
2329llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) {
2330 return emitARCLoadOperation(*this, addr,
2331 CGM.getObjCEntrypoints().objc_loadWeak,
2332 llvm::Intrinsic::objc_loadWeak);
2333}
2334
2335/// i8* \@objc_loadWeakRetained(i8** %addr)
2336llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) {
2337 return emitARCLoadOperation(*this, addr,
2338 CGM.getObjCEntrypoints().objc_loadWeakRetained,
2339 llvm::Intrinsic::objc_loadWeakRetained);
2340}
2341
2342/// i8* \@objc_storeWeak(i8** %addr, i8* %value)
2343/// Returns %value.
2344llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr,
2345 llvm::Value *value,
2346 bool ignored) {
2347 return emitARCStoreOperation(*this, addr, value,
2348 CGM.getObjCEntrypoints().objc_storeWeak,
2349 llvm::Intrinsic::objc_storeWeak, ignored);
2350}
2351
/// i8* \@objc_initWeak(i8** %addr, i8* %value)
/// Returns %value.  %addr is known to not have a current weak entry.
/// Essentially equivalent to:
///   *addr = nil; objc_storeWeak(addr, value);
void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) {
  // If we're initializing to null, just write null to memory; no need
  // to get the runtime involved.  But don't do this if optimization
  // is enabled, because accounting for this would make the optimizer
  // much more complicated.
  if (isa<llvm::ConstantPointerNull>(value) &&
      CGM.getCodeGenOpts().OptimizationLevel == 0) {
    Builder.CreateStore(value, addr);
    return;
  }

  emitARCStoreOperation(*this, addr, value,
                        CGM.getObjCEntrypoints().objc_initWeak,
                        llvm::Intrinsic::objc_initWeak, /*ignored*/ true);
}

/// void \@objc_destroyWeak(i8** %addr)
/// Essentially objc_storeWeak(addr, nil).
void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
  // Lazily materialize (and cache) the runtime entrypoint.
  llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_destroyWeak;
  if (!fn) {
    fn = CGM.getIntrinsic(llvm::Intrinsic::objc_destroyWeak);
    setARCRuntimeFunctionLinkage(CGM, fn);
  }

  // Cast the argument to 'id*'.
  addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);

  EmitNounwindRuntimeCall(fn, addr.getPointer());
}

/// void \@objc_moveWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest.  Leaves %src pointing to nothing.
/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) {
  emitARCCopyOperation(*this, dst, src,
                       CGM.getObjCEntrypoints().objc_moveWeak,
                       llvm::Intrinsic::objc_moveWeak);
}

/// void \@objc_copyWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest.  Essentially
///   objc_release(objc_initWeak(dest, objc_readWeakRetained(src)))
void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) {
  emitARCCopyOperation(*this, dst, src,
                       CGM.getObjCEntrypoints().objc_copyWeak,
                       llvm::Intrinsic::objc_copyWeak);
}

/// Copy-assign between __weak variables: load the source at +1, mark
/// it consumed, and store it weakly into the destination.
void CodeGenFunction::emitARCCopyAssignWeak(QualType Ty, Address DstAddr,
                                            Address SrcAddr) {
  llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr);
  Object = EmitObjCConsumeObject(Ty, Object);
  EmitARCStoreWeak(DstAddr, Object, false);
}

/// Move-assign between __weak variables: same as copy-assign, then
/// destroy the source's weak reference.
void CodeGenFunction::emitARCMoveAssignWeak(QualType Ty, Address DstAddr,
                                            Address SrcAddr) {
  llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr);
  Object = EmitObjCConsumeObject(Ty, Object);
  EmitARCStoreWeak(DstAddr, Object, false);
  EmitARCDestroyWeak(SrcAddr);
}
2419
/// Produce the code to do a objc_autoreleasepool_push.
///   call i8* \@objc_autoreleasePoolPush(void)
/// \returns the opaque pool token to pass to the matching pop.
llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
  // Lazily materialize (and cache) the push intrinsic.
  llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush;
  if (!fn) {
    fn = CGM.getIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPush);
    setARCRuntimeFunctionLinkage(CGM, fn);
  }

  return EmitNounwindRuntimeCall(fn);
}

/// Produce the code to do a primitive release.
///   call void \@objc_autoreleasePoolPop(i8* %ptr)
/// \param value the opaque pool token produced by the matching push.
void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
  assert(value->getType() == Int8PtrTy);

  if (getInvokeDest()) {
    // Call the runtime method not the intrinsic if we are handling exceptions
    llvm::Constant *&fn =
      CGM.getObjCEntrypoints().objc_autoreleasePoolPopInvoke;
    if (!fn) {
      llvm::FunctionType *fnType =
        llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
      fn = CGM.CreateRuntimeFunction(fnType, "objc_autoreleasePoolPop");
      setARCRuntimeFunctionLinkage(CGM, fn);
    }

    // objc_autoreleasePoolPop can throw.
    EmitRuntimeCallOrInvoke(fn, value);
  } else {
    // No landing pad needed: emit a plain (nounwind-path) call.
    llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
    if (!fn) {
      fn = CGM.getIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPop);
      setARCRuntimeFunctionLinkage(CGM, fn);
    }

    EmitRuntimeCall(fn, value);
  }
}
2460
/// Produce the code to do an MRR version objc_autoreleasepool_push.
/// Which is: [[NSAutoreleasePool alloc] init];
/// Where alloc is declared as: + (id) alloc; in NSAutoreleasePool class.
/// init is declared as: - (id) init; in its NSObject super class.
///
/// \returns the initialized pool object (the pop token under MRR).
llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(*this);
  // [NSAutoreleasePool alloc]
  IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
  Selector AllocSel = getContext().Selectors.getSelector(0, &II);
  CallArgList Args;
  RValue AllocRV =
    Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                getContext().getObjCIdType(),
                                AllocSel, Receiver, Args);

  // [Receiver init]
  Receiver = AllocRV.getScalarVal();
  II = &CGM.getContext().Idents.get("init");
  Selector InitSel = getContext().Selectors.getSelector(0, &II);
  RValue InitRV =
    Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                getContext().getObjCIdType(),
                                InitSel, Receiver, Args);
  return InitRV.getScalarVal();
}

/// Allocate the given objc object.
///   call i8* \@objc_alloc(i8* %value)
/// \param value the class object to allocate an instance of.
llvm::Value *CodeGenFunction::EmitObjCAlloc(llvm::Value *value,
                                            llvm::Type *resultType) {
  return emitObjCValueOperation(*this, value, resultType,
                                CGM.getObjCEntrypoints().objc_alloc,
                                "objc_alloc");
}

/// Allocate the given objc object.
///   call i8* \@objc_allocWithZone(i8* %value)
/// \param value the class object to allocate an instance of.
llvm::Value *CodeGenFunction::EmitObjCAllocWithZone(llvm::Value *value,
                                                    llvm::Type *resultType) {
  return emitObjCValueOperation(*this, value, resultType,
                                CGM.getObjCEntrypoints().objc_allocWithZone,
                                "objc_allocWithZone");
}

/// Produce the code to do a primitive release.
/// [tmp drain];
/// \param Arg the NSAutoreleasePool object returned by the MRR push.
void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
  IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
  Selector DrainSel = getContext().Selectors.getSelector(0, &II);
  CallArgList Args;
  CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
                              getContext().VoidTy, DrainSel, Arg, Args);
}
2516
/// Destroyer for a __strong object with precise-lifetime semantics;
/// usable as an EHScopeStack destroy callback.
void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
                                              Address addr,
                                              QualType type) {
  CGF.EmitARCDestroyStrong(addr, ARCPreciseLifetime);
}

/// Destroyer for a __strong object with imprecise-lifetime semantics;
/// the release may be moved by the optimizer.
void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
                                                Address addr,
                                                QualType type) {
  CGF.EmitARCDestroyStrong(addr, ARCImpreciseLifetime);
}

/// Destroyer for a __weak reference; usable as an EHScopeStack
/// destroy callback.
void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
                                     Address addr,
                                     QualType type) {
  CGF.EmitARCDestroyWeak(addr);
}

/// Load the pointer at \p addr and mark it with clang.arc.use so the
/// ARC optimizer keeps the object alive up to this point.
void CodeGenFunction::emitARCIntrinsicUse(CodeGenFunction &CGF, Address addr,
                                          QualType type) {
  llvm::Value *value = CGF.Builder.CreateLoad(addr);
  CGF.EmitARCIntrinsicUse(value);
}

/// Autorelease the given object.
///   call i8* \@objc_autorelease(i8* %value)
/// Non-ARC variant: calls the runtime function by name.
llvm::Value *CodeGenFunction::EmitObjCAutorelease(llvm::Value *value,
                                                  llvm::Type *returnType) {
  return emitObjCValueOperation(*this, value, returnType,
                      CGM.getObjCEntrypoints().objc_autoreleaseRuntimeFunction,
                                "objc_autorelease");
}

/// Retain the given object, with normal retain semantics.
///   call i8* \@objc_retain(i8* %value)
/// Non-ARC variant: calls the runtime function by name.
llvm::Value *CodeGenFunction::EmitObjCRetainNonBlock(llvm::Value *value,
                                                     llvm::Type *returnType) {
  return emitObjCValueOperation(*this, value, returnType,
                        CGM.getObjCEntrypoints().objc_retainRuntimeFunction,
                                "objc_retain");
}
2558
2559/// Release the given object.
2560/// call void \@objc_release(i8* %value)
2561void CodeGenFunction::EmitObjCRelease(llvm::Value *value,
2562 ARCPreciseLifetime_t precise) {
2563 if (isa<llvm::ConstantPointerNull>(value)) return;
2564
2565 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_release;
2566 if (!fn) {
2567 if (!fn) {
2568 llvm::FunctionType *fnType =
2569 llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
2570 fn = CGM.CreateRuntimeFunction(fnType, "objc_release");
2571 setARCRuntimeFunctionLinkage(CGM, fn);
2572 // We have Native ARC, so set nonlazybind attribute for performance
2573 if (llvm::Function *f = dyn_cast<llvm::Function>(fn))
2574 f->addFnAttr(llvm::Attribute::NonLazyBind);
2575 }
2576 }
2577
2578 // Cast the argument to 'id'.
2579 value = Builder.CreateBitCast(value, Int8PtrTy);
2580
2581 // Call objc_release.
2582 llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value);
2583
2584 if (precise == ARCImpreciseLifetime) {
2585 call->setMetadata("clang.imprecise_release",
2586 llvm::MDNode::get(Builder.getContext(), None));
2587 }
2588}
2589
namespace {
  /// Cleanup that pops an ARC autorelease pool using the token
  /// returned by the matching push.
  struct CallObjCAutoreleasePoolObject final : EHScopeStack::Cleanup {
    llvm::Value *Token;

    CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitObjCAutoreleasePoolPop(Token);
    }
  };
  /// Cleanup that drains an NSAutoreleasePool object under MRR.
  struct CallObjCMRRAutoreleasePoolObject final : EHScopeStack::Cleanup {
    llvm::Value *Token;

    CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitObjCMRRAutoreleasePoolPop(Token);
    }
  };
}

/// Register a normal cleanup that pops the autorelease pool identified
/// by \p Ptr, choosing the ARC or MRR flavor from the language options.
void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
  if (CGM.getLangOpts().ObjCAutoRefCount)
    EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr);
  else
    EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
}
2617
2618static bool shouldRetainObjCLifetime(Qualifiers::ObjCLifetime lifetime) {
2619 switch (lifetime) {
2620 case Qualifiers::OCL_None:
2621 case Qualifiers::OCL_ExplicitNone:
2622 case Qualifiers::OCL_Strong:
2623 case Qualifiers::OCL_Autoreleasing:
2624 return true;
2625
2626 case Qualifiers::OCL_Weak:
2627 return false;
2628 }
2629
2630 llvm_unreachable("impossible lifetime!")::llvm::llvm_unreachable_internal("impossible lifetime!", "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 2630)
;
2631}
2632
/// Load a scalar of retainable type from \p lvalue, reporting whether
/// the result is already at +1 (the TryEmitResult flag).
static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                                  LValue lvalue,
                                                  QualType type) {
  llvm::Value *result;
  bool shouldRetain = shouldRetainObjCLifetime(type.getObjCLifetime());
  if (shouldRetain) {
    // Plain load; the caller must still retain, so this is +0.
    result = CGF.EmitLoadOfLValue(lvalue, SourceLocation()).getScalarVal();
  } else {
    assert(type.getObjCLifetime() == Qualifiers::OCL_Weak);
    // Weak loads come back retained from the runtime, i.e. +1.
    result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress());
  }
  return TryEmitResult(result, !shouldRetain);
}

/// As above, but starting from an expression; applies several peepholes
/// before falling back to emitting the l-value and loading it.
static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                                  const Expr *e) {
  e = e->IgnoreParens();
  QualType type = e->getType();

  // If we're loading retained from a __strong xvalue, we can avoid
  // an extra retain/release pair by zeroing out the source of this
  // "move" operation.
  if (e->isXValue() &&
      !type.isConstQualified() &&
      type.getObjCLifetime() == Qualifiers::OCL_Strong) {
    // Emit the lvalue.
    LValue lv = CGF.EmitLValue(e);

    // Load the object pointer.
    llvm::Value *result = CGF.EmitLoadOfLValue(lv,
                                               SourceLocation()).getScalarVal();

    // Set the source pointer to NULL.
    CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv);

    // We now own the strong reference that was moved out; report +1.
    return TryEmitResult(result, true);
  }

  // As a very special optimization, in ARC++, if the l-value is the
  // result of a non-volatile assignment, do a simple retain of the
  // result of the call to objc_storeWeak instead of reloading.
  if (CGF.getLangOpts().CPlusPlus &&
      !type.isVolatileQualified() &&
      type.getObjCLifetime() == Qualifiers::OCL_Weak &&
      isa<BinaryOperator>(e) &&
      cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
    return TryEmitResult(CGF.EmitScalarExpr(e), false);

  // Try to emit code for scalar constant instead of emitting LValue and
  // loading it because we are not guaranteed to have an l-value. One of such
  // cases is DeclRefExpr referencing non-odr-used constant-evaluated variable.
  if (const auto *decl_expr = dyn_cast<DeclRefExpr>(e)) {
    auto *DRE = const_cast<DeclRefExpr *>(decl_expr);
    if (CodeGenFunction::ConstantEmission constant = CGF.tryEmitAsConstant(DRE))
      return TryEmitResult(CGF.emitScalarConstant(constant, DRE),
                           !shouldRetainObjCLifetime(type.getObjCLifetime()));
  }

  // Generic case: emit the l-value and load from it.
  return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
}

/// Signature of the callbacks passed to emitARCOperationAfterCall.
typedef llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
                                         llvm::Value *value)>
  ValueTransform;
2697
/// Insert code immediately after a call.
///
/// \param value       the call result to operate on
/// \param doAfterCall transform emitted right after a call/invoke so the
///        autorelease-return-value handshake is recognizable
/// \param doFallback  transform emitted at the current insert point when
///        \p value is not (transitively) a call
static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF,
                                              llvm::Value *value,
                                              ValueTransform doAfterCall,
                                              ValueTransform doFallback) {
  if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
    CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();

    // Place the retain immediately following the call.
    CGF.Builder.SetInsertPoint(call->getParent(),
                               ++llvm::BasicBlock::iterator(call));
    value = doAfterCall(CGF, value);

    CGF.Builder.restoreIP(ip);
    return value;
  } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
    CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();

    // Place the retain at the beginning of the normal destination block.
    llvm::BasicBlock *BB = invoke->getNormalDest();
    CGF.Builder.SetInsertPoint(BB, BB->begin());
    value = doAfterCall(CGF, value);

    CGF.Builder.restoreIP(ip);
    return value;

  // Bitcasts can arise because of related-result returns.  Rewrite
  // the operand.
  } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
    llvm::Value *operand = bitcast->getOperand(0);
    operand = emitARCOperationAfterCall(CGF, operand, doAfterCall, doFallback);
    bitcast->setOperand(0, operand);
    return bitcast;

  // Generic fall-back case.
  } else {
    // Retain using the non-block variant: we never need to do a copy
    // of a block that's been returned to us.
    return doFallback(CGF, value);
  }
}
2739
/// Given that the given expression is some sort of call (which does
/// not return retained), emit a retain following it.
static llvm::Value *emitARCRetainCallResult(CodeGenFunction &CGF,
                                            const Expr *e) {
  llvm::Value *value = CGF.EmitScalarExpr(e);
  return emitARCOperationAfterCall(CGF, value,
           // After a call: use the return-value handshake.
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return CGF.EmitARCRetainAutoreleasedReturnValue(value);
           },
           // Fallback: a plain retain.
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return CGF.EmitARCRetainNonBlock(value);
           });
}

/// Given that the given expression is some sort of call (which does
/// not return retained), perform an unsafeClaim following it.
static llvm::Value *emitARCUnsafeClaimCallResult(CodeGenFunction &CGF,
                                                 const Expr *e) {
  llvm::Value *value = CGF.EmitScalarExpr(e);
  return emitARCOperationAfterCall(CGF, value,
           // After a call: claim the autoreleased return value.
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return CGF.EmitARCUnsafeClaimAutoreleasedReturnValue(value);
           },
           // Fallback: nothing to do.
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return value;
           });
}

/// Claim the result of a call expression at +0 when the runtime
/// supports it (and \p allowUnsafeClaim permits it); otherwise retain
/// the result and immediately mark it consumed.
llvm::Value *CodeGenFunction::EmitARCReclaimReturnedObject(const Expr *E,
                                                      bool allowUnsafeClaim) {
  if (allowUnsafeClaim &&
      CGM.getLangOpts().ObjCRuntime.hasARCUnsafeClaimAutoreleasedReturnValue()) {
    return emitARCUnsafeClaimCallResult(*this, E);
  } else {
    llvm::Value *value = emitARCRetainCallResult(*this, E);
    return EmitObjCConsumeObject(E->getType(), value);
  }
}
2778
2779/// Determine whether it might be important to emit a separate
2780/// objc_retain_block on the result of the given expression, or
2781/// whether it's okay to just emit it in a +1 context.
2782static bool shouldEmitSeparateBlockRetain(const Expr *e) {
2783 assert(e->getType()->isBlockPointerType())((e->getType()->isBlockPointerType()) ? static_cast<
void> (0) : __assert_fail ("e->getType()->isBlockPointerType()"
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 2783, __PRETTY_FUNCTION__))
;
2784 e = e->IgnoreParens();
2785
2786 // For future goodness, emit block expressions directly in +1
2787 // contexts if we can.
2788 if (isa<BlockExpr>(e))
2789 return false;
2790
2791 if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
2792 switch (cast->getCastKind()) {
2793 // Emitting these operations in +1 contexts is goodness.
2794 case CK_LValueToRValue:
2795 case CK_ARCReclaimReturnedObject:
2796 case CK_ARCConsumeObject:
2797 case CK_ARCProduceObject:
2798 return false;
2799
2800 // These operations preserve a block type.
2801 case CK_NoOp:
2802 case CK_BitCast:
2803 return shouldEmitSeparateBlockRetain(cast->getSubExpr());
2804
2805 // These operations are known to be bad (or haven't been considered).
2806 case CK_AnyPointerToBlockPointerCast:
2807 default:
2808 return true;
2809 }
2810 }
2811
2812 return true;
2813}
2814
namespace {
/// A CRTP base class for emitting expressions of retainable object
/// pointer type in ARC.
template <typename Impl, typename Result> class ARCExprEmitter {
protected:
  CodeGenFunction &CGF;
  // CRTP downcast to the concrete emitter.
  Impl &asImpl() { return *static_cast<Impl*>(this); }

  ARCExprEmitter(CodeGenFunction &CGF) : CGF(CGF) {}

public:
  Result visit(const Expr *e);
  Result visitCastExpr(const CastExpr *e);
  Result visitPseudoObjectExpr(const PseudoObjectExpr *e);
  Result visitBinaryOperator(const BinaryOperator *e);
  Result visitBinAssign(const BinaryOperator *e);
  Result visitBinAssignUnsafeUnretained(const BinaryOperator *e);
  Result visitBinAssignAutoreleasing(const BinaryOperator *e);
  Result visitBinAssignWeak(const BinaryOperator *e);
  Result visitBinAssignStrong(const BinaryOperator *e);

  // Minimal implementation the derived class must supply:
  //   Result visitLValueToRValue(const Expr *e)
  //   Result visitConsumeObject(const Expr *e)
  //   Result visitExtendBlockObject(const Expr *e)
  //   Result visitReclaimReturnedObject(const Expr *e)
  //   Result visitCall(const Expr *e)
  //   Result visitExpr(const Expr *e)
  //
  //   Result emitBitCast(Result result, llvm::Type *resultType)
  //   llvm::Value *getValueOfResult(Result result)
};
}
2848
2849/// Try to emit a PseudoObjectExpr under special ARC rules.
2850///
2851/// This massively duplicates emitPseudoObjectRValue.
2852template <typename Impl, typename Result>
2853Result
2854ARCExprEmitter<Impl,Result>::visitPseudoObjectExpr(const PseudoObjectExpr *E) {
2855 SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
2856
2857 // Find the result expression.
2858 const Expr *resultExpr = E->getResultExpr();
2859 assert(resultExpr)((resultExpr) ? static_cast<void> (0) : __assert_fail (
"resultExpr", "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 2859, __PRETTY_FUNCTION__))
;
12
Assuming 'resultExpr' is non-null
13
'?' condition is true
2860 Result result;
14
'result' declared without an initial value
2861
2862 for (PseudoObjectExpr::const_semantics_iterator
16
Loop condition is false. Execution continues on line 2898
2863 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
15
Assuming 'i' is equal to 'e'
2864 const Expr *semantic = *i;
2865
2866 // If this semantic expression is an opaque value, bind it
2867 // to the result of its source expression.
2868 if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
2869 typedef CodeGenFunction::OpaqueValueMappingData OVMA;
2870 OVMA opaqueData;
2871
2872 // If this semantic is the result of the pseudo-object
2873 // expression, try to evaluate the source as +1.
2874 if (ov == resultExpr) {
2875 assert(!OVMA::shouldBindAsLValue(ov))((!OVMA::shouldBindAsLValue(ov)) ? static_cast<void> (0
) : __assert_fail ("!OVMA::shouldBindAsLValue(ov)", "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 2875, __PRETTY_FUNCTION__))
;
2876 result = asImpl().visit(ov->getSourceExpr());
2877 opaqueData = OVMA::bind(CGF, ov,
2878 RValue::get(asImpl().getValueOfResult(result)));
2879
2880 // Otherwise, just bind it.
2881 } else {
2882 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
2883 }
2884 opaques.push_back(opaqueData);
2885
2886 // Otherwise, if the expression is the result, evaluate it
2887 // and remember the result.
2888 } else if (semantic == resultExpr) {
2889 result = asImpl().visit(semantic);
2890
2891 // Otherwise, evaluate the expression in an ignored context.
2892 } else {
2893 CGF.EmitIgnoredExpr(semantic);
2894 }
2895 }
2896
2897 // Unbind all the opaques now.
2898 for (unsigned i = 0, e = opaques.size(); i != e; ++i)
17
Assuming 'i' is equal to 'e'
18
Loop condition is false. Execution continues on line 2901
2899 opaques[i].unbind(CGF);
2900
2901 return result;
19
Undefined or garbage value returned to caller
2902}
2903
/// Dispatch a cast expression: look through no-ops, rewrite pointer
/// casts around the sub-expression's result, and route the ARC cast
/// kinds to their dedicated visitors.
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visitCastExpr(const CastExpr *e) {
  switch (e->getCastKind()) {

  // No-op casts don't change the type, so we just ignore them.
  case CK_NoOp:
    return asImpl().visit(e->getSubExpr());

  // These casts can change the type.
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    llvm::Type *resultType = CGF.ConvertType(e->getType());
    assert(e->getSubExpr()->getType()->hasPointerRepresentation());
    Result result = asImpl().visit(e->getSubExpr());
    return asImpl().emitBitCast(result, resultType);
  }

  // Handle some casts specially.
  case CK_LValueToRValue:
    return asImpl().visitLValueToRValue(e->getSubExpr());
  case CK_ARCConsumeObject:
    return asImpl().visitConsumeObject(e->getSubExpr());
  case CK_ARCExtendBlockObject:
    return asImpl().visitExtendBlockObject(e->getSubExpr());
  case CK_ARCReclaimReturnedObject:
    return asImpl().visitReclaimReturnedObject(e->getSubExpr());

  // Otherwise, use the default logic.
  default:
    return asImpl().visitExpr(e);
  }
}
2938
/// Dispatch a binary operator: comma evaluates the LHS for effect and
/// visits the RHS; assignment goes through the lifetime-aware visitor;
/// anything else falls back to the generic expression path.
template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinaryOperator(const BinaryOperator *e) {
  switch (e->getOpcode()) {
  case BO_Comma:
    CGF.EmitIgnoredExpr(e->getLHS());
    // The LHS may have terminated the current block; make sure we have
    // an insertion point before emitting the RHS.
    CGF.EnsureInsertPoint();
    return asImpl().visit(e->getRHS());

  case BO_Assign:
    return asImpl().visitBinAssign(e);

  default:
    return asImpl().visitExpr(e);
  }
}
2955
2956template <typename Impl, typename Result>
2957Result ARCExprEmitter<Impl,Result>::visitBinAssign(const BinaryOperator *e) {
2958 switch (e->getLHS()->getType().getObjCLifetime()) {
2959 case Qualifiers::OCL_ExplicitNone:
2960 return asImpl().visitBinAssignUnsafeUnretained(e);
2961
2962 case Qualifiers::OCL_Weak:
2963 return asImpl().visitBinAssignWeak(e);
2964
2965 case Qualifiers::OCL_Autoreleasing:
2966 return asImpl().visitBinAssignAutoreleasing(e);
2967
2968 case Qualifiers::OCL_Strong:
2969 return asImpl().visitBinAssignStrong(e);
2970
2971 case Qualifiers::OCL_None:
2972 return asImpl().visitExpr(e);
2973 }
2974 llvm_unreachable("bad ObjC ownership qualifier")::llvm::llvm_unreachable_internal("bad ObjC ownership qualifier"
, "/build/llvm-toolchain-snapshot-8~svn350071/tools/clang/lib/CodeGen/CGObjC.cpp"
, 2974)
;
2975}
2976
/// The default rule for __unsafe_unretained emits the RHS recursively,
/// stores into the unsafe variable, and propagates the result outward.
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::
                    visitBinAssignUnsafeUnretained(const BinaryOperator *e) {
  // Recursively emit the RHS.
  // For __block safety, do this before emitting the LHS.
  Result result = asImpl().visit(e->getRHS());

  // Perform the store.
  LValue lvalue =
    CGF.EmitCheckedLValue(e->getLHS(), CodeGenFunction::TCK_Store);
  CGF.EmitStoreThroughLValue(RValue::get(asImpl().getValueOfResult(result)),
                             lvalue);

  return result;
}

/// Default for __autoreleasing assignment: no special handling; use the
/// generic expression path.
template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignAutoreleasing(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}

/// Default for __weak assignment: no special handling; use the generic
/// expression path.
template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignWeak(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}

/// Default for __strong assignment: no special handling; use the
/// generic expression path.
template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignStrong(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}
3012
/// The general expression-emission logic.
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visit(const Expr *e) {
  // We should *never* see a nested full-expression here, because if
  // we fail to emit at +1, our caller must not retain after we close
  // out the full-expression.  This isn't as important in the unsafe
  // emitter.
  assert(!isa<ExprWithCleanups>(e));

  // Look through parens, __extension__, generic selection, etc.
  e = e->IgnoreParens();

  // Handle certain kinds of casts.
  if (const CastExpr *ce = dyn_cast<CastExpr>(e)) {
    return asImpl().visitCastExpr(ce);

  // Handle the comma operator.
  } else if (auto op = dyn_cast<BinaryOperator>(e)) {
    return asImpl().visitBinaryOperator(op);

  // TODO: handle conditional operators here

  // For calls and message sends, use the retained-call logic.
  // Delegate inits are a special case in that they're the only
  // returns-retained expression that *isn't* surrounded by
  // a consume.
  } else if (isa<CallExpr>(e) ||
             (isa<ObjCMessageExpr>(e) &&
              !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) {
    return asImpl().visitCall(e);

  // Look through pseudo-object expressions.
  } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
    return asImpl().visitPseudoObjectExpr(pseudo);
  }

  // No special structure recognized: generic emission.
  return asImpl().visitExpr(e);
}
3051
namespace {

/// An emitter for +1 results.
struct ARCRetainExprEmitter :
  public ARCExprEmitter<ARCRetainExprEmitter, TryEmitResult> {

  ARCRetainExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}

  llvm::Value *getValueOfResult(TryEmitResult result) {
    return result.getPointer();
  }

  // Bitcast the pointer while preserving the +0/+1 flag.
  TryEmitResult emitBitCast(TryEmitResult result, llvm::Type *resultType) {
    llvm::Value *value = result.getPointer();
    value = CGF.Builder.CreateBitCast(value, resultType);
    result.setPointer(value);
    return result;
  }

  TryEmitResult visitLValueToRValue(const Expr *e) {
    return tryEmitARCRetainLoadOfScalar(CGF, e);
  }

  /// For consumptions, just emit the subexpression and thus elide
  /// the retain/release pair.
  TryEmitResult visitConsumeObject(const Expr *e) {
    llvm::Value *result = CGF.EmitScalarExpr(e);
    return TryEmitResult(result, true);
  }

  /// Block extends are net +0.  Naively, we could just recurse on
  /// the subexpression, but actually we need to ensure that the
  /// value is copied as a block, so there's a little filter here.
  TryEmitResult visitExtendBlockObject(const Expr *e) {
    llvm::Value *result; // will be a +0 value

    // If we can't safely assume the sub-expression will produce a
    // block-copied value, emit the sub-expression at +0.
    if (shouldEmitSeparateBlockRetain(e)) {
      result = CGF.EmitScalarExpr(e);

    // Otherwise, try to emit the sub-expression at +1 recursively.
    } else {
      TryEmitResult subresult = asImpl().visit(e);

      // If that produced a retained value, just use that.
      if (subresult.getInt()) {
        return subresult;
      }

      // Otherwise it's +0.
      result = subresult.getPointer();
    }

    // Retain the object as a block.
    result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true);
    return TryEmitResult(result, true);
  }

  /// For reclaims, emit the subexpression as a retained call and
  /// skip the consumption.
  TryEmitResult visitReclaimReturnedObject(const Expr *e) {
    llvm::Value *result = emitARCRetainCallResult(CGF, e);
    return TryEmitResult(result, true);
  }

  /// When we have an undecorated call, retroactively do a claim.
  TryEmitResult visitCall(const Expr *e) {
    llvm::Value *result = emitARCRetainCallResult(CGF, e);
    return TryEmitResult(result, true);
  }

  // TODO: maybe special-case visitBinAssignWeak?

  TryEmitResult visitExpr(const Expr *e) {
    // We didn't find an obvious production, so emit what we've got and
    // tell the caller that we didn't manage to retain.
    llvm::Value *result = CGF.EmitScalarExpr(e);
    return TryEmitResult(result, false);
  }
};
}

/// Try to emit \p e producing a +1 value where a natural production
/// exists; the TryEmitResult flag reports whether a retain happened.
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
  return ARCRetainExprEmitter(CGF).visit(e);
}

/// Load from \p lvalue and guarantee a +1 result, retaining if the
/// peepholed load did not already produce one.
static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                              LValue lvalue,
                                              QualType type) {
  TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
  llvm::Value *value = result.getPointer();
  if (!result.getInt())
    value = CGF.EmitARCRetain(type, value);
  return value;
}
3149
3150/// EmitARCRetainScalarExpr - Semantically equivalent to
3151/// EmitARCRetainObject(e->getType(), EmitScalarExpr(e)), but making a
3152/// best-effort attempt to peephole expressions that naturally produce
3153/// retained objects.
3154llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
3155 // The retain needs to happen within the full-expression.
3156 if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
3157 enterFullExpression(cleanups);
3158 RunCleanupsScope scope(*this);
3159 return EmitARCRetainScalarExpr(cleanups->getSubExpr());
3160 }
3161
3162 TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
3163 llvm::Value *value = result.getPointer();
3164 if (!result.getInt())
3165 value = EmitARCRetain(e->getType(), value);
3166 return value;
3167}
3168
3169llvm::Value *
3170CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
3171 // The retain needs to happen within the full-expression.
3172 if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
3173 enterFullExpression(cleanups);
3174 RunCleanupsScope scope(*this);
3175 return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr());
3176 }
3177
3178 TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
3179 llvm::Value *value = result.getPointer();
3180 if (result.getInt())
3181 value = EmitARCAutorelease(value);
3182 else
3183 value = EmitARCRetainAutorelease(e->getType(), value);
3184 return value;
3185}
3186
3187llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
3188 llvm::Value *result;
3189 bool doRetain;
3190
3191 if (shouldEmitSeparateBlockRetain(e)) {
3192 result = EmitScalarExpr(e);
3193 doRetain = true;
3194 } else {
3195 TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e);
3196 result = subresult.getPointer();
3197 doRetain = !subresult.getInt();
3198 }
3199
3200 if (doRetain)
3201 result = EmitARCRetainBlock(result, /*mandatory*/ true);
3202 return EmitObjCConsumeObject(e->getType(), result);
3203}
3204
3205llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
3206 // In ARC, retain and autorelease the expression.
3207 if (getLangOpts().ObjCAutoRefCount) {
3208 // Do so before running any cleanups for the full-expression.
3209 // EmitARCRetainAutoreleaseScalarExpr does this for us.
3210 return EmitARCRetainAutoreleaseScalarExpr(expr);
3211 }
3212
3213 // Otherwise, use the normal scalar-expression emission. The
3214 // exception machinery doesn't do anything special with the
3215 // exception like retaining it, so there's no safety associated with
3216 // only running cleanups after the throw has started, and when it
3217 // matters it tends to be substantially inferior code.
3218 return EmitScalarExpr(expr);
3219}
3220
namespace {

/// An emitter for assigning into an __unsafe_unretained context: values
/// are produced plain (no compensating retains), and reclaims may use the
/// unsafeClaim entrypoint.
struct ARCUnsafeUnretainedExprEmitter :
  public ARCExprEmitter<ARCUnsafeUnretainedExprEmitter, llvm::Value*> {

  ARCUnsafeUnretainedExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}

  llvm::Value *getValueOfResult(llvm::Value *value) {
    return value;
  }

  llvm::Value *emitBitCast(llvm::Value *value, llvm::Type *resultType) {
    return CGF.Builder.CreateBitCast(value, resultType);
  }

  /// Loads are plain scalar loads; nothing to elide.
  llvm::Value *visitLValueToRValue(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }

  /// For consumptions, just emit the subexpression and perform the
  /// consumption like normal.
  llvm::Value *visitConsumeObject(const Expr *e) {
    llvm::Value *value = CGF.EmitScalarExpr(e);
    return CGF.EmitObjCConsumeObject(e->getType(), value);
  }

  /// No special logic for block extensions.  (This probably can't
  /// actually happen in this emitter, though.)
  llvm::Value *visitExtendBlockObject(const Expr *e) {
    return CGF.EmitARCExtendBlockObject(e);
  }

  /// For reclaims, perform an unsafeClaim if that's enabled.
  llvm::Value *visitReclaimReturnedObject(const Expr *e) {
    return CGF.EmitARCReclaimReturnedObject(e, /*unsafe*/ true);
  }

  /// When we have an undecorated call, just emit it without adding
  /// the unsafeClaim.
  llvm::Value *visitCall(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }

  /// Just do normal scalar emission in the default case.
  llvm::Value *visitExpr(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }
};
}
3271
3272static llvm::Value *emitARCUnsafeUnretainedScalarExpr(CodeGenFunction &CGF,
3273 const Expr *e) {
3274 return ARCUnsafeUnretainedExprEmitter(CGF).visit(e);
6
Calling 'ARCExprEmitter::visit'
3275}
3276
3277/// EmitARCUnsafeUnretainedScalarExpr - Semantically equivalent to
3278/// immediately releasing the resut of EmitARCRetainScalarExpr, but
3279/// avoiding any spurious retains, including by performing reclaims
3280/// with objc_unsafeClaimAutoreleasedReturnValue.
3281llvm::Value *CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr(const Expr *e) {
3282 // Look through full-expressions.
3283 if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
4
Taking false branch
3284 enterFullExpression(cleanups);
3285 RunCleanupsScope scope(*this);
3286 return emitARCUnsafeUnretainedScalarExpr(*this, cleanups->getSubExpr());
3287 }
3288
3289 return emitARCUnsafeUnretainedScalarExpr(*this, e);
5
Calling 'emitARCUnsafeUnretainedScalarExpr'
3290}
3291
3292std::pair<LValue,llvm::Value*>
3293CodeGenFunction::EmitARCStoreUnsafeUnretained(const BinaryOperator *e,
3294 bool ignored) {
3295 // Evaluate the RHS first. If we're ignoring the result, assume
3296 // that we can emit at an unsafe +0.
3297 llvm::Value *value;
3298 if (ignored) {
1
Assuming 'ignored' is not equal to 0
2
Taking true branch
3299 value = EmitARCUnsafeUnretainedScalarExpr(e->getRHS());
3
Calling 'CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr'
3300 } else {
3301 value = EmitScalarExpr(e->getRHS());
3302 }
3303
3304 // Emit the LHS and perform the store.
3305 LValue lvalue = EmitLValue(e->getLHS());
3306 EmitStoreOfScalar(value, lvalue);
3307
3308 return std::pair<LValue,llvm::Value*>(std::move(lvalue), value);
3309}
3310
/// Emit an assignment into a __strong l-value, returning the l-value and
/// the stored value.  Tries to peephole a retained RHS so the store can be
/// expanded into load-old / store-new / release-old.
std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
                                    bool ignored) {
  // Evaluate the RHS first.
  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
  llvm::Value *value = result.getPointer();

  bool hasImmediateRetain = result.getInt();

  // If we didn't emit a retained object, and the l-value is of block
  // type, then we need to emit the block-retain immediately in case
  // it invalidates the l-value.
  if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
    value = EmitARCRetainBlock(value, /*mandatory*/ false);
    hasImmediateRetain = true;
  }

  LValue lvalue = EmitLValue(e->getLHS());

  // If the RHS was emitted retained, expand this: load the old value,
  // store the new one, then release the old value after the store.
  if (hasImmediateRetain) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, SourceLocation());
    EmitStoreOfScalar(value, lvalue);
    EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime());
  } else {
    // Otherwise defer to the strong-store entrypoint (overload taking
    // an LValue), which handles the retain/store/release itself.
    value = EmitARCStoreStrong(lvalue, value, ignored);
  }

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}
3341
3342std::pair<LValue,llvm::Value*>
3343CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
3344 llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
3345 LValue lvalue = EmitLValue(e->getLHS());
3346
3347 EmitStoreOfScalar(value, lvalue);
3348
3349 return std::pair<LValue,llvm::Value*>(lvalue, value);
3350}
3351
/// Emit an @autoreleasepool statement: push a pool, emit the body, and
/// register a cleanup that pops the pool when the scope exits.
void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
                                     const ObjCAutoreleasePoolStmt &ARPS) {
  const Stmt *subStmt = ARPS.getSubStmt();
  const CompoundStmt &S = cast<CompoundStmt>(*subStmt);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());

  // Keep track of the current cleanup stack depth.
  RunCleanupsScope Scope(*this);
  if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
    // Native ARC runtime: use the autorelease-pool push entrypoint and
    // a matching pop cleanup.
    llvm::Value *token = EmitObjCAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
  } else {
    // Fallback for runtimes without native ARC: use the MRR-style
    // push/pop helpers instead.
    llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
  }

  for (const auto *I : S.body())
    EmitStmt(I);

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
}
3377
3378/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
3379/// make sure it survives garbage collection until this point.
3380void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
3381 // We just use an inline assembly.
3382 llvm::FunctionType *extenderType
3383 = llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All);
3384 llvm::Value *extender
3385 = llvm::InlineAsm::get(extenderType,
3386 /* assembly */ "",
3387 /* constraints */ "r",
3388 /* side effects */ true);
3389
3390 object = Builder.CreateBitCast(object, VoidPtrTy);
3391 EmitNounwindRuntimeCall(extender, object);
3392}
3393
/// GenerateObjCAtomicSetterCopyHelperFunction - Given a c++ object type with
/// non-trivial copy assignment function, produce following helper function.
/// static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
///
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
                                        const ObjCPropertyImplDecl *PID) {
  // Only needed for atomic C++ record-typed properties, and only on
  // runtimes that provide the atomic-copy helper entrypoint.
  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  QualType Ty = PID->getPropertyIvarDecl()->getType();
  if (!Ty->isRecordType())
    return nullptr;
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
    return nullptr;
  llvm::Constant *HelperFn = nullptr;
  // A trivial assignment needs no helper.
  if (hasTrivialSetExpr(PID))
    return nullptr;
  assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
  // Helpers are cached per ivar type.
  if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
    return HelperFn;

  ASTContext &C = getContext();
  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__assign_helper_atomic_property_");

  // Build the helper's signature: void (Ty *dest, const Ty *src).
  QualType ReturnTy = C.VoidTy;
  QualType DestTy = C.getPointerType(Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(SrcTy);

  SmallVector<QualType, 2> ArgTys;
  ArgTys.push_back(DestTy);
  ArgTys.push_back(SrcTy);
  QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});

  FunctionDecl *FD = FunctionDecl::Create(
      C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
      FunctionTy, nullptr, SC_Static, false, false);

  FunctionArgList args;
  ImplicitParamDecl DstDecl(C, FD, SourceLocation(), /*Id=*/nullptr, DestTy,
                            ImplicitParamDecl::Other);
  args.push_back(&DstDecl);
  ImplicitParamDecl SrcDecl(C, FD, SourceLocation(), /*Id=*/nullptr, SrcTy,
                            ImplicitParamDecl::Other);
  args.push_back(&SrcDecl);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);

  // The helper is internal to this module.
  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__assign_helper_atomic_property_",
                           &CGM.getModule());

  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  StartFunction(FD, ReturnTy, Fn, FI, args);

  // Build the AST for "*dest = *src" on the stack and emit it as the
  // helper's body.
  DeclRefExpr DstExpr(getContext(), &DstDecl, false, DestTy, VK_RValue,
                      SourceLocation());
  UnaryOperator DST(&DstExpr, UO_Deref, DestTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation(), false);

  DeclRefExpr SrcExpr(getContext(), &SrcDecl, false, SrcTy, VK_RValue,
                      SourceLocation());
  UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation(), false);

  Expr *Args[2] = { &DST, &SRC };
  // Reuse the callee recorded in the setter's assignment expression.
  CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
  CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
      C, OO_Equal, CalleeExp->getCallee(), Args, DestTy->getPointeeType(),
      VK_LValue, SourceLocation(), FPOptions());

  EmitStmt(TheCall);

  FinishFunction();
  HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
  return HelperFn;
}
3481
/// Given a C++ record-typed atomic property with a non-trivial copy
/// constructor, produce the helper the atomic getter uses to copy the
/// value: static void copyHelper(Ty *dest, const Ty *source).
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
                                            const ObjCPropertyImplDecl *PID) {
  // Only needed for atomic C++ record-typed properties, and only on
  // runtimes that provide the atomic-copy helper entrypoint.
  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  QualType Ty = PD->getType();
  if (!Ty->isRecordType())
    return nullptr;
  if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
    return nullptr;
  llvm::Constant *HelperFn = nullptr;
  // A trivial getter copy needs no helper.
  if (hasTrivialGetExpr(PID))
    return nullptr;
  assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null");
  // Helpers are cached per property type.
  if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
    return HelperFn;

  ASTContext &C = getContext();
  IdentifierInfo *II =
      &CGM.getContext().Idents.get("__copy_helper_atomic_property_");

  // Build the helper's signature: void (Ty *dest, const Ty *src).
  QualType ReturnTy = C.VoidTy;
  QualType DestTy = C.getPointerType(Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(SrcTy);

  SmallVector<QualType, 2> ArgTys;
  ArgTys.push_back(DestTy);
  ArgTys.push_back(SrcTy);
  QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});

  FunctionDecl *FD = FunctionDecl::Create(
      C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
      FunctionTy, nullptr, SC_Static, false, false);

  FunctionArgList args;
  ImplicitParamDecl DstDecl(C, FD, SourceLocation(), /*Id=*/nullptr, DestTy,
                            ImplicitParamDecl::Other);
  args.push_back(&DstDecl);
  ImplicitParamDecl SrcDecl(C, FD, SourceLocation(), /*Id=*/nullptr, SrcTy,
                            ImplicitParamDecl::Other);
  args.push_back(&SrcDecl);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);

  // The helper is internal to this module.
  llvm::Function *Fn = llvm::Function::Create(
      LTy, llvm::GlobalValue::InternalLinkage, "__copy_helper_atomic_property_",
      &CGM.getModule());

  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  StartFunction(FD, ReturnTy, Fn, FI, args);

  // Build "*src" to serve as the first constructor argument.
  DeclRefExpr SrcExpr(getContext(), &SrcDecl, false, SrcTy, VK_RValue,
                      SourceLocation());

  UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation(), false);

  CXXConstructExpr *CXXConstExpr =
    cast<CXXConstructExpr>(PID->getGetterCXXConstructor());

  // Rebuild the recorded construct-expression with *src substituted for
  // its first argument, keeping the remaining recorded arguments.
  SmallVector<Expr*, 4> ConstructorArgs;
  ConstructorArgs.push_back(&SRC);
  ConstructorArgs.append(std::next(CXXConstExpr->arg_begin()),
                         CXXConstExpr->arg_end());

  CXXConstructExpr *TheCXXConstructExpr =
    CXXConstructExpr::Create(C, Ty, SourceLocation(),
                             CXXConstExpr->getConstructor(),
                             CXXConstExpr->isElidable(),
                             ConstructorArgs,
                             CXXConstExpr->hadMultipleCandidates(),
                             CXXConstExpr->isListInitialization(),
                             CXXConstExpr->isStdInitListInitialization(),
                             CXXConstExpr->requiresZeroInitialization(),
                             CXXConstExpr->getConstructionKind(),
                             SourceRange());

  DeclRefExpr DstExpr(getContext(), &DstDecl, false, DestTy, VK_RValue,
                      SourceLocation());

  // Evaluate dest and construct the copy directly into *dest via an
  // aggregate slot.
  RValue DV = EmitAnyExpr(&DstExpr);
  CharUnits Alignment
    = getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
  EmitAggExpr(TheCXXConstructExpr,
              AggValueSlot::forAddr(Address(DV.getScalarVal(), Alignment),
                                    Qualifiers(),
                                    AggValueSlot::IsDestructed,
                                    AggValueSlot::DoesNotNeedGCBarriers,
                                    AggValueSlot::IsNotAliased,
                                    AggValueSlot::DoesNotOverlap));

  FinishFunction();
  HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  CGM.setAtomicGetterHelperFnMap(Ty, HelperFn);
  return HelperFn;
}
3586
3587llvm::Value *
3588CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
3589 // Get selectors for retain/autorelease.
3590 IdentifierInfo *CopyID = &getContext().Idents.get("copy");
3591 Selector CopySelector =
3592 getContext().Selectors.getNullarySelector(CopyID);
3593 IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
3594 Selector AutoreleaseSelector =
3595 getContext().Selectors.getNullarySelector(AutoreleaseID);
3596
3597 // Emit calls to retain/autorelease.
3598 CGObjCRuntime &Runtime = CGM.getObjCRuntime();
3599 llvm::Value *Val = Block;
3600 RValue Result;
3601 Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
3602 Ty, CopySelector,
3603 Val, CallArgList(), nullptr, nullptr);
3604 Val = Result.getScalarVal();
3605 Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
3606 Ty, AutoreleaseSelector,
3607 Val, CallArgList(), nullptr, nullptr);
3608 Val = Result.getScalarVal();
3609 return Val;
3610}
3611
/// Emit an @available / __builtin_available check as a call to the
/// compiler-rt routine __isOSVersionAtLeast(major, minor, subminor),
/// returning an i1 that is true when the deployment OS is new enough.
llvm::Value *
CodeGenFunction::EmitBuiltinAvailable(ArrayRef<llvm::Value *> Args) {
  assert(Args.size() == 3 && "Expected 3 argument here!");

  // Lazily declare int __isOSVersionAtLeast(int, int, int), caching the
  // declaration on the module so emitAtAvailableLinkGuard can see it.
  if (!CGM.IsOSVersionAtLeastFn) {
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(Int32Ty, {Int32Ty, Int32Ty, Int32Ty}, false);
    CGM.IsOSVersionAtLeastFn =
        CGM.CreateRuntimeFunction(FTy, "__isOSVersionAtLeast");
  }

  llvm::Value *CallRes =
      EmitNounwindRuntimeCall(CGM.IsOSVersionAtLeastFn, Args);

  // The runtime routine returns an i32; convert to a boolean.
  return Builder.CreateICmpNE(CallRes, llvm::Constant::getNullValue(Int32Ty));
}
3628
/// If this module emitted any @available checks, make sure the final
/// image links against CoreFoundation (on Darwin) by adding a linker
/// option and emitting a live reference to a CF symbol.
void CodeGenModule::emitAtAvailableLinkGuard() {
  // IsOSVersionAtLeastFn is only set by EmitBuiltinAvailable, so its
  // absence means no @available check was emitted.
  if (!IsOSVersionAtLeastFn)
    return;
  // @available requires CoreFoundation only on Darwin.
  if (!Target.getTriple().isOSDarwin())
    return;
  // Add -framework CoreFoundation to the linker commands. We still want to
  // emit the core foundation reference down below because otherwise if
  // CoreFoundation is not used in the code, the linker won't link the
  // framework.
  auto &Context = getLLVMContext();
  llvm::Metadata *Args[2] = {llvm::MDString::get(Context, "-framework"),
                             llvm::MDString::get(Context, "CoreFoundation")};
  LinkerOptionsMetadata.push_back(llvm::MDNode::get(Context, Args));
  // Emit a reference to a symbol from CoreFoundation to ensure that
  // CoreFoundation is linked into the final binary.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(Int32Ty, {VoidPtrTy}, false);
  llvm::Constant *CFFunc =
      CreateRuntimeFunction(FTy, "CFBundleGetVersionNumber");

  // The reference lives in a hidden, link-once helper function that is
  // never actually called (it ends in unreachable) but is kept alive by
  // being added to the compiler-used list below.
  llvm::FunctionType *CheckFTy = llvm::FunctionType::get(VoidTy, {}, false);
  llvm::Function *CFLinkCheckFunc = cast<llvm::Function>(CreateBuiltinFunction(
      CheckFTy, "__clang_at_available_requires_core_foundation_framework"));
  CFLinkCheckFunc->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
  CFLinkCheckFunc->setVisibility(llvm::GlobalValue::HiddenVisibility);
  CodeGenFunction CGF(*this);
  CGF.Builder.SetInsertPoint(CGF.createBasicBlock("", CFLinkCheckFunc));
  CGF.EmitNounwindRuntimeCall(CFFunc, llvm::Constant::getNullValue(VoidPtrTy));
  CGF.Builder.CreateUnreachable();
  addCompilerUsedGlobal(CFLinkCheckFunc);
}
3661
// Out-of-line destructor definition for the Objective-C runtime
// interface class (presumably anchoring its vtable in this TU — the
// class declaration is not visible in this file; confirm in CGObjCRuntime.h).
CGObjCRuntime::~CGObjCRuntime() {}