Bug Summary

File: tools/clang/lib/CodeGen/CGExprScalar.cpp
Warning: line 2724, column 5
Undefined or garbage value returned to caller

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGExprScalar.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -relaxed-aliasing -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-8~svn345461/tools/clang/include -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn345461/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem 
/usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/tools/clang/lib/CodeGen -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fno-common -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-10-27-211344-32123-1 -x c++ /build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp -faddrsig
1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CGCXXABI.h"
15#include "CGCleanup.h"
16#include "CGDebugInfo.h"
17#include "CGObjCRuntime.h"
18#include "CodeGenFunction.h"
19#include "CodeGenModule.h"
20#include "TargetInfo.h"
21#include "clang/AST/ASTContext.h"
22#include "clang/AST/DeclObjC.h"
23#include "clang/AST/Expr.h"
24#include "clang/AST/RecordLayout.h"
25#include "clang/AST/StmtVisitor.h"
26#include "clang/Basic/FixedPoint.h"
27#include "clang/Basic/TargetInfo.h"
28#include "clang/Frontend/CodeGenOptions.h"
29#include "llvm/ADT/Optional.h"
30#include "llvm/IR/CFG.h"
31#include "llvm/IR/Constants.h"
32#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/Function.h"
34#include "llvm/IR/GetElementPtrTypeIterator.h"
35#include "llvm/IR/GlobalVariable.h"
36#include "llvm/IR/Intrinsics.h"
37#include "llvm/IR/Module.h"
38#include <cstdarg>
39
40using namespace clang;
41using namespace CodeGen;
42using llvm::Value;
43
44//===----------------------------------------------------------------------===//
45// Scalar Expression Emitter
46//===----------------------------------------------------------------------===//
47
48namespace {
49
50/// Determine whether the given binary operation may overflow.
51/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
52/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
53/// the returned overflow check is precise. The returned value is 'true' for
54/// all other opcodes, to be conservative.
55bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
56 BinaryOperator::Opcode Opcode, bool Signed,
57 llvm::APInt &Result) {
58 // Assume overflow is possible, unless we can prove otherwise.
59 bool Overflow = true;
60 const auto &LHSAP = LHS->getValue();
61 const auto &RHSAP = RHS->getValue();
62 if (Opcode == BO_Add) {
63 if (Signed)
64 Result = LHSAP.sadd_ov(RHSAP, Overflow);
65 else
66 Result = LHSAP.uadd_ov(RHSAP, Overflow);
67 } else if (Opcode == BO_Sub) {
68 if (Signed)
69 Result = LHSAP.ssub_ov(RHSAP, Overflow);
70 else
71 Result = LHSAP.usub_ov(RHSAP, Overflow);
72 } else if (Opcode == BO_Mul) {
73 if (Signed)
74 Result = LHSAP.smul_ov(RHSAP, Overflow);
75 else
76 Result = LHSAP.umul_ov(RHSAP, Overflow);
77 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
78 if (Signed && !RHS->isZero())
79 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
80 else
81 return false;
82 }
83 return Overflow;
84}
85
86struct BinOpInfo {
87 Value *LHS;
88 Value *RHS;
89 QualType Ty; // Computation Type.
90 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
91 FPOptions FPFeatures;
92 const Expr *E; // Entire expr, for error unsupported. May not be binop.
93
94 /// Check if the binop can result in integer overflow.
95 bool mayHaveIntegerOverflow() const {
96 // Without constant input, we can't rule out overflow.
97 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
98 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
99 if (!LHSCI || !RHSCI)
100 return true;
101
102 llvm::APInt Result;
103 return ::mayHaveIntegerOverflow(
104 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
105 }
106
107 /// Check if the binop computes a division or a remainder.
108 bool isDivremOp() const {
109 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
110 Opcode == BO_RemAssign;
111 }
112
113 /// Check if the binop can result in an integer division by zero.
114 bool mayHaveIntegerDivisionByZero() const {
115 if (isDivremOp())
116 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
117 return CI->isZero();
118 return true;
119 }
120
121 /// Check if the binop can result in a float division by zero.
122 bool mayHaveFloatDivisionByZero() const {
123 if (isDivremOp())
124 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
125 return CFP->isZero();
126 return true;
127 }
128};
129
130static bool MustVisitNullValue(const Expr *E) {
131 // If a null pointer expression's type is the C++0x nullptr_t, then
132 // it's not necessarily a simple constant and it must be evaluated
133 // for its potential side effects.
134 return E->getType()->isNullPtrType();
135}
136
137/// If \p E is a widened promoted integer, get its base (unpromoted) type.
138static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
139 const Expr *E) {
140 const Expr *Base = E->IgnoreImpCasts();
141 if (E == Base)
142 return llvm::None;
143
144 QualType BaseTy = Base->getType();
145 if (!BaseTy->isPromotableIntegerType() ||
146 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
147 return llvm::None;
148
149 return BaseTy;
150}
151
152/// Check if \p E is a widened promoted integer.
153static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
154 return getUnwidenedIntegerType(Ctx, E).hasValue();
155}
156
157/// Check if we can skip the overflow check for \p Op.
158static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
159 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&(((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>
(Op.E)) && "Expected a unary or binary operator") ? static_cast
<void> (0) : __assert_fail ("(isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) && \"Expected a unary or binary operator\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 160, __PRETTY_FUNCTION__))
160 "Expected a unary or binary operator")(((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>
(Op.E)) && "Expected a unary or binary operator") ? static_cast
<void> (0) : __assert_fail ("(isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) && \"Expected a unary or binary operator\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 160, __PRETTY_FUNCTION__))
;
161
162 // If the binop has constant inputs and we can prove there is no overflow,
163 // we can elide the overflow check.
164 if (!Op.mayHaveIntegerOverflow())
165 return true;
166
167 // If a unary op has a widened operand, the op cannot overflow.
168 if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
169 return !UO->canOverflow();
170
171 // We usually don't need overflow checks for binops with widened operands.
172 // Multiplication with promoted unsigned operands is a special case.
173 const auto *BO = cast<BinaryOperator>(Op.E);
174 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
175 if (!OptionalLHSTy)
176 return false;
177
178 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
179 if (!OptionalRHSTy)
180 return false;
181
182 QualType LHSTy = *OptionalLHSTy;
183 QualType RHSTy = *OptionalRHSTy;
184
185 // This is the simple case: binops without unsigned multiplication, and with
186 // widened operands. No overflow check is needed here.
187 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
188 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
189 return true;
190
191 // For unsigned multiplication the overflow check can be elided if either one
192 // of the unpromoted types are less than half the size of the promoted type.
193 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
194 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
195 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
196}
197
198/// Update the FastMathFlags of LLVM IR from the FPOptions in LangOptions.
199static void updateFastMathFlags(llvm::FastMathFlags &FMF,
200 FPOptions FPFeatures) {
201 FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
202}
203
204/// Propagate fast-math flags from \p Op to the instruction in \p V.
205static Value *propagateFMFlags(Value *V, const BinOpInfo &Op) {
206 if (auto *I = dyn_cast<llvm::Instruction>(V)) {
207 llvm::FastMathFlags FMF = I->getFastMathFlags();
208 updateFastMathFlags(FMF, Op.FPFeatures);
209 I->setFastMathFlags(FMF);
210 }
211 return V;
212}
213
/// Emits LLVM IR for expressions whose result is a single scalar value.
/// Each Visit* method returns the llvm::Value for the visited expression
/// (nullptr for void-valued constructs such as delete-expressions).
class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  // When set, the next assignment's result may be discarded (one-shot flag;
  // see TestAndClearIgnoreResultAssign).
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// Read and reset the IgnoreResultAssign flag in one step.
  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
                      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  /// Emit an llvm.assume alignment assumption for \p V if the expression's
  /// declaration or typedef carries an align_value attribute.
  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        // For references, the attribute may live on the referenced typedef.
        if (const auto *TTy =
            dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here.
        if (isa<ParmVarDecl>(VD))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy =
          dyn_cast<TypedefType>(E->getType()))
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.EmitAlignmentAssumption(V, AlignmentCI->getZExtValue());
  }

  /// EmitLoadOfLValue - Given an expression with complex type that represents a
  /// value l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value.  This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion to or from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false) {}
  };
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again.  Optimize this common case.
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  // Wraps the base-class dispatch so every emitted instruction picks up the
  // expression's debug location.
  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(CGF.getContext().getSourceManager());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  // Unwrap a previously computed constant emission into a scalar value,
  // loading through the reference l-value when necessary.
  Value *emitConstant(const CodeGenFunction::ConstantEmission &Constant,
                      Expr *E) {
    assert(Constant && "not a constant");
    if (Constant.isReference())
      return EmitLoadOfLValue(Constant.getReferenceLValue(CGF, E),
                              E->getExprLoc());
    return Constant.getValue();
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return emitConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
    llvm::Value *Args[] = {
        llvm::ConstantInt::get(CGF.CGM.Int32Ty, Version.getMajor()),
        llvm::ConstantInt::get(CGF.CGM.Int32Ty, Min ? *Min : 0),
        llvm::ConstantInt::get(CGF.CGM.Int32Ty, SMin ? *SMin : 0),
    };

    return CGF.EmitBuiltinAvailable(Args);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);


  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer();
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }
  Value *VisitUnaryPlus(const UnaryOperator *E) {
    // This differs from gcc, though, most likely due to a bug in gcc.
    TestAndClearIgnoreResultAssign();
    return Visit(E->getSubExpr());
  }
  Value *VisitUnaryMinus    (const UnaryOperator *E);
  Value *VisitUnaryNot      (const UnaryOperator *E);
  Value *VisitUnaryLNot     (const UnaryOperator *E);
  Value *VisitUnaryReal     (const UnaryOperator *E);
  Value *VisitUnaryImag     (const UnaryOperator *E);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        // Fall through.
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      Value *V = Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
      return propagateFMFlags(V, Ops);
    }
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero,bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS);
  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  BinOpInfo EmitBinOps(const BinaryOperator *E);
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP) \
  Value *VisitBin ## OP(const BinaryOperator *E) {                         \
    return Emit ## OP(EmitBinOps(E));                                      \
  }                                                                        \
  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) {       \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP);          \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP

  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc);
#define VISITCOMP(CODE, UI, SI, FP) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE)
#undef VISITCOMP

  Value *VisitBinAssign     (const BinaryOperator *E);

  Value *VisitBinLAnd       (const BinaryOperator *E);
  Value *VisitBinLOr        (const BinaryOperator *E);
  Value *VisitBinComma      (const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
};
790} // end anonymous namespace.
791
792//===----------------------------------------------------------------------===//
793// Utilities
794//===----------------------------------------------------------------------===//
795
796/// EmitConversionToBool - Convert the specified expression value to a
797/// boolean (i1) truth value. This is equivalent to "Val != 0".
798Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
799 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs")((SrcType.isCanonical() && "EmitScalarConversion strips typedefs"
) ? static_cast<void> (0) : __assert_fail ("SrcType.isCanonical() && \"EmitScalarConversion strips typedefs\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 799, __PRETTY_FUNCTION__))
;
800
801 if (SrcType->isRealFloatingType())
802 return EmitFloatToBoolConversion(Src);
803
804 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
805 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
806
807 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&(((SrcType->isIntegerType() || isa<llvm::PointerType>
(Src->getType())) && "Unknown scalar type to convert"
) ? static_cast<void> (0) : __assert_fail ("(SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) && \"Unknown scalar type to convert\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 808, __PRETTY_FUNCTION__))
808 "Unknown scalar type to convert")(((SrcType->isIntegerType() || isa<llvm::PointerType>
(Src->getType())) && "Unknown scalar type to convert"
) ? static_cast<void> (0) : __assert_fail ("(SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) && \"Unknown scalar type to convert\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 808, __PRETTY_FUNCTION__))
;
809
810 if (isa<llvm::IntegerType>(Src->getType()))
811 return EmitIntToBoolConversion(Src);
812
813 assert(isa<llvm::PointerType>(Src->getType()))((isa<llvm::PointerType>(Src->getType())) ? static_cast
<void> (0) : __assert_fail ("isa<llvm::PointerType>(Src->getType())"
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 813, __PRETTY_FUNCTION__))
;
814 return EmitPointerToBoolConversion(Src, SrcType);
815}
816
817void ScalarExprEmitter::EmitFloatConversionCheck(
818 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
819 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
820 CodeGenFunction::SanitizerScope SanScope(&CGF);
821 using llvm::APFloat;
822 using llvm::APSInt;
823
824 llvm::Type *SrcTy = Src->getType();
825
826 llvm::Value *Check = nullptr;
827 if (llvm::IntegerType *IntTy = dyn_cast<llvm::IntegerType>(SrcTy)) {
828 // Integer to floating-point. This can fail for unsigned short -> __half
829 // or unsigned __int128 -> float.
830 assert(DstType->isFloatingType())((DstType->isFloatingType()) ? static_cast<void> (0)
: __assert_fail ("DstType->isFloatingType()", "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 830, __PRETTY_FUNCTION__))
;
831 bool SrcIsUnsigned = OrigSrcType->isUnsignedIntegerOrEnumerationType();
832
833 APFloat LargestFloat =
834 APFloat::getLargest(CGF.getContext().getFloatTypeSemantics(DstType));
835 APSInt LargestInt(IntTy->getBitWidth(), SrcIsUnsigned);
836
837 bool IsExact;
838 if (LargestFloat.convertToInteger(LargestInt, APFloat::rmTowardZero,
839 &IsExact) != APFloat::opOK)
840 // The range of representable values of this floating point type includes
841 // all values of this integer type. Don't need an overflow check.
842 return;
843
844 llvm::Value *Max = llvm::ConstantInt::get(VMContext, LargestInt);
845 if (SrcIsUnsigned)
846 Check = Builder.CreateICmpULE(Src, Max);
847 else {
848 llvm::Value *Min = llvm::ConstantInt::get(VMContext, -LargestInt);
849 llvm::Value *GE = Builder.CreateICmpSGE(Src, Min);
850 llvm::Value *LE = Builder.CreateICmpSLE(Src, Max);
851 Check = Builder.CreateAnd(GE, LE);
852 }
853 } else {
854 const llvm::fltSemantics &SrcSema =
855 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
856 if (isa<llvm::IntegerType>(DstTy)) {
857 // Floating-point to integer. This has undefined behavior if the source is
858 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
859 // to an integer).
860 unsigned Width = CGF.getContext().getIntWidth(DstType);
861 bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
862
863 APSInt Min = APSInt::getMinValue(Width, Unsigned);
864 APFloat MinSrc(SrcSema, APFloat::uninitialized);
865 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
866 APFloat::opOverflow)
867 // Don't need an overflow check for lower bound. Just check for
868 // -Inf/NaN.
869 MinSrc = APFloat::getInf(SrcSema, true);
870 else
871 // Find the largest value which is too small to represent (before
872 // truncation toward zero).
873 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
874
875 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
876 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
877 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
878 APFloat::opOverflow)
879 // Don't need an overflow check for upper bound. Just check for
880 // +Inf/NaN.
881 MaxSrc = APFloat::getInf(SrcSema, false);
882 else
883 // Find the smallest value which is too large to represent (before
884 // truncation toward zero).
885 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
886
887 // If we're converting from __half, convert the range to float to match
888 // the type of src.
889 if (OrigSrcType->isHalfType()) {
890 const llvm::fltSemantics &Sema =
891 CGF.getContext().getFloatTypeSemantics(SrcType);
892 bool IsInexact;
893 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
894 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
895 }
896
897 llvm::Value *GE =
898 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
899 llvm::Value *LE =
900 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
901 Check = Builder.CreateAnd(GE, LE);
902 } else {
903 // FIXME: Maybe split this sanitizer out from float-cast-overflow.
904 //
905 // Floating-point to floating-point. This has undefined behavior if the
906 // source is not in the range of representable values of the destination
907 // type. The C and C++ standards are spectacularly unclear here. We
908 // diagnose finite out-of-range conversions, but allow infinities and NaNs
909 // to convert to the corresponding value in the smaller type.
910 //
911 // C11 Annex F gives all such conversions defined behavior for IEC 60559
912 // conforming implementations. Unfortunately, LLVM's fptrunc instruction
913 // does not.
914
915 // Converting from a lower rank to a higher rank can never have
916 // undefined behavior, since higher-rank types must have a superset
917 // of values of lower-rank types.
918 if (CGF.getContext().getFloatingTypeOrder(OrigSrcType, DstType) != 1)
919 return;
920
921 assert(!OrigSrcType->isHalfType() &&((!OrigSrcType->isHalfType() && "should not check conversion from __half, it has the lowest rank"
) ? static_cast<void> (0) : __assert_fail ("!OrigSrcType->isHalfType() && \"should not check conversion from __half, it has the lowest rank\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 922, __PRETTY_FUNCTION__))
922 "should not check conversion from __half, it has the lowest rank")((!OrigSrcType->isHalfType() && "should not check conversion from __half, it has the lowest rank"
) ? static_cast<void> (0) : __assert_fail ("!OrigSrcType->isHalfType() && \"should not check conversion from __half, it has the lowest rank\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 922, __PRETTY_FUNCTION__))
;
923
924 const llvm::fltSemantics &DstSema =
925 CGF.getContext().getFloatTypeSemantics(DstType);
926 APFloat MinBad = APFloat::getLargest(DstSema, false);
927 APFloat MaxBad = APFloat::getInf(DstSema, false);
928
929 bool IsInexact;
930 MinBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);
931 MaxBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);
932
933 Value *AbsSrc = CGF.EmitNounwindRuntimeCall(
934 CGF.CGM.getIntrinsic(llvm::Intrinsic::fabs, Src->getType()), Src);
935 llvm::Value *GE =
936 Builder.CreateFCmpOGT(AbsSrc, llvm::ConstantFP::get(VMContext, MinBad));
937 llvm::Value *LE =
938 Builder.CreateFCmpOLT(AbsSrc, llvm::ConstantFP::get(VMContext, MaxBad));
939 Check = Builder.CreateNot(Builder.CreateAnd(GE, LE));
940 }
941 }
942
943 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
944 CGF.EmitCheckTypeDescriptor(OrigSrcType),
945 CGF.EmitCheckTypeDescriptor(DstType)};
946 CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
947 SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
948}
949
950void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
951 Value *Dst, QualType DstType,
952 SourceLocation Loc) {
953 if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
954 return;
955
956 llvm::Type *SrcTy = Src->getType();
957 llvm::Type *DstTy = Dst->getType();
958
959 // We only care about int->int conversions here.
960 // We ignore conversions to/from pointer and/or bool.
961 if (!(SrcType->isIntegerType() && DstType->isIntegerType()))
962 return;
963
964 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&((isa<llvm::IntegerType>(SrcTy) && isa<llvm::
IntegerType>(DstTy) && "clang integer type lowered to non-integer llvm type"
) ? static_cast<void> (0) : __assert_fail ("isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) && \"clang integer type lowered to non-integer llvm type\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 965, __PRETTY_FUNCTION__))
965 "clang integer type lowered to non-integer llvm type")((isa<llvm::IntegerType>(SrcTy) && isa<llvm::
IntegerType>(DstTy) && "clang integer type lowered to non-integer llvm type"
) ? static_cast<void> (0) : __assert_fail ("isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) && \"clang integer type lowered to non-integer llvm type\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 965, __PRETTY_FUNCTION__))
;
966
967 unsigned SrcBits = SrcTy->getScalarSizeInBits();
968 unsigned DstBits = DstTy->getScalarSizeInBits();
969 // This must be truncation. Else we do not care.
970 if (SrcBits <= DstBits)
971 return;
972
973 assert(!DstType->isBooleanType() && "we should not get here with booleans.")((!DstType->isBooleanType() && "we should not get here with booleans."
) ? static_cast<void> (0) : __assert_fail ("!DstType->isBooleanType() && \"we should not get here with booleans.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 973, __PRETTY_FUNCTION__))
;
974
975 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
976 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
977
978 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
979 // Else, it is a signed truncation.
980 ImplicitConversionCheckKind Kind;
981 SanitizerMask Mask;
982 if (!SrcSigned && !DstSigned) {
983 Kind = ICCK_UnsignedIntegerTruncation;
984 Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
985 } else {
986 Kind = ICCK_SignedIntegerTruncation;
987 Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
988 }
989
990 // Do we care about this type of truncation?
991 if (!CGF.SanOpts.has(Mask))
992 return;
993
994 CodeGenFunction::SanitizerScope SanScope(&CGF);
995
996 llvm::Value *Check = nullptr;
997
998 // 1. Extend the truncated value back to the same width as the Src.
999 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
1000 // 2. Equality-compare with the original source value
1001 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1002 // If the comparison result is 'i1 false', then the truncation was lossy.
1003
1004 llvm::Constant *StaticArgs[] = {
1005 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1006 CGF.EmitCheckTypeDescriptor(DstType),
1007 llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)};
1008 CGF.EmitCheck(std::make_pair(Check, Mask),
1009 SanitizerHandler::ImplicitConversion, StaticArgs, {Src, Dst});
1010}
1011
1012/// Emit a conversion from the specified type to the specified destination type,
1013/// both of which are LLVM scalar types.
1014Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1015 QualType DstType,
1016 SourceLocation Loc,
1017 ScalarConversionOpts Opts) {
1018 // All conversions involving fixed point types should be handled by the
1019 // EmitFixedPoint family functions. This is done to prevent bloating up this
1020 // function more, and although fixed point numbers are represented by
1021 // integers, we do not want to follow any logic that assumes they should be
1022 // treated as integers.
1023 // TODO(leonardchan): When necessary, add another if statement checking for
1024 // conversions to fixed point types from other types.
1025 if (SrcType->isFixedPointType()) {
1026 if (DstType->isFixedPointType()) {
1027 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1028 } else if (DstType->isBooleanType()) {
1029 // We do not need to check the padding bit on unsigned types if unsigned
1030 // padding is enabled because overflow into this bit is undefined
1031 // behavior.
1032 return Builder.CreateIsNotNull(Src, "tobool");
1033 }
1034
1035 llvm_unreachable(::llvm::llvm_unreachable_internal("Unhandled scalar conversion involving a fixed point type."
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1036)
1036 "Unhandled scalar conversion involving a fixed point type.")::llvm::llvm_unreachable_internal("Unhandled scalar conversion involving a fixed point type."
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1036)
;
1037 }
1038
1039 QualType NoncanonicalSrcType = SrcType;
1040 QualType NoncanonicalDstType = DstType;
1041
1042 SrcType = CGF.getContext().getCanonicalType(SrcType);
1043 DstType = CGF.getContext().getCanonicalType(DstType);
1044 if (SrcType == DstType) return Src;
1045
1046 if (DstType->isVoidType()) return nullptr;
1047
1048 llvm::Value *OrigSrc = Src;
1049 QualType OrigSrcType = SrcType;
1050 llvm::Type *SrcTy = Src->getType();
1051
1052 // Handle conversions to bool first, they are special: comparisons against 0.
1053 if (DstType->isBooleanType())
1054 return EmitConversionToBool(Src, SrcType);
1055
1056 llvm::Type *DstTy = ConvertType(DstType);
1057
1058 // Cast from half through float if half isn't a native type.
1059 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1060 // Cast to FP using the intrinsic if the half type itself isn't supported.
1061 if (DstTy->isFloatingPointTy()) {
1062 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1063 return Builder.CreateCall(
1064 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1065 Src);
1066 } else {
1067 // Cast to other types through float, using either the intrinsic or FPExt,
1068 // depending on whether the half type itself is supported
1069 // (as opposed to operations on half, available with NativeHalfType).
1070 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1071 Src = Builder.CreateCall(
1072 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1073 CGF.CGM.FloatTy),
1074 Src);
1075 } else {
1076 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1077 }
1078 SrcType = CGF.getContext().FloatTy;
1079 SrcTy = CGF.FloatTy;
1080 }
1081 }
1082
1083 // Ignore conversions like int -> uint.
1084 if (SrcTy == DstTy)
1085 return Src;
1086
1087 // Handle pointer conversions next: pointers can only be converted to/from
1088 // other pointers and integers. Check for pointer types in terms of LLVM, as
1089 // some native types (like Obj-C id) may map to a pointer type.
1090 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1091 // The source value may be an integer, or a pointer.
1092 if (isa<llvm::PointerType>(SrcTy))
1093 return Builder.CreateBitCast(Src, DstTy, "conv");
1094
1095 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?")((SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?"
) ? static_cast<void> (0) : __assert_fail ("SrcType->isIntegerType() && \"Not ptr->ptr or int->ptr conversion?\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1095, __PRETTY_FUNCTION__))
;
1096 // First, convert to the correct width so that we control the kind of
1097 // extension.
1098 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1099 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1100 llvm::Value* IntResult =
1101 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1102 // Then, cast to pointer.
1103 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1104 }
1105
1106 if (isa<llvm::PointerType>(SrcTy)) {
1107 // Must be an ptr to int cast.
1108 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?")((isa<llvm::IntegerType>(DstTy) && "not ptr->int?"
) ? static_cast<void> (0) : __assert_fail ("isa<llvm::IntegerType>(DstTy) && \"not ptr->int?\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1108, __PRETTY_FUNCTION__))
;
1109 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1110 }
1111
1112 // A scalar can be splatted to an extended vector of the same element type
1113 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1114 // Sema should add casts to make sure that the source expression's type is
1115 // the same as the vector's element type (sans qualifiers)
1116 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==((DstType->castAs<ExtVectorType>()->getElementType
().getTypePtr() == SrcType.getTypePtr() && "Splatted expr doesn't match with vector element type?"
) ? static_cast<void> (0) : __assert_fail ("DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() == SrcType.getTypePtr() && \"Splatted expr doesn't match with vector element type?\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1118, __PRETTY_FUNCTION__))
1117 SrcType.getTypePtr() &&((DstType->castAs<ExtVectorType>()->getElementType
().getTypePtr() == SrcType.getTypePtr() && "Splatted expr doesn't match with vector element type?"
) ? static_cast<void> (0) : __assert_fail ("DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() == SrcType.getTypePtr() && \"Splatted expr doesn't match with vector element type?\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1118, __PRETTY_FUNCTION__))
1118 "Splatted expr doesn't match with vector element type?")((DstType->castAs<ExtVectorType>()->getElementType
().getTypePtr() == SrcType.getTypePtr() && "Splatted expr doesn't match with vector element type?"
) ? static_cast<void> (0) : __assert_fail ("DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() == SrcType.getTypePtr() && \"Splatted expr doesn't match with vector element type?\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1118, __PRETTY_FUNCTION__))
;
1119
1120 // Splat the element across to all elements
1121 unsigned NumElements = DstTy->getVectorNumElements();
1122 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1123 }
1124
1125 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1126 // Allow bitcast from vector to integer/fp of the same size.
1127 unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
1128 unsigned DstSize = DstTy->getPrimitiveSizeInBits();
1129 if (SrcSize == DstSize)
1130 return Builder.CreateBitCast(Src, DstTy, "conv");
1131
1132 // Conversions between vectors of different sizes are not allowed except
1133 // when vectors of half are involved. Operations on storage-only half
1134 // vectors require promoting half vector operands to float vectors and
1135 // truncating the result, which is either an int or float vector, to a
1136 // short or half vector.
1137
1138 // Source and destination are both expected to be vectors.
1139 llvm::Type *SrcElementTy = SrcTy->getVectorElementType();
1140 llvm::Type *DstElementTy = DstTy->getVectorElementType();
1141 (void)DstElementTy;
1142
1143 assert(((SrcElementTy->isIntegerTy() &&((((SrcElementTy->isIntegerTy() && DstElementTy->
isIntegerTy()) || (SrcElementTy->isFloatingPointTy() &&
DstElementTy->isFloatingPointTy())) && "unexpected conversion between a floating-point vector and an "
"integer vector") ? static_cast<void> (0) : __assert_fail
("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1148, __PRETTY_FUNCTION__))
1144 DstElementTy->isIntegerTy()) ||((((SrcElementTy->isIntegerTy() && DstElementTy->
isIntegerTy()) || (SrcElementTy->isFloatingPointTy() &&
DstElementTy->isFloatingPointTy())) && "unexpected conversion between a floating-point vector and an "
"integer vector") ? static_cast<void> (0) : __assert_fail
("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1148, __PRETTY_FUNCTION__))
1145 (SrcElementTy->isFloatingPointTy() &&((((SrcElementTy->isIntegerTy() && DstElementTy->
isIntegerTy()) || (SrcElementTy->isFloatingPointTy() &&
DstElementTy->isFloatingPointTy())) && "unexpected conversion between a floating-point vector and an "
"integer vector") ? static_cast<void> (0) : __assert_fail
("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1148, __PRETTY_FUNCTION__))
1146 DstElementTy->isFloatingPointTy())) &&((((SrcElementTy->isIntegerTy() && DstElementTy->
isIntegerTy()) || (SrcElementTy->isFloatingPointTy() &&
DstElementTy->isFloatingPointTy())) && "unexpected conversion between a floating-point vector and an "
"integer vector") ? static_cast<void> (0) : __assert_fail
("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1148, __PRETTY_FUNCTION__))
1147 "unexpected conversion between a floating-point vector and an "((((SrcElementTy->isIntegerTy() && DstElementTy->
isIntegerTy()) || (SrcElementTy->isFloatingPointTy() &&
DstElementTy->isFloatingPointTy())) && "unexpected conversion between a floating-point vector and an "
"integer vector") ? static_cast<void> (0) : __assert_fail
("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1148, __PRETTY_FUNCTION__))
1148 "integer vector")((((SrcElementTy->isIntegerTy() && DstElementTy->
isIntegerTy()) || (SrcElementTy->isFloatingPointTy() &&
DstElementTy->isFloatingPointTy())) && "unexpected conversion between a floating-point vector and an "
"integer vector") ? static_cast<void> (0) : __assert_fail
("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1148, __PRETTY_FUNCTION__))
;
1149
1150 // Truncate an i32 vector to an i16 vector.
1151 if (SrcElementTy->isIntegerTy())
1152 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1153
1154 // Truncate a float vector to a half vector.
1155 if (SrcSize > DstSize)
1156 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1157
1158 // Promote a half vector to a float vector.
1159 return Builder.CreateFPExt(Src, DstTy, "conv");
1160 }
1161
1162 // Finally, we have the arithmetic types: real int/float.
1163 Value *Res = nullptr;
1164 llvm::Type *ResTy = DstTy;
1165
1166 // An overflowing conversion has undefined behavior if either the source type
1167 // or the destination type is a floating-point type.
1168 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1169 (OrigSrcType->isFloatingType() || DstType->isFloatingType()))
1170 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1171 Loc);
1172
1173 // Cast to half through float if half isn't a native type.
1174 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1175 // Make sure we cast in a single step if from another FP type.
1176 if (SrcTy->isFloatingPointTy()) {
1177 // Use the intrinsic if the half type itself isn't supported
1178 // (as opposed to operations on half, available with NativeHalfType).
1179 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1180 return Builder.CreateCall(
1181 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1182 // If the half type is supported, just use an fptrunc.
1183 return Builder.CreateFPTrunc(Src, DstTy);
1184 }
1185 DstTy = CGF.FloatTy;
1186 }
1187
1188 if (isa<llvm::IntegerType>(SrcTy)) {
1189 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1190 if (SrcType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1191 InputSigned = true;
1192 }
1193 if (isa<llvm::IntegerType>(DstTy))
1194 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1195 else if (InputSigned)
1196 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1197 else
1198 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1199 } else if (isa<llvm::IntegerType>(DstTy)) {
1200 assert(SrcTy->isFloatingPointTy() && "Unknown real conversion")((SrcTy->isFloatingPointTy() && "Unknown real conversion"
) ? static_cast<void> (0) : __assert_fail ("SrcTy->isFloatingPointTy() && \"Unknown real conversion\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1200, __PRETTY_FUNCTION__))
;
1201 if (DstType->isSignedIntegerOrEnumerationType())
1202 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1203 else
1204 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1205 } else {
1206 assert(SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() &&((SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy
() && "Unknown real conversion") ? static_cast<void
> (0) : __assert_fail ("SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() && \"Unknown real conversion\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1207, __PRETTY_FUNCTION__))
1207 "Unknown real conversion")((SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy
() && "Unknown real conversion") ? static_cast<void
> (0) : __assert_fail ("SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() && \"Unknown real conversion\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1207, __PRETTY_FUNCTION__))
;
1208 if (DstTy->getTypeID() < SrcTy->getTypeID())
1209 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1210 else
1211 Res = Builder.CreateFPExt(Src, DstTy, "conv");
1212 }
1213
1214 if (DstTy != ResTy) {
1215 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1216 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion")((ResTy->isIntegerTy(16) && "Only half FP requires extra conversion"
) ? static_cast<void> (0) : __assert_fail ("ResTy->isIntegerTy(16) && \"Only half FP requires extra conversion\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1216, __PRETTY_FUNCTION__))
;
1217 Res = Builder.CreateCall(
1218 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1219 Res);
1220 } else {
1221 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1222 }
1223 }
1224
1225 if (Opts.EmitImplicitIntegerTruncationChecks)
1226 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1227 NoncanonicalDstType, Loc);
1228
1229 return Res;
1230}
1231
// Convert the fixed-point value Src from SrcTy's representation to DstTy's:
// adjust the scale with shifts, resize the underlying integer, and — when the
// destination semantics are saturating — clamp the result to DstTy's min/max.
1232Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1233 QualType DstTy,
1234 SourceLocation Loc) {
1235 using llvm::APInt;
1236 using llvm::ConstantInt;
1237 using llvm::Value;
1238
1239 assert(SrcTy->isFixedPointType())((SrcTy->isFixedPointType()) ? static_cast<void> (0)
 : __assert_fail ("SrcTy->isFixedPointType()", "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
 , 1239, __PRETTY_FUNCTION__))
 ;
1240 assert(DstTy->isFixedPointType())((DstTy->isFixedPointType()) ? static_cast<void> (0)
 : __assert_fail ("DstTy->isFixedPointType()", "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
 , 1240, __PRETTY_FUNCTION__))
 ;
1241
1242 FixedPointSemantics SrcFPSema =
1243 CGF.getContext().getFixedPointSemantics(SrcTy);
1244 FixedPointSemantics DstFPSema =
1245 CGF.getContext().getFixedPointSemantics(DstTy);
1246 unsigned SrcWidth = SrcFPSema.getWidth();
1247 unsigned DstWidth = DstFPSema.getWidth();
1248 unsigned SrcScale = SrcFPSema.getScale();
1249 unsigned DstScale = DstFPSema.getScale();
1250 bool SrcIsSigned = SrcFPSema.isSigned();
1251 bool DstIsSigned = DstFPSema.isSigned();
1252
1253 llvm::Type *DstIntTy = Builder.getIntNTy(DstWidth);
1254
1255 Value *Result = Src;
1256 unsigned ResultWidth = SrcWidth;
1257
// Non-saturating destination: out-of-range values simply wrap/truncate, so we
// can shift and resize in whatever order loses the fewest instructions.
1258 if (!DstFPSema.isSaturated()) {
1259 // Downscale.
1260 if (DstScale < SrcScale)
1261 Result = SrcIsSigned ?
1262 Builder.CreateAShr(Result, SrcScale - DstScale, "downscale") :
1263 Builder.CreateLShr(Result, SrcScale - DstScale, "downscale");
1264
1265 // Resize.
1266 Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
1267
1268 // Upscale.
1269 if (DstScale > SrcScale)
1270 Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
1271 } else {
1272 // Adjust the number of fractional bits.
// Widen to SrcWidth + (DstScale - SrcScale) first so the upscale shift
// cannot drop any significant bits before we compare against the bounds.
1273 if (DstScale > SrcScale) {
1274 ResultWidth = SrcWidth + DstScale - SrcScale;
1275 llvm::Type *UpscaledTy = Builder.getIntNTy(ResultWidth);
1276 Result = Builder.CreateIntCast(Result, UpscaledTy, SrcIsSigned, "resize");
1277 Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
1278 } else if (DstScale < SrcScale) {
1279 Result = SrcIsSigned ?
1280 Builder.CreateAShr(Result, SrcScale - DstScale, "downscale") :
1281 Builder.CreateLShr(Result, SrcScale - DstScale, "downscale");
1282 }
1283
1284 // Handle saturation.
// Clamp to the destination max only when the destination has fewer integral
// bits (otherwise every source value already fits).
1285 bool LessIntBits = DstFPSema.getIntegralBits() < SrcFPSema.getIntegralBits();
1286 if (LessIntBits) {
1287 Value *Max = ConstantInt::get(
1288 CGF.getLLVMContext(),
1289 APFixedPoint::getMax(DstFPSema).getValue().extOrTrunc(ResultWidth));
1290 Value *TooHigh = SrcIsSigned ? Builder.CreateICmpSGT(Result, Max)
1291 : Builder.CreateICmpUGT(Result, Max);
1292 Result = Builder.CreateSelect(TooHigh, Max, Result, "satmax");
1293 }
1294 // Cannot overflow min to dest type if src is unsigned since all fixed
1295 // point types can cover the unsigned min of 0.
1296 if (SrcIsSigned && (LessIntBits || !DstIsSigned)) {
1297 Value *Min = ConstantInt::get(
1298 CGF.getLLVMContext(),
1299 APFixedPoint::getMin(DstFPSema).getValue().extOrTrunc(ResultWidth));
1300 Value *TooLow = Builder.CreateICmpSLT(Result, Min);
1301 Result = Builder.CreateSelect(TooLow, Min, Result, "satmin");
1302 }
1303
1304 // Resize the integer part to get the final destination size.
1305 Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
1306 }
1307 return Result;
1308}
1309
1310/// Emit a conversion from the specified complex type to the specified
1311/// destination type, where the destination type is an LLVM scalar type.
1312Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1313 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1314 SourceLocation Loc) {
1315 // Get the source element type.
1316 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1317
1318 // Handle conversions to bool first, they are special: comparisons against 0.
1319 if (DstTy->isBooleanType()) {
1320 // Complex != 0 -> (Real != 0) | (Imag != 0)
1321 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1322 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1323 return Builder.CreateOr(Src.first, Src.second, "tobool");
1324 }
1325
1326 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1327 // the imaginary part of the complex value is discarded and the value of the
1328 // real part is converted according to the conversion rules for the
1329 // corresponding real type.
1330 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1331}
1332
1333Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1334 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1335}
1336
1337/// Emit a sanitization check for the given "binary" operation (which
1338/// might actually be a unary increment which has been lowered to a binary
1339/// operation). The check passes if all values in \p Checks (which are \c i1),
1340/// are \c true.
1341void ScalarExprEmitter::EmitBinOpCheck(
1342 ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
1343 assert(CGF.IsSanitizerScope)((CGF.IsSanitizerScope) ? static_cast<void> (0) : __assert_fail
 ("CGF.IsSanitizerScope", "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
 , 1343, __PRETTY_FUNCTION__))
 ;
// Handler arguments: StaticData holds compile-time descriptors (source
// location first, then type descriptors); DynamicData holds the runtime
// operand values passed to the runtime handler.
1344 SanitizerHandler Check;
1345 SmallVector<llvm::Constant *, 4> StaticData;
1346 SmallVector<llvm::Value *, 2> DynamicData;
1347
// Compound assignments (e.g. +=) are checked as their underlying operator.
1348 BinaryOperatorKind Opcode = Info.Opcode;
1349 if (BinaryOperator::isCompoundAssignmentOp(Opcode))
1350 Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
1351
1352 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
// A unary minus that was lowered to "0 - x" is reported as a negation
// overflow, with only the RHS operand as dynamic data.
1353 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1354 if (UO && UO->getOpcode() == UO_Minus) {
1355 Check = SanitizerHandler::NegateOverflow;
1356 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1357 DynamicData.push_back(Info.RHS);
1358 } else {
1359 if (BinaryOperator::isShiftOp(Opcode)) {
1360 // Shift LHS negative or too large, or RHS out of bounds.
1361 Check = SanitizerHandler::ShiftOutOfBounds;
1362 const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1363 StaticData.push_back(
1364 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1365 StaticData.push_back(
1366 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1367 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1368 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1369 Check = SanitizerHandler::DivremOverflow;
1370 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1371 } else {
1372 // Arithmetic overflow (+, -, *).
1373 switch (Opcode) {
1374 case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1375 case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1376 case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1377 default: llvm_unreachable("unexpected opcode for bin op check")::llvm::llvm_unreachable_internal("unexpected opcode for bin op check"
 , "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
 , 1377)
 ;
1378 }
1379 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1380 }
1381 DynamicData.push_back(Info.LHS);
1382 DynamicData.push_back(Info.RHS);
1383 }
1384
1385 CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
1386}
1387
1388//===----------------------------------------------------------------------===//
1389// Visitor Methods
1390//===----------------------------------------------------------------------===//
1391
1392Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1393 CGF.ErrorUnsupported(E, "scalar expression");
1394 if (E->getType()->isVoidType())
1395 return nullptr;
1396 return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
1397}
1398
// Emit a __builtin_shufflevector. With exactly two sub-expressions the second
// operand is a runtime mask vector, so elements are gathered one at a time
// (presumably because LLVM's shufflevector needs a constant mask — confirm);
// otherwise the trailing sub-expressions are constant indices and we emit a
// single shufflevector instruction.
1399Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1400 // Vector Mask Case
1401 if (E->getNumSubExprs() == 2) {
1402 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1403 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1404 Value *Mask;
1405
1406 llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
1407 unsigned LHSElts = LTy->getNumElements();
1408
1409 Mask = RHS;
1410
1411 llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
1412
1413 // Mask off the high bits of each shuffle index.
// NextPowerOf2(LHSElts - 1) - 1 yields an all-ones bitmask covering every
// valid index, keeping each runtime index in range for the extract below.
1414 Value *MaskBits =
1415 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1416 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1417
1418 // newv = undef
1419 // mask = mask & maskbits
1420 // for each elt
1421 // n = extract mask i
1422 // x = extract val n
1423 // newv = insert newv, x, i
1424 llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
1425 MTy->getNumElements());
1426 Value* NewV = llvm::UndefValue::get(RTy);
1427 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1428 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1429 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1430
1431 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1432 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1433 }
1434 return NewV;
1435 }
1436
// Constant-mask case: operands 0 and 1 are the two input vectors, the rest
// are compile-time indices.
1437 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1438 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1439
1440 SmallVector<llvm::Constant*, 32> indices;
1441 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1442 llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
1443 // Check for -1 and output it as undef in the IR.
1444 if (Idx.isSigned() && Idx.isAllOnesValue())
1445 indices.push_back(llvm::UndefValue::get(CGF.Int32Ty));
1446 else
1447 indices.push_back(Builder.getInt32(Idx.getZExtValue()));
1448 }
1449
1450 Value *SV = llvm::ConstantVector::get(indices);
1451 return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
1452}
1453
// Emit a __builtin_convertvector: an element-wise conversion between two
// vector types (int<->int, int<->float, float<->float, or to a vector of
// booleans via comparison against zero).
1454Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1455 QualType SrcType = E->getSrcExpr()->getType(),
1456 DstType = E->getType();
1457
1458 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1459
1460 SrcType = CGF.getContext().getCanonicalType(SrcType);
1461 DstType = CGF.getContext().getCanonicalType(DstType);
1462 if (SrcType == DstType) return Src;
1463
1464 assert(SrcType->isVectorType() &&((SrcType->isVectorType() && "ConvertVector source type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("SrcType->isVectorType() && \"ConvertVector source type must be a vector\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1465, __PRETTY_FUNCTION__))
1465 "ConvertVector source type must be a vector")((SrcType->isVectorType() && "ConvertVector source type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("SrcType->isVectorType() && \"ConvertVector source type must be a vector\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1465, __PRETTY_FUNCTION__))
;
1466 assert(DstType->isVectorType() &&((DstType->isVectorType() && "ConvertVector destination type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("DstType->isVectorType() && \"ConvertVector destination type must be a vector\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1467, __PRETTY_FUNCTION__))
1467 "ConvertVector destination type must be a vector")((DstType->isVectorType() && "ConvertVector destination type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("DstType->isVectorType() && \"ConvertVector destination type must be a vector\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1467, __PRETTY_FUNCTION__))
;
1468
1469 llvm::Type *SrcTy = Src->getType();
1470 llvm::Type *DstTy = ConvertType(DstType);
1471
1472 // Ignore conversions like int -> uint.
1473 if (SrcTy == DstTy)
1474 return Src;
1475
1476 QualType SrcEltType = SrcType->getAs<VectorType>()->getElementType(),
1477 DstEltType = DstType->getAs<VectorType>()->getElementType();
1478
1479 assert(SrcTy->isVectorTy() &&((SrcTy->isVectorTy() && "ConvertVector source IR type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("SrcTy->isVectorTy() && \"ConvertVector source IR type must be a vector\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1480, __PRETTY_FUNCTION__))
1480 "ConvertVector source IR type must be a vector")((SrcTy->isVectorTy() && "ConvertVector source IR type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("SrcTy->isVectorTy() && \"ConvertVector source IR type must be a vector\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1480, __PRETTY_FUNCTION__))
;
1481 assert(DstTy->isVectorTy() &&((DstTy->isVectorTy() && "ConvertVector destination IR type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("DstTy->isVectorTy() && \"ConvertVector destination IR type must be a vector\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1482, __PRETTY_FUNCTION__))
1482 "ConvertVector destination IR type must be a vector")((DstTy->isVectorTy() && "ConvertVector destination IR type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("DstTy->isVectorTy() && \"ConvertVector destination IR type must be a vector\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1482, __PRETTY_FUNCTION__))
;
1483
1484 llvm::Type *SrcEltTy = SrcTy->getVectorElementType(),
1485 *DstEltTy = DstTy->getVectorElementType();
1486
// Boolean destination elements are produced by comparing against zero.
1487 if (DstEltType->isBooleanType()) {
1488 assert((SrcEltTy->isFloatingPointTy() ||(((SrcEltTy->isFloatingPointTy() || isa<llvm::IntegerType
>(SrcEltTy)) && "Unknown boolean conversion") ? static_cast
<void> (0) : __assert_fail ("(SrcEltTy->isFloatingPointTy() || isa<llvm::IntegerType>(SrcEltTy)) && \"Unknown boolean conversion\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1489, __PRETTY_FUNCTION__))
1489 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion")(((SrcEltTy->isFloatingPointTy() || isa<llvm::IntegerType
>(SrcEltTy)) && "Unknown boolean conversion") ? static_cast
<void> (0) : __assert_fail ("(SrcEltTy->isFloatingPointTy() || isa<llvm::IntegerType>(SrcEltTy)) && \"Unknown boolean conversion\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1489, __PRETTY_FUNCTION__))
;
1490
1491 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
1492 if (SrcEltTy->isFloatingPointTy()) {
1493 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
1494 } else {
1495 return Builder.CreateICmpNE(Src, Zero, "tobool");
1496 }
1497 }
1498
1499 // We have the arithmetic types: real int/float.
// Signedness of the cast is taken from the AST element types, since the LLVM
// integer types carry no sign information.
1500 Value *Res = nullptr;
1501
1502 if (isa<llvm::IntegerType>(SrcEltTy)) {
1503 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1504 if (isa<llvm::IntegerType>(DstEltTy))
1505 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1506 else if (InputSigned)
1507 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1508 else
1509 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1510 } else if (isa<llvm::IntegerType>(DstEltTy)) {
1511 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion")((SrcEltTy->isFloatingPointTy() && "Unknown real conversion"
) ? static_cast<void> (0) : __assert_fail ("SrcEltTy->isFloatingPointTy() && \"Unknown real conversion\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1511, __PRETTY_FUNCTION__))
;
1512 if (DstEltType->isSignedIntegerOrEnumerationType())
1513 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1514 else
1515 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1516 } else {
1517 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&((SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy
() && "Unknown real conversion") ? static_cast<void
> (0) : __assert_fail ("SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() && \"Unknown real conversion\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1518, __PRETTY_FUNCTION__))
1518 "Unknown real conversion")((SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy
() && "Unknown real conversion") ? static_cast<void
> (0) : __assert_fail ("SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() && \"Unknown real conversion\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1518, __PRETTY_FUNCTION__))
;
// NOTE(review): relies on floating-point TypeIDs being ordered by increasing
// width to choose trunc vs. ext — confirm against llvm::Type::TypeID.
1519 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1520 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1521 else
1522 Res = Builder.CreateFPExt(Src, DstTy, "conv");
1523 }
1524
1525 return Res;
1526}
1527
1528Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1529 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
1530 CGF.EmitIgnoredExpr(E->getBase());
1531 return emitConstant(Constant, E);
1532 } else {
1533 llvm::APSInt Value;
1534 if (E->EvaluateAsInt(Value, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1535 CGF.EmitIgnoredExpr(E->getBase());
1536 return Builder.getInt(Value);
1537 }
1538 }
1539
1540 return EmitLoadOfLValue(E);
1541}
1542
1543Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1544 TestAndClearIgnoreResultAssign();
1545
1546 // Emit subscript expressions in rvalue context's. For most cases, this just
1547 // loads the lvalue formed by the subscript expr. However, we have to be
1548 // careful, because the base of a vector subscript is occasionally an rvalue,
1549 // so we can't get it as an lvalue.
1550 if (!E->getBase()->getType()->isVectorType())
1551 return EmitLoadOfLValue(E);
1552
1553 // Handle the vector case. The base must be a vector, the index must be an
1554 // integer value.
1555 Value *Base = Visit(E->getBase());
1556 Value *Idx = Visit(E->getIdx());
1557 QualType IdxTy = E->getIdx()->getType();
1558
1559 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
1560 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1561
1562 return Builder.CreateExtractElement(Base, Idx, "vecext");
1563}
1564
1565static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1566 unsigned Off, llvm::Type *I32Ty) {
1567 int MV = SVI->getMaskValue(Idx);
1568 if (MV == -1)
1569 return llvm::UndefValue::get(I32Ty);
1570 return llvm::ConstantInt::get(I32Ty, Off+MV);
1571}
1572
// Re-create the constant at i32 width when it isn't already 32 bits wide;
// shufflevector masks require i32 elements (see the "shufflemask must use an
// i32" note in VisitInitListExpr's callers of this helper).
1573static llvm::Constant *getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1574 if (C->getBitWidth() != 32) {
1575 assert(llvm::ConstantInt::isValueValidForType(I32Ty,((llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue
()) && "Index operand too large for shufflevector mask!"
) ? static_cast<void> (0) : __assert_fail ("llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) && \"Index operand too large for shufflevector mask!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1577, __PRETTY_FUNCTION__))
1576 C->getZExtValue()) &&((llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue
()) && "Index operand too large for shufflevector mask!"
) ? static_cast<void> (0) : __assert_fail ("llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) && \"Index operand too large for shufflevector mask!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1577, __PRETTY_FUNCTION__))
1577 "Index operand too large for shufflevector mask!")((llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue
()) && "Index operand too large for shufflevector mask!"
) ? static_cast<void> (0) : __assert_fail ("llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) && \"Index operand too large for shufflevector mask!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1577, __PRETTY_FUNCTION__))
;
1578 return llvm::ConstantInt::get(I32Ty, C->getZExtValue());
1579 }
1580 return C;
1581}
1582
// Emit an initializer list as a scalar or vector value. Scalars use the first
// initializer (or a null value when empty); vectors are assembled with
// insertelement/shufflevector, folding ExtVectorElementExpr (swizzle) sources
// directly into the shuffles where their widths line up.
1583Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
1584 bool Ignore = TestAndClearIgnoreResultAssign();
1585 (void)Ignore;
1586 assert (Ignore == false && "init list ignored")((Ignore == false && "init list ignored") ? static_cast
<void> (0) : __assert_fail ("Ignore == false && \"init list ignored\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1586, __PRETTY_FUNCTION__))
;
1587 unsigned NumInitElements = E->getNumInits();
1588
1589 if (E->hadArrayRangeDesignator())
1590 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1591
1592 llvm::VectorType *VType =
1593 dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
1594
1595 if (!VType) {
1596 if (NumInitElements == 0) {
1597 // C++11 value-initialization for the scalar.
1598 return EmitNullValue(E->getType());
1599 }
1600 // We have a scalar in braces. Just use the first element.
1601 return Visit(E->getInit(0));
1602 }
1603
1604 unsigned ResElts = VType->getNumElements();
1605
1606 // Loop over initializers collecting the Value for each, and remembering
1607 // whether the source was swizzle (ExtVectorElementExpr). This will allow
1608 // us to fold the shuffle for the swizzle into the shuffle for the vector
1609 // initializer, since LLVM optimizers generally do not want to touch
1610 // shuffles.
// State across iterations: CurIdx is the next destination lane to fill, and
// VIsUndefShuffle records that V is currently a shufflevector whose second
// operand is undef (so later swizzles can be merged into it).
1611 unsigned CurIdx = 0;
1612 bool VIsUndefShuffle = false;
1613 llvm::Value *V = llvm::UndefValue::get(VType);
1614 for (unsigned i = 0; i != NumInitElements; ++i) {
1615 Expr *IE = E->getInit(i);
1616 Value *Init = Visit(IE);
1617 SmallVector<llvm::Constant*, 16> Args;
1618
1619 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
1620
1621 // Handle scalar elements. If the scalar initializer is actually one
1622 // element of a different vector of the same width, use shuffle instead of
1623 // extract+insert.
1624 if (!VVT) {
1625 if (isa<ExtVectorElementExpr>(IE)) {
1626 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
1627
1628 if (EI->getVectorOperandType()->getNumElements() == ResElts) {
1629 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
1630 Value *LHS = nullptr, *RHS = nullptr;
1631 if (CurIdx == 0) {
1632 // insert into undef -> shuffle (src, undef)
1633 // shufflemask must use an i32
1634 Args.push_back(getAsInt32(C, CGF.Int32Ty));
1635 Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1636
1637 LHS = EI->getVectorOperand();
1638 RHS = V;
1639 VIsUndefShuffle = true;
1640 } else if (VIsUndefShuffle) {
1641 // insert into undefshuffle && size match -> shuffle (v, src)
1642 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
1643 for (unsigned j = 0; j != CurIdx; ++j)
1644 Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
1645 Args.push_back(Builder.getInt32(ResElts + C->getZExtValue()));
1646 Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1647
1648 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1649 RHS = EI->getVectorOperand();
1650 VIsUndefShuffle = false;
1651 }
1652 if (!Args.empty()) {
1653 llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1654 V = Builder.CreateShuffleVector(LHS, RHS, Mask);
1655 ++CurIdx;
1656 continue;
1657 }
1658 }
1659 }
// Generic scalar element: plain insertelement at the current lane.
1660 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
1661 "vecinit");
1662 VIsUndefShuffle = false;
1663 ++CurIdx;
1664 continue;
1665 }
1666
1667 unsigned InitElts = VVT->getNumElements();
1668
1669 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
1670 // input is the same width as the vector being constructed, generate an
1671 // optimized shuffle of the swizzle input into the result.
1672 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
1673 if (isa<ExtVectorElementExpr>(IE)) {
1674 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
1675 Value *SVOp = SVI->getOperand(0);
1676 llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
1677
1678 if (OpTy->getNumElements() == ResElts) {
1679 for (unsigned j = 0; j != CurIdx; ++j) {
1680 // If the current vector initializer is a shuffle with undef, merge
1681 // this shuffle directly into it.
1682 if (VIsUndefShuffle) {
1683 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
1684 CGF.Int32Ty));
1685 } else {
1686 Args.push_back(Builder.getInt32(j));
1687 }
1688 }
1689 for (unsigned j = 0, je = InitElts; j != je; ++j)
1690 Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
1691 Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1692
1693 if (VIsUndefShuffle)
1694 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1695
1696 Init = SVOp;
1697 }
1698 }
1699
1700 // Extend init to result vector length, and then shuffle its contribution
1701 // to the vector initializer into V.
1702 if (Args.empty()) {
1703 for (unsigned j = 0; j != InitElts; ++j)
1704 Args.push_back(Builder.getInt32(j));
1705 Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1706 llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1707 Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
1708 Mask, "vext");
1709
1710 Args.clear();
1711 for (unsigned j = 0; j != CurIdx; ++j)
1712 Args.push_back(Builder.getInt32(j));
1713 for (unsigned j = 0; j != InitElts; ++j)
1714 Args.push_back(Builder.getInt32(j+Offset));
1715 Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1716 }
1717
1718 // If V is undef, make sure it ends up on the RHS of the shuffle to aid
1719 // merging subsequent shuffles into this one.
1720 if (CurIdx == 0)
1721 std::swap(V, Init);
1722 llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1723 V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
1724 VIsUndefShuffle = isa<llvm::UndefValue>(Init);
1725 CurIdx += InitElts;
1726 }
1727
1728 // FIXME: evaluate codegen vs. shuffling against constant null vector.
1729 // Emit remaining default initializers.
1730 llvm::Type *EltTy = VType->getElementType();
1731
1732 // Emit remaining default initializers
// Lanes not covered by explicit initializers are zero-filled.
1733 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
1734 Value *Idx = Builder.getInt32(CurIdx);
1735 llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
1736 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
1737 }
1738 return V;
1739}
1740
1741bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
1742 const Expr *E = CE->getSubExpr();
1743
1744 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
1745 return false;
1746
1747 if (isa<CXXThisExpr>(E->IgnoreParens())) {
1748 // We always assume that 'this' is never null.
1749 return false;
1750 }
1751
1752 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
1753 // And that glvalue casts are never null.
1754 if (ICE->getValueKind() != VK_RValue)
1755 return false;
1756 }
1757
1758 return true;
1759}
1760
1761// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
1762// have to handle a more broad range of conversions than explicit casts, as they
1763// handle things like function to ptr-to-function decay etc.
1764Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
1765 Expr *E = CE->getSubExpr();
1766 QualType DestTy = CE->getType();
1767 CastKind Kind = CE->getCastKind();
1768
1769 // These cases are generally not written to ignore the result of
1770 // evaluating their sub-expressions, so we clear this now.
1771 bool Ignored = TestAndClearIgnoreResultAssign();
1772
1773 // Since almost all cast kinds apply to scalars, this switch doesn't have
1774 // a default case, so the compiler will warn on a missing case. The cases
1775 // are in the same order as in the CastKind enum.
1776 switch (Kind) {
1777 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!")::llvm::llvm_unreachable_internal("dependent cast kind in IR gen!"
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1777)
;
1778 case CK_BuiltinFnToFnPtr:
1779 llvm_unreachable("builtin functions are handled elsewhere")::llvm::llvm_unreachable_internal("builtin functions are handled elsewhere"
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1779)
;
1780
1781 case CK_LValueBitCast:
1782 case CK_ObjCObjectLValueCast: {
1783 Address Addr = EmitLValue(E).getAddress();
1784 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
1785 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
1786 return EmitLoadOfLValue(LV, CE->getExprLoc());
1787 }
1788
1789 case CK_CPointerToObjCPointerCast:
1790 case CK_BlockPointerToObjCPointerCast:
1791 case CK_AnyPointerToBlockPointerCast:
1792 case CK_BitCast: {
1793 Value *Src = Visit(const_cast<Expr*>(E));
1794 llvm::Type *SrcTy = Src->getType();
1795 llvm::Type *DstTy = ConvertType(DestTy);
1796 if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
1797 SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
1798 llvm_unreachable("wrong cast for pointers in different address spaces"::llvm::llvm_unreachable_internal("wrong cast for pointers in different address spaces"
"(must be an address space cast)!", "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1799)
1799 "(must be an address space cast)!")::llvm::llvm_unreachable_internal("wrong cast for pointers in different address spaces"
"(must be an address space cast)!", "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1799)
;
1800 }
1801
1802 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
1803 if (auto PT = DestTy->getAs<PointerType>())
1804 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
1805 /*MayBeNull=*/true,
1806 CodeGenFunction::CFITCK_UnrelatedCast,
1807 CE->getBeginLoc());
1808 }
1809
1810 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
1811 const QualType SrcType = E->getType();
1812
1813 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
1814 // Casting to pointer that could carry dynamic information (provided by
1815 // invariant.group) requires launder.
1816 Src = Builder.CreateLaunderInvariantGroup(Src);
1817 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
1818 // Casting to pointer that does not carry dynamic information (provided
1819 // by invariant.group) requires stripping it. Note that we don't do it
1820 // if the source could not be dynamic type and destination could be
1821 // dynamic because dynamic information is already laundered. It is
1822 // because launder(strip(src)) == launder(src), so there is no need to
1823 // add extra strip before launder.
1824 Src = Builder.CreateStripInvariantGroup(Src);
1825 }
1826 }
1827
1828 return Builder.CreateBitCast(Src, DstTy);
1829 }
1830 case CK_AddressSpaceConversion: {
1831 Expr::EvalResult Result;
1832 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
1833 Result.Val.isNullPointer()) {
1834 // If E has side effect, it is emitted even if its final result is a
1835 // null pointer. In that case, a DCE pass should be able to
1836 // eliminate the useless instructions emitted during translating E.
1837 if (Result.HasSideEffects)
1838 Visit(E);
1839 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
1840 ConvertType(DestTy)), DestTy);
1841 }
1842 // Since target may map different address spaces in AST to the same address
1843 // space, an address space conversion may end up as a bitcast.
1844 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
1845 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
1846 DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
1847 }
1848 case CK_AtomicToNonAtomic:
1849 case CK_NonAtomicToAtomic:
1850 case CK_NoOp:
1851 case CK_UserDefinedConversion:
1852 return Visit(const_cast<Expr*>(E));
1853
1854 case CK_BaseToDerived: {
1855 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
1856 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!")((DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!"
) ? static_cast<void> (0) : __assert_fail ("DerivedClassDecl && \"BaseToDerived arg isn't a C++ object pointer!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1856, __PRETTY_FUNCTION__))
;
1857
1858 Address Base = CGF.EmitPointerWithAlignment(E);
1859 Address Derived =
1860 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
1861 CE->path_begin(), CE->path_end(),
1862 CGF.ShouldNullCheckClassCastValue(CE));
1863
1864 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
1865 // performed and the object is not of the derived type.
1866 if (CGF.sanitizePerformTypeCheck())
1867 CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
1868 Derived.getPointer(), DestTy->getPointeeType());
1869
1870 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
1871 CGF.EmitVTablePtrCheckForCast(
1872 DestTy->getPointeeType(), Derived.getPointer(),
1873 /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast,
1874 CE->getBeginLoc());
1875
1876 return Derived.getPointer();
1877 }
1878 case CK_UncheckedDerivedToBase:
1879 case CK_DerivedToBase: {
1880 // The EmitPointerWithAlignment path does this fine; just discard
1881 // the alignment.
1882 return CGF.EmitPointerWithAlignment(CE).getPointer();
1883 }
1884
1885 case CK_Dynamic: {
1886 Address V = CGF.EmitPointerWithAlignment(E);
1887 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
1888 return CGF.EmitDynamicCast(V, DCE);
1889 }
1890
1891 case CK_ArrayToPointerDecay:
1892 return CGF.EmitArrayToPointerDecay(E).getPointer();
1893 case CK_FunctionToPointerDecay:
1894 return EmitLValue(E).getPointer();
1895
1896 case CK_NullToPointer:
1897 if (MustVisitNullValue(E))
1898 (void) Visit(E);
1899
1900 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
1901 DestTy);
1902
1903 case CK_NullToMemberPointer: {
1904 if (MustVisitNullValue(E))
1905 (void) Visit(E);
1906
1907 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
1908 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
1909 }
1910
1911 case CK_ReinterpretMemberPointer:
1912 case CK_BaseToDerivedMemberPointer:
1913 case CK_DerivedToBaseMemberPointer: {
1914 Value *Src = Visit(E);
1915
1916 // Note that the AST doesn't distinguish between checked and
1917 // unchecked member pointer conversions, so we always have to
1918 // implement checked conversions here. This is inefficient when
1919 // actual control flow may be required in order to perform the
1920 // check, which it is for data member pointers (but not member
1921 // function pointers on Itanium and ARM).
1922 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
1923 }
1924
1925 case CK_ARCProduceObject:
1926 return CGF.EmitARCRetainScalarExpr(E);
1927 case CK_ARCConsumeObject:
1928 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
1929 case CK_ARCReclaimReturnedObject:
1930 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
1931 case CK_ARCExtendBlockObject:
1932 return CGF.EmitARCExtendBlockObject(E);
1933
1934 case CK_CopyAndAutoreleaseBlockObject:
1935 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
1936
1937 case CK_FloatingRealToComplex:
1938 case CK_FloatingComplexCast:
1939 case CK_IntegralRealToComplex:
1940 case CK_IntegralComplexCast:
1941 case CK_IntegralComplexToFloatingComplex:
1942 case CK_FloatingComplexToIntegralComplex:
1943 case CK_ConstructorConversion:
1944 case CK_ToUnion:
1945 llvm_unreachable("scalar cast to non-scalar value")::llvm::llvm_unreachable_internal("scalar cast to non-scalar value"
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1945)
;
1946
1947 case CK_LValueToRValue:
1948 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy))((CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy
)) ? static_cast<void> (0) : __assert_fail ("CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy)"
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1948, __PRETTY_FUNCTION__))
;
1949 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!")((E->isGLValue() && "lvalue-to-rvalue applied to r-value!"
) ? static_cast<void> (0) : __assert_fail ("E->isGLValue() && \"lvalue-to-rvalue applied to r-value!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1949, __PRETTY_FUNCTION__))
;
1950 return Visit(const_cast<Expr*>(E));
1951
1952 case CK_IntegralToPointer: {
1953 Value *Src = Visit(const_cast<Expr*>(E));
1954
1955 // First, convert to the correct width so that we control the kind of
1956 // extension.
1957 auto DestLLVMTy = ConvertType(DestTy);
1958 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
1959 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
1960 llvm::Value* IntResult =
1961 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1962
1963 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
1964
1965 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
1966 // Going from integer to pointer that could be dynamic requires reloading
1967 // dynamic information from invariant.group.
1968 if (DestTy.mayBeDynamicClass())
1969 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
1970 }
1971 return IntToPtr;
1972 }
1973 case CK_PointerToIntegral: {
1974 assert(!DestTy->isBooleanType() && "bool should use PointerToBool")((!DestTy->isBooleanType() && "bool should use PointerToBool"
) ? static_cast<void> (0) : __assert_fail ("!DestTy->isBooleanType() && \"bool should use PointerToBool\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 1974, __PRETTY_FUNCTION__))
;
1975 auto *PtrExpr = Visit(E);
1976
1977 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
1978 const QualType SrcType = E->getType();
1979
1980 // Casting to integer requires stripping dynamic information as it does
1981 // not carries it.
1982 if (SrcType.mayBeDynamicClass())
1983 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
1984 }
1985
1986 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
1987 }
1988 case CK_ToVoid: {
1989 CGF.EmitIgnoredExpr(E);
1990 return nullptr;
1991 }
1992 case CK_VectorSplat: {
1993 llvm::Type *DstTy = ConvertType(DestTy);
1994 Value *Elt = Visit(const_cast<Expr*>(E));
1995 // Splat the element across to all elements
1996 unsigned NumElements = DstTy->getVectorNumElements();
1997 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
1998 }
1999
2000 case CK_FixedPointCast:
2001 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2002 CE->getExprLoc());
2003
2004 case CK_FixedPointToBoolean:
2005 assert(E->getType()->isFixedPointType() &&((E->getType()->isFixedPointType() && "Expected src type to be fixed point type"
) ? static_cast<void> (0) : __assert_fail ("E->getType()->isFixedPointType() && \"Expected src type to be fixed point type\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 2006, __PRETTY_FUNCTION__))
2006 "Expected src type to be fixed point type")((E->getType()->isFixedPointType() && "Expected src type to be fixed point type"
) ? static_cast<void> (0) : __assert_fail ("E->getType()->isFixedPointType() && \"Expected src type to be fixed point type\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 2006, __PRETTY_FUNCTION__))
;
2007 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type")((DestTy->isBooleanType() && "Expected dest type to be boolean type"
) ? static_cast<void> (0) : __assert_fail ("DestTy->isBooleanType() && \"Expected dest type to be boolean type\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 2007, __PRETTY_FUNCTION__))
;
2008 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2009 CE->getExprLoc());
2010
2011 case CK_IntegralCast: {
2012 ScalarConversionOpts Opts;
2013 if (CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)) {
2014 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE))
2015 Opts.EmitImplicitIntegerTruncationChecks = !ICE->isPartOfExplicitCast();
2016 }
2017 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2018 CE->getExprLoc(), Opts);
2019 }
2020 case CK_IntegralToFloating:
2021 case CK_FloatingToIntegral:
2022 case CK_FloatingCast:
2023 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2024 CE->getExprLoc());
2025 case CK_BooleanToSignedIntegral: {
2026 ScalarConversionOpts Opts;
2027 Opts.TreatBooleanAsSigned = true;
2028 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2029 CE->getExprLoc(), Opts);
2030 }
2031 case CK_IntegralToBoolean:
2032 return EmitIntToBoolConversion(Visit(E));
2033 case CK_PointerToBoolean:
2034 return EmitPointerToBoolConversion(Visit(E), E->getType());
2035 case CK_FloatingToBoolean:
2036 return EmitFloatToBoolConversion(Visit(E));
2037 case CK_MemberPointerToBoolean: {
2038 llvm::Value *MemPtr = Visit(E);
2039 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2040 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2041 }
2042
2043 case CK_FloatingComplexToReal:
2044 case CK_IntegralComplexToReal:
2045 return CGF.EmitComplexExpr(E, false, true).first;
2046
2047 case CK_FloatingComplexToBoolean:
2048 case CK_IntegralComplexToBoolean: {
2049 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2050
2051 // TODO: kill this function off, inline appropriate case here
2052 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2053 CE->getExprLoc());
2054 }
2055
2056 case CK_ZeroToOCLOpaqueType: {
2057 assert((DestTy->isEventT() || DestTy->isQueueT()) &&(((DestTy->isEventT() || DestTy->isQueueT()) &&
"CK_ZeroToOCLEvent cast on non-event type") ? static_cast<
void> (0) : __assert_fail ("(DestTy->isEventT() || DestTy->isQueueT()) && \"CK_ZeroToOCLEvent cast on non-event type\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 2058, __PRETTY_FUNCTION__))
2058 "CK_ZeroToOCLEvent cast on non-event type")(((DestTy->isEventT() || DestTy->isQueueT()) &&
"CK_ZeroToOCLEvent cast on non-event type") ? static_cast<
void> (0) : __assert_fail ("(DestTy->isEventT() || DestTy->isQueueT()) && \"CK_ZeroToOCLEvent cast on non-event type\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 2058, __PRETTY_FUNCTION__))
;
2059 return llvm::Constant::getNullValue(ConvertType(DestTy));
2060 }
2061
2062 case CK_IntToOCLSampler:
2063 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2064
2065 } // end of switch
2066
2067 llvm_unreachable("unknown scalar cast")::llvm::llvm_unreachable_internal("unknown scalar cast", "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 2067)
;
2068}
2069
2070Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2071 CodeGenFunction::StmtExprEvaluation eval(CGF);
2072 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2073 !E->getType()->isVoidType());
2074 if (!RetAlloca.isValid())
2075 return nullptr;
2076 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2077 E->getExprLoc());
2078}
2079
2080Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2081 CGF.enterFullExpression(E);
2082 CodeGenFunction::RunCleanupsScope Scope(CGF);
2083 Value *V = Visit(E->getSubExpr());
2084 // Defend against dominance problems caused by jumps out of expression
2085 // evaluation through the shared cleanup block.
2086 Scope.ForceCleanup({&V});
2087 return V;
2088}
2089
2090//===----------------------------------------------------------------------===//
2091// Unary Operators
2092//===----------------------------------------------------------------------===//
2093
2094static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2095 llvm::Value *InVal, bool IsInc) {
2096 BinOpInfo BinOp;
2097 BinOp.LHS = InVal;
2098 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2099 BinOp.Ty = E->getType();
2100 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2101 // FIXME: once UnaryOperator carries FPFeatures, copy it here.
2102 BinOp.E = E;
2103 return BinOp;
2104}
2105
2106llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2107 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2108 llvm::Value *Amount =
2109 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
2110 StringRef Name = IsInc ? "inc" : "dec";
2111 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2112 case LangOptions::SOB_Defined:
2113 return Builder.CreateAdd(InVal, Amount, Name);
2114 case LangOptions::SOB_Undefined:
2115 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2116 return Builder.CreateNSWAdd(InVal, Amount, Name);
2117 // Fall through.
2118 case LangOptions::SOB_Trapping:
2119 if (!E->canOverflow())
2120 return Builder.CreateNSWAdd(InVal, Amount, Name);
2121 return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, InVal, IsInc));
2122 }
2123 llvm_unreachable("Unknown SignedOverflowBehaviorTy")::llvm::llvm_unreachable_internal("Unknown SignedOverflowBehaviorTy"
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 2123)
;
2124}
2125
2126llvm::Value *
2127ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2128 bool isInc, bool isPre) {
2129
2130 QualType type = E->getSubExpr()->getType();
2131 llvm::PHINode *atomicPHI = nullptr;
2132 llvm::Value *value;
2133 llvm::Value *input;
2134
2135 int amount = (isInc ? 1 : -1);
2136 bool isSubtraction = !isInc;
2137
2138 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2139 type = atomicTy->getValueType();
2140 if (isInc && type->isBooleanType()) {
2141 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
2142 if (isPre) {
2143 Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
2144 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
2145 return Builder.getTrue();
2146 }
2147 // For atomic bool increment, we just store true and return it for
2148 // preincrement, do an atomic swap with true for postincrement
2149 return Builder.CreateAtomicRMW(
2150 llvm::AtomicRMWInst::Xchg, LV.getPointer(), True,
2151 llvm::AtomicOrdering::SequentiallyConsistent);
2152 }
2153 // Special case for atomic increment / decrement on integers, emit
2154 // atomicrmw instructions. We skip this if we want to be doing overflow
2155 // checking, and fall into the slow path with the atomic cmpxchg loop.
2156 if (!type->isBooleanType() && type->isIntegerType() &&
2157 !(type->isUnsignedIntegerType() &&
2158 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2159 CGF.getLangOpts().getSignedOverflowBehavior() !=
2160 LangOptions::SOB_Trapping) {
2161 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2162 llvm::AtomicRMWInst::Sub;
2163 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2164 llvm::Instruction::Sub;
2165 llvm::Value *amt = CGF.EmitToMemory(
2166 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
2167 llvm::Value *old = Builder.CreateAtomicRMW(aop,
2168 LV.getPointer(), amt, llvm::AtomicOrdering::SequentiallyConsistent);
2169 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
2170 }
2171 value = EmitLoadOfLValue(LV, E->getExprLoc());
2172 input = value;
2173 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2174 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2175 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2176 value = CGF.EmitToMemory(value, type);
2177 Builder.CreateBr(opBB);
2178 Builder.SetInsertPoint(opBB);
2179 atomicPHI = Builder.CreatePHI(value->getType(), 2);
2180 atomicPHI->addIncoming(value, startBB);
2181 value = atomicPHI;
2182 } else {
2183 value = EmitLoadOfLValue(LV, E->getExprLoc());
2184 input = value;
2185 }
2186
2187 // Special case of integer increment that we have to check first: bool++.
2188 // Due to promotion rules, we get:
2189 // bool++ -> bool = bool + 1
2190 // -> bool = (int)bool + 1
2191 // -> bool = ((int)bool + 1 != 0)
2192 // An interesting aspect of this is that increment is always true.
2193 // Decrement does not have this property.
2194 if (isInc && type->isBooleanType()) {
2195 value = Builder.getTrue();
2196
2197 // Most common case by far: integer increment.
2198 } else if (type->isIntegerType()) {
2199 // Note that signed integer inc/dec with width less than int can't
2200 // overflow because of promotion rules; we're just eliding a few steps here.
2201 if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2202 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
2203 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2204 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
2205 value =
2206 EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, value, isInc));
2207 } else {
2208 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2209 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2210 }
2211
2212 // Next most common: pointer increment.
2213 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2214 QualType type = ptr->getPointeeType();
2215
2216 // VLA types don't have constant size.
2217 if (const VariableArrayType *vla
2218 = CGF.getContext().getAsVariableArrayType(type)) {
2219 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
2220 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
2221 if (CGF.getLangOpts().isSignedOverflowDefined())
2222 value = Builder.CreateGEP(value, numElts, "vla.inc");
2223 else
2224 value = CGF.EmitCheckedInBoundsGEP(
2225 value, numElts, /*SignedIndices=*/false, isSubtraction,
2226 E->getExprLoc(), "vla.inc");
2227
2228 // Arithmetic on function pointers (!) is just +-1.
2229 } else if (type->isFunctionType()) {
2230 llvm::Value *amt = Builder.getInt32(amount);
2231
2232 value = CGF.EmitCastToVoidPtr(value);
2233 if (CGF.getLangOpts().isSignedOverflowDefined())
2234 value = Builder.CreateGEP(value, amt, "incdec.funcptr");
2235 else
2236 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2237 isSubtraction, E->getExprLoc(),
2238 "incdec.funcptr");
2239 value = Builder.CreateBitCast(value, input->getType());
2240
2241 // For everything else, we can just do a simple increment.
2242 } else {
2243 llvm::Value *amt = Builder.getInt32(amount);
2244 if (CGF.getLangOpts().isSignedOverflowDefined())
2245 value = Builder.CreateGEP(value, amt, "incdec.ptr");
2246 else
2247 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2248 isSubtraction, E->getExprLoc(),
2249 "incdec.ptr");
2250 }
2251
2252 // Vector increment/decrement.
2253 } else if (type->isVectorType()) {
2254 if (type->hasIntegerRepresentation()) {
2255 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
2256
2257 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2258 } else {
2259 value = Builder.CreateFAdd(
2260 value,
2261 llvm::ConstantFP::get(value->getType(), amount),
2262 isInc ? "inc" : "dec");
2263 }
2264
2265 // Floating point.
2266 } else if (type->isRealFloatingType()) {
2267 // Add the inc/dec to the real part.
2268 llvm::Value *amt;
2269
2270 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2271 // Another special case: half FP increment should be done via float
2272 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2273 value = Builder.CreateCall(
2274 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
2275 CGF.CGM.FloatTy),
2276 input, "incdec.conv");
2277 } else {
2278 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
2279 }
2280 }
2281
2282 if (value->getType()->isFloatTy())
2283 amt = llvm::ConstantFP::get(VMContext,
2284 llvm::APFloat(static_cast<float>(amount)));
2285 else if (value->getType()->isDoubleTy())
2286 amt = llvm::ConstantFP::get(VMContext,
2287 llvm::APFloat(static_cast<double>(amount)));
2288 else {
2289 // Remaining types are Half, LongDouble or __float128. Convert from float.
2290 llvm::APFloat F(static_cast<float>(amount));
2291 bool ignored;
2292 const llvm::fltSemantics *FS;
2293 // Don't use getFloatTypeSemantics because Half isn't
2294 // necessarily represented using the "half" LLVM type.
2295 if (value->getType()->isFP128Ty())
2296 FS = &CGF.getTarget().getFloat128Format();
2297 else if (value->getType()->isHalfTy())
2298 FS = &CGF.getTarget().getHalfFormat();
2299 else
2300 FS = &CGF.getTarget().getLongDoubleFormat();
2301 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
2302 amt = llvm::ConstantFP::get(VMContext, F);
2303 }
2304 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
2305
2306 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2307 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2308 value = Builder.CreateCall(
2309 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
2310 CGF.CGM.FloatTy),
2311 value, "incdec.conv");
2312 } else {
2313 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
2314 }
2315 }
2316
2317 // Objective-C pointer types.
2318 } else {
2319 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
2320 value = CGF.EmitCastToVoidPtr(value);
2321
2322 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
2323 if (!isInc) size = -size;
2324 llvm::Value *sizeValue =
2325 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
2326
2327 if (CGF.getLangOpts().isSignedOverflowDefined())
2328 value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
2329 else
2330 value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
2331 /*SignedIndices=*/false, isSubtraction,
2332 E->getExprLoc(), "incdec.objptr");
2333 value = Builder.CreateBitCast(value, input->getType());
2334 }
2335
2336 if (atomicPHI) {
2337 llvm::BasicBlock *opBB = Builder.GetInsertBlock();
2338 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2339 auto Pair = CGF.EmitAtomicCompareExchange(
2340 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
2341 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
2342 llvm::Value *success = Pair.second;
2343 atomicPHI->addIncoming(old, opBB);
2344 Builder.CreateCondBr(success, contBB, opBB);
2345 Builder.SetInsertPoint(contBB);
2346 return isPre ? value : input;
2347 }
2348
2349 // Store the updated result through the lvalue.
2350 if (LV.isBitField())
2351 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
2352 else
2353 CGF.EmitStoreThroughLValue(RValue::get(value), LV);
2354
2355 // If this is a postinc, return the value read from memory, otherwise use the
2356 // updated value.
2357 return isPre ? value : input;
2358}
2359
2360
2361
2362Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
2363 TestAndClearIgnoreResultAssign();
2364 // Emit unary minus with EmitSub so we handle overflow cases etc.
2365 BinOpInfo BinOp;
2366 BinOp.RHS = Visit(E->getSubExpr());
2367
2368 if (BinOp.RHS->getType()->isFPOrFPVectorTy())
2369 BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType());
2370 else
2371 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
2372 BinOp.Ty = E->getType();
2373 BinOp.Opcode = BO_Sub;
2374 // FIXME: once UnaryOperator carries FPFeatures, copy it here.
2375 BinOp.E = E;
2376 return EmitSub(BinOp);
2377}
2378
2379Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
2380 TestAndClearIgnoreResultAssign();
2381 Value *Op = Visit(E->getSubExpr());
2382 return Builder.CreateNot(Op, "neg");
2383}
2384
2385Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
2386 // Perform vector logical not on comparison with zero vector.
2387 if (E->getType()->isExtVectorType()) {
2388 Value *Oper = Visit(E->getSubExpr());
2389 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
2390 Value *Result;
2391 if (Oper->getType()->isFPOrFPVectorTy())
2392 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
2393 else
2394 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
2395 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
2396 }
2397
2398 // Compare operand to zero.
2399 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
2400
2401 // Invert value.
2402 // TODO: Could dynamically modify easy computations here. For example, if
2403 // the operand is an icmp ne, turn into icmp eq.
2404 BoolVal = Builder.CreateNot(BoolVal, "lnot");
2405
2406 // ZExt result to the expr type.
2407 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
2408}
2409
2410Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
2411 // Try folding the offsetof to a constant.
2412 llvm::APSInt Value;
2413 if (E->EvaluateAsInt(Value, CGF.getContext()))
2414 return Builder.getInt(Value);
2415
2416 // Loop over the components of the offsetof to compute the value.
2417 unsigned n = E->getNumComponents();
2418 llvm::Type* ResultType = ConvertType(E->getType());
2419 llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
2420 QualType CurrentType = E->getTypeSourceInfo()->getType();
2421 for (unsigned i = 0; i != n; ++i) {
2422 OffsetOfNode ON = E->getComponent(i);
2423 llvm::Value *Offset = nullptr;
2424 switch (ON.getKind()) {
2425 case OffsetOfNode::Array: {
2426 // Compute the index
2427 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
2428 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
2429 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
2430 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
2431
2432 // Save the element type
2433 CurrentType =
2434 CGF.getContext().getAsArrayType(CurrentType)->getElementType();
2435
2436 // Compute the element size
2437 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
2438 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
2439
2440 // Multiply out to compute the result
2441 Offset = Builder.CreateMul(Idx, ElemSize);
2442 break;
2443 }
2444
2445 case OffsetOfNode::Field: {
2446 FieldDecl *MemberDecl = ON.getField();
2447 RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
2448 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2449
2450 // Compute the index of the field in its parent.
2451 unsigned i = 0;
2452 // FIXME: It would be nice if we didn't have to loop here!
2453 for (RecordDecl::field_iterator Field = RD->field_begin(),
2454 FieldEnd = RD->field_end();
2455 Field != FieldEnd; ++Field, ++i) {
2456 if (*Field == MemberDecl)
2457 break;
2458 }
2459 assert(i < RL.getFieldCount() && "offsetof field in wrong type")((i < RL.getFieldCount() && "offsetof field in wrong type"
) ? static_cast<void> (0) : __assert_fail ("i < RL.getFieldCount() && \"offsetof field in wrong type\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 2459, __PRETTY_FUNCTION__))
;
2460
2461 // Compute the offset to the field
2462 int64_t OffsetInt = RL.getFieldOffset(i) /
2463 CGF.getContext().getCharWidth();
2464 Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
2465
2466 // Save the element type.
2467 CurrentType = MemberDecl->getType();
2468 break;
2469 }
2470
2471 case OffsetOfNode::Identifier:
2472 llvm_unreachable("dependent __builtin_offsetof")::llvm::llvm_unreachable_internal("dependent __builtin_offsetof"
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 2472)
;
2473
2474 case OffsetOfNode::Base: {
2475 if (ON.getBase()->isVirtual()) {
2476 CGF.ErrorUnsupported(E, "virtual base in offsetof");
2477 continue;
2478 }
2479
2480 RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
2481 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2482
2483 // Save the element type.
2484 CurrentType = ON.getBase()->getType();
2485
2486 // Compute the offset to the base.
2487 const RecordType *BaseRT = CurrentType->getAs<RecordType>();
2488 CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
2489 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
2490 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
2491 break;
2492 }
2493 }
2494 Result = Builder.CreateAdd(Result, Offset);
2495 }
2496 return Result;
2497}
2498
2499/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
2500/// argument of the sizeof expression as an integer.
2501Value *
2502ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2503 const UnaryExprOrTypeTraitExpr *E) {
2504 QualType TypeToSize = E->getTypeOfArgument();
2505 if (E->getKind() == UETT_SizeOf) {
2506 if (const VariableArrayType *VAT =
2507 CGF.getContext().getAsVariableArrayType(TypeToSize)) {
2508 if (E->isArgumentType()) {
2509 // sizeof(type) - make sure to emit the VLA size.
2510 CGF.EmitVariablyModifiedType(TypeToSize);
2511 } else {
2512 // C99 6.5.3.4p2: If the argument is an expression of type
2513 // VLA, it is evaluated.
2514 CGF.EmitIgnoredExpr(E->getArgumentExpr());
2515 }
2516
2517 auto VlaSize = CGF.getVLASize(VAT);
2518 llvm::Value *size = VlaSize.NumElts;
2519
2520 // Scale the number of non-VLA elements by the non-VLA element size.
2521 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
2522 if (!eltSize.isOne())
2523 size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);
2524
2525 return size;
2526 }
2527 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
2528 auto Alignment =
2529 CGF.getContext()
2530 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
2531 E->getTypeOfArgument()->getPointeeType()))
2532 .getQuantity();
2533 return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
2534 }
2535
2536 // If this isn't sizeof(vla), the result must be constant; use the constant
2537 // folding logic so we don't have to duplicate it here.
2538 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
2539}
2540
2541Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
2542 Expr *Op = E->getSubExpr();
2543 if (Op->getType()->isAnyComplexType()) {
2544 // If it's an l-value, load through the appropriate subobject l-value.
2545 // Note that we have to ask E because Op might be an l-value that
2546 // this won't work for, e.g. an Obj-C property.
2547 if (E->isGLValue())
2548 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2549 E->getExprLoc()).getScalarVal();
2550
2551 // Otherwise, calculate and project.
2552 return CGF.EmitComplexExpr(Op, false, true).first;
2553 }
2554
2555 return Visit(Op);
2556}
2557
2558Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
2559 Expr *Op = E->getSubExpr();
2560 if (Op->getType()->isAnyComplexType()) {
2561 // If it's an l-value, load through the appropriate subobject l-value.
2562 // Note that we have to ask E because Op might be an l-value that
2563 // this won't work for, e.g. an Obj-C property.
2564 if (Op->isGLValue())
2565 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2566 E->getExprLoc()).getScalarVal();
2567
2568 // Otherwise, calculate and project.
2569 return CGF.EmitComplexExpr(Op, true, false).second;
2570 }
2571
2572 // __imag on a scalar returns zero. Emit the subexpr to ensure side
2573 // effects are evaluated, but not the actual value.
2574 if (Op->isGLValue())
2575 CGF.EmitLValue(Op);
2576 else
2577 CGF.EmitScalarExpr(Op, true);
2578 return llvm::Constant::getNullValue(ConvertType(E->getType()));
2579}
2580
2581//===----------------------------------------------------------------------===//
2582// Binary Operators
2583//===----------------------------------------------------------------------===//
2584
2585BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
2586 TestAndClearIgnoreResultAssign();
2587 BinOpInfo Result;
2588 Result.LHS = Visit(E->getLHS());
2589 Result.RHS = Visit(E->getRHS());
2590 Result.Ty = E->getType();
2591 Result.Opcode = E->getOpcode();
2592 Result.FPFeatures = E->getFPFeatures();
2593 Result.E = E;
2594 return Result;
2595}
2596
2597LValue ScalarExprEmitter::EmitCompoundAssignLValue(
2598 const CompoundAssignOperator *E,
2599 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
2600 Value *&Result) {
2601 QualType LHSTy = E->getLHS()->getType();
2602 BinOpInfo OpInfo;
2603
2604 if (E->getComputationResultType()->isAnyComplexType())
4
Taking false branch
2605 return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
2606
2607 // Emit the RHS first. __block variables need to have the rhs evaluated
2608 // first, plus this should improve codegen a little.
2609 OpInfo.RHS = Visit(E->getRHS());
2610 OpInfo.Ty = E->getComputationResultType();
2611 OpInfo.Opcode = E->getOpcode();
2612 OpInfo.FPFeatures = E->getFPFeatures();
2613 OpInfo.E = E;
2614 // Load/convert the LHS.
2615 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
2616
2617 llvm::PHINode *atomicPHI = nullptr;
2618 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
5
Taking true branch
2619 QualType type = atomicTy->getValueType();
2620 if (!type->isBooleanType() && type->isIntegerType() &&
8
Taking true branch
2621 !(type->isUnsignedIntegerType() &&
6
Assuming the condition is false
2622 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2623 CGF.getLangOpts().getSignedOverflowBehavior() !=
7
Assuming the condition is true
2624 LangOptions::SOB_Trapping) {
2625 llvm::AtomicRMWInst::BinOp aop = llvm::AtomicRMWInst::BAD_BINOP;
2626 switch (OpInfo.Opcode) {
9
Control jumps to 'case BO_OrAssign:' at line 2645
2627 // We don't have atomicrmw operands for *, %, /, <<, >>
2628 case BO_MulAssign: case BO_DivAssign:
2629 case BO_RemAssign:
2630 case BO_ShlAssign:
2631 case BO_ShrAssign:
2632 break;
2633 case BO_AddAssign:
2634 aop = llvm::AtomicRMWInst::Add;
2635 break;
2636 case BO_SubAssign:
2637 aop = llvm::AtomicRMWInst::Sub;
2638 break;
2639 case BO_AndAssign:
2640 aop = llvm::AtomicRMWInst::And;
2641 break;
2642 case BO_XorAssign:
2643 aop = llvm::AtomicRMWInst::Xor;
2644 break;
2645 case BO_OrAssign:
2646 aop = llvm::AtomicRMWInst::Or;
2647 break;
10
Execution continues on line 2651
2648 default:
2649 llvm_unreachable("Invalid compound assignment type")::llvm::llvm_unreachable_internal("Invalid compound assignment type"
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 2649)
;
2650 }
2651 if (aop != llvm::AtomicRMWInst::BAD_BINOP) {
11
Taking true branch
2652 llvm::Value *amt = CGF.EmitToMemory(
2653 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
2654 E->getExprLoc()),
2655 LHSTy);
2656 Builder.CreateAtomicRMW(aop, LHSLV.getPointer(), amt,
2657 llvm::AtomicOrdering::SequentiallyConsistent);
2658 return LHSLV;
12
Returning without writing to 'Result'
2659 }
2660 }
2661 // FIXME: For floating point types, we should be saving and restoring the
2662 // floating point environment in the loop.
2663 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2664 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2665 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
2666 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
2667 Builder.CreateBr(opBB);
2668 Builder.SetInsertPoint(opBB);
2669 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
2670 atomicPHI->addIncoming(OpInfo.LHS, startBB);
2671 OpInfo.LHS = atomicPHI;
2672 }
2673 else
2674 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
2675
2676 SourceLocation Loc = E->getExprLoc();
2677 OpInfo.LHS =
2678 EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);
2679
2680 // Expand the binary operator.
2681 Result = (this->*Func)(OpInfo);
2682
2683 // Convert the result back to the LHS type.
2684 Result =
2685 EmitScalarConversion(Result, E->getComputationResultType(), LHSTy, Loc);
2686
2687 if (atomicPHI) {
2688 llvm::BasicBlock *opBB = Builder.GetInsertBlock();
2689 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2690 auto Pair = CGF.EmitAtomicCompareExchange(
2691 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
2692 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
2693 llvm::Value *success = Pair.second;
2694 atomicPHI->addIncoming(old, opBB);
2695 Builder.CreateCondBr(success, contBB, opBB);
2696 Builder.SetInsertPoint(contBB);
2697 return LHSLV;
2698 }
2699
2700 // Store the result value into the LHS lvalue. Bit-fields are handled
2701 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
2702 // 'An assignment expression has the value of the left operand after the
2703 // assignment...'.
2704 if (LHSLV.isBitField())
2705 CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
2706 else
2707 CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
2708
2709 return LHSLV;
2710}
2711
2712Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
2713 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
2714 bool Ignore = TestAndClearIgnoreResultAssign();
2715 Value *RHS;
2
'RHS' declared without an initial value
2716 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
3
Calling 'ScalarExprEmitter::EmitCompoundAssignLValue'
13
Returning from 'ScalarExprEmitter::EmitCompoundAssignLValue'
2717
2718 // If the result is clearly ignored, return now.
2719 if (Ignore)
14
Assuming 'Ignore' is 0
15
Taking false branch
2720 return nullptr;
2721
2722 // The result of an assignment in C is the assigned r-value.
2723 if (!CGF.getLangOpts().CPlusPlus)
16
Assuming the condition is true
17
Taking true branch
2724 return RHS;
18
Undefined or garbage value returned to caller
2725
2726 // If the lvalue is non-volatile, return the computed value of the assignment.
2727 if (!LHS.isVolatileQualified())
2728 return RHS;
2729
2730 // Otherwise, reload the value.
2731 return EmitLoadOfLValue(LHS, E->getExprLoc());
2732}
2733
2734void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
2735 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
2736 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
2737
2738 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
2739 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
2740 SanitizerKind::IntegerDivideByZero));
2741 }
2742
2743 const auto *BO = cast<BinaryOperator>(Ops.E);
2744 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
2745 Ops.Ty->hasSignedIntegerRepresentation() &&
2746 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
2747 Ops.mayHaveIntegerOverflow()) {
2748 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
2749
2750 llvm::Value *IntMin =
2751 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
2752 llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
2753
2754 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
2755 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
2756 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
2757 Checks.push_back(
2758 std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
2759 }
2760
2761 if (Checks.size() > 0)
2762 EmitBinOpCheck(Checks, Ops);
2763}
2764
2765Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
2766 {
2767 CodeGenFunction::SanitizerScope SanScope(&CGF);
2768 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
2769 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
2770 Ops.Ty->isIntegerType() &&
2771 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
2772 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
2773 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
2774 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
2775 Ops.Ty->isRealFloatingType() &&
2776 Ops.mayHaveFloatDivisionByZero()) {
2777 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
2778 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
2779 EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
2780 Ops);
2781 }
2782 }
2783
2784 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
2785 llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
2786 if (CGF.getLangOpts().OpenCL &&
2787 !CGF.CGM.getCodeGenOpts().CorrectlyRoundedDivSqrt) {
2788 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
2789 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
2790 // build option allows an application to specify that single precision
2791 // floating-point divide (x/y and 1/x) and sqrt used in the program
2792 // source are correctly rounded.
2793 llvm::Type *ValTy = Val->getType();
2794 if (ValTy->isFloatTy() ||
2795 (isa<llvm::VectorType>(ValTy) &&
2796 cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
2797 CGF.SetFPAccuracy(Val, 2.5);
2798 }
2799 return Val;
2800 }
2801 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
2802 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
2803 else
2804 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
2805}
2806
2807Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
2808 // Rem in C can't be a floating point type: C99 6.5.5p2.
2809 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
2810 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
2811 Ops.Ty->isIntegerType() &&
2812 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
2813 CodeGenFunction::SanitizerScope SanScope(&CGF);
2814 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
2815 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
2816 }
2817
2818 if (Ops.Ty->hasUnsignedIntegerRepresentation())
2819 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
2820 else
2821 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
2822}
2823
2824Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
2825 unsigned IID;
2826 unsigned OpID = 0;
2827
2828 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
2829 switch (Ops.Opcode) {
2830 case BO_Add:
2831 case BO_AddAssign:
2832 OpID = 1;
2833 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
2834 llvm::Intrinsic::uadd_with_overflow;
2835 break;
2836 case BO_Sub:
2837 case BO_SubAssign:
2838 OpID = 2;
2839 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
2840 llvm::Intrinsic::usub_with_overflow;
2841 break;
2842 case BO_Mul:
2843 case BO_MulAssign:
2844 OpID = 3;
2845 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
2846 llvm::Intrinsic::umul_with_overflow;
2847 break;
2848 default:
2849 llvm_unreachable("Unsupported operation for overflow detection")::llvm::llvm_unreachable_internal("Unsupported operation for overflow detection"
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 2849)
;
2850 }
2851 OpID <<= 1;
2852 if (isSigned)
2853 OpID |= 1;
2854
2855 CodeGenFunction::SanitizerScope SanScope(&CGF);
2856 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
2857
2858 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
2859
2860 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
2861 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
2862 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
2863
2864 // Handle overflow with llvm.trap if no custom handler has been specified.
2865 const std::string *handlerName =
2866 &CGF.getLangOpts().OverflowHandler;
2867 if (handlerName->empty()) {
2868 // If the signed-integer-overflow sanitizer is enabled, emit a call to its
2869 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
2870 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
2871 llvm::Value *NotOverflow = Builder.CreateNot(overflow);
2872 SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
2873 : SanitizerKind::UnsignedIntegerOverflow;
2874 EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
2875 } else
2876 CGF.EmitTrapCheck(Builder.CreateNot(overflow));
2877 return result;
2878 }
2879
2880 // Branch in case of overflow.
2881 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
2882 llvm::BasicBlock *continueBB =
2883 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
2884 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
2885
2886 Builder.CreateCondBr(overflow, overflowBB, continueBB);
2887
2888 // If an overflow handler is set, then we want to call it and then use its
2889 // result, if it returns.
2890 Builder.SetInsertPoint(overflowBB);
2891
2892 // Get the overflow handler.
2893 llvm::Type *Int8Ty = CGF.Int8Ty;
2894 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
2895 llvm::FunctionType *handlerTy =
2896 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
2897 llvm::Value *handler = CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
2898
2899 // Sign extend the args to 64-bit, so that we can use the same handler for
2900 // all types of overflow.
2901 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
2902 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
2903
2904 // Call the handler with the two arguments, the operation, and the size of
2905 // the result.
2906 llvm::Value *handlerArgs[] = {
2907 lhs,
2908 rhs,
2909 Builder.getInt8(OpID),
2910 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
2911 };
2912 llvm::Value *handlerResult =
2913 CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
2914
2915 // Truncate the result back to the desired size.
2916 handlerResult = Builder.CreateTrunc(handlerResult, opTy);
2917 Builder.CreateBr(continueBB);
2918
2919 Builder.SetInsertPoint(continueBB);
2920 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
2921 phi->addIncoming(result, initialBB);
2922 phi->addIncoming(handlerResult, overflowBB);
2923
2924 return phi;
2925}
2926
2927/// Emit pointer + index arithmetic.
2928static Value *emitPointerArithmetic(CodeGenFunction &CGF,
2929 const BinOpInfo &op,
2930 bool isSubtraction) {
2931 // Must have binary (not unary) expr here. Unary pointer
2932 // increment/decrement doesn't use this path.
2933 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
2934
2935 Value *pointer = op.LHS;
2936 Expr *pointerOperand = expr->getLHS();
2937 Value *index = op.RHS;
2938 Expr *indexOperand = expr->getRHS();
2939
2940 // In a subtraction, the LHS is always the pointer.
2941 if (!isSubtraction && !pointer->getType()->isPointerTy()) {
2942 std::swap(pointer, index);
2943 std::swap(pointerOperand, indexOperand);
2944 }
2945
2946 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
2947
2948 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
2949 auto &DL = CGF.CGM.getDataLayout();
2950 auto PtrTy = cast<llvm::PointerType>(pointer->getType());
2951
2952 // Some versions of glibc and gcc use idioms (particularly in their malloc
2953 // routines) that add a pointer-sized integer (known to be a pointer value)
2954 // to a null pointer in order to cast the value back to an integer or as
2955 // part of a pointer alignment algorithm. This is undefined behavior, but
2956 // we'd like to be able to compile programs that use it.
2957 //
2958 // Normally, we'd generate a GEP with a null-pointer base here in response
2959 // to that code, but it's also UB to dereference a pointer created that
2960 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
2961 // generate a direct cast of the integer value to a pointer.
2962 //
2963 // The idiom (p = nullptr + N) is not met if any of the following are true:
2964 //
2965 // The operation is subtraction.
2966 // The index is not pointer-sized.
2967 // The pointer type is not byte-sized.
2968 //
2969 if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
2970 op.Opcode,
2971 expr->getLHS(),
2972 expr->getRHS()))
2973 return CGF.Builder.CreateIntToPtr(index, pointer->getType());
2974
2975 if (width != DL.getTypeSizeInBits(PtrTy)) {
2976 // Zero-extend or sign-extend the pointer value according to
2977 // whether the index is signed or not.
2978 index = CGF.Builder.CreateIntCast(index, DL.getIntPtrType(PtrTy), isSigned,
2979 "idx.ext");
2980 }
2981
2982 // If this is subtraction, negate the index.
2983 if (isSubtraction)
2984 index = CGF.Builder.CreateNeg(index, "idx.neg");
2985
2986 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
2987 CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
2988 /*Accessed*/ false);
2989
2990 const PointerType *pointerType
2991 = pointerOperand->getType()->getAs<PointerType>();
2992 if (!pointerType) {
2993 QualType objectType = pointerOperand->getType()
2994 ->castAs<ObjCObjectPointerType>()
2995 ->getPointeeType();
2996 llvm::Value *objectSize
2997 = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
2998
2999 index = CGF.Builder.CreateMul(index, objectSize);
3000
3001 Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
3002 result = CGF.Builder.CreateGEP(result, index, "add.ptr");
3003 return CGF.Builder.CreateBitCast(result, pointer->getType());
3004 }
3005
3006 QualType elementType = pointerType->getPointeeType();
3007 if (const VariableArrayType *vla
3008 = CGF.getContext().getAsVariableArrayType(elementType)) {
3009 // The element count here is the total number of non-VLA elements.
3010 llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
3011
3012 // Effectively, the multiply by the VLA size is part of the GEP.
3013 // GEP indexes are signed, and scaling an index isn't permitted to
3014 // signed-overflow, so we use the same semantics for our explicit
3015 // multiply. We suppress this if overflow is not undefined behavior.
3016 if (CGF.getLangOpts().isSignedOverflowDefined()) {
3017 index = CGF.Builder.CreateMul(index, numElements, "vla.index");
3018 pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
3019 } else {
3020 index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
3021 pointer =
3022 CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
3023 op.E->getExprLoc(), "add.ptr");
3024 }
3025 return pointer;
3026 }
3027
3028 // Explicitly handle GNU void* and function pointer arithmetic extensions. The
3029 // GNU void* casts amount to no-ops since our void* type is i8*, but this is
3030 // future proof.
3031 if (elementType->isVoidType() || elementType->isFunctionType()) {
3032 Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
3033 result = CGF.Builder.CreateGEP(result, index, "add.ptr");
3034 return CGF.Builder.CreateBitCast(result, pointer->getType());
3035 }
3036
3037 if (CGF.getLangOpts().isSignedOverflowDefined())
3038 return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
3039
3040 return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
3041 op.E->getExprLoc(), "add.ptr");
3042}
3043
3044// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
3045// Addend. Use negMul and negAdd to negate the first operand of the Mul or
3046// the add operand respectively. This allows fmuladd to represent a*b-c, or
3047// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
3048// efficient operations.
3049static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
3050 const CodeGenFunction &CGF, CGBuilderTy &Builder,
3051 bool negMul, bool negAdd) {
3052 assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.")((!(negMul && negAdd) && "Only one of negMul and negAdd should be set."
) ? static_cast<void> (0) : __assert_fail ("!(negMul && negAdd) && \"Only one of negMul and negAdd should be set.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 3052, __PRETTY_FUNCTION__))
;
3053
3054 Value *MulOp0 = MulOp->getOperand(0);
3055 Value *MulOp1 = MulOp->getOperand(1);
3056 if (negMul) {
3057 MulOp0 =
3058 Builder.CreateFSub(
3059 llvm::ConstantFP::getZeroValueForNegation(MulOp0->getType()), MulOp0,
3060 "neg");
3061 } else if (negAdd) {
3062 Addend =
3063 Builder.CreateFSub(
3064 llvm::ConstantFP::getZeroValueForNegation(Addend->getType()), Addend,
3065 "neg");
3066 }
3067
3068 Value *FMulAdd = Builder.CreateCall(
3069 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
3070 {MulOp0, MulOp1, Addend});
3071 MulOp->eraseFromParent();
3072
3073 return FMulAdd;
3074}
3075
3076// Check whether it would be legal to emit an fmuladd intrinsic call to
3077// represent op and if so, build the fmuladd.
3078//
3079// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
3080// Does NOT check the type of the operation - it's assumed that this function
3081// will be called from contexts where it's known that the type is contractable.
3082static Value* tryEmitFMulAdd(const BinOpInfo &op,
3083 const CodeGenFunction &CGF, CGBuilderTy &Builder,
3084 bool isSub=false) {
3085
3086 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||(((op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode
== BO_Sub || op.Opcode == BO_SubAssign) && "Only fadd/fsub can be the root of an fmuladd."
) ? static_cast<void> (0) : __assert_fail ("(op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) && \"Only fadd/fsub can be the root of an fmuladd.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 3088, __PRETTY_FUNCTION__))
3087 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&(((op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode
== BO_Sub || op.Opcode == BO_SubAssign) && "Only fadd/fsub can be the root of an fmuladd."
) ? static_cast<void> (0) : __assert_fail ("(op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) && \"Only fadd/fsub can be the root of an fmuladd.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 3088, __PRETTY_FUNCTION__))
3088 "Only fadd/fsub can be the root of an fmuladd.")(((op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode
== BO_Sub || op.Opcode == BO_SubAssign) && "Only fadd/fsub can be the root of an fmuladd."
) ? static_cast<void> (0) : __assert_fail ("(op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) && \"Only fadd/fsub can be the root of an fmuladd.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 3088, __PRETTY_FUNCTION__))
;
3089
3090 // Check whether this op is marked as fusable.
3091 if (!op.FPFeatures.allowFPContractWithinStatement())
3092 return nullptr;
3093
3094 // We have a potentially fusable op. Look for a mul on one of the operands.
3095 // Also, make sure that the mul result isn't used directly. In that case,
3096 // there's no point creating a muladd operation.
3097 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
3098 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3099 LHSBinOp->use_empty())
3100 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
3101 }
3102 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
3103 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3104 RHSBinOp->use_empty())
3105 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
3106 }
3107
3108 return nullptr;
3109}
3110
3111Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
3112 if (op.LHS->getType()->isPointerTy() ||
3113 op.RHS->getType()->isPointerTy())
3114 return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);
3115
3116 if (op.Ty->isSignedIntegerOrEnumerationType()) {
3117 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3118 case LangOptions::SOB_Defined:
3119 return Builder.CreateAdd(op.LHS, op.RHS, "add");
3120 case LangOptions::SOB_Undefined:
3121 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3122 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3123 // Fall through.
3124 case LangOptions::SOB_Trapping:
3125 if (CanElideOverflowCheck(CGF.getContext(), op))
3126 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3127 return EmitOverflowCheckedBinOp(op);
3128 }
3129 }
3130
3131 if (op.Ty->isUnsignedIntegerType() &&
3132 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3133 !CanElideOverflowCheck(CGF.getContext(), op))
3134 return EmitOverflowCheckedBinOp(op);
3135
3136 if (op.LHS->getType()->isFPOrFPVectorTy()) {
3137 // Try to form an fmuladd.
3138 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
3139 return FMulAdd;
3140
3141 Value *V = Builder.CreateFAdd(op.LHS, op.RHS, "add");
3142 return propagateFMFlags(V, op);
3143 }
3144
3145 return Builder.CreateAdd(op.LHS, op.RHS, "add");
3146}
3147
3148Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
3149 // The LHS is always a pointer if either side is.
3150 if (!op.LHS->getType()->isPointerTy()) {
3151 if (op.Ty->isSignedIntegerOrEnumerationType()) {
3152 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3153 case LangOptions::SOB_Defined:
3154 return Builder.CreateSub(op.LHS, op.RHS, "sub");
3155 case LangOptions::SOB_Undefined:
3156 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3157 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3158 // Fall through.
3159 case LangOptions::SOB_Trapping:
3160 if (CanElideOverflowCheck(CGF.getContext(), op))
3161 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3162 return EmitOverflowCheckedBinOp(op);
3163 }
3164 }
3165
3166 if (op.Ty->isUnsignedIntegerType() &&
3167 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3168 !CanElideOverflowCheck(CGF.getContext(), op))
3169 return EmitOverflowCheckedBinOp(op);
3170
3171 if (op.LHS->getType()->isFPOrFPVectorTy()) {
3172 // Try to form an fmuladd.
3173 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
3174 return FMulAdd;
3175 Value *V = Builder.CreateFSub(op.LHS, op.RHS, "sub");
3176 return propagateFMFlags(V, op);
3177 }
3178
3179 return Builder.CreateSub(op.LHS, op.RHS, "sub");
3180 }
3181
3182 // If the RHS is not a pointer, then we have normal pointer
3183 // arithmetic.
3184 if (!op.RHS->getType()->isPointerTy())
3185 return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);
3186
3187 // Otherwise, this is a pointer subtraction.
3188
3189 // Do the raw subtraction part.
3190 llvm::Value *LHS
3191 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
3192 llvm::Value *RHS
3193 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
3194 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
3195
3196 // Okay, figure out the element size.
3197 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
3198 QualType elementType = expr->getLHS()->getType()->getPointeeType();
3199
3200 llvm::Value *divisor = nullptr;
3201
3202 // For a variable-length array, this is going to be non-constant.
3203 if (const VariableArrayType *vla
3204 = CGF.getContext().getAsVariableArrayType(elementType)) {
3205 auto VlaSize = CGF.getVLASize(vla);
3206 elementType = VlaSize.Type;
3207 divisor = VlaSize.NumElts;
3208
3209 // Scale the number of non-VLA elements by the non-VLA element size.
3210 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
3211 if (!eltSize.isOne())
3212 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
3213
3214 // For everything elese, we can just compute it, safe in the
3215 // assumption that Sema won't let anything through that we can't
3216 // safely compute the size of.
3217 } else {
3218 CharUnits elementSize;
3219 // Handle GCC extension for pointer arithmetic on void* and
3220 // function pointer types.
3221 if (elementType->isVoidType() || elementType->isFunctionType())
3222 elementSize = CharUnits::One();
3223 else
3224 elementSize = CGF.getContext().getTypeSizeInChars(elementType);
3225
3226 // Don't even emit the divide for element size of 1.
3227 if (elementSize.isOne())
3228 return diffInChars;
3229
3230 divisor = CGF.CGM.getSize(elementSize);
3231 }
3232
3233 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
3234 // pointer difference in C is only defined in the case where both operands
3235 // are pointing to elements of an array.
3236 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
3237}
3238
3239Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) {
3240 llvm::IntegerType *Ty;
3241 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
3242 Ty = cast<llvm::IntegerType>(VT->getElementType());
3243 else
3244 Ty = cast<llvm::IntegerType>(LHS->getType());
3245 return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
3246}
3247
3248Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
3249 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3250 // RHS to the same size as the LHS.
3251 Value *RHS = Ops.RHS;
3252 if (Ops.LHS->getType() != RHS->getType())
3253 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
3254
3255 bool SanitizeBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
3256 Ops.Ty->hasSignedIntegerRepresentation() &&
3257 !CGF.getLangOpts().isSignedOverflowDefined();
3258 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
3259 // OpenCL 6.3j: shift values are effectively % word size of LHS.
3260 if (CGF.getLangOpts().OpenCL)
3261 RHS =
3262 Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shl.mask");
3263 else if ((SanitizeBase || SanitizeExponent) &&
3264 isa<llvm::IntegerType>(Ops.LHS->getType())) {
3265 CodeGenFunction::SanitizerScope SanScope(&CGF);
3266 SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
3267 llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
3268 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
3269
3270 if (SanitizeExponent) {
3271 Checks.push_back(
3272 std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
3273 }
3274
3275 if (SanitizeBase) {
3276 // Check whether we are shifting any non-zero bits off the top of the
3277 // integer. We only emit this check if exponent is valid - otherwise
3278 // instructions below will have undefined behavior themselves.
3279 llvm::BasicBlock *Orig = Builder.GetInsertBlock();
3280 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
3281 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
3282 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
3283 llvm::Value *PromotedWidthMinusOne =
3284 (RHS == Ops.RHS) ? WidthMinusOne
3285 : GetWidthMinusOneValue(Ops.LHS, RHS);
3286 CGF.EmitBlock(CheckShiftBase);
3287 llvm::Value *BitsShiftedOff = Builder.CreateLShr(
3288 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
3289 /*NUW*/ true, /*NSW*/ true),
3290 "shl.check");
3291 if (CGF.getLangOpts().CPlusPlus) {
3292 // In C99, we are not permitted to shift a 1 bit into the sign bit.
3293 // Under C++11's rules, shifting a 1 bit into the sign bit is
3294 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
3295 // define signed left shifts, so we use the C99 and C++11 rules there).
3296 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
3297 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
3298 }
3299 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
3300 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
3301 CGF.EmitBlock(Cont);
3302 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
3303 BaseCheck->addIncoming(Builder.getTrue(), Orig);
3304 BaseCheck->addIncoming(ValidBase, CheckShiftBase);
3305 Checks.push_back(std::make_pair(BaseCheck, SanitizerKind::ShiftBase));
3306 }
3307
3308 assert(!Checks.empty())((!Checks.empty()) ? static_cast<void> (0) : __assert_fail
("!Checks.empty()", "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 3308, __PRETTY_FUNCTION__))
;
3309 EmitBinOpCheck(Checks, Ops);
3310 }
3311
3312 return Builder.CreateShl(Ops.LHS, RHS, "shl");
3313}
3314
3315Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
3316 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3317 // RHS to the same size as the LHS.
3318 Value *RHS = Ops.RHS;
3319 if (Ops.LHS->getType() != RHS->getType())
3320 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
3321
3322 // OpenCL 6.3j: shift values are effectively % word size of LHS.
3323 if (CGF.getLangOpts().OpenCL)
3324 RHS =
3325 Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shr.mask");
3326 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
3327 isa<llvm::IntegerType>(Ops.LHS->getType())) {
3328 CodeGenFunction::SanitizerScope SanScope(&CGF);
3329 llvm::Value *Valid =
3330 Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
3331 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
3332 }
3333
3334 if (Ops.Ty->hasUnsignedIntegerRepresentation())
3335 return Builder.CreateLShr(Ops.LHS, RHS, "shr");
3336 return Builder.CreateAShr(Ops.LHS, RHS, "shr");
3337}
3338
3339enum IntrinsicType { VCMPEQ, VCMPGT };
3340// return corresponding comparison intrinsic for given vector type
3341static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
3342 BuiltinType::Kind ElemKind) {
3343 switch (ElemKind) {
3344 default: llvm_unreachable("unexpected element type")::llvm::llvm_unreachable_internal("unexpected element type", "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 3344)
;
3345 case BuiltinType::Char_U:
3346 case BuiltinType::UChar:
3347 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
3348 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
3349 case BuiltinType::Char_S:
3350 case BuiltinType::SChar:
3351 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
3352 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
3353 case BuiltinType::UShort:
3354 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
3355 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
3356 case BuiltinType::Short:
3357 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
3358 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
3359 case BuiltinType::UInt:
3360 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
3361 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
3362 case BuiltinType::Int:
3363 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
3364 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
3365 case BuiltinType::ULong:
3366 case BuiltinType::ULongLong:
3367 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
3368 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
3369 case BuiltinType::Long:
3370 case BuiltinType::LongLong:
3371 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
3372 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
3373 case BuiltinType::Float:
3374 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
3375 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
3376 case BuiltinType::Double:
3377 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
3378 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
3379 }
3380}
3381
/// Emit a scalar comparison (==, !=, <, >, <=, >=). Handles four operand
/// shapes: member pointers (delegated to the C++ ABI), AltiVec vector operands
/// with a scalar result (predicate intrinsics), ordinary scalar/vector
/// operands (plain icmp/fcmp), and complex operands (element-wise equality).
/// The caller supplies the unsigned-integer, signed-integer, and
/// floating-point predicates; this routine picks the applicable one.
Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
                                      llvm::CmpInst::Predicate UICmpOpc,
                                      llvm::CmpInst::Predicate SICmpOpc,
                                      llvm::CmpInst::Predicate FCmpOpc) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();
  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
    // Member pointers only support equality comparisons; their representation
    // is ABI-specific, so the ABI object emits the comparison.
    assert(E->getOpcode() == BO_EQ ||
           E->getOpcode() == BO_NE);
    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
    Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());

    // If AltiVec, the comparison results in a numeric type, so we use
    // intrinsics comparing vectors and giving 0 or 1 as a result
    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
      // constants for mapping CR6 register bits to predicate result
      enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;

      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;

      // in several cases vector arguments order will be reversed
      Value *FirstVecArg = LHS,
            *SecondVecArg = RHS;

      QualType ElTy = LHSTy->getAs<VectorType>()->getElementType();
      const BuiltinType *BTy = ElTy->getAs<BuiltinType>();
      BuiltinType::Kind ElementKind = BTy->getKind();

      // Map the C-level comparison to a predicate intrinsic plus the CR6 bit
      // to test; <, <=, >= are built from > / >= by swapping operands.
      switch(E->getOpcode()) {
      default: llvm_unreachable("is not a comparison operation");
      case BO_EQ:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_NE:
        CR6 = CR6_EQ;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_LT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        std::swap(FirstVecArg, SecondVecArg);
        break;
      case BO_GT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        break;
      case BO_LE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
          std::swap(FirstVecArg, SecondVecArg);
        }
        else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      case BO_GE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
        }
        else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
          std::swap(FirstVecArg, SecondVecArg);
        }
        break;
      }

      Value *CR6Param = Builder.getInt32(CR6);
      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});

      // The result type of intrinsic may not be same as E->getType().
      // If E->getType() is not BoolTy, EmitScalarConversion will do the
      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
      // do nothing, if ResultTy is not i1 at the same time, it will cause
      // crash later.
      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
      if (ResultTy->getBitWidth() > 1 &&
          E->getType() == CGF.getContext().BoolTy)
        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                                  E->getExprLoc());
    }

    // Ordinary scalar (or vector-result) comparison: select the predicate by
    // the operand's representation.
    if (LHS->getType()->isFPOrFPVectorTy()) {
      Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
    } else if (LHSTy->hasSignedIntegerRepresentation()) {
      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.

      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
          !isa<llvm::ConstantPointerNull>(LHS) &&
          !isa<llvm::ConstantPointerNull>(RHS)) {

        // Dynamic information is required to be stripped for comparisons,
        // because it could leak the dynamic information.  Based on comparisons
        // of pointers to dynamic objects, the optimizer can replace one pointer
        // with another, which might be incorrect in presence of invariant
        // groups. Comparison with null is safe because null does not carry any
        // dynamic information.
        if (LHSTy.mayBeDynamicClass())
          LHS = Builder.CreateStripInvariantGroup(LHS);
        if (RHSTy.mayBeDynamicClass())
          RHS = Builder.CreateStripInvariantGroup(RHS);
      }

      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the appropriate
    // vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex Comparison: can only be an equality comparison.
    // A scalar operand mixed with a complex one is promoted by pairing it
    // with a zero imaginary part.
    CodeGenFunction::ComplexPairTy LHS, RHS;
    QualType CETy;
    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
      LHS = CGF.EmitComplexExpr(E->getLHS());
      CETy = CTy->getElementType();
    } else {
      LHS.first = Visit(E->getLHS());
      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }
    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
      RHS = CGF.EmitComplexExpr(E->getRHS());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
                                                     CTy->getElementType()) &&
             "The element types must always match.");
      (void)CTy;
    } else {
      RHS.first = Visit(E->getRHS());
      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
             "The element types must always match.");
    }

    // Compare real and imaginary parts separately, then combine.
    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons.  As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
    }

    if (E->getOpcode() == BO_EQ) {
      // a == b  iff  both parts are equal.
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BO_NE &&
             "Complex comparison other than == or != ?");
      // a != b  iff  either part differs.
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  // Convert the i1 result to the expression's result type.
  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                              E->getExprLoc());
}
3556
3557Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
3558 bool Ignore = TestAndClearIgnoreResultAssign();
3559
3560 Value *RHS;
3561 LValue LHS;
3562
3563 switch (E->getLHS()->getType().getObjCLifetime()) {
3564 case Qualifiers::OCL_Strong:
3565 std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
3566 break;
3567
3568 case Qualifiers::OCL_Autoreleasing:
3569 std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
3570 break;
3571
3572 case Qualifiers::OCL_ExplicitNone:
3573 std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
3574 break;
3575
3576 case Qualifiers::OCL_Weak:
3577 RHS = Visit(E->getRHS());
3578 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3579 RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
3580 break;
3581
3582 case Qualifiers::OCL_None:
3583 // __block variables need to have the rhs evaluated first, plus
3584 // this should improve codegen just a little.
3585 RHS = Visit(E->getRHS());
3586 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3587
3588 // Store the value into the LHS. Bit-fields are handled specially
3589 // because the result is altered by the store, i.e., [C99 6.5.16p1]
3590 // 'An assignment expression has the value of the left operand after
3591 // the assignment...'.
3592 if (LHS.isBitField()) {
3593 CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
3594 } else {
3595 CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
3596 CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
3597 }
3598 }
3599
3600 // If the result is clearly ignored, return now.
3601 if (Ignore)
3602 return nullptr;
3603
3604 // The result of an assignment in C is the assigned r-value.
3605 if (!CGF.getLangOpts().CPlusPlus)
3606 return RHS;
3607
3608 // If the lvalue is non-volatile, return the computed value of the assignment.
3609 if (!LHS.isVolatileQualified())
3610 return RHS;
3611
3612 // Otherwise, reload the value.
3613 return EmitLoadOfLValue(LHS, E->getExprLoc());
3614}
3615
/// Emit logical AND. Vector operands are compared element-wise against zero
/// and combined with a bitwise 'and'; scalar operands get short-circuit
/// control flow (constant-folded away when the LHS folds to 0 or 1), with a
/// PHI merging 'false' edges with the RHS value.
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.
      CGF.incrementProfileCounter(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    // (A label inside the RHS could still be the target of a goto, so the
    // block must be emitted in that case.)
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::Constant::getNullValue(ResTy);
  }

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock  = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first.  If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition.  All of these values will be false.  Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  // Emit the RHS in its own conditionally-evaluated region.
  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reaquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit line number for unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  // Artificial location to preserve the scope information
  {
    auto NL = ApplyDebugLocation::CreateArtificial(CGF);
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}
3699
/// Emit logical OR. Mirrors VisitBinLAnd: vector operands use element-wise
/// zero comparisons combined with a bitwise 'or'; scalar operands get
/// short-circuit control flow with a PHI merging 'true' edges with the RHS.
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.
      CGF.incrementProfileCounter(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    // (A label inside the RHS could still be the target of a goto, so the
    // block must be emitted in that case.)
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::ConstantInt::get(ResTy, 1);
  }

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first.  If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
                           CGF.getCurrentProfileCount() -
                               CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition.  All of these values will be true.  Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  eval.end(CGF);

  // Reaquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // Emit an unconditional branch from this block to ContBlock.  Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}
3777
3778Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
3779 CGF.EmitIgnoredExpr(E->getLHS());
3780 CGF.EnsureInsertPoint();
3781 return Visit(E->getRHS());
3782}
3783
3784//===----------------------------------------------------------------------===//
3785// Other Operators
3786//===----------------------------------------------------------------------===//
3787
3788/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
3789/// expression is cheap enough and side-effect-free enough to evaluate
3790/// unconditionally instead of conditionally. This is used to convert control
3791/// flow into selects in some cases.
3792static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
3793 CodeGenFunction &CGF) {
3794 // Anything that is an integer or floating point constant is fine.
3795 return E->IgnoreParens()->isEvaluatable(CGF.getContext());
3796
3797 // Even non-volatile automatic variables can't be evaluated unconditionally.
3798 // Referencing a thread_local may cause non-trivial initialization work to
3799 // occur. If we're inside a lambda and one of the variables is from the scope
3800 // outside the lambda, that function may have returned already. Reading its
3801 // locals is a bad idea. Also, these reads may introduce races there didn't
3802 // exist in the source-level program.
3803}
3804
3805
3806Value *ScalarExprEmitter::
3807VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
3808 TestAndClearIgnoreResultAssign();
3809
3810 // Bind the common expression if necessary.
3811 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
3812
3813 Expr *condExpr = E->getCond();
3814 Expr *lhsExpr = E->getTrueExpr();
3815 Expr *rhsExpr = E->getFalseExpr();
3816
3817 // If the condition constant folds and can be elided, try to avoid emitting
3818 // the condition and the dead arm.
3819 bool CondExprBool;
3820 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
3821 Expr *live = lhsExpr, *dead = rhsExpr;
3822 if (!CondExprBool) std::swap(live, dead);
3823
3824 // If the dead side doesn't have labels we need, just emit the Live part.
3825 if (!CGF.ContainsLabel(dead)) {
3826 if (CondExprBool)
3827 CGF.incrementProfileCounter(E);
3828 Value *Result = Visit(live);
3829
3830 // If the live part is a throw expression, it acts like it has a void
3831 // type, so evaluating it returns a null Value*. However, a conditional
3832 // with non-void type must return a non-null Value*.
3833 if (!Result && !E->getType()->isVoidType())
3834 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
3835
3836 return Result;
3837 }
3838 }
3839
3840 // OpenCL: If the condition is a vector, we can treat this condition like
3841 // the select function.
3842 if (CGF.getLangOpts().OpenCL
3843 && condExpr->getType()->isVectorType()) {
3844 CGF.incrementProfileCounter(E);
3845
3846 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
3847 llvm::Value *LHS = Visit(lhsExpr);
3848 llvm::Value *RHS = Visit(rhsExpr);
3849
3850 llvm::Type *condType = ConvertType(condExpr->getType());
3851 llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
3852
3853 unsigned numElem = vecTy->getNumElements();
3854 llvm::Type *elemType = vecTy->getElementType();
3855
3856 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
3857 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
3858 llvm::Value *tmp = Builder.CreateSExt(TestMSB,
3859 llvm::VectorType::get(elemType,
3860 numElem),
3861 "sext");
3862 llvm::Value *tmp2 = Builder.CreateNot(tmp);
3863
3864 // Cast float to int to perform ANDs if necessary.
3865 llvm::Value *RHSTmp = RHS;
3866 llvm::Value *LHSTmp = LHS;
3867 bool wasCast = false;
3868 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
3869 if (rhsVTy->getElementType()->isFloatingPointTy()) {
3870 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
3871 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
3872 wasCast = true;
3873 }
3874
3875 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
3876 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
3877 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
3878 if (wasCast)
3879 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
3880
3881 return tmp5;
3882 }
3883
3884 // If this is a really simple expression (like x ? 4 : 5), emit this as a
3885 // select instead of as control flow. We can only do this if it is cheap and
3886 // safe to evaluate the LHS and RHS unconditionally.
3887 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
3888 isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
3889 llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
3890 llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
3891
3892 CGF.incrementProfileCounter(E, StepV);
3893
3894 llvm::Value *LHS = Visit(lhsExpr);
3895 llvm::Value *RHS = Visit(rhsExpr);
3896 if (!LHS) {
3897 // If the conditional has void type, make sure we return a null Value*.
3898 assert(!RHS && "LHS and RHS types must match")((!RHS && "LHS and RHS types must match") ? static_cast
<void> (0) : __assert_fail ("!RHS && \"LHS and RHS types must match\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 3898, __PRETTY_FUNCTION__))
;
3899 return nullptr;
3900 }
3901 return Builder.CreateSelect(CondV, LHS, RHS, "cond");
3902 }
3903
3904 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
3905 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
3906 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
3907
3908 CodeGenFunction::ConditionalEvaluation eval(CGF);
3909 CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
3910 CGF.getProfileCount(lhsExpr));
3911
3912 CGF.EmitBlock(LHSBlock);
3913 CGF.incrementProfileCounter(E);
3914 eval.begin(CGF);
3915 Value *LHS = Visit(lhsExpr);
3916 eval.end(CGF);
3917
3918 LHSBlock = Builder.GetInsertBlock();
3919 Builder.CreateBr(ContBlock);
3920
3921 CGF.EmitBlock(RHSBlock);
3922 eval.begin(CGF);
3923 Value *RHS = Visit(rhsExpr);
3924 eval.end(CGF);
3925
3926 RHSBlock = Builder.GetInsertBlock();
3927 CGF.EmitBlock(ContBlock);
3928
3929 // If the LHS or RHS is a throw expression, it will be legitimately null.
3930 if (!LHS)
3931 return RHS;
3932 if (!RHS)
3933 return LHS;
3934
3935 // Create a PHI node for the real part.
3936 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
3937 PN->addIncoming(LHS, LHSBlock);
3938 PN->addIncoming(RHS, RHSBlock);
3939 return PN;
3940}
3941
3942Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
3943 return Visit(E->getChosenSubExpr());
3944}
3945
3946Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
3947 QualType Ty = VE->getType();
3948
3949 if (Ty->isVariablyModifiedType())
3950 CGF.EmitVariablyModifiedType(Ty);
3951
3952 Address ArgValue = Address::invalid();
3953 Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
3954
3955 llvm::Type *ArgTy = ConvertType(VE->getType());
3956
3957 // If EmitVAArg fails, emit an error.
3958 if (!ArgPtr.isValid()) {
3959 CGF.ErrorUnsupported(VE, "va_arg expression");
3960 return llvm::UndefValue::get(ArgTy);
3961 }
3962
3963 // FIXME Volatility.
3964 llvm::Value *Val = Builder.CreateLoad(ArgPtr);
3965
3966 // If EmitVAArg promoted the type, we must truncate it.
3967 if (ArgTy != Val->getType()) {
3968 if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
3969 Val = Builder.CreateIntToPtr(Val, ArgTy);
3970 else
3971 Val = Builder.CreateTrunc(Val, ArgTy);
3972 }
3973
3974 return Val;
3975}
3976
// Emit a block literal; the block's scalar value is produced by
// CodeGenFunction's block-emission machinery.
Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
  return CGF.EmitBlockLiteral(block);
}
3980
3981// Convert a vec3 to vec4, or vice versa.
3982static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
3983 Value *Src, unsigned NumElementsDst) {
3984 llvm::Value *UnV = llvm::UndefValue::get(Src->getType());
3985 SmallVector<llvm::Constant*, 4> Args;
3986 Args.push_back(Builder.getInt32(0));
3987 Args.push_back(Builder.getInt32(1));
3988 Args.push_back(Builder.getInt32(2));
3989 if (NumElementsDst == 4)
3990 Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
3991 llvm::Constant *Mask = llvm::ConstantVector::get(Args);
3992 return Builder.CreateShuffleVector(Src, UnV, Mask);
3993}
3994
3995// Create cast instructions for converting LLVM value \p Src to LLVM type \p
3996// DstTy. \p Src has the same size as \p DstTy. Both are single value types
3997// but could be scalar or vectors of different lengths, and either can be
3998// pointer.
3999// There are 4 cases:
4000// 1. non-pointer -> non-pointer : needs 1 bitcast
4001// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
4002// 3. pointer -> non-pointer
4003// a) pointer -> intptr_t : needs 1 ptrtoint
4004// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
4005// 4. non-pointer -> pointer
4006// a) intptr_t -> pointer : needs 1 inttoptr
4007// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
4008// Note: for cases 3b and 4b two casts are required since LLVM casts do not
4009// allow casting directly between pointer types and non-integer non-pointer
4010// types.
4011static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
4012 const llvm::DataLayout &DL,
4013 Value *Src, llvm::Type *DstTy,
4014 StringRef Name = "") {
4015 auto SrcTy = Src->getType();
4016
4017 // Case 1.
4018 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
4019 return Builder.CreateBitCast(Src, DstTy, Name);
4020
4021 // Case 2.
4022 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
4023 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
4024
4025 // Case 3.
4026 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
4027 // Case 3b.
4028 if (!DstTy->isIntegerTy())
4029 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
4030 // Cases 3a and 3b.
4031 return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
4032 }
4033
4034 // Case 4b.
4035 if (!SrcTy->isIntegerTy())
4036 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
4037 // Cases 4a and 4b.
4038 return Builder.CreateIntToPtr(Src, DstTy, Name);
4039}
4040
4041Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
4042 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
4043 llvm::Type *DstTy = ConvertType(E->getType());
4044
4045 llvm::Type *SrcTy = Src->getType();
4046 unsigned NumElementsSrc = isa<llvm::VectorType>(SrcTy) ?
4047 cast<llvm::VectorType>(SrcTy)->getNumElements() : 0;
4048 unsigned NumElementsDst = isa<llvm::VectorType>(DstTy) ?
4049 cast<llvm::VectorType>(DstTy)->getNumElements() : 0;
4050
4051 // Going from vec3 to non-vec3 is a special case and requires a shuffle
4052 // vector to get a vec4, then a bitcast if the target type is different.
4053 if (NumElementsSrc == 3 && NumElementsDst != 3) {
4054 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
4055
4056 if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4057 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
4058 DstTy);
4059 }
4060
4061 Src->setName("astype");
4062 return Src;
4063 }
4064
4065 // Going from non-vec3 to vec3 is a special case and requires a bitcast
4066 // to vec4 if the original type is not vec4, then a shuffle vector to
4067 // get a vec3.
4068 if (NumElementsSrc != 3 && NumElementsDst == 3) {
4069 if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4070 auto Vec4Ty = llvm::VectorType::get(DstTy->getVectorElementType(), 4);
4071 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
4072 Vec4Ty);
4073 }
4074
4075 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
4076 Src->setName("astype");
4077 return Src;
4078 }
4079
4080 return Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
4081 Src, DstTy, "astype");
4082}
4083
4084Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
4085 return CGF.EmitAtomicExpr(E).getScalarVal();
4086}
4087
//===----------------------------------------------------------------------===//
// Entry Point into this File
//===----------------------------------------------------------------------===//
4091
4092/// Emit the computation of the specified expression of scalar type, ignoring
4093/// the result.
4094Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
4095 assert(E && hasScalarEvaluationKind(E->getType()) &&((E && hasScalarEvaluationKind(E->getType()) &&
"Invalid scalar expression to emit") ? static_cast<void>
(0) : __assert_fail ("E && hasScalarEvaluationKind(E->getType()) && \"Invalid scalar expression to emit\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 4096, __PRETTY_FUNCTION__))
4096 "Invalid scalar expression to emit")((E && hasScalarEvaluationKind(E->getType()) &&
"Invalid scalar expression to emit") ? static_cast<void>
(0) : __assert_fail ("E && hasScalarEvaluationKind(E->getType()) && \"Invalid scalar expression to emit\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 4096, __PRETTY_FUNCTION__))
;
4097
4098 return ScalarExprEmitter(*this, IgnoreResultAssign)
4099 .Visit(const_cast<Expr *>(E));
4100}
4101
4102/// Emit a conversion from the specified type to the specified destination type,
4103/// both of which are LLVM scalar types.
4104Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
4105 QualType DstTy,
4106 SourceLocation Loc) {
4107 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&((hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind
(DstTy) && "Invalid scalar expression to emit") ? static_cast
<void> (0) : __assert_fail ("hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) && \"Invalid scalar expression to emit\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 4108, __PRETTY_FUNCTION__))
4108 "Invalid scalar expression to emit")((hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind
(DstTy) && "Invalid scalar expression to emit") ? static_cast
<void> (0) : __assert_fail ("hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) && \"Invalid scalar expression to emit\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 4108, __PRETTY_FUNCTION__))
;
4109 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
4110}
4111
4112/// Emit a conversion from the specified complex type to the specified
4113/// destination type, where the destination type is an LLVM scalar type.
4114Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
4115 QualType SrcTy,
4116 QualType DstTy,
4117 SourceLocation Loc) {
4118 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&((SrcTy->isAnyComplexType() && hasScalarEvaluationKind
(DstTy) && "Invalid complex -> scalar conversion")
? static_cast<void> (0) : __assert_fail ("SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) && \"Invalid complex -> scalar conversion\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 4119, __PRETTY_FUNCTION__))
4119 "Invalid complex -> scalar conversion")((SrcTy->isAnyComplexType() && hasScalarEvaluationKind
(DstTy) && "Invalid complex -> scalar conversion")
? static_cast<void> (0) : __assert_fail ("SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) && \"Invalid complex -> scalar conversion\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 4119, __PRETTY_FUNCTION__))
;
4120 return ScalarExprEmitter(*this)
4121 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
4122}
4123
4124
4125llvm::Value *CodeGenFunction::
4126EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
4127 bool isInc, bool isPre) {
4128 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
4129}
4130
4131LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
4132 // object->isa or (*object).isa
4133 // Generate code as for: *(Class*)object
4134
4135 Expr *BaseExpr = E->getBase();
4136 Address Addr = Address::invalid();
4137 if (BaseExpr->isRValue()) {
4138 Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
4139 } else {
4140 Addr = EmitLValue(BaseExpr).getAddress();
4141 }
4142
4143 // Cast the address to Class*.
4144 Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
4145 return MakeAddrLValue(Addr, E->getType());
4146}
4147
4148
4149LValue CodeGenFunction::EmitCompoundAssignmentLValue(
4150 const CompoundAssignOperator *E) {
4151 ScalarExprEmitter Scalar(*this);
4152 Value *Result = nullptr;
4153 switch (E->getOpcode()) {
4154#define COMPOUND_OP(Op) \
4155 case BO_##Op##Assign: \
4156 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
4157 Result)
4158 COMPOUND_OP(Mul);
4159 COMPOUND_OP(Div);
4160 COMPOUND_OP(Rem);
4161 COMPOUND_OP(Add);
4162 COMPOUND_OP(Sub);
4163 COMPOUND_OP(Shl);
4164 COMPOUND_OP(Shr);
4165 COMPOUND_OP(And);
4166 COMPOUND_OP(Xor);
4167 COMPOUND_OP(Or);
4168#undef COMPOUND_OP
4169
4170 case BO_PtrMemD:
4171 case BO_PtrMemI:
4172 case BO_Mul:
4173 case BO_Div:
4174 case BO_Rem:
4175 case BO_Add:
4176 case BO_Sub:
4177 case BO_Shl:
4178 case BO_Shr:
4179 case BO_LT:
4180 case BO_GT:
4181 case BO_LE:
4182 case BO_GE:
4183 case BO_EQ:
4184 case BO_NE:
4185 case BO_Cmp:
4186 case BO_And:
4187 case BO_Xor:
4188 case BO_Or:
4189 case BO_LAnd:
4190 case BO_LOr:
4191 case BO_Assign:
4192 case BO_Comma:
4193 llvm_unreachable("Not valid compound assignment operators")::llvm::llvm_unreachable_internal("Not valid compound assignment operators"
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 4193)
;
4194 }
4195
4196 llvm_unreachable("Unhandled compound assignment operator")::llvm::llvm_unreachable_internal("Unhandled compound assignment operator"
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 4196)
;
4197}
4198
4199Value *CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr,
4200 ArrayRef<Value *> IdxList,
4201 bool SignedIndices,
4202 bool IsSubtraction,
4203 SourceLocation Loc,
4204 const Twine &Name) {
4205 Value *GEPVal = Builder.CreateInBoundsGEP(Ptr, IdxList, Name);
4206
4207 // If the pointer overflow sanitizer isn't enabled, do nothing.
4208 if (!SanOpts.has(SanitizerKind::PointerOverflow))
4209 return GEPVal;
4210
4211 // If the GEP has already been reduced to a constant, leave it be.
4212 if (isa<llvm::Constant>(GEPVal))
4213 return GEPVal;
4214
4215 // Only check for overflows in the default address space.
4216 if (GEPVal->getType()->getPointerAddressSpace())
4217 return GEPVal;
4218
4219 auto *GEP = cast<llvm::GEPOperator>(GEPVal);
4220 assert(GEP->isInBounds() && "Expected inbounds GEP")((GEP->isInBounds() && "Expected inbounds GEP") ? static_cast
<void> (0) : __assert_fail ("GEP->isInBounds() && \"Expected inbounds GEP\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 4220, __PRETTY_FUNCTION__))
;
4221
4222 SanitizerScope SanScope(this);
4223 auto &VMContext = getLLVMContext();
4224 const auto &DL = CGM.getDataLayout();
4225 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
4226
4227 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
4228 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
4229 auto *SAddIntrinsic =
4230 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
4231 auto *SMulIntrinsic =
4232 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
4233
4234 // The total (signed) byte offset for the GEP.
4235 llvm::Value *TotalOffset = nullptr;
4236 // The offset overflow flag - true if the total offset overflows.
4237 llvm::Value *OffsetOverflows = Builder.getFalse();
4238
4239 /// Return the result of the given binary operation.
4240 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
4241 llvm::Value *RHS) -> llvm::Value * {
4242 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop")(((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop"
) ? static_cast<void> (0) : __assert_fail ("(Opcode == BO_Add || Opcode == BO_Mul) && \"Can't eval binop\""
, "/build/llvm-toolchain-snapshot-8~svn345461/tools/clang/lib/CodeGen/CGExprScalar.cpp"
, 4242, __PRETTY_FUNCTION__))
;
4243
4244 // If the operands are constants, return a constant result.
4245 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
4246 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
4247 llvm::APInt N;
4248 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
4249 /*Signed=*/true, N);
4250 if (HasOverflow)
4251 OffsetOverflows = Builder.getTrue();
4252 return llvm::ConstantInt::get(VMContext, N);
4253 }
4254 }
4255
4256 // Otherwise, compute the result with checked arithmetic.
4257 auto *ResultAndOverflow = Builder.CreateCall(
4258 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
4259 OffsetOverflows = Builder.CreateOr(
4260 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
4261 return Builder.CreateExtractValue(ResultAndOverflow, 0);
4262 };
4263
4264 // Determine the total byte offset by looking at each GEP operand.
4265 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
4266 GTI != GTE; ++GTI) {
4267 llvm::Value *LocalOffset;
4268 auto *Index = GTI.getOperand();
4269 // Compute the local offset contributed by this indexing step:
4270 if (auto *STy = GTI.getStructTypeOrNull()) {
4271 // For struct indexing, the local offset is the byte position of the
4272 // specified field.
4273 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
4274 LocalOffset = llvm::ConstantInt::get(
4275 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
4276 } else {
4277 // Otherwise this is array-like indexing. The local offset is the index
4278 // multiplied by the element size.
4279 auto *ElementSize = llvm::ConstantInt::get(
4280 IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
4281 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
4282 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
4283 }
4284
4285 // If this is the first offset, set it as the total offset. Otherwise, add
4286 // the local offset into the running total.
4287 if (!TotalOffset || TotalOffset == Zero)
4288 TotalOffset = LocalOffset;
4289 else
4290 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
4291 }
4292
4293 // Common case: if the total offset is zero, don't emit a check.
4294 if (TotalOffset == Zero)
4295 return GEPVal;
4296
4297 // Now that we've computed the total offset, add it to the base pointer (with
4298 // wrapping semantics).
4299 auto *IntPtr = Builder.CreatePtrToInt(GEP->getPointerOperand(), IntPtrTy);
4300 auto *ComputedGEP = Builder.CreateAdd(IntPtr, TotalOffset);
4301
4302 // The GEP is valid if:
4303 // 1) The total offset doesn't overflow, and
4304 // 2) The sign of the difference between the computed address and the base
4305 // pointer matches the sign of the total offset.
4306 llvm::Value *ValidGEP;
4307 auto *NoOffsetOverflow = Builder.CreateNot(OffsetOverflows);
4308 if (SignedIndices) {
4309 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
4310 auto *PosOrZeroOffset = Builder.CreateICmpSGE(TotalOffset, Zero);
4311 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
4312 ValidGEP = Builder.CreateAnd(
4313 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid),
4314 NoOffsetOverflow);
4315 } else if (!SignedIndices && !IsSubtraction) {
4316 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
4317 ValidGEP = Builder.CreateAnd(PosOrZeroValid, NoOffsetOverflow);
4318 } else {
4319 auto *NegOrZeroValid = Builder.CreateICmpULE(ComputedGEP, IntPtr);
4320 ValidGEP = Builder.CreateAnd(NegOrZeroValid, NoOffsetOverflow);
4321 }
4322
4323 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
4324 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
4325 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
4326 EmitCheck(std::make_pair(ValidGEP, SanitizerKind::PointerOverflow),
4327 SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
4328
4329 return GEPVal;
4330}