Bug Summary

File: tools/clang/lib/CodeGen/CGExprScalar.cpp
Warning: line 4416, column 10
Although the value stored to 'Src' is used in the enclosing expression, the value is never actually read from 'Src'
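
For context, a minimal hypothetical example (not taken from CGExprScalar.cpp) of the pattern this dead-store diagnostic describes: the value produced by the nested assignment is consumed by the enclosing expression, but the assigned variable is never read afterwards, so the store itself is dead.

    int consume(int V) { return V + 1; }

    int example(int In) {
      int Src;
      // 'Src = In' yields a value that consume() uses, but 'Src' itself is
      // never read again, so the store to 'Src' is dead.
      int Out = consume(Src = In);
      return Out;
    }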

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGExprScalar.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -relaxed-aliasing -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D CLANG_VENDOR="Debian " -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~svn374645/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-10~svn374645/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-10~svn374645/tools/clang/include -I /build/llvm-toolchain-snapshot-10~svn374645/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-10~svn374645/build-llvm/include -I /build/llvm-toolchain-snapshot-10~svn374645/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~svn374645/build-llvm/tools/clang/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~svn374645=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fobjc-runtime=gcc -fno-common -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2019-10-12-182855-27821-1 -x c++ /build/llvm-toolchain-snapshot-10~svn374645/tools/clang/lib/CodeGen/CGExprScalar.cpp
1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGObjCRuntime.h"
17#include "CodeGenFunction.h"
18#include "CodeGenModule.h"
19#include "ConstantEmitter.h"
20#include "TargetInfo.h"
21#include "clang/AST/ASTContext.h"
22#include "clang/AST/DeclObjC.h"
23#include "clang/AST/Expr.h"
24#include "clang/AST/RecordLayout.h"
25#include "clang/AST/StmtVisitor.h"
26#include "clang/Basic/CodeGenOptions.h"
27#include "clang/Basic/FixedPoint.h"
28#include "clang/Basic/TargetInfo.h"
29#include "llvm/ADT/Optional.h"
30#include "llvm/IR/CFG.h"
31#include "llvm/IR/Constants.h"
32#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/Function.h"
34#include "llvm/IR/GetElementPtrTypeIterator.h"
35#include "llvm/IR/GlobalVariable.h"
36#include "llvm/IR/Intrinsics.h"
37#include "llvm/IR/Module.h"
38#include <cstdarg>
39
40using namespace clang;
41using namespace CodeGen;
42using llvm::Value;
43
44//===----------------------------------------------------------------------===//
45// Scalar Expression Emitter
46//===----------------------------------------------------------------------===//
47
48namespace {
49
50/// Determine whether the given binary operation may overflow.
51/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
52/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
53/// the returned overflow check is precise. The returned value is 'true' for
54/// all other opcodes, to be conservative.
55bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
56 BinaryOperator::Opcode Opcode, bool Signed,
57 llvm::APInt &Result) {
58 // Assume overflow is possible, unless we can prove otherwise.
59 bool Overflow = true;
60 const auto &LHSAP = LHS->getValue();
61 const auto &RHSAP = RHS->getValue();
62 if (Opcode == BO_Add) {
63 if (Signed)
64 Result = LHSAP.sadd_ov(RHSAP, Overflow);
65 else
66 Result = LHSAP.uadd_ov(RHSAP, Overflow);
67 } else if (Opcode == BO_Sub) {
68 if (Signed)
69 Result = LHSAP.ssub_ov(RHSAP, Overflow);
70 else
71 Result = LHSAP.usub_ov(RHSAP, Overflow);
72 } else if (Opcode == BO_Mul) {
73 if (Signed)
74 Result = LHSAP.smul_ov(RHSAP, Overflow);
75 else
76 Result = LHSAP.umul_ov(RHSAP, Overflow);
77 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
78 if (Signed && !RHS->isZero())
79 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
80 else
81 return false;
82 }
83 return Overflow;
84}
85
86struct BinOpInfo {
87 Value *LHS;
88 Value *RHS;
89 QualType Ty; // Computation Type.
90 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
91 FPOptions FPFeatures;
92 const Expr *E; // Entire expr, for error unsupported. May not be binop.
93
94 /// Check if the binop can result in integer overflow.
95 bool mayHaveIntegerOverflow() const {
96 // Without constant input, we can't rule out overflow.
97 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
98 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
99 if (!LHSCI || !RHSCI)
100 return true;
101
102 llvm::APInt Result;
103 return ::mayHaveIntegerOverflow(
104 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
105 }
106
107 /// Check if the binop computes a division or a remainder.
108 bool isDivremOp() const {
109 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
110 Opcode == BO_RemAssign;
111 }
112
113 /// Check if the binop can result in an integer division by zero.
114 bool mayHaveIntegerDivisionByZero() const {
115 if (isDivremOp())
116 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
117 return CI->isZero();
118 return true;
119 }
120
121 /// Check if the binop can result in a float division by zero.
122 bool mayHaveFloatDivisionByZero() const {
123 if (isDivremOp())
124 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
125 return CFP->isZero();
126 return true;
127 }
128
129 /// Check if either operand is a fixed point type or integer type, with at
130 /// least one being a fixed point type. In any case, this
131 /// operation did not follow usual arithmetic conversion and both operands may
132 /// not be the same.
133 bool isFixedPointBinOp() const {
134 // We cannot simply check the result type since comparison operations return
135 // an int.
136 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
137 QualType LHSType = BinOp->getLHS()->getType();
138 QualType RHSType = BinOp->getRHS()->getType();
139 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
140 }
141 return false;
142 }
143};
144
145static bool MustVisitNullValue(const Expr *E) {
146 // If a null pointer expression's type is the C++0x nullptr_t, then
147 // it's not necessarily a simple constant and it must be evaluated
148 // for its potential side effects.
149 return E->getType()->isNullPtrType();
150}
151
152/// If \p E is a widened promoted integer, get its base (unpromoted) type.
153static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
154 const Expr *E) {
155 const Expr *Base = E->IgnoreImpCasts();
156 if (E == Base)
157 return llvm::None;
158
159 QualType BaseTy = Base->getType();
160 if (!BaseTy->isPromotableIntegerType() ||
161 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
162 return llvm::None;
163
164 return BaseTy;
165}
166
167/// Check if \p E is a widened promoted integer.
168static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
169 return getUnwidenedIntegerType(Ctx, E).hasValue();
170}
171
172/// Check if we can skip the overflow check for \p Op.
173static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
174 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
175 "Expected a unary or binary operator");
176
177 // If the binop has constant inputs and we can prove there is no overflow,
178 // we can elide the overflow check.
179 if (!Op.mayHaveIntegerOverflow())
180 return true;
181
182 // If a unary op has a widened operand, the op cannot overflow.
183 if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
184 return !UO->canOverflow();
185
186 // We usually don't need overflow checks for binops with widened operands.
187 // Multiplication with promoted unsigned operands is a special case.
188 const auto *BO = cast<BinaryOperator>(Op.E);
189 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
190 if (!OptionalLHSTy)
191 return false;
192
193 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
194 if (!OptionalRHSTy)
195 return false;
196
197 QualType LHSTy = *OptionalLHSTy;
198 QualType RHSTy = *OptionalRHSTy;
199
200 // This is the simple case: binops without unsigned multiplication, and with
201 // widened operands. No overflow check is needed here.
202 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
203 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
204 return true;
205
206 // For unsigned multiplication the overflow check can be elided if either one
207 // of the unpromoted types are less than half the size of the promoted type.
208 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
209 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
210 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
211}
212
213/// Update the FastMathFlags of LLVM IR from the FPOptions in LangOptions.
214static void updateFastMathFlags(llvm::FastMathFlags &FMF,
215 FPOptions FPFeatures) {
216 FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
217}
218
219/// Propagate fast-math flags from \p Op to the instruction in \p V.
220static Value *propagateFMFlags(Value *V, const BinOpInfo &Op) {
221 if (auto *I = dyn_cast<llvm::Instruction>(V)) {
222 llvm::FastMathFlags FMF = I->getFastMathFlags();
223 updateFastMathFlags(FMF, Op.FPFeatures);
224 I->setFastMathFlags(FMF);
225 }
226 return V;
227}
228
229class ScalarExprEmitter
230 : public StmtVisitor<ScalarExprEmitter, Value*> {
231 CodeGenFunction &CGF;
232 CGBuilderTy &Builder;
233 bool IgnoreResultAssign;
234 llvm::LLVMContext &VMContext;
235public:
236
237 ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
238 : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
239 VMContext(cgf.getLLVMContext()) {
240 }
241
242 //===--------------------------------------------------------------------===//
243 // Utilities
244 //===--------------------------------------------------------------------===//
245
246 bool TestAndClearIgnoreResultAssign() {
247 bool I = IgnoreResultAssign;
248 IgnoreResultAssign = false;
249 return I;
250 }
251
252 llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
253 LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
254 LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
255 return CGF.EmitCheckedLValue(E, TCK);
256 }
257
258 void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
259 const BinOpInfo &Info);
260
261 Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
262 return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
263 }
264
265 void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
266 const AlignValueAttr *AVAttr = nullptr;
267 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
268 const ValueDecl *VD = DRE->getDecl();
269
270 if (VD->getType()->isReferenceType()) {
271 if (const auto *TTy =
272 dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
273 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
274 } else {
275 // Assumptions for function parameters are emitted at the start of the
276 // function, so there is no need to repeat that here,
277 // unless the alignment-assumption sanitizer is enabled,
278 // then we prefer the assumption over alignment attribute
279 // on IR function param.
280 if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
281 return;
282
283 AVAttr = VD->getAttr<AlignValueAttr>();
284 }
285 }
286
287 if (!AVAttr)
288 if (const auto *TTy =
289 dyn_cast<TypedefType>(E->getType()))
290 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
291
292 if (!AVAttr)
293 return;
294
295 Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
296 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
297 CGF.EmitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
298 }
299
300 /// EmitLoadOfLValue - Given an expression with complex type that represents a
301 /// value l-value, this method emits the address of the l-value, then loads
302 /// and returns the result.
303 Value *EmitLoadOfLValue(const Expr *E) {
304 Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
305 E->getExprLoc());
306
307 EmitLValueAlignmentAssumption(E, V);
308 return V;
309 }
310
311 /// EmitConversionToBool - Convert the specified expression value to a
312 /// boolean (i1) truth value. This is equivalent to "Val != 0".
313 Value *EmitConversionToBool(Value *Src, QualType DstTy);
314
315 /// Emit a check that a conversion from a floating-point type does not
316 /// overflow.
317 void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
318 Value *Src, QualType SrcType, QualType DstType,
319 llvm::Type *DstTy, SourceLocation Loc);
320
321 /// Known implicit conversion check kinds.
322 /// Keep in sync with the enum of the same name in ubsan_handlers.h
323 enum ImplicitConversionCheckKind : unsigned char {
324 ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
325 ICCK_UnsignedIntegerTruncation = 1,
326 ICCK_SignedIntegerTruncation = 2,
327 ICCK_IntegerSignChange = 3,
328 ICCK_SignedIntegerTruncationOrSignChange = 4,
329 };
330
331 /// Emit a check that an [implicit] truncation of an integer does not
332 /// discard any bits. It is not UB, so we use the value after truncation.
333 void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
334 QualType DstType, SourceLocation Loc);
335
336 /// Emit a check that an [implicit] conversion of an integer does not change
337 /// the sign of the value. It is not UB, so we use the value after conversion.
338 /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
339 void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
340 QualType DstType, SourceLocation Loc);
341
342 /// Emit a conversion from the specified type to the specified destination
343 /// type, both of which are LLVM scalar types.
344 struct ScalarConversionOpts {
345 bool TreatBooleanAsSigned;
346 bool EmitImplicitIntegerTruncationChecks;
347 bool EmitImplicitIntegerSignChangeChecks;
348
349 ScalarConversionOpts()
350 : TreatBooleanAsSigned(false),
351 EmitImplicitIntegerTruncationChecks(false),
352 EmitImplicitIntegerSignChangeChecks(false) {}
353
354 ScalarConversionOpts(clang::SanitizerSet SanOpts)
355 : TreatBooleanAsSigned(false),
356 EmitImplicitIntegerTruncationChecks(
357 SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
358 EmitImplicitIntegerSignChangeChecks(
359 SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
360 };
361 Value *
362 EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
363 SourceLocation Loc,
364 ScalarConversionOpts Opts = ScalarConversionOpts());
365
366 /// Convert between either a fixed point and other fixed point or fixed point
367 /// and an integer.
368 Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
369 SourceLocation Loc);
370 Value *EmitFixedPointConversion(Value *Src, FixedPointSemantics &SrcFixedSema,
371 FixedPointSemantics &DstFixedSema,
372 SourceLocation Loc,
373 bool DstIsInteger = false);
374
375 /// Emit a conversion from the specified complex type to the specified
376 /// destination type, where the destination type is an LLVM scalar type.
377 Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
378 QualType SrcTy, QualType DstTy,
379 SourceLocation Loc);
380
381 /// EmitNullValue - Emit a value that corresponds to null for the given type.
382 Value *EmitNullValue(QualType Ty);
383
384 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
385 Value *EmitFloatToBoolConversion(Value *V) {
386 // Compare against 0.0 for fp scalars.
387 llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
388 return Builder.CreateFCmpUNE(V, Zero, "tobool");
389 }
390
391 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
392 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
393 Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
394
395 return Builder.CreateICmpNE(V, Zero, "tobool");
396 }
397
398 Value *EmitIntToBoolConversion(Value *V) {
399 // Because of the type rules of C, we often end up computing a
400 // logical value, then zero extending it to int, then wanting it
401 // as a logical value again. Optimize this common case.
402 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
403 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
404 Value *Result = ZI->getOperand(0);
405 // If there aren't any more uses, zap the instruction to save space.
406 // Note that there can be more uses, for example if this
407 // is the result of an assignment.
408 if (ZI->use_empty())
409 ZI->eraseFromParent();
410 return Result;
411 }
412 }
413
414 return Builder.CreateIsNotNull(V, "tobool");
415 }
416
417 //===--------------------------------------------------------------------===//
418 // Visitor Methods
419 //===--------------------------------------------------------------------===//
420
421 Value *Visit(Expr *E) {
422 ApplyDebugLocation DL(CGF, E);
423 return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
424 }
425
426 Value *VisitStmt(Stmt *S) {
427 S->dump(CGF.getContext().getSourceManager());
428 llvm_unreachable("Stmt can't have complex result type!");
429 }
430 Value *VisitExpr(Expr *S);
431
432 Value *VisitConstantExpr(ConstantExpr *E) {
433 return Visit(E->getSubExpr());
434 }
435 Value *VisitParenExpr(ParenExpr *PE) {
436 return Visit(PE->getSubExpr());
437 }
438 Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
439 return Visit(E->getReplacement());
440 }
441 Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
442 return Visit(GE->getResultExpr());
443 }
444 Value *VisitCoawaitExpr(CoawaitExpr *S) {
445 return CGF.EmitCoawaitExpr(*S).getScalarVal();
446 }
447 Value *VisitCoyieldExpr(CoyieldExpr *S) {
448 return CGF.EmitCoyieldExpr(*S).getScalarVal();
449 }
450 Value *VisitUnaryCoawait(const UnaryOperator *E) {
451 return Visit(E->getSubExpr());
452 }
453
454 // Leaves.
455 Value *VisitIntegerLiteral(const IntegerLiteral *E) {
456 return Builder.getInt(E->getValue());
457 }
458 Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
459 return Builder.getInt(E->getValue());
460 }
461 Value *VisitFloatingLiteral(const FloatingLiteral *E) {
462 return llvm::ConstantFP::get(VMContext, E->getValue());
463 }
464 Value *VisitCharacterLiteral(const CharacterLiteral *E) {
465 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
466 }
467 Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
468 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
469 }
470 Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
471 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
472 }
473 Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
474 return EmitNullValue(E->getType());
475 }
476 Value *VisitGNUNullExpr(const GNUNullExpr *E) {
477 return EmitNullValue(E->getType());
478 }
479 Value *VisitOffsetOfExpr(OffsetOfExpr *E);
480 Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
481 Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
482 llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
483 return Builder.CreateBitCast(V, ConvertType(E->getType()));
484 }
485
486 Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
487 return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
488 }
489
490 Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
491 return CGF.EmitPseudoObjectRValue(E).getScalarVal();
492 }
493
494 Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
495 if (E->isGLValue())
496 return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
497 E->getExprLoc());
498
499 // Otherwise, assume the mapping is the scalar directly.
500 return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
501 }
502
503 // l-values.
504 Value *VisitDeclRefExpr(DeclRefExpr *E) {
505 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
506 return CGF.emitScalarConstant(Constant, E);
507 return EmitLoadOfLValue(E);
508 }
509
510 Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
511 return CGF.EmitObjCSelectorExpr(E);
512 }
513 Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
514 return CGF.EmitObjCProtocolExpr(E);
515 }
516 Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
517 return EmitLoadOfLValue(E);
518 }
519 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
520 if (E->getMethodDecl() &&
521 E->getMethodDecl()->getReturnType()->isReferenceType())
522 return EmitLoadOfLValue(E);
523 return CGF.EmitObjCMessageExpr(E).getScalarVal();
524 }
525
526 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
527 LValue LV = CGF.EmitObjCIsaExpr(E);
528 Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
529 return V;
530 }
531
532 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
533 VersionTuple Version = E->getVersion();
534
535 // If we're checking for a platform older than our minimum deployment
536 // target, we can fold the check away.
537 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
538 return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
539
540 Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
541 llvm::Value *Args[] = {
542 llvm::ConstantInt::get(CGF.CGM.Int32Ty, Version.getMajor()),
543 llvm::ConstantInt::get(CGF.CGM.Int32Ty, Min ? *Min : 0),
544 llvm::ConstantInt::get(CGF.CGM.Int32Ty, SMin ? *SMin : 0),
545 };
546
547 return CGF.EmitBuiltinAvailable(Args);
548 }
549
550 Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
551 Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
552 Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
553 Value *VisitMemberExpr(MemberExpr *E);
554 Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
555 Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
556 return EmitLoadOfLValue(E);
557 }
558
559 Value *VisitInitListExpr(InitListExpr *E);
560
561 Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
562 assert(CGF.getArrayInitIndex() &&
563 "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
564 return CGF.getArrayInitIndex();
565 }
566
567 Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
568 return EmitNullValue(E->getType());
569 }
570 Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
571 CGF.CGM.EmitExplicitCastExprType(E, &CGF);
572 return VisitCastExpr(E);
573 }
574 Value *VisitCastExpr(CastExpr *E);
575
576 Value *VisitCallExpr(const CallExpr *E) {
577 if (E->getCallReturnType(CGF.getContext())->isReferenceType())
578 return EmitLoadOfLValue(E);
579
580 Value *V = CGF.EmitCallExpr(E).getScalarVal();
581
582 EmitLValueAlignmentAssumption(E, V);
583 return V;
584 }
585
586 Value *VisitStmtExpr(const StmtExpr *E);
587
588 // Unary Operators.
589 Value *VisitUnaryPostDec(const UnaryOperator *E) {
590 LValue LV = EmitLValue(E->getSubExpr());
591 return EmitScalarPrePostIncDec(E, LV, false, false);
592 }
593 Value *VisitUnaryPostInc(const UnaryOperator *E) {
594 LValue LV = EmitLValue(E->getSubExpr());
595 return EmitScalarPrePostIncDec(E, LV, true, false);
596 }
597 Value *VisitUnaryPreDec(const UnaryOperator *E) {
598 LValue LV = EmitLValue(E->getSubExpr());
599 return EmitScalarPrePostIncDec(E, LV, false, true);
600 }
601 Value *VisitUnaryPreInc(const UnaryOperator *E) {
602 LValue LV = EmitLValue(E->getSubExpr());
603 return EmitScalarPrePostIncDec(E, LV, true, true);
604 }
605
606 llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
607 llvm::Value *InVal,
608 bool IsInc);
609
610 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
611 bool isInc, bool isPre);
612
613
614 Value *VisitUnaryAddrOf(const UnaryOperator *E) {
615 if (isa<MemberPointerType>(E->getType())) // never sugared
616 return CGF.CGM.getMemberPointerConstant(E);
617
618 return EmitLValue(E->getSubExpr()).getPointer();
619 }
620 Value *VisitUnaryDeref(const UnaryOperator *E) {
621 if (E->getType()->isVoidType())
622 return Visit(E->getSubExpr()); // the actual value should be unused
623 return EmitLoadOfLValue(E);
624 }
625 Value *VisitUnaryPlus(const UnaryOperator *E) {
626 // This differs from gcc, though, most likely due to a bug in gcc.
627 TestAndClearIgnoreResultAssign();
628 return Visit(E->getSubExpr());
629 }
630 Value *VisitUnaryMinus (const UnaryOperator *E);
631 Value *VisitUnaryNot (const UnaryOperator *E);
632 Value *VisitUnaryLNot (const UnaryOperator *E);
633 Value *VisitUnaryReal (const UnaryOperator *E);
634 Value *VisitUnaryImag (const UnaryOperator *E);
635 Value *VisitUnaryExtension(const UnaryOperator *E) {
636 return Visit(E->getSubExpr());
637 }
638
639 // C++
640 Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
641 return EmitLoadOfLValue(E);
642 }
643 Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
644 auto &Ctx = CGF.getContext();
645 APValue Evaluated =
646 SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
647 return ConstantEmitter(CGF.CGM, &CGF)
648 .emitAbstract(SLE->getLocation(), Evaluated, SLE->getType());
649 }
650
651 Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
652 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
653 return Visit(DAE->getExpr());
654 }
655 Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
656 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
657 return Visit(DIE->getExpr());
658 }
659 Value *VisitCXXThisExpr(CXXThisExpr *TE) {
660 return CGF.LoadCXXThis();
661 }
662
663 Value *VisitExprWithCleanups(ExprWithCleanups *E);
664 Value *VisitCXXNewExpr(const CXXNewExpr *E) {
665 return CGF.EmitCXXNewExpr(E);
666 }
667 Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
668 CGF.EmitCXXDeleteExpr(E);
669 return nullptr;
670 }
671
672 Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
673 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
674 }
675
676 Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
677 return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
678 }
679
680 Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
681 return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
682 }
683
684 Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
685 // C++ [expr.pseudo]p1:
686 // The result shall only be used as the operand for the function call
687 // operator (), and the result of such a call has type void. The only
688 // effect is the evaluation of the postfix-expression before the dot or
689 // arrow.
690 CGF.EmitScalarExpr(E->getBase());
691 return nullptr;
692 }
693
694 Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
695 return EmitNullValue(E->getType());
696 }
697
698 Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
699 CGF.EmitCXXThrowExpr(E);
700 return nullptr;
701 }
702
703 Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
704 return Builder.getInt1(E->getValue());
705 }
706
707 // Binary Operators.
708 Value *EmitMul(const BinOpInfo &Ops) {
709 if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
710 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
711 case LangOptions::SOB_Defined:
712 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
713 case LangOptions::SOB_Undefined:
714 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
715 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
716 LLVM_FALLTHROUGH;
717 case LangOptions::SOB_Trapping:
718 if (CanElideOverflowCheck(CGF.getContext(), Ops))
719 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
720 return EmitOverflowCheckedBinOp(Ops);
721 }
722 }
723
724 if (Ops.Ty->isUnsignedIntegerType() &&
725 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
726 !CanElideOverflowCheck(CGF.getContext(), Ops))
727 return EmitOverflowCheckedBinOp(Ops);
728
729 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
730 Value *V = Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
731 return propagateFMFlags(V, Ops);
732 }
733 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
734 }
735 /// Create a binary op that checks for overflow.
736 /// Currently only supports +, - and *.
737 Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
738
739 // Check for undefined division and modulus behaviors.
740 void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
741 llvm::Value *Zero,bool isDiv);
742 // Common helper for getting how wide LHS of shift is.
743 static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS);
744 Value *EmitDiv(const BinOpInfo &Ops);
745 Value *EmitRem(const BinOpInfo &Ops);
746 Value *EmitAdd(const BinOpInfo &Ops);
747 Value *EmitSub(const BinOpInfo &Ops);
748 Value *EmitShl(const BinOpInfo &Ops);
749 Value *EmitShr(const BinOpInfo &Ops);
750 Value *EmitAnd(const BinOpInfo &Ops) {
751 return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
752 }
753 Value *EmitXor(const BinOpInfo &Ops) {
754 return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
755 }
756 Value *EmitOr (const BinOpInfo &Ops) {
757 return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
758 }
759
760 // Helper functions for fixed point binary operations.
761 Value *EmitFixedPointBinOp(const BinOpInfo &Ops);
762
763 BinOpInfo EmitBinOps(const BinaryOperator *E);
764 LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
765 Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
766 Value *&Result);
767
768 Value *EmitCompoundAssign(const CompoundAssignOperator *E,
769 Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
770
771 // Binary operators and binary compound assignment operators.
772#define HANDLEBINOP(OP) \
773 Value *VisitBin ## OP(const BinaryOperator *E) { \
774 return Emit ## OP(EmitBinOps(E)); \
775 } \
776 Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
777 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
778 }
779 HANDLEBINOP(Mul)
780 HANDLEBINOP(Div)
781 HANDLEBINOP(Rem)
782 HANDLEBINOP(Add)
783 HANDLEBINOP(Sub)
784 HANDLEBINOP(Shl)
785 HANDLEBINOP(Shr)
786 HANDLEBINOP(And)
787 HANDLEBINOP(Xor)
788 HANDLEBINOP(Or)
789#undef HANDLEBINOP
790
791 // Comparisons.
792 Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
793 llvm::CmpInst::Predicate SICmpOpc,
794 llvm::CmpInst::Predicate FCmpOpc);
795#define VISITCOMP(CODE, UI, SI, FP) \
796 Value *VisitBin##CODE(const BinaryOperator *E) { \
797 return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
798 llvm::FCmpInst::FP); }
799 VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT)
800 VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT)
801 VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE)
802 VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE)
803 VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ)
804 VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE)
805#undef VISITCOMP
806
807 Value *VisitBinAssign (const BinaryOperator *E);
808
809 Value *VisitBinLAnd (const BinaryOperator *E);
810 Value *VisitBinLOr (const BinaryOperator *E);
811 Value *VisitBinComma (const BinaryOperator *E);
812
813 Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
814 Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
815
816 // Other Operators.
817 Value *VisitBlockExpr(const BlockExpr *BE);
818 Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
819 Value *VisitChooseExpr(ChooseExpr *CE);
820 Value *VisitVAArgExpr(VAArgExpr *VE);
821 Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
822 return CGF.EmitObjCStringLiteral(E);
823 }
824 Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
825 return CGF.EmitObjCBoxedExpr(E);
826 }
827 Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
828 return CGF.EmitObjCArrayLiteral(E);
829 }
830 Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
831 return CGF.EmitObjCDictionaryLiteral(E);
832 }
833 Value *VisitAsTypeExpr(AsTypeExpr *CE);
834 Value *VisitAtomicExpr(AtomicExpr *AE);
835};
836} // end anonymous namespace.
837
838//===----------------------------------------------------------------------===//
839// Utilities
840//===----------------------------------------------------------------------===//
841
842/// EmitConversionToBool - Convert the specified expression value to a
843/// boolean (i1) truth value. This is equivalent to "Val != 0".
844Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
845 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
846
847 if (SrcType->isRealFloatingType())
848 return EmitFloatToBoolConversion(Src);
849
850 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
851 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
852
853 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
854 "Unknown scalar type to convert");
855
856 if (isa<llvm::IntegerType>(Src->getType()))
857 return EmitIntToBoolConversion(Src);
858
859 assert(isa<llvm::PointerType>(Src->getType()));
860 return EmitPointerToBoolConversion(Src, SrcType);
861}
862
863void ScalarExprEmitter::EmitFloatConversionCheck(
864 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
865 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
866 assert(SrcType->isFloatingType() && "not a conversion from floating point");
867 if (!isa<llvm::IntegerType>(DstTy))
868 return;
869
870 CodeGenFunction::SanitizerScope SanScope(&CGF);
871 using llvm::APFloat;
872 using llvm::APSInt;
873
874 llvm::Value *Check = nullptr;
875 const llvm::fltSemantics &SrcSema =
876 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
877
878 // Floating-point to integer. This has undefined behavior if the source is
879 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
880 // to an integer).
881 unsigned Width = CGF.getContext().getIntWidth(DstType);
882 bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
883
884 APSInt Min = APSInt::getMinValue(Width, Unsigned);
885 APFloat MinSrc(SrcSema, APFloat::uninitialized);
886 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
887 APFloat::opOverflow)
888 // Don't need an overflow check for lower bound. Just check for
889 // -Inf/NaN.
890 MinSrc = APFloat::getInf(SrcSema, true);
891 else
892 // Find the largest value which is too small to represent (before
893 // truncation toward zero).
894 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
895
896 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
897 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
898 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
899 APFloat::opOverflow)
900 // Don't need an overflow check for upper bound. Just check for
901 // +Inf/NaN.
902 MaxSrc = APFloat::getInf(SrcSema, false);
903 else
904 // Find the smallest value which is too large to represent (before
905 // truncation toward zero).
906 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
907
908 // If we're converting from __half, convert the range to float to match
909 // the type of src.
910 if (OrigSrcType->isHalfType()) {
911 const llvm::fltSemantics &Sema =
912 CGF.getContext().getFloatTypeSemantics(SrcType);
913 bool IsInexact;
914 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
915 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
916 }
917
918 llvm::Value *GE =
919 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
920 llvm::Value *LE =
921 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
922 Check = Builder.CreateAnd(GE, LE);
923
924 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
925 CGF.EmitCheckTypeDescriptor(OrigSrcType),
926 CGF.EmitCheckTypeDescriptor(DstType)};
927 CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
928 SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
929}
930
931// Should be called within CodeGenFunction::SanitizerScope RAII scope.
932// Returns 'i1 false' when the truncation Src -> Dst was lossy.
933static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
934 std::pair<llvm::Value *, SanitizerMask>>
935EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
936 QualType DstType, CGBuilderTy &Builder) {
937 llvm::Type *SrcTy = Src->getType();
938 llvm::Type *DstTy = Dst->getType();
939 (void)DstTy; // Only used in assert()
940
941 // This should be truncation of integral types.
942 assert(Src != Dst);
943 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
944 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
945 "non-integer llvm type");
946
947 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
948 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
949
950 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
951 // Else, it is a signed truncation.
952 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
953 SanitizerMask Mask;
954 if (!SrcSigned && !DstSigned) {
955 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
956 Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
957 } else {
958 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
959 Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
960 }
961
962 llvm::Value *Check = nullptr;
963 // 1. Extend the truncated value back to the same width as the Src.
964 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
965 // 2. Equality-compare with the original source value
966 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
967 // If the comparison result is 'i1 false', then the truncation was lossy.
968 return std::make_pair(Kind, std::make_pair(Check, Mask));
969}
970
971void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
972 Value *Dst, QualType DstType,
973 SourceLocation Loc) {
974 if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
975 return;
976
977 // We only care about int->int conversions here.
978 // We ignore conversions to/from pointer and/or bool.
979 if (!(SrcType->isIntegerType() && DstType->isIntegerType()))
980 return;
981
982 unsigned SrcBits = Src->getType()->getScalarSizeInBits();
983 unsigned DstBits = Dst->getType()->getScalarSizeInBits();
984 // This must be truncation. Else we do not care.
985 if (SrcBits <= DstBits)
986 return;
987
988 assert(!DstType->isBooleanType() && "we should not get here with booleans.");
989
990 // If the integer sign change sanitizer is enabled,
991 // and we are truncating from larger unsigned type to smaller signed type,
992 // let that next sanitizer deal with it.
993 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
994 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
995 if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
996 (!SrcSigned && DstSigned))
997 return;
998
999 CodeGenFunction::SanitizerScope SanScope(&CGF);
1000
1001 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1002 std::pair<llvm::Value *, SanitizerMask>>
1003 Check =
1004 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1005 // If the comparison result is 'i1 false', then the truncation was lossy.
1006
1007 // Do we care about this type of truncation?
1008 if (!CGF.SanOpts.has(Check.second.second))
1009 return;
1010
1011 llvm::Constant *StaticArgs[] = {
1012 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1013 CGF.EmitCheckTypeDescriptor(DstType),
1014 llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
1015 CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
1016 {Src, Dst});
1017}
1018
1019// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1020// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1021static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1022 std::pair<llvm::Value *, SanitizerMask>>
1023EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1024 QualType DstType, CGBuilderTy &Builder) {
1025 llvm::Type *SrcTy = Src->getType();
1026 llvm::Type *DstTy = Dst->getType();
1027
1028 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1029 "non-integer llvm type");
1030
1031 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1032 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1033 (void)SrcSigned; // Only used in assert()
1034 (void)DstSigned; // Only used in assert()
1035 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1036 unsigned DstBits = DstTy->getScalarSizeInBits();
1037 (void)SrcBits; // Only used in assert()
1038 (void)DstBits; // Only used in assert()
1039
1040 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1041 "either the widths should be different, or the signednesses.");
1042
1043 // NOTE: zero value is considered to be non-negative.
1044 auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
1045 const char *Name) -> Value * {
1046 // Is this value a signed type?
1047 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1048 llvm::Type *VTy = V->getType();
1049 if (!VSigned) {
1050 // If the value is unsigned, then it is never negative.
1051 // FIXME: can we encounter non-scalar VTy here?
1052 return llvm::ConstantInt::getFalse(VTy->getContext());
1053 }
1054 // Get the zero of the same type with which we will be comparing.
1055 llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1056 // %V.isnegative = icmp slt %V, 0
1057 // I.e is %V *strictly* less than zero, does it have negative value?
1058 return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1059 llvm::Twine(Name) + "." + V->getName() +
1060 ".negativitycheck");
1061 };
1062
1063 // 1. Was the old Value negative?
1064 llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
1065 // 2. Is the new Value negative?
1066 llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
1067 // 3. Now, was the 'negativity status' preserved during the conversion?
1068 // NOTE: conversion from negative to zero is considered to change the sign.
1069 // (We want to get 'false' when the conversion changed the sign)
1070 // So we should just equality-compare the negativity statuses.
1071 llvm::Value *Check = nullptr;
1072 Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1073 // If the comparison result is 'false', then the conversion changed the sign.
1074 return std::make_pair(
1075 ScalarExprEmitter::ICCK_IntegerSignChange,
1076 std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
1077}
1078
1079void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1080 Value *Dst, QualType DstType,
1081 SourceLocation Loc) {
1082 if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
1083 return;
1084
1085 llvm::Type *SrcTy = Src->getType();
1086 llvm::Type *DstTy = Dst->getType();
1087
1088 // We only care about int->int conversions here.
1089 // We ignore conversions to/from pointer and/or bool.
1090 if (!(SrcType->isIntegerType() && DstType->isIntegerType()))
1091 return;
1092
1093 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1094 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1095 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1096 unsigned DstBits = DstTy->getScalarSizeInBits();
1097
1098 // Now, we do not need to emit the check in *all* of the cases.
1099 // We can avoid emitting it in some obvious cases where it would have been
1100 // dropped by the opt passes (instcombine) always anyways.
1101 // If it's a cast between effectively the same type, no check.
1102 // NOTE: this is *not* equivalent to checking the canonical types.
1103 if (SrcSigned == DstSigned && SrcBits == DstBits)
1104 return;
1105 // At least one of the values needs to have signed type.
1106 // If both are unsigned, then obviously, neither of them can be negative.
1107 if (!SrcSigned && !DstSigned)
1108 return;
1109 // If the conversion is to *larger* *signed* type, then no check is needed.
1110 // Because either sign-extension happens (so the sign will remain),
1111 // or zero-extension will happen (the sign bit will be zero.)
1112 if ((DstBits > SrcBits) && DstSigned)
1113 return;
1114 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1115 (SrcBits > DstBits) && SrcSigned) {
1116 // If the signed integer truncation sanitizer is enabled,
1117 // and this is a truncation from signed type, then no check is needed.
1118 // Because here sign change check is interchangeable with truncation check.
1119 return;
1120 }
1121 // That's it. We can't rule out any more cases with the data we have.
1122
1123 CodeGenFunction::SanitizerScope SanScope(&CGF);
1124
1125 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1126 std::pair<llvm::Value *, SanitizerMask>>
1127 Check;
1128
1129 // Each of these checks needs to return 'false' when an issue was detected.
1130 ImplicitConversionCheckKind CheckKind;
1131 llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
1132 // So we can 'and' all the checks together, and still get 'false',
1133 // if at least one of the checks detected an issue.
1134
1135 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1136 CheckKind = Check.first;
1137 Checks.emplace_back(Check.second);
1138
1139 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1140 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1141 // If the signed integer truncation sanitizer was enabled,
1142 // and we are truncating from larger unsigned type to smaller signed type,
1143 // let's handle the case we skipped in that check.
1144 Check =
1145 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1146 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1147 Checks.emplace_back(Check.second);
1148 // If the comparison result is 'i1 false', then the truncation was lossy.
1149 }
1150
1151 llvm::Constant *StaticArgs[] = {
1152 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1153 CGF.EmitCheckTypeDescriptor(DstType),
1154 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
1155 // EmitCheck() will 'and' all the checks together.
1156 CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
1157 {Src, Dst});
1158}
1159
1160/// Emit a conversion from the specified type to the specified destination type,
1161/// both of which are LLVM scalar types.
1162Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1163 QualType DstType,
1164 SourceLocation Loc,
1165 ScalarConversionOpts Opts) {
1166 // All conversions involving fixed point types should be handled by the
1167 // EmitFixedPoint family functions. This is done to prevent bloating up this
1168 // function more, and although fixed point numbers are represented by
1169 // integers, we do not want to follow any logic that assumes they should be
1170 // treated as integers.
1171 // TODO(leonardchan): When necessary, add another if statement checking for
1172 // conversions to fixed point types from other types.
1173 if (SrcType->isFixedPointType()) {
1174 if (DstType->isBooleanType())
1175 // It is important that we check this before checking if the dest type is
1176 // an integer because booleans are technically integer types.
1177 // We do not need to check the padding bit on unsigned types if unsigned
1178 // padding is enabled because overflow into this bit is undefined
1179 // behavior.
1180 return Builder.CreateIsNotNull(Src, "tobool");
1181 if (DstType->isFixedPointType() || DstType->isIntegerType())
1182 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1183
1184 llvm_unreachable(
1185 "Unhandled scalar conversion from a fixed point type to another type.");
1186 } else if (DstType->isFixedPointType()) {
1187 if (SrcType->isIntegerType())
1188 // This also includes converting booleans and enums to fixed point types.
1189 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1190
1191 llvm_unreachable(
1192 "Unhandled scalar conversion to a fixed point type from another type.");
1193 }
1194
1195 QualType NoncanonicalSrcType = SrcType;
1196 QualType NoncanonicalDstType = DstType;
1197
1198 SrcType = CGF.getContext().getCanonicalType(SrcType);
1199 DstType = CGF.getContext().getCanonicalType(DstType);
1200 if (SrcType == DstType) return Src;
1201
1202 if (DstType->isVoidType()) return nullptr;
1203
1204 llvm::Value *OrigSrc = Src;
1205 QualType OrigSrcType = SrcType;
1206 llvm::Type *SrcTy = Src->getType();
1207
1208 // Handle conversions to bool first, they are special: comparisons against 0.
1209 if (DstType->isBooleanType())
1210 return EmitConversionToBool(Src, SrcType);
1211
1212 llvm::Type *DstTy = ConvertType(DstType);
1213
1214 // Cast from half through float if half isn't a native type.
1215 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1216 // Cast to FP using the intrinsic if the half type itself isn't supported.
1217 if (DstTy->isFloatingPointTy()) {
1218 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1219 return Builder.CreateCall(
1220 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1221 Src);
1222 } else {
1223 // Cast to other types through float, using either the intrinsic or FPExt,
1224 // depending on whether the half type itself is supported
1225 // (as opposed to operations on half, available with NativeHalfType).
1226 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1227 Src = Builder.CreateCall(
1228 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1229 CGF.CGM.FloatTy),
1230 Src);
1231 } else {
1232 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1233 }
1234 SrcType = CGF.getContext().FloatTy;
1235 SrcTy = CGF.FloatTy;
1236 }
1237 }
1238
1239 // Ignore conversions like int -> uint.
1240 if (SrcTy == DstTy) {
1241 if (Opts.EmitImplicitIntegerSignChangeChecks)
1242 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1243 NoncanonicalDstType, Loc);
1244
1245 return Src;
1246 }
1247
1248 // Handle pointer conversions next: pointers can only be converted to/from
1249 // other pointers and integers. Check for pointer types in terms of LLVM, as
1250 // some native types (like Obj-C id) may map to a pointer type.
1251 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1252 // The source value may be an integer, or a pointer.
1253 if (isa<llvm::PointerType>(SrcTy))
1254 return Builder.CreateBitCast(Src, DstTy, "conv");
1255
1256 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1257 // First, convert to the correct width so that we control the kind of
1258 // extension.
1259 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1260 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1261 llvm::Value* IntResult =
1262 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1263 // Then, cast to pointer.
1264 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1265 }
1266
1267 if (isa<llvm::PointerType>(SrcTy)) {
1268 // Must be a ptr to int cast.
1269 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1270 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1271 }
1272
1273 // A scalar can be splatted to an extended vector of the same element type
1274 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1275 // Sema should add casts to make sure that the source expression's type is
1276 // the same as the vector's element type (sans qualifiers)
1277 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1278 SrcType.getTypePtr() &&
1279 "Splatted expr doesn't match with vector element type?");
1280
1281 // Splat the element across to all elements
1282 unsigned NumElements = DstTy->getVectorNumElements();
1283 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1284 }
1285
1286 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1287 // Allow bitcast from vector to integer/fp of the same size.
1288 unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
1289 unsigned DstSize = DstTy->getPrimitiveSizeInBits();
1290 if (SrcSize == DstSize)
1291 return Builder.CreateBitCast(Src, DstTy, "conv");
1292
1293 // Conversions between vectors of different sizes are not allowed except
1294 // when vectors of half are involved. Operations on storage-only half
1295 // vectors require promoting half vector operands to float vectors and
1296 // truncating the result, which is either an int or float vector, to a
1297 // short or half vector.
1298
1299 // Source and destination are both expected to be vectors.
1300 llvm::Type *SrcElementTy = SrcTy->getVectorElementType();
1301 llvm::Type *DstElementTy = DstTy->getVectorElementType();
1302 (void)DstElementTy;
1303
1304 assert(((SrcElementTy->isIntegerTy() &&
1305 DstElementTy->isIntegerTy()) ||
1306 (SrcElementTy->isFloatingPointTy() &&
1307 DstElementTy->isFloatingPointTy())) &&
1308 "unexpected conversion between a floating-point vector and an "
1309 "integer vector");
1310
1311 // Truncate an i32 vector to an i16 vector.
1312 if (SrcElementTy->isIntegerTy())
1313 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1314
1315 // Truncate a float vector to a half vector.
1316 if (SrcSize > DstSize)
1317 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1318
1319 // Promote a half vector to a float vector.
1320 return Builder.CreateFPExt(Src, DstTy, "conv");
1321 }
1322
1323 // Finally, we have the arithmetic types: real int/float.
1324 Value *Res = nullptr;
1325 llvm::Type *ResTy = DstTy;
1326
1327 // An overflowing conversion has undefined behavior if either the source type
1328 // or the destination type is a floating-point type. However, we consider the
1329 // range of representable values for all floating-point types to be
1330 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1331 // floating-point type.
1332 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1333 OrigSrcType->isFloatingType())
1334 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1335 Loc);
1336
1337 // Cast to half through float if half isn't a native type.
1338 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1339 // Make sure we cast in a single step if from another FP type.
1340 if (SrcTy->isFloatingPointTy()) {
1341 // Use the intrinsic if the half type itself isn't supported
1342 // (as opposed to operations on half, available with NativeHalfType).
1343 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1344 return Builder.CreateCall(
1345 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1346 // If the half type is supported, just use an fptrunc.
1347 return Builder.CreateFPTrunc(Src, DstTy);
1348 }
1349 DstTy = CGF.FloatTy;
1350 }
1351
1352 if (isa<llvm::IntegerType>(SrcTy)) {
1353 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1354 if (SrcType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1355 InputSigned = true;
1356 }
1357 if (isa<llvm::IntegerType>(DstTy))
1358 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1359 else if (InputSigned)
1360 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1361 else
1362 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1363 } else if (isa<llvm::IntegerType>(DstTy)) {
1364 assert(SrcTy->isFloatingPointTy() && "Unknown real conversion");
1365 if (DstType->isSignedIntegerOrEnumerationType())
1366 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1367 else
1368 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1369 } else {
1370 assert(SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() &&
1371 "Unknown real conversion");
1372 if (DstTy->getTypeID() < SrcTy->getTypeID())
1373 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1374 else
1375 Res = Builder.CreateFPExt(Src, DstTy, "conv");
1376 }
1377
1378 if (DstTy != ResTy) {
1379 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1380 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1381 Res = Builder.CreateCall(
1382 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1383 Res);
1384 } else {
1385 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1386 }
1387 }
1388
1389 if (Opts.EmitImplicitIntegerTruncationChecks)
1390 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1391 NoncanonicalDstType, Loc);
1392
1393 if (Opts.EmitImplicitIntegerSignChangeChecks)
1394 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1395 NoncanonicalDstType, Loc);
1396
1397 return Res;
1398}
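
A hedged sketch (an assumption for illustration, not taken from the report; the function name is invented) of typical source-level conversions that reach EmitScalarConversion and the IR casts the branches above generally select:

  void demo_scalar_conversions(void) {
    int      si = 7;
    float    f  = si;   // signed int -> float   : sitofp
    unsigned u  = f;    // float -> unsigned int : fptoui
    double   d  = f;    // float -> double       : fpext
    short    s  = si;   // int -> short          : trunc
    (void)u; (void)d; (void)s;
  }
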
1399
1400Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1401 QualType DstTy,
1402 SourceLocation Loc) {
1403 FixedPointSemantics SrcFPSema =
1404 CGF.getContext().getFixedPointSemantics(SrcTy);
1405 FixedPointSemantics DstFPSema =
1406 CGF.getContext().getFixedPointSemantics(DstTy);
1407 return EmitFixedPointConversion(Src, SrcFPSema, DstFPSema, Loc,
1408 DstTy->isIntegerType());
1409}
1410
1411Value *ScalarExprEmitter::EmitFixedPointConversion(
1412 Value *Src, FixedPointSemantics &SrcFPSema, FixedPointSemantics &DstFPSema,
1413 SourceLocation Loc, bool DstIsInteger) {
1414 using llvm::APInt;
1415 using llvm::ConstantInt;
1416 using llvm::Value;
1417
1418 unsigned SrcWidth = SrcFPSema.getWidth();
1419 unsigned DstWidth = DstFPSema.getWidth();
1420 unsigned SrcScale = SrcFPSema.getScale();
1421 unsigned DstScale = DstFPSema.getScale();
1422 bool SrcIsSigned = SrcFPSema.isSigned();
1423 bool DstIsSigned = DstFPSema.isSigned();
1424
1425 llvm::Type *DstIntTy = Builder.getIntNTy(DstWidth);
1426
1427 Value *Result = Src;
1428 unsigned ResultWidth = SrcWidth;
1429
1430 // Downscale.
1431 if (DstScale < SrcScale) {
1432 // When converting to integers, we round towards zero. For negative numbers,
1433 // right shifting rounds towards negative infinity. In this case, we can
1434 // just round up before shifting.
1435 if (DstIsInteger && SrcIsSigned) {
1436 Value *Zero = llvm::Constant::getNullValue(Result->getType());
1437 Value *IsNegative = Builder.CreateICmpSLT(Result, Zero);
1438 Value *LowBits = ConstantInt::get(
1439 CGF.getLLVMContext(), APInt::getLowBitsSet(ResultWidth, SrcScale));
1440 Value *Rounded = Builder.CreateAdd(Result, LowBits);
1441 Result = Builder.CreateSelect(IsNegative, Rounded, Result);
1442 }
1443
1444 Result = SrcIsSigned
1445 ? Builder.CreateAShr(Result, SrcScale - DstScale, "downscale")
1446 : Builder.CreateLShr(Result, SrcScale - DstScale, "downscale");
1447 }
1448
1449 if (!DstFPSema.isSaturated()) {
1450 // Resize.
1451 Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
1452
1453 // Upscale.
1454 if (DstScale > SrcScale)
1455 Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
1456 } else {
1457 // Adjust the number of fractional bits.
1458 if (DstScale > SrcScale) {
1459 // Compare to DstWidth to prevent resizing twice.
1460 ResultWidth = std::max(SrcWidth + DstScale - SrcScale, DstWidth);
1461 llvm::Type *UpscaledTy = Builder.getIntNTy(ResultWidth);
1462 Result = Builder.CreateIntCast(Result, UpscaledTy, SrcIsSigned, "resize");
1463 Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
1464 }
1465
1466 // Handle saturation.
1467 bool LessIntBits = DstFPSema.getIntegralBits() < SrcFPSema.getIntegralBits();
1468 if (LessIntBits) {
1469 Value *Max = ConstantInt::get(
1470 CGF.getLLVMContext(),
1471 APFixedPoint::getMax(DstFPSema).getValue().extOrTrunc(ResultWidth));
1472 Value *TooHigh = SrcIsSigned ? Builder.CreateICmpSGT(Result, Max)
1473 : Builder.CreateICmpUGT(Result, Max);
1474 Result = Builder.CreateSelect(TooHigh, Max, Result, "satmax");
1475 }
1476 // Cannot overflow min to dest type if src is unsigned since all fixed
1477 // point types can cover the unsigned min of 0.
1478 if (SrcIsSigned && (LessIntBits || !DstIsSigned)) {
1479 Value *Min = ConstantInt::get(
1480 CGF.getLLVMContext(),
1481 APFixedPoint::getMin(DstFPSema).getValue().extOrTrunc(ResultWidth));
1482 Value *TooLow = Builder.CreateICmpSLT(Result, Min);
1483 Result = Builder.CreateSelect(TooLow, Min, Result, "satmin");
1484 }
1485
1486 // Resize the integer part to get the final destination size.
1487 if (ResultWidth != DstWidth)
1488 Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
1489 }
1490 return Result;
1491}
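
A minimal plain-integer sketch (an assumption, not part of the listing; names are hypothetical) of the round-toward-zero downscale that the signed path above implements with the select over LowBits: negative values are pre-rounded up by the low SrcScale bits so the arithmetic right shift does not round toward negative infinity.

  static int downscale_to_int(int v, unsigned srcScale) {
    if (v < 0)
      v += (1 << srcScale) - 1;   // add the low srcScale bits, i.e. LowBits
    return v >> srcScale;         // arithmetic shift, like the AShr above
  }
  // e.g. downscale_to_int(-129, 7) == -1 (round toward zero), not -2.
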
1492
1493/// Emit a conversion from the specified complex type to the specified
1494/// destination type, where the destination type is an LLVM scalar type.
1495Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1496 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1497 SourceLocation Loc) {
1498 // Get the source element type.
1499 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1500
1501 // Handle conversions to bool first, they are special: comparisons against 0.
1502 if (DstTy->isBooleanType()) {
1503 // Complex != 0 -> (Real != 0) | (Imag != 0)
1504 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1505 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1506 return Builder.CreateOr(Src.first, Src.second, "tobool");
1507 }
1508
1509 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1510 // the imaginary part of the complex value is discarded and the value of the
1511 // real part is converted according to the conversion rules for the
1512 // corresponding real type."
1513 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1514}
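
For illustration only (an assumption; the function name is invented), the source-level conversions this helper lowers:

  void demo_complex_to_scalar(void) {
    double _Complex z;
    __real__ z = 3.0;
    __imag__ z = 4.0;
    double r = z;   // C99 6.3.1.7p2: imaginary part discarded -> 3.0
    _Bool  b = z;   // (real != 0) | (imag != 0)               -> 1
    (void)r; (void)b;
  }
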
1515
1516Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1517 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1518}
1519
1520/// Emit a sanitization check for the given "binary" operation (which
1521/// might actually be a unary increment which has been lowered to a binary
1522/// operation). The check passes if all values in \p Checks (which are \c i1),
1523/// are \c true.
1524void ScalarExprEmitter::EmitBinOpCheck(
1525 ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
1526 assert(CGF.IsSanitizerScope);
1527 SanitizerHandler Check;
1528 SmallVector<llvm::Constant *, 4> StaticData;
1529 SmallVector<llvm::Value *, 2> DynamicData;
1530
1531 BinaryOperatorKind Opcode = Info.Opcode;
1532 if (BinaryOperator::isCompoundAssignmentOp(Opcode))
1533 Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
1534
1535 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1536 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1537 if (UO && UO->getOpcode() == UO_Minus) {
1538 Check = SanitizerHandler::NegateOverflow;
1539 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1540 DynamicData.push_back(Info.RHS);
1541 } else {
1542 if (BinaryOperator::isShiftOp(Opcode)) {
1543 // Shift LHS negative or too large, or RHS out of bounds.
1544 Check = SanitizerHandler::ShiftOutOfBounds;
1545 const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1546 StaticData.push_back(
1547 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1548 StaticData.push_back(
1549 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1550 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1551 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1552 Check = SanitizerHandler::DivremOverflow;
1553 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1554 } else {
1555 // Arithmetic overflow (+, -, *).
1556 switch (Opcode) {
1557 case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1558 case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1559 case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1560 default: llvm_unreachable("unexpected opcode for bin op check");
1561 }
1562 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1563 }
1564 DynamicData.push_back(Info.LHS);
1565 DynamicData.push_back(Info.RHS);
1566 }
1567
1568 CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
1569}
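
A hedged example (not from the report; the function name is hypothetical) of operations whose lowered form reaches EmitBinOpCheck when the matching sanitizers are enabled, e.g. -fsanitize=signed-integer-overflow,shift,integer-divide-by-zero:

  int demo_binop_checks(int a, int b) {
    int s = a + b;   // AddOverflow handler if the addition wraps
    int q = a / b;   // DivremOverflow: b == 0 or INT_MIN / -1
    int t = a << b;  // ShiftOutOfBounds: negative or oversized shift amount
    return s + q + t;
  }
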
1570
1571//===----------------------------------------------------------------------===//
1572// Visitor Methods
1573//===----------------------------------------------------------------------===//
1574
1575Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1576 CGF.ErrorUnsupported(E, "scalar expression");
1577 if (E->getType()->isVoidType())
1578 return nullptr;
1579 return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
1580}
1581
1582Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1583 // Vector Mask Case
1584 if (E->getNumSubExprs() == 2) {
1585 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1586 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1587 Value *Mask;
1588
1589 llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
1590 unsigned LHSElts = LTy->getNumElements();
1591
1592 Mask = RHS;
1593
1594 llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
1595
1596 // Mask off the high bits of each shuffle index.
1597 Value *MaskBits =
1598 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1599 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1600
1601 // newv = undef
1602 // mask = mask & maskbits
1603 // for each elt
1604 // n = extract mask i
1605 // x = extract val n
1606 // newv = insert newv, x, i
1607 llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
1608 MTy->getNumElements());
1609 Value* NewV = llvm::UndefValue::get(RTy);
1610 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1611 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1612 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1613
1614 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1615 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1616 }
1617 return NewV;
1618 }
1619
1620 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1621 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1622
1623 SmallVector<llvm::Constant*, 32> indices;
1624 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1625 llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
1626 // Check for -1 and output it as undef in the IR.
1627 if (Idx.isSigned() && Idx.isAllOnesValue())
1628 indices.push_back(llvm::UndefValue::get(CGF.Int32Ty));
1629 else
1630 indices.push_back(Builder.getInt32(Idx.getZExtValue()));
1631 }
1632
1633 Value *SV = llvm::ConstantVector::get(indices);
1634 return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
1635}
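
For illustration (an assumption; typedef and function names are invented), the two forms of __builtin_shufflevector handled above, a constant index list and a variable vector mask:

  typedef int v4si __attribute__((vector_size(16)));
  v4si demo_shuffles(v4si a, v4si b) {
    v4si c = __builtin_shufflevector(a, b, 3, 2, 5, 4); // constant indices -> one shufflevector
    v4si m = {1, 1, 0, 0};
    v4si d = __builtin_shufflevector(a, m);             // vector mask -> the masked extract/insert loop above
    return c + d;
  }
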
1636
1637Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1638 QualType SrcType = E->getSrcExpr()->getType(),
1639 DstType = E->getType();
1640
1641 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1642
1643 SrcType = CGF.getContext().getCanonicalType(SrcType);
1644 DstType = CGF.getContext().getCanonicalType(DstType);
1645 if (SrcType == DstType) return Src;
1646
1647 assert(SrcType->isVectorType() &&
1648 "ConvertVector source type must be a vector");
1649 assert(DstType->isVectorType() &&
1650 "ConvertVector destination type must be a vector");
1651
1652 llvm::Type *SrcTy = Src->getType();
1653 llvm::Type *DstTy = ConvertType(DstType);
1654
1655 // Ignore conversions like int -> uint.
1656 if (SrcTy == DstTy)
1657 return Src;
1658
1659 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1660 DstEltType = DstType->castAs<VectorType>()->getElementType();
1661
1662 assert(SrcTy->isVectorTy() &&
1663 "ConvertVector source IR type must be a vector");
1664 assert(DstTy->isVectorTy() &&
1665 "ConvertVector destination IR type must be a vector");
1666
1667 llvm::Type *SrcEltTy = SrcTy->getVectorElementType(),
1668 *DstEltTy = DstTy->getVectorElementType();
1669
1670 if (DstEltType->isBooleanType()) {
1671 assert((SrcEltTy->isFloatingPointTy() ||
1672 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1673
1674 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
1675 if (SrcEltTy->isFloatingPointTy()) {
1676 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
1677 } else {
1678 return Builder.CreateICmpNE(Src, Zero, "tobool");
1679 }
1680 }
1681
1682 // We have the arithmetic types: real int/float.
1683 Value *Res = nullptr;
1684
1685 if (isa<llvm::IntegerType>(SrcEltTy)) {
1686 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1687 if (isa<llvm::IntegerType>(DstEltTy))
1688 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1689 else if (InputSigned)
1690 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1691 else
1692 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1693 } else if (isa<llvm::IntegerType>(DstEltTy)) {
1694 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
1695 if (DstEltType->isSignedIntegerOrEnumerationType())
1696 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1697 else
1698 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1699 } else {
1700 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
1701 "Unknown real conversion");
1702 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1703 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1704 else
1705 Res = Builder.CreateFPExt(Src, DstTy, "conv");
1706 }
1707
1708 return Res;
1709}
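
An illustrative use (an assumption; typedef and function names are invented) of __builtin_convertvector, which this visitor lowers element-wise with the same int/FP cast selection as the scalar path:

  typedef float v4f __attribute__((vector_size(16)));
  typedef int   v4i __attribute__((vector_size(16)));
  v4i demo_convertvector(v4f f) {
    return __builtin_convertvector(f, v4i);  // element-wise fptosi
  }
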
1710
1711Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1712 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
1713 CGF.EmitIgnoredExpr(E->getBase());
1714 return CGF.emitScalarConstant(Constant, E);
1715 } else {
1716 Expr::EvalResult Result;
1717 if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1718 llvm::APSInt Value = Result.Val.getInt();
1719 CGF.EmitIgnoredExpr(E->getBase());
1720 return Builder.getInt(Value);
1721 }
1722 }
1723
1724 return EmitLoadOfLValue(E);
1725}
1726
1727Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1728 TestAndClearIgnoreResultAssign();
1729
1730 // Emit subscript expressions in rvalue contexts. For most cases, this just
1731 // loads the lvalue formed by the subscript expr. However, we have to be
1732 // careful, because the base of a vector subscript is occasionally an rvalue,
1733 // so we can't get it as an lvalue.
1734 if (!E->getBase()->getType()->isVectorType())
1735 return EmitLoadOfLValue(E);
1736
1737 // Handle the vector case. The base must be a vector, the index must be an
1738 // integer value.
1739 Value *Base = Visit(E->getBase());
1740 Value *Idx = Visit(E->getIdx());
1741 QualType IdxTy = E->getIdx()->getType();
1742
1743 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
1744 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1745
1746 return Builder.CreateExtractElement(Base, Idx, "vecext");
1747}
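
A small illustration (an assumption; names are hypothetical) of the vector-subscript path above, where the base is a vector value and the element is read with extractelement:

  typedef int v4si __attribute__((vector_size(16)));
  int demo_vector_subscript(v4si v) {
    return v[2];  // extractelement; bounds-checked under -fsanitize=array-bounds
  }
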
1748
1749static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1750 unsigned Off, llvm::Type *I32Ty) {
1751 int MV = SVI->getMaskValue(Idx);
1752 if (MV == -1)
1753 return llvm::UndefValue::get(I32Ty);
1754 return llvm::ConstantInt::get(I32Ty, Off+MV);
1755}
1756
1757static llvm::Constant *getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1758 if (C->getBitWidth() != 32) {
1759 assert(llvm::ConstantInt::isValueValidForType(I32Ty,
1760 C->getZExtValue()) &&
1761 "Index operand too large for shufflevector mask!");
1762 return llvm::ConstantInt::get(I32Ty, C->getZExtValue());
1763 }
1764 return C;
1765}
1766
1767Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
1768 bool Ignore = TestAndClearIgnoreResultAssign();
1769 (void)Ignore;
1770 assert (Ignore == false && "init list ignored");
1771 unsigned NumInitElements = E->getNumInits();
1772
1773 if (E->hadArrayRangeDesignator())
1774 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1775
1776 llvm::VectorType *VType =
1777 dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
1778
1779 if (!VType) {
1780 if (NumInitElements == 0) {
1781 // C++11 value-initialization for the scalar.
1782 return EmitNullValue(E->getType());
1783 }
1784 // We have a scalar in braces. Just use the first element.
1785 return Visit(E->getInit(0));
1786 }
1787
1788 unsigned ResElts = VType->getNumElements();
1789
1790 // Loop over initializers collecting the Value for each, and remembering
1791 // whether the source was a swizzle (ExtVectorElementExpr). This will allow
1792 // us to fold the shuffle for the swizzle into the shuffle for the vector
1793 // initializer, since LLVM optimizers generally do not want to touch
1794 // shuffles.
1795 unsigned CurIdx = 0;
1796 bool VIsUndefShuffle = false;
1797 llvm::Value *V = llvm::UndefValue::get(VType);
1798 for (unsigned i = 0; i != NumInitElements; ++i) {
1799 Expr *IE = E->getInit(i);
1800 Value *Init = Visit(IE);
1801 SmallVector<llvm::Constant*, 16> Args;
1802
1803 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
1804
1805 // Handle scalar elements. If the scalar initializer is actually one
1806 // element of a different vector of the same width, use shuffle instead of
1807 // extract+insert.
1808 if (!VVT) {
1809 if (isa<ExtVectorElementExpr>(IE)) {
1810 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
1811
1812 if (EI->getVectorOperandType()->getNumElements() == ResElts) {
1813 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
1814 Value *LHS = nullptr, *RHS = nullptr;
1815 if (CurIdx == 0) {
1816 // insert into undef -> shuffle (src, undef)
1817 // shufflemask must use an i32
1818 Args.push_back(getAsInt32(C, CGF.Int32Ty));
1819 Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1820
1821 LHS = EI->getVectorOperand();
1822 RHS = V;
1823 VIsUndefShuffle = true;
1824 } else if (VIsUndefShuffle) {
1825 // insert into undefshuffle && size match -> shuffle (v, src)
1826 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
1827 for (unsigned j = 0; j != CurIdx; ++j)
1828 Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
1829 Args.push_back(Builder.getInt32(ResElts + C->getZExtValue()));
1830 Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1831
1832 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1833 RHS = EI->getVectorOperand();
1834 VIsUndefShuffle = false;
1835 }
1836 if (!Args.empty()) {
1837 llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1838 V = Builder.CreateShuffleVector(LHS, RHS, Mask);
1839 ++CurIdx;
1840 continue;
1841 }
1842 }
1843 }
1844 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
1845 "vecinit");
1846 VIsUndefShuffle = false;
1847 ++CurIdx;
1848 continue;
1849 }
1850
1851 unsigned InitElts = VVT->getNumElements();
1852
1853 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
1854 // input is the same width as the vector being constructed, generate an
1855 // optimized shuffle of the swizzle input into the result.
1856 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
1857 if (isa<ExtVectorElementExpr>(IE)) {
1858 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
1859 Value *SVOp = SVI->getOperand(0);
1860 llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
1861
1862 if (OpTy->getNumElements() == ResElts) {
1863 for (unsigned j = 0; j != CurIdx; ++j) {
1864 // If the current vector initializer is a shuffle with undef, merge
1865 // this shuffle directly into it.
1866 if (VIsUndefShuffle) {
1867 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
1868 CGF.Int32Ty));
1869 } else {
1870 Args.push_back(Builder.getInt32(j));
1871 }
1872 }
1873 for (unsigned j = 0, je = InitElts; j != je; ++j)
1874 Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
1875 Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1876
1877 if (VIsUndefShuffle)
1878 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1879
1880 Init = SVOp;
1881 }
1882 }
1883
1884 // Extend init to result vector length, and then shuffle its contribution
1885 // to the vector initializer into V.
1886 if (Args.empty()) {
1887 for (unsigned j = 0; j != InitElts; ++j)
1888 Args.push_back(Builder.getInt32(j));
1889 Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1890 llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1891 Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
1892 Mask, "vext");
1893
1894 Args.clear();
1895 for (unsigned j = 0; j != CurIdx; ++j)
1896 Args.push_back(Builder.getInt32(j));
1897 for (unsigned j = 0; j != InitElts; ++j)
1898 Args.push_back(Builder.getInt32(j+Offset));
1899 Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1900 }
1901
1902 // If V is undef, make sure it ends up on the RHS of the shuffle to aid
1903 // merging subsequent shuffles into this one.
1904 if (CurIdx == 0)
1905 std::swap(V, Init);
1906 llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1907 V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
1908 VIsUndefShuffle = isa<llvm::UndefValue>(Init);
1909 CurIdx += InitElts;
1910 }
1911
1912 // FIXME: evaluate codegen vs. shuffling against constant null vector.
1913 // Emit remaining default initializers.
1914 llvm::Type *EltTy = VType->getElementType();
1915
1916 // Emit remaining default initializers
1917 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
1918 Value *Idx = Builder.getInt32(CurIdx);
1919 llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
1920 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
1921 }
1922 return V;
1923}
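
For illustration (an assumption; typedef and function names are invented), a vector initializer list containing an ext-vector swizzle; the loop above folds the swizzle's shuffle into the vector build instead of emitting extract+insert pairs:

  typedef float float4 __attribute__((ext_vector_type(4)));
  float4 demo_vector_init(float4 src) {
    float4 v = {src.xy, 5.0f, 6.0f};  // the swizzle's shuffle is merged into the vector build
    return v;
  }
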
1924
1925bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
1926 const Expr *E = CE->getSubExpr();
1927
1928 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
1929 return false;
1930
1931 if (isa<CXXThisExpr>(E->IgnoreParens())) {
1932 // We always assume that 'this' is never null.
1933 return false;
1934 }
1935
1936 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
1937 // And that glvalue casts are never null.
1938 if (ICE->getValueKind() != VK_RValue)
1939 return false;
1940 }
1941
1942 return true;
1943}
1944
1945// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
1946 // have to handle a broader range of conversions than explicit casts, as they
1947// handle things like function to ptr-to-function decay etc.
1948Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
1949 Expr *E = CE->getSubExpr();
1950 QualType DestTy = CE->getType();
1951 CastKind Kind = CE->getCastKind();
1952
1953 // These cases are generally not written to ignore the result of
1954 // evaluating their sub-expressions, so we clear this now.
1955 bool Ignored = TestAndClearIgnoreResultAssign();
1956
1957 // Since almost all cast kinds apply to scalars, this switch doesn't have
1958 // a default case, so the compiler will warn on a missing case. The cases
1959 // are in the same order as in the CastKind enum.
1960 switch (Kind) {
1961 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
1962 case CK_BuiltinFnToFnPtr:
1963 llvm_unreachable("builtin functions are handled elsewhere");
1964
1965 case CK_LValueBitCast:
1966 case CK_ObjCObjectLValueCast: {
1967 Address Addr = EmitLValue(E).getAddress();
1968 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
1969 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
1970 return EmitLoadOfLValue(LV, CE->getExprLoc());
1971 }
1972
1973 case CK_LValueToRValueBitCast: {
1974 LValue SourceLVal = CGF.EmitLValue(E);
1975 Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(),
1976 CGF.ConvertTypeForMem(DestTy));
1977 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
1978 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
1979 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
1980 }
1981
1982 case CK_CPointerToObjCPointerCast:
1983 case CK_BlockPointerToObjCPointerCast:
1984 case CK_AnyPointerToBlockPointerCast:
1985 case CK_BitCast: {
1986 Value *Src = Visit(const_cast<Expr*>(E));
1987 llvm::Type *SrcTy = Src->getType();
1988 llvm::Type *DstTy = ConvertType(DestTy);
1989 if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
1990 SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
1991 llvm_unreachable("wrong cast for pointers in different address spaces"
1992 "(must be an address space cast)!");
1993 }
1994
1995 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
1996 if (auto PT = DestTy->getAs<PointerType>())
1997 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
1998 /*MayBeNull=*/true,
1999 CodeGenFunction::CFITCK_UnrelatedCast,
2000 CE->getBeginLoc());
2001 }
2002
2003 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2004 const QualType SrcType = E->getType();
2005
2006 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2007 // Casting to pointer that could carry dynamic information (provided by
2008 // invariant.group) requires launder.
2009 Src = Builder.CreateLaunderInvariantGroup(Src);
2010 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2011 // Casting to pointer that does not carry dynamic information (provided
2012 // by invariant.group) requires stripping it. Note that we don't do it
2013 // if the source could not be dynamic type and destination could be
2014 // dynamic because dynamic information is already laundered. It is
2015 // because launder(strip(src)) == launder(src), so there is no need to
2016 // add extra strip before launder.
2017 Src = Builder.CreateStripInvariantGroup(Src);
2018 }
2019 }
2020
2021 // Update heapallocsite metadata when there is an explicit cast.
2022 if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(Src))
2023 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE))
2024 CGF.getDebugInfo()->
2025 addHeapAllocSiteMetadata(CI, CE->getType(), CE->getExprLoc());
2026
2027 return Builder.CreateBitCast(Src, DstTy);
2028 }
2029 case CK_AddressSpaceConversion: {
2030 Expr::EvalResult Result;
2031 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2032 Result.Val.isNullPointer()) {
2033 // If E has side effect, it is emitted even if its final result is a
2034 // null pointer. In that case, a DCE pass should be able to
2035 // eliminate the useless instructions emitted during translating E.
2036 if (Result.HasSideEffects)
2037 Visit(E);
2038 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
2039 ConvertType(DestTy)), DestTy);
2040 }
2041 // Since target may map different address spaces in AST to the same address
2042 // space, an address space conversion may end up as a bitcast.
2043 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2044 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2045 DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
2046 }
2047 case CK_AtomicToNonAtomic:
2048 case CK_NonAtomicToAtomic:
2049 case CK_NoOp:
2050 case CK_UserDefinedConversion:
2051 return Visit(const_cast<Expr*>(E));
2052
2053 case CK_BaseToDerived: {
2054 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2055 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2056
2057 Address Base = CGF.EmitPointerWithAlignment(E);
2058 Address Derived =
2059 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2060 CE->path_begin(), CE->path_end(),
2061 CGF.ShouldNullCheckClassCastValue(CE));
2062
2063 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2064 // performed and the object is not of the derived type.
2065 if (CGF.sanitizePerformTypeCheck())
2066 CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
2067 Derived.getPointer(), DestTy->getPointeeType());
2068
2069 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2070 CGF.EmitVTablePtrCheckForCast(
2071 DestTy->getPointeeType(), Derived.getPointer(),
2072 /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast,
2073 CE->getBeginLoc());
2074
2075 return Derived.getPointer();
2076 }
2077 case CK_UncheckedDerivedToBase:
2078 case CK_DerivedToBase: {
2079 // The EmitPointerWithAlignment path does this fine; just discard
2080 // the alignment.
2081 return CGF.EmitPointerWithAlignment(CE).getPointer();
2082 }
2083
2084 case CK_Dynamic: {
2085 Address V = CGF.EmitPointerWithAlignment(E);
2086 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2087 return CGF.EmitDynamicCast(V, DCE);
2088 }
2089
2090 case CK_ArrayToPointerDecay:
2091 return CGF.EmitArrayToPointerDecay(E).getPointer();
2092 case CK_FunctionToPointerDecay:
2093 return EmitLValue(E).getPointer();
2094
2095 case CK_NullToPointer:
2096 if (MustVisitNullValue(E))
2097 CGF.EmitIgnoredExpr(E);
2098
2099 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2100 DestTy);
2101
2102 case CK_NullToMemberPointer: {
2103 if (MustVisitNullValue(E))
2104 CGF.EmitIgnoredExpr(E);
2105
2106 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2107 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2108 }
2109
2110 case CK_ReinterpretMemberPointer:
2111 case CK_BaseToDerivedMemberPointer:
2112 case CK_DerivedToBaseMemberPointer: {
2113 Value *Src = Visit(E);
2114
2115 // Note that the AST doesn't distinguish between checked and
2116 // unchecked member pointer conversions, so we always have to
2117 // implement checked conversions here. This is inefficient when
2118 // actual control flow may be required in order to perform the
2119 // check, which it is for data member pointers (but not member
2120 // function pointers on Itanium and ARM).
2121 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2122 }
2123
2124 case CK_ARCProduceObject:
2125 return CGF.EmitARCRetainScalarExpr(E);
2126 case CK_ARCConsumeObject:
2127 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2128 case CK_ARCReclaimReturnedObject:
2129 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2130 case CK_ARCExtendBlockObject:
2131 return CGF.EmitARCExtendBlockObject(E);
2132
2133 case CK_CopyAndAutoreleaseBlockObject:
2134 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2135
2136 case CK_FloatingRealToComplex:
2137 case CK_FloatingComplexCast:
2138 case CK_IntegralRealToComplex:
2139 case CK_IntegralComplexCast:
2140 case CK_IntegralComplexToFloatingComplex:
2141 case CK_FloatingComplexToIntegralComplex:
2142 case CK_ConstructorConversion:
2143 case CK_ToUnion:
2144 llvm_unreachable("scalar cast to non-scalar value");
2145
2146 case CK_LValueToRValue:
2147 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2148 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2149 return Visit(const_cast<Expr*>(E));
2150
2151 case CK_IntegralToPointer: {
2152 Value *Src = Visit(const_cast<Expr*>(E));
2153
2154 // First, convert to the correct width so that we control the kind of
2155 // extension.
2156 auto DestLLVMTy = ConvertType(DestTy);
2157 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2158 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2159 llvm::Value* IntResult =
2160 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2161
2162 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2163
2164 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2165 // Going from integer to pointer that could be dynamic requires reloading
2166 // dynamic information from invariant.group.
2167 if (DestTy.mayBeDynamicClass())
2168 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2169 }
2170 return IntToPtr;
2171 }
2172 case CK_PointerToIntegral: {
2173 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2174 auto *PtrExpr = Visit(E);
2175
2176 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2177 const QualType SrcType = E->getType();
2178
2179 // Casting to integer requires stripping dynamic information as it does
2180 // not carry it.
2181 if (SrcType.mayBeDynamicClass())
2182 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2183 }
2184
2185 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2186 }
2187 case CK_ToVoid: {
2188 CGF.EmitIgnoredExpr(E);
2189 return nullptr;
2190 }
2191 case CK_VectorSplat: {
2192 llvm::Type *DstTy = ConvertType(DestTy);
2193 Value *Elt = Visit(const_cast<Expr*>(E));
2194 // Splat the element across to all elements
2195 unsigned NumElements = DstTy->getVectorNumElements();
2196 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2197 }
2198
2199 case CK_FixedPointCast:
2200 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2201 CE->getExprLoc());
2202
2203 case CK_FixedPointToBoolean:
2204 assert(E->getType()->isFixedPointType() &&
2205 "Expected src type to be fixed point type");
2206 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2207 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2208 CE->getExprLoc());
2209
2210 case CK_FixedPointToIntegral:
2211 assert(E->getType()->isFixedPointType() &&
2212        "Expected src type to be fixed point type");
2213 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2214 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2215 CE->getExprLoc());
2216
2217 case CK_IntegralToFixedPoint:
2218 assert(E->getType()->isIntegerType() &&
2219        "Expected src type to be an integer");
2220 assert(DestTy->isFixedPointType() &&
2221        "Expected dest type to be fixed point type");
2222 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2223 CE->getExprLoc());
2224
2225 case CK_IntegralCast: {
2226 ScalarConversionOpts Opts;
2227 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2228 if (!ICE->isPartOfExplicitCast())
2229 Opts = ScalarConversionOpts(CGF.SanOpts);
2230 }
2231 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2232 CE->getExprLoc(), Opts);
2233 }
2234 case CK_IntegralToFloating:
2235 case CK_FloatingToIntegral:
2236 case CK_FloatingCast:
2237 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2238 CE->getExprLoc());
2239 case CK_BooleanToSignedIntegral: {
2240 ScalarConversionOpts Opts;
2241 Opts.TreatBooleanAsSigned = true;
2242 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2243 CE->getExprLoc(), Opts);
2244 }
2245 case CK_IntegralToBoolean:
2246 return EmitIntToBoolConversion(Visit(E));
2247 case CK_PointerToBoolean:
2248 return EmitPointerToBoolConversion(Visit(E), E->getType());
2249 case CK_FloatingToBoolean:
2250 return EmitFloatToBoolConversion(Visit(E));
2251 case CK_MemberPointerToBoolean: {
2252 llvm::Value *MemPtr = Visit(E);
2253 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2254 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2255 }
2256
2257 case CK_FloatingComplexToReal:
2258 case CK_IntegralComplexToReal:
2259 return CGF.EmitComplexExpr(E, false, true).first;
2260
2261 case CK_FloatingComplexToBoolean:
2262 case CK_IntegralComplexToBoolean: {
2263 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2264
2265 // TODO: kill this function off, inline appropriate case here
2266 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2267 CE->getExprLoc());
2268 }
2269
2270 case CK_ZeroToOCLOpaqueType: {
2271 assert((DestTy->isEventT() || DestTy->isQueueT() ||
2272         DestTy->isOCLIntelSubgroupAVCType()) &&
2273        "CK_ZeroToOCLEvent cast on non-event type");
2274 return llvm::Constant::getNullValue(ConvertType(DestTy));
2275 }
2276
2277 case CK_IntToOCLSampler:
2278 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2279
2280 } // end of switch
2281
2282 llvm_unreachable("unknown scalar cast");
2283}
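For reference, a tiny C translation unit (illustrative only, not part of this file) whose casts are lowered by the switch above — the explicit integer-to-pointer conversion takes the CK_IntegralToPointer case, the implicit conversion to _Bool takes CK_IntegralToBoolean:

/* Illustrative only: exercises CK_IntegralToPointer and CK_IntegralToBoolean. */
int main(void) {
  unsigned long raw = 0x1000;
  int *p = (int *)raw; /* widened/truncated to pointer width, then inttoptr */
  _Bool nonzero = raw; /* compared against zero */
  return (p != 0) + nonzero;
}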
2284
2285Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2286 CodeGenFunction::StmtExprEvaluation eval(CGF);
2287 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2288 !E->getType()->isVoidType());
2289 if (!RetAlloca.isValid())
2290 return nullptr;
2291 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2292 E->getExprLoc());
2293}
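For illustration (a GCC/Clang statement-expression extension, not code from this file), inputs that reach VisitStmtExpr above:

/* Illustrative only: non-void and void statement expressions. */
int f(int x) {
  int y = ({ int t = x * 2; t + 1; }); /* result loaded from the compound statement's alloca */
  ({ (void)x; });                      /* void result: no value is produced */
  return y;
}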
2294
2295Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2296 CGF.enterFullExpression(E);
2297 CodeGenFunction::RunCleanupsScope Scope(CGF);
2298 Value *V = Visit(E->getSubExpr());
2299 // Defend against dominance problems caused by jumps out of expression
2300 // evaluation through the shared cleanup block.
2301 Scope.ForceCleanup({&V});
2302 return V;
2303}
2304
2305//===----------------------------------------------------------------------===//
2306// Unary Operators
2307//===----------------------------------------------------------------------===//
2308
2309static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2310 llvm::Value *InVal, bool IsInc) {
2311 BinOpInfo BinOp;
2312 BinOp.LHS = InVal;
2313 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2314 BinOp.Ty = E->getType();
2315 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2316 // FIXME: once UnaryOperator carries FPFeatures, copy it here.
2317 BinOp.E = E;
2318 return BinOp;
2319}
2320
2321llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2322 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2323 llvm::Value *Amount =
2324 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
2325 StringRef Name = IsInc ? "inc" : "dec";
2326 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2327 case LangOptions::SOB_Defined:
2328 return Builder.CreateAdd(InVal, Amount, Name);
2329 case LangOptions::SOB_Undefined:
2330 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2331 return Builder.CreateNSWAdd(InVal, Amount, Name);
2332 LLVM_FALLTHROUGH;
2333 case LangOptions::SOB_Trapping:
2334 if (!E->canOverflow())
2335 return Builder.CreateNSWAdd(InVal, Amount, Name);
2336 return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, InVal, IsInc));
2337 }
2338 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
2339}
2340
2341llvm::Value *
2342ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2343 bool isInc, bool isPre) {
2344
2345 QualType type = E->getSubExpr()->getType();
2346 llvm::PHINode *atomicPHI = nullptr;
2347 llvm::Value *value;
2348 llvm::Value *input;
2349
2350 int amount = (isInc ? 1 : -1);
2351 bool isSubtraction = !isInc;
2352
2353 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2354 type = atomicTy->getValueType();
2355 if (isInc && type->isBooleanType()) {
2356 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
2357 if (isPre) {
2358 Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
2359 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
2360 return Builder.getTrue();
2361 }
2362 // For atomic bool pre-increment we just stored true and returned it above;
2363 // for post-increment, do an atomic exchange with true instead.
2364 return Builder.CreateAtomicRMW(
2365 llvm::AtomicRMWInst::Xchg, LV.getPointer(), True,
2366 llvm::AtomicOrdering::SequentiallyConsistent);
2367 }
2368 // Special case for atomic increment / decrement on integers, emit
2369 // atomicrmw instructions. We skip this if we want to be doing overflow
2370 // checking, and fall into the slow path with the atomic cmpxchg loop.
2371 if (!type->isBooleanType() && type->isIntegerType() &&
2372 !(type->isUnsignedIntegerType() &&
2373 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2374 CGF.getLangOpts().getSignedOverflowBehavior() !=
2375 LangOptions::SOB_Trapping) {
2376 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2377 llvm::AtomicRMWInst::Sub;
2378 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2379 llvm::Instruction::Sub;
2380 llvm::Value *amt = CGF.EmitToMemory(
2381 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
2382 llvm::Value *old = Builder.CreateAtomicRMW(aop,
2383 LV.getPointer(), amt, llvm::AtomicOrdering::SequentiallyConsistent);
2384 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
2385 }
2386 value = EmitLoadOfLValue(LV, E->getExprLoc());
2387 input = value;
2388 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2389 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2390 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2391 value = CGF.EmitToMemory(value, type);
2392 Builder.CreateBr(opBB);
2393 Builder.SetInsertPoint(opBB);
2394 atomicPHI = Builder.CreatePHI(value->getType(), 2);
2395 atomicPHI->addIncoming(value, startBB);
2396 value = atomicPHI;
2397 } else {
2398 value = EmitLoadOfLValue(LV, E->getExprLoc());
2399 input = value;
2400 }
2401
2402 // Special case of integer increment that we have to check first: bool++.
2403 // Due to promotion rules, we get:
2404 // bool++ -> bool = bool + 1
2405 // -> bool = (int)bool + 1
2406 // -> bool = ((int)bool + 1 != 0)
2407 // An interesting aspect of this is that increment is always true.
2408 // Decrement does not have this property.
2409 if (isInc && type->isBooleanType()) {
2410 value = Builder.getTrue();
2411
2412 // Most common case by far: integer increment.
2413 } else if (type->isIntegerType()) {
2414 // Note that signed integer inc/dec with width less than int can't
2415 // overflow because of promotion rules; we're just eliding a few steps here.
2416 if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2417 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
2418 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2419 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
2420 value =
2421 EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, value, isInc));
2422 } else {
2423 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2424 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2425 }
2426
2427 // Next most common: pointer increment.
2428 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2429 QualType type = ptr->getPointeeType();
2430
2431 // VLA types don't have constant size.
2432 if (const VariableArrayType *vla
2433 = CGF.getContext().getAsVariableArrayType(type)) {
2434 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
2435 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
2436 if (CGF.getLangOpts().isSignedOverflowDefined())
2437 value = Builder.CreateGEP(value, numElts, "vla.inc");
2438 else
2439 value = CGF.EmitCheckedInBoundsGEP(
2440 value, numElts, /*SignedIndices=*/false, isSubtraction,
2441 E->getExprLoc(), "vla.inc");
2442
2443 // Arithmetic on function pointers (!) is just +-1.
2444 } else if (type->isFunctionType()) {
2445 llvm::Value *amt = Builder.getInt32(amount);
2446
2447 value = CGF.EmitCastToVoidPtr(value);
2448 if (CGF.getLangOpts().isSignedOverflowDefined())
2449 value = Builder.CreateGEP(value, amt, "incdec.funcptr");
2450 else
2451 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2452 isSubtraction, E->getExprLoc(),
2453 "incdec.funcptr");
2454 value = Builder.CreateBitCast(value, input->getType());
2455
2456 // For everything else, we can just do a simple increment.
2457 } else {
2458 llvm::Value *amt = Builder.getInt32(amount);
2459 if (CGF.getLangOpts().isSignedOverflowDefined())
2460 value = Builder.CreateGEP(value, amt, "incdec.ptr");
2461 else
2462 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2463 isSubtraction, E->getExprLoc(),
2464 "incdec.ptr");
2465 }
2466
2467 // Vector increment/decrement.
2468 } else if (type->isVectorType()) {
2469 if (type->hasIntegerRepresentation()) {
2470 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
2471
2472 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2473 } else {
2474 value = Builder.CreateFAdd(
2475 value,
2476 llvm::ConstantFP::get(value->getType(), amount),
2477 isInc ? "inc" : "dec");
2478 }
2479
2480 // Floating point.
2481 } else if (type->isRealFloatingType()) {
2482 // Add the inc/dec to the real part.
2483 llvm::Value *amt;
2484
2485 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2486 // Another special case: half FP increment should be done via float
2487 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2488 value = Builder.CreateCall(
2489 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
2490 CGF.CGM.FloatTy),
2491 input, "incdec.conv");
2492 } else {
2493 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
2494 }
2495 }
2496
2497 if (value->getType()->isFloatTy())
2498 amt = llvm::ConstantFP::get(VMContext,
2499 llvm::APFloat(static_cast<float>(amount)));
2500 else if (value->getType()->isDoubleTy())
2501 amt = llvm::ConstantFP::get(VMContext,
2502 llvm::APFloat(static_cast<double>(amount)));
2503 else {
2504 // Remaining types are Half, LongDouble or __float128. Convert from float.
2505 llvm::APFloat F(static_cast<float>(amount));
2506 bool ignored;
2507 const llvm::fltSemantics *FS;
2508 // Don't use getFloatTypeSemantics because Half isn't
2509 // necessarily represented using the "half" LLVM type.
2510 if (value->getType()->isFP128Ty())
2511 FS = &CGF.getTarget().getFloat128Format();
2512 else if (value->getType()->isHalfTy())
2513 FS = &CGF.getTarget().getHalfFormat();
2514 else
2515 FS = &CGF.getTarget().getLongDoubleFormat();
2516 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
2517 amt = llvm::ConstantFP::get(VMContext, F);
2518 }
2519 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
2520
2521 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2522 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2523 value = Builder.CreateCall(
2524 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
2525 CGF.CGM.FloatTy),
2526 value, "incdec.conv");
2527 } else {
2528 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
2529 }
2530 }
2531
2532 // Objective-C pointer types.
2533 } else {
2534 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
2535 value = CGF.EmitCastToVoidPtr(value);
2536
2537 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
2538 if (!isInc) size = -size;
2539 llvm::Value *sizeValue =
2540 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
2541
2542 if (CGF.getLangOpts().isSignedOverflowDefined())
2543 value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
2544 else
2545 value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
2546 /*SignedIndices=*/false, isSubtraction,
2547 E->getExprLoc(), "incdec.objptr");
2548 value = Builder.CreateBitCast(value, input->getType());
2549 }
2550
2551 if (atomicPHI) {
2552 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
2553 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2554 auto Pair = CGF.EmitAtomicCompareExchange(
2555 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
2556 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
2557 llvm::Value *success = Pair.second;
2558 atomicPHI->addIncoming(old, curBlock);
2559 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
2560 Builder.SetInsertPoint(contBB);
2561 return isPre ? value : input;
2562 }
2563
2564 // Store the updated result through the lvalue.
2565 if (LV.isBitField())
2566 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
2567 else
2568 CGF.EmitStoreThroughLValue(RValue::get(value), LV);
2569
2570 // If this is a postinc, return the value read from memory, otherwise use the
2571 // updated value.
2572 return isPre ? value : input;
2573}
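A small illustrative C11 snippet (not from this file) with operand kinds that take different branches of EmitScalarPrePostIncDec above:

/* Illustrative only: atomic integer, pointer, and _Bool increments. */
#include <stdatomic.h>
int bump(_Atomic int *ai, int *p, _Bool b) {
  int old = (*ai)++; /* atomic integer fast path: a single atomicrmw add */
  ++p;               /* pointer path: (checked) inbounds GEP by one element */
  b++;               /* bool special case: the stored value is always true */
  return old + b + (p != 0);
}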
2574
2575
2576
2577Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
2578 TestAndClearIgnoreResultAssign();
2579 // Emit unary minus with EmitSub so we handle overflow cases etc.
2580 BinOpInfo BinOp;
2581 BinOp.RHS = Visit(E->getSubExpr());
2582
2583 if (BinOp.RHS->getType()->isFPOrFPVectorTy())
2584 BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType());
2585 else
2586 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
2587 BinOp.Ty = E->getType();
2588 BinOp.Opcode = BO_Sub;
2589 // FIXME: once UnaryOperator carries FPFeatures, copy it here.
2590 BinOp.E = E;
2591 return EmitSub(BinOp);
2592}
2593
2594Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
2595 TestAndClearIgnoreResultAssign();
2596 Value *Op = Visit(E->getSubExpr());
2597 return Builder.CreateNot(Op, "neg");
2598}
2599
2600Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
2601 // Perform vector logical not on comparison with zero vector.
2602 if (E->getType()->isExtVectorType()) {
2603 Value *Oper = Visit(E->getSubExpr());
2604 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
2605 Value *Result;
2606 if (Oper->getType()->isFPOrFPVectorTy())
2607 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
2608 else
2609 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
2610 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
2611 }
2612
2613 // Compare operand to zero.
2614 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
2615
2616 // Invert value.
2617 // TODO: Could dynamically modify easy computations here. For example, if
2618 // the operand is an icmp ne, turn into icmp eq.
2619 BoolVal = Builder.CreateNot(BoolVal, "lnot");
2620
2621 // ZExt result to the expr type.
2622 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
2623}
2624
2625Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
2626 // Try folding the offsetof to a constant.
2627 Expr::EvalResult EVResult;
2628 if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
2629 llvm::APSInt Value = EVResult.Val.getInt();
2630 return Builder.getInt(Value);
2631 }
2632
2633 // Loop over the components of the offsetof to compute the value.
2634 unsigned n = E->getNumComponents();
2635 llvm::Type* ResultType = ConvertType(E->getType());
2636 llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
2637 QualType CurrentType = E->getTypeSourceInfo()->getType();
2638 for (unsigned i = 0; i != n; ++i) {
2639 OffsetOfNode ON = E->getComponent(i);
2640 llvm::Value *Offset = nullptr;
2641 switch (ON.getKind()) {
2642 case OffsetOfNode::Array: {
2643 // Compute the index
2644 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
2645 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
2646 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
2647 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
2648
2649 // Save the element type
2650 CurrentType =
2651 CGF.getContext().getAsArrayType(CurrentType)->getElementType();
2652
2653 // Compute the element size
2654 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
2655 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
2656
2657 // Multiply out to compute the result
2658 Offset = Builder.CreateMul(Idx, ElemSize);
2659 break;
2660 }
2661
2662 case OffsetOfNode::Field: {
2663 FieldDecl *MemberDecl = ON.getField();
2664 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
2665 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2666
2667 // Compute the index of the field in its parent.
2668 unsigned i = 0;
2669 // FIXME: It would be nice if we didn't have to loop here!
2670 for (RecordDecl::field_iterator Field = RD->field_begin(),
2671 FieldEnd = RD->field_end();
2672 Field != FieldEnd; ++Field, ++i) {
2673 if (*Field == MemberDecl)
2674 break;
2675 }
2676 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
2677
2678 // Compute the offset to the field
2679 int64_t OffsetInt = RL.getFieldOffset(i) /
2680 CGF.getContext().getCharWidth();
2681 Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
2682
2683 // Save the element type.
2684 CurrentType = MemberDecl->getType();
2685 break;
2686 }
2687
2688 case OffsetOfNode::Identifier:
2689 llvm_unreachable("dependent __builtin_offsetof");
2690
2691 case OffsetOfNode::Base: {
2692 if (ON.getBase()->isVirtual()) {
2693 CGF.ErrorUnsupported(E, "virtual base in offsetof");
2694 continue;
2695 }
2696
2697 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
2698 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2699
2700 // Save the element type.
2701 CurrentType = ON.getBase()->getType();
2702
2703 // Compute the offset to the base.
2704 const RecordType *BaseRT = CurrentType->getAs<RecordType>();
2705 CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
2706 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
2707 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
2708 break;
2709 }
2710 }
2711 Result = Builder.CreateAdd(Result, Offset);
2712 }
2713 return Result;
2714}
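Illustrative C input (not from this file) for the two paths above: a constant offsetof folds immediately, while a __builtin_offsetof with a runtime array index (a GNU extension) walks the component loop:

/* Illustrative only: constant vs. non-constant offsetof. */
#include <stddef.h>
struct S { int a; int b[4]; };
size_t off_const(void) { return offsetof(struct S, b[2]); }            /* folds to a constant */
size_t off_dyn(int i)  { return __builtin_offsetof(struct S, b[i]); }  /* Field + Array components emitted */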
2715
2716/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
2717/// the argument of the sizeof expression as an integer.
2718Value *
2719ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2720 const UnaryExprOrTypeTraitExpr *E) {
2721 QualType TypeToSize = E->getTypeOfArgument();
2722 if (E->getKind() == UETT_SizeOf) {
2723 if (const VariableArrayType *VAT =
2724 CGF.getContext().getAsVariableArrayType(TypeToSize)) {
2725 if (E->isArgumentType()) {
2726 // sizeof(type) - make sure to emit the VLA size.
2727 CGF.EmitVariablyModifiedType(TypeToSize);
2728 } else {
2729 // C99 6.5.3.4p2: If the argument is an expression of type
2730 // VLA, it is evaluated.
2731 CGF.EmitIgnoredExpr(E->getArgumentExpr());
2732 }
2733
2734 auto VlaSize = CGF.getVLASize(VAT);
2735 llvm::Value *size = VlaSize.NumElts;
2736
2737 // Scale the number of non-VLA elements by the non-VLA element size.
2738 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
2739 if (!eltSize.isOne())
2740 size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);
2741
2742 return size;
2743 }
2744 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
2745 auto Alignment =
2746 CGF.getContext()
2747 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
2748 E->getTypeOfArgument()->getPointeeType()))
2749 .getQuantity();
2750 return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
2751 }
2752
2753 // If this isn't sizeof(vla), the result must be constant; use the constant
2754 // folding logic so we don't have to duplicate it here.
2755 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
2756}
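An illustrative sketch (not from this file) of the VLA branches above:

/* Illustrative only: sizeof on a VLA type vs. a VLA expression. */
#include <stddef.h>
size_t vla_sizes(int n) {
  int a[n];
  size_t s1 = sizeof(int[n]); /* sizeof(type): the variably modified type is emitted */
  size_t s2 = sizeof(a);      /* sizeof(expr): the operand is evaluated (C99 6.5.3.4p2) */
  return s1 + s2;
}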
2757
2758Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
2759 Expr *Op = E->getSubExpr();
2760 if (Op->getType()->isAnyComplexType()) {
2761 // If it's an l-value, load through the appropriate subobject l-value.
2762 // Note that we have to ask E because Op might be an l-value that
2763 // this won't work for, e.g. an Obj-C property.
2764 if (E->isGLValue())
2765 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2766 E->getExprLoc()).getScalarVal();
2767
2768 // Otherwise, calculate and project.
2769 return CGF.EmitComplexExpr(Op, false, true).first;
2770 }
2771
2772 return Visit(Op);
2773}
2774
2775Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
2776 Expr *Op = E->getSubExpr();
2777 if (Op->getType()->isAnyComplexType()) {
2778 // If it's an l-value, load through the appropriate subobject l-value.
2779 // Note that we have to ask E because Op might be an l-value that
2780 // this won't work for, e.g. an Obj-C property.
2781 if (Op->isGLValue())
2782 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2783 E->getExprLoc()).getScalarVal();
2784
2785 // Otherwise, calculate and project.
2786 return CGF.EmitComplexExpr(Op, true, false).second;
2787 }
2788
2789 // __imag on a scalar returns zero. Emit the subexpr to ensure side
2790 // effects are evaluated, but not the actual value.
2791 if (Op->isGLValue())
2792 CGF.EmitLValue(Op);
2793 else
2794 CGF.EmitScalarExpr(Op, true);
2795 return llvm::Constant::getNullValue(ConvertType(E->getType()));
2796}
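For illustration (GNU __real__/__imag__ extension, not code from this file), operands that reach the two visitors above:

/* Illustrative only: __imag__ on a scalar yields zero, but the operand is still evaluated. */
double re(double _Complex z)    { return __real__ z; }
double imag_of_scalar(double x) { return __imag__ x; } /* always 0.0 */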
2797
2798//===----------------------------------------------------------------------===//
2799// Binary Operators
2800//===----------------------------------------------------------------------===//
2801
2802BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
2803 TestAndClearIgnoreResultAssign();
2804 BinOpInfo Result;
2805 Result.LHS = Visit(E->getLHS());
2806 Result.RHS = Visit(E->getRHS());
2807 Result.Ty = E->getType();
2808 Result.Opcode = E->getOpcode();
2809 Result.FPFeatures = E->getFPFeatures();
2810 Result.E = E;
2811 return Result;
2812}
2813
2814LValue ScalarExprEmitter::EmitCompoundAssignLValue(
2815 const CompoundAssignOperator *E,
2816 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
2817 Value *&Result) {
2818 QualType LHSTy = E->getLHS()->getType();
2819 BinOpInfo OpInfo;
2820
2821 if (E->getComputationResultType()->isAnyComplexType())
2822 return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
2823
2824 // Emit the RHS first. __block variables need to have the rhs evaluated
2825 // first, plus this should improve codegen a little.
2826 OpInfo.RHS = Visit(E->getRHS());
2827 OpInfo.Ty = E->getComputationResultType();
2828 OpInfo.Opcode = E->getOpcode();
2829 OpInfo.FPFeatures = E->getFPFeatures();
2830 OpInfo.E = E;
2831 // Load/convert the LHS.
2832 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
2833
2834 llvm::PHINode *atomicPHI = nullptr;
2835 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
2836 QualType type = atomicTy->getValueType();
2837 if (!type->isBooleanType() && type->isIntegerType() &&
2838 !(type->isUnsignedIntegerType() &&
2839 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2840 CGF.getLangOpts().getSignedOverflowBehavior() !=
2841 LangOptions::SOB_Trapping) {
2842 llvm::AtomicRMWInst::BinOp aop = llvm::AtomicRMWInst::BAD_BINOP;
2843 switch (OpInfo.Opcode) {
2844 // We don't have atomicrmw operations for *, %, /, <<, >>
2845 case BO_MulAssign: case BO_DivAssign:
2846 case BO_RemAssign:
2847 case BO_ShlAssign:
2848 case BO_ShrAssign:
2849 break;
2850 case BO_AddAssign:
2851 aop = llvm::AtomicRMWInst::Add;
2852 break;
2853 case BO_SubAssign:
2854 aop = llvm::AtomicRMWInst::Sub;
2855 break;
2856 case BO_AndAssign:
2857 aop = llvm::AtomicRMWInst::And;
2858 break;
2859 case BO_XorAssign:
2860 aop = llvm::AtomicRMWInst::Xor;
2861 break;
2862 case BO_OrAssign:
2863 aop = llvm::AtomicRMWInst::Or;
2864 break;
2865 default:
2866 llvm_unreachable("Invalid compound assignment type");
2867 }
2868 if (aop != llvm::AtomicRMWInst::BAD_BINOP) {
2869 llvm::Value *amt = CGF.EmitToMemory(
2870 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
2871 E->getExprLoc()),
2872 LHSTy);
2873 Builder.CreateAtomicRMW(aop, LHSLV.getPointer(), amt,
2874 llvm::AtomicOrdering::SequentiallyConsistent);
2875 return LHSLV;
2876 }
2877 }
2878 // FIXME: For floating point types, we should be saving and restoring the
2879 // floating point environment in the loop.
2880 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2881 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2882 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
2883 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
2884 Builder.CreateBr(opBB);
2885 Builder.SetInsertPoint(opBB);
2886 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
2887 atomicPHI->addIncoming(OpInfo.LHS, startBB);
2888 OpInfo.LHS = atomicPHI;
2889 }
2890 else
2891 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
2892
2893 SourceLocation Loc = E->getExprLoc();
2894 OpInfo.LHS =
2895 EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);
2896
2897 // Expand the binary operator.
2898 Result = (this->*Func)(OpInfo);
2899
2900 // Convert the result back to the LHS type,
2901 // potentially with Implicit Conversion sanitizer check.
2902 Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy,
2903 Loc, ScalarConversionOpts(CGF.SanOpts));
2904
2905 if (atomicPHI) {
2906 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
2907 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2908 auto Pair = CGF.EmitAtomicCompareExchange(
2909 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
2910 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
2911 llvm::Value *success = Pair.second;
2912 atomicPHI->addIncoming(old, curBlock);
2913 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
2914 Builder.SetInsertPoint(contBB);
2915 return LHSLV;
2916 }
2917
2918 // Store the result value into the LHS lvalue. Bit-fields are handled
2919 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
2920 // 'An assignment expression has the value of the left operand after the
2921 // assignment...'.
2922 if (LHSLV.isBitField())
2923 CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
2924 else
2925 CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
2926
2927 return LHSLV;
2928}
2929
2930Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
2931 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
2932 bool Ignore = TestAndClearIgnoreResultAssign();
2933 Value *RHS = nullptr;
2934 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
2935
2936 // If the result is clearly ignored, return now.
2937 if (Ignore)
2938 return nullptr;
2939
2940 // The result of an assignment in C is the assigned r-value.
2941 if (!CGF.getLangOpts().CPlusPlus)
2942 return RHS;
2943
2944 // If the lvalue is non-volatile, return the computed value of the assignment.
2945 if (!LHS.isVolatileQualified())
2946 return RHS;
2947
2948 // Otherwise, reload the value.
2949 return EmitLoadOfLValue(LHS, E->getExprLoc());
2950}
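A minimal illustrative sketch (not from this file) of the atomic compound-assignment paths above:

/* Illustrative only: atomicrmw fast path vs. the compare-exchange loop. */
#include <stdatomic.h>
void bump2(_Atomic int *ai) {
  *ai += 3; /* += maps to a single atomicrmw add */
  *ai *= 2; /* no atomicrmw for '*': falls into the cmpxchg loop */
}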
2951
2952void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
2953 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
2954 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
2955
2956 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
2957 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
2958 SanitizerKind::IntegerDivideByZero));
2959 }
2960
2961 const auto *BO = cast<BinaryOperator>(Ops.E);
2962 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
2963 Ops.Ty->hasSignedIntegerRepresentation() &&
2964 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
2965 Ops.mayHaveIntegerOverflow()) {
2966 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
2967
2968 llvm::Value *IntMin =
2969 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
2970 llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
2971
2972 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
2973 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
2974 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
2975 Checks.push_back(
2976 std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
2977 }
2978
2979 if (Checks.size() > 0)
2980 EmitBinOpCheck(Checks, Ops);
2981}
2982
2983Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
2984 {
2985 CodeGenFunction::SanitizerScope SanScope(&CGF);
2986 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
2987 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
2988 Ops.Ty->isIntegerType() &&
2989 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
2990 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
2991 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
2992 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
2993 Ops.Ty->isRealFloatingType() &&
2994 Ops.mayHaveFloatDivisionByZero()) {
2995 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
2996 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
2997 EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
2998 Ops);
2999 }
3000 }
3001
3002 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
3003 llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
3004 if (CGF.getLangOpts().OpenCL &&
3005 !CGF.CGM.getCodeGenOpts().CorrectlyRoundedDivSqrt) {
3006 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
3007 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
3008 // build option allows an application to specify that single precision
3009 // floating-point divide (x/y and 1/x) and sqrt used in the program
3010 // source are correctly rounded.
3011 llvm::Type *ValTy = Val->getType();
3012 if (ValTy->isFloatTy() ||
3013 (isa<llvm::VectorType>(ValTy) &&
3014 cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
3015 CGF.SetFPAccuracy(Val, 2.5);
3016 }
3017 return Val;
3018 }
3019 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
3020 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
3021 else
3022 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
3023}
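Illustrative divisions (not from this file) that the checks above can instrument when the corresponding sanitizers are enabled, e.g. -fsanitize=integer-divide-by-zero, -fsanitize=signed-integer-overflow, or -fsanitize=float-divide-by-zero:

/* Illustrative only: candidates for the div/rem sanitizer checks. */
int    idiv(int a, int b)       { return a / b; } /* checks b != 0 and !(a == INT_MIN && b == -1) */
int    irem(int a, int b)       { return a % b; } /* same checks, with isDiv == false */
double fdiv(double a, double b) { return a / b; } /* checks b != 0.0 under float-divide-by-zero */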
3024
3025Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
3026 // Rem in C can't be a floating point type: C99 6.5.5p2.
3027 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3028 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3029 Ops.Ty->isIntegerType() &&
3030 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3031 CodeGenFunction::SanitizerScope SanScope(&CGF);
3032 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3033 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
3034 }
3035
3036 if (Ops.Ty->hasUnsignedIntegerRepresentation())
3037 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
3038 else
3039 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
3040}
3041
3042Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
3043 unsigned IID;
3044 unsigned OpID = 0;
3045
3046 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
3047 switch (Ops.Opcode) {
3048 case BO_Add:
3049 case BO_AddAssign:
3050 OpID = 1;
3051 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
3052 llvm::Intrinsic::uadd_with_overflow;
3053 break;
3054 case BO_Sub:
3055 case BO_SubAssign:
3056 OpID = 2;
3057 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
3058 llvm::Intrinsic::usub_with_overflow;
3059 break;
3060 case BO_Mul:
3061 case BO_MulAssign:
3062 OpID = 3;
3063 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
3064 llvm::Intrinsic::umul_with_overflow;
3065 break;
3066 default:
3067 llvm_unreachable("Unsupported operation for overflow detection");
3068 }
3069 OpID <<= 1;
3070 if (isSigned)
3071 OpID |= 1;
3072
3073 CodeGenFunction::SanitizerScope SanScope(&CGF);
3074 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
3075
3076 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
3077
3078 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
3079 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
3080 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
3081
3082 // Handle overflow with llvm.trap if no custom handler has been specified.
3083 const std::string *handlerName =
3084 &CGF.getLangOpts().OverflowHandler;
3085 if (handlerName->empty()) {
3086 // If the signed-integer-overflow sanitizer is enabled, emit a call to its
3087 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
3088 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
3089 llvm::Value *NotOverflow = Builder.CreateNot(overflow);
3090 SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
3091 : SanitizerKind::UnsignedIntegerOverflow;
3092 EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
3093 } else
3094 CGF.EmitTrapCheck(Builder.CreateNot(overflow));
3095 return result;
3096 }
3097
3098 // Branch in case of overflow.
3099 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
3100 llvm::BasicBlock *continueBB =
3101 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
3102 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
3103
3104 Builder.CreateCondBr(overflow, overflowBB, continueBB);
3105
3106 // If an overflow handler is set, then we want to call it and then use its
3107 // result, if it returns.
3108 Builder.SetInsertPoint(overflowBB);
3109
3110 // Get the overflow handler.
3111 llvm::Type *Int8Ty = CGF.Int8Ty;
3112 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
3113 llvm::FunctionType *handlerTy =
3114 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
3115 llvm::FunctionCallee handler =
3116 CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
3117
3118 // Sign extend the args to 64-bit, so that we can use the same handler for
3119 // all types of overflow.
3120 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
3121 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
3122
3123 // Call the handler with the two arguments, the operation, and the size of
3124 // the result.
3125 llvm::Value *handlerArgs[] = {
3126 lhs,
3127 rhs,
3128 Builder.getInt8(OpID),
3129 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
3130 };
3131 llvm::Value *handlerResult =
3132 CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
3133
3134 // Truncate the result back to the desired size.
3135 handlerResult = Builder.CreateTrunc(handlerResult, opTy);
3136 Builder.CreateBr(continueBB);
3137
3138 Builder.SetInsertPoint(continueBB);
3139 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
3140 phi->addIncoming(result, initialBB);
3141 phi->addIncoming(handlerResult, overflowBB);
3142
3143 return phi;
3144}
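For illustration (not from this file): compiled with -ftrapv or -fsanitize=signed-integer-overflow, the functions below are routed through EmitOverflowCheckedBinOp, using the with-overflow intrinsics followed by a trap, a sanitizer runtime call, or, with -ftrapv-handler=<name>, a branch to the named handler:

/* Illustrative only: candidates for the overflow-checked binop path. */
int checked_add(int a, int b) { return a + b; } /* llvm.sadd.with.overflow */
int checked_mul(int a, int b) { return a * b; } /* llvm.smul.with.overflow */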
3145
3146/// Emit pointer + index arithmetic.
3147static Value *emitPointerArithmetic(CodeGenFunction &CGF,
3148 const BinOpInfo &op,
3149 bool isSubtraction) {
3150 // Must have binary (not unary) expr here. Unary pointer
3151 // increment/decrement doesn't use this path.
3152 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
3153
3154 Value *pointer = op.LHS;
3155 Expr *pointerOperand = expr->getLHS();
3156 Value *index = op.RHS;
3157 Expr *indexOperand = expr->getRHS();
3158
3159 // In a subtraction, the LHS is always the pointer.
3160 if (!isSubtraction && !pointer->getType()->isPointerTy()) {
3161 std::swap(pointer, index);
3162 std::swap(pointerOperand, indexOperand);
3163 }
3164
3165 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
3166
3167 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
3168 auto &DL = CGF.CGM.getDataLayout();
3169 auto PtrTy = cast<llvm::PointerType>(pointer->getType());
3170
3171 // Some versions of glibc and gcc use idioms (particularly in their malloc
3172 // routines) that add a pointer-sized integer (known to be a pointer value)
3173 // to a null pointer in order to cast the value back to an integer or as
3174 // part of a pointer alignment algorithm. This is undefined behavior, but
3175 // we'd like to be able to compile programs that use it.
3176 //
3177 // Normally, we'd generate a GEP with a null-pointer base here in response
3178 // to that code, but it's also UB to dereference a pointer created that
3179 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
3180 // generate a direct cast of the integer value to a pointer.
3181 //
3182 // The idiom (p = nullptr + N) is not met if any of the following are true:
3183 //
3184 // The operation is subtraction.
3185 // The index is not pointer-sized.
3186 // The pointer type is not byte-sized.
3187 //
3188 if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
3189 op.Opcode,
3190 expr->getLHS(),
3191 expr->getRHS()))
3192 return CGF.Builder.CreateIntToPtr(index, pointer->getType());
3193
3194 if (width != DL.getTypeSizeInBits(PtrTy)) {
3195 // Zero-extend or sign-extend the pointer value according to
3196 // whether the index is signed or not.
3197 index = CGF.Builder.CreateIntCast(index, DL.getIntPtrType(PtrTy), isSigned,
3198 "idx.ext");
3199 }
3200
3201 // If this is subtraction, negate the index.
3202 if (isSubtraction)
3203 index = CGF.Builder.CreateNeg(index, "idx.neg");
3204
3205 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
3206 CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
3207 /*Accessed*/ false);
3208
3209 const PointerType *pointerType
3210 = pointerOperand->getType()->getAs<PointerType>();
3211 if (!pointerType) {
3212 QualType objectType = pointerOperand->getType()
3213 ->castAs<ObjCObjectPointerType>()
3214 ->getPointeeType();
3215 llvm::Value *objectSize
3216 = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
3217
3218 index = CGF.Builder.CreateMul(index, objectSize);
3219
3220 Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
3221 result = CGF.Builder.CreateGEP(result, index, "add.ptr");
3222 return CGF.Builder.CreateBitCast(result, pointer->getType());
3223 }
3224
3225 QualType elementType = pointerType->getPointeeType();
3226 if (const VariableArrayType *vla
3227 = CGF.getContext().getAsVariableArrayType(elementType)) {
3228 // The element count here is the total number of non-VLA elements.
3229 llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
3230
3231 // Effectively, the multiply by the VLA size is part of the GEP.
3232 // GEP indexes are signed, and scaling an index isn't permitted to
3233 // signed-overflow, so we use the same semantics for our explicit
3234 // multiply. We suppress this if overflow is not undefined behavior.
3235 if (CGF.getLangOpts().isSignedOverflowDefined()) {
3236 index = CGF.Builder.CreateMul(index, numElements, "vla.index");
3237 pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
3238 } else {
3239 index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
3240 pointer =
3241 CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
3242 op.E->getExprLoc(), "add.ptr");
3243 }
3244 return pointer;
3245 }
3246
3247 // Explicitly handle GNU void* and function pointer arithmetic extensions. The
3248 // GNU void* casts amount to no-ops since our void* type is i8*, but this is
3249 // future proof.
3250 if (elementType->isVoidType() || elementType->isFunctionType()) {
3251 Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
3252 result = CGF.Builder.CreateGEP(result, index, "add.ptr");
3253 return CGF.Builder.CreateBitCast(result, pointer->getType());
3254 }
3255
3256 if (CGF.getLangOpts().isSignedOverflowDefined())
3257 return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
3258
3259 return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
3260 op.E->getExprLoc(), "add.ptr");
3261}
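An illustrative sketch (not from this file) of the null-pointer-plus-integer idiom tolerated above; with a byte-sized pointee and a pointer-width index it is emitted as a plain inttoptr rather than a GEP on a null base:

/* Illustrative only: formally UB, but recognized and compiled as inttoptr. */
#include <stdint.h>
char *as_pointer(uintptr_t bits) { return (char *)0 + bits; }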
3262
3263// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
3264// Addend. Use negMul and negAdd to negate the first operand of the Mul or
3265// the add operand respectively. This allows fmuladd to represent a*b-c, or
3266// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
3267// efficient operations.
3268static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
3269 const CodeGenFunction &CGF, CGBuilderTy &Builder,
3270 bool negMul, bool negAdd) {
3271 assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");
3272
3273 Value *MulOp0 = MulOp->getOperand(0);
3274 Value *MulOp1 = MulOp->getOperand(1);
3275 if (negMul) {
3276 MulOp0 =
3277 Builder.CreateFSub(
3278 llvm::ConstantFP::getZeroValueForNegation(MulOp0->getType()), MulOp0,
3279 "neg");
3280 } else if (negAdd) {
3281 Addend =
3282 Builder.CreateFSub(
3283 llvm::ConstantFP::getZeroValueForNegation(Addend->getType()), Addend,
3284 "neg");
3285 }
3286
3287 Value *FMulAdd = Builder.CreateCall(
3288 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
3289 {MulOp0, MulOp1, Addend});
3290 MulOp->eraseFromParent();
3291
3292 return FMulAdd;
3293}
3294
3295// Check whether it would be legal to emit an fmuladd intrinsic call to
3296// represent op and if so, build the fmuladd.
3297//
3298// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
3299// Does NOT check the type of the operation - it's assumed that this function
3300// will be called from contexts where it's known that the type is contractable.
3301static Value* tryEmitFMulAdd(const BinOpInfo &op,
3302 const CodeGenFunction &CGF, CGBuilderTy &Builder,
3303 bool isSub=false) {
3304
3305 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
3306         op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
3307        "Only fadd/fsub can be the root of an fmuladd.");
3308
3309 // Check whether this op is marked as fusable.
3310 if (!op.FPFeatures.allowFPContractWithinStatement())
3311 return nullptr;
3312
3313 // We have a potentially fusable op. Look for a mul on one of the operands.
3314 // Also, make sure that the mul result isn't used directly. In that case,
3315 // there's no point creating a muladd operation.
3316 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
3317 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3318 LHSBinOp->use_empty())
3319 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
3320 }
3321 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
3322 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3323 RHSBinOp->use_empty())
3324 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
3325 }
3326
3327 return nullptr;
3328}
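Illustrative candidates (not from this file): with -ffp-contract=on (or fast) these expressions can be fused into the llvm.fmuladd intrinsic built above; with -ffp-contract=off they stay as separate fmul and fadd/fsub:

/* Illustrative only: the a*b+c and c-a*b shapes handled by buildFMulAdd. */
double fma_like(double a, double b, double c) { return a * b + c; }
double fms_like(double a, double b, double c) { return c - a * b; }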
3329
3330Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
3331 if (op.LHS->getType()->isPointerTy() ||
3332 op.RHS->getType()->isPointerTy())
3333 return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);
3334
3335 if (op.Ty->isSignedIntegerOrEnumerationType()) {
3336 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3337 case LangOptions::SOB_Defined:
3338 return Builder.CreateAdd(op.LHS, op.RHS, "add");
3339 case LangOptions::SOB_Undefined:
3340 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3341 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3342 LLVM_FALLTHROUGH;
3343 case LangOptions::SOB_Trapping:
3344 if (CanElideOverflowCheck(CGF.getContext(), op))
3345 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3346 return EmitOverflowCheckedBinOp(op);
3347 }
3348 }
3349
3350 if (op.Ty->isUnsignedIntegerType() &&
3351 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3352 !CanElideOverflowCheck(CGF.getContext(), op))
3353 return EmitOverflowCheckedBinOp(op);
3354
3355 if (op.LHS->getType()->isFPOrFPVectorTy()) {
3356 // Try to form an fmuladd.
3357 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
3358 return FMulAdd;
3359
3360 Value *V = Builder.CreateFAdd(op.LHS, op.RHS, "add");
3361 return propagateFMFlags(V, op);
3362 }
3363
3364 if (op.isFixedPointBinOp())
3365 return EmitFixedPointBinOp(op);
3366
3367 return Builder.CreateAdd(op.LHS, op.RHS, "add");
3368}
3369
3370/// The resulting value must be calculated with exact precision, so the operands
3371/// may not be the same type.
3372Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
3373 using llvm::APSInt;
3374 using llvm::ConstantInt;
3375
3376 const auto *BinOp = cast<BinaryOperator>(op.E);
3377
3378 // The result is a fixed point type and at least one of the operands is fixed
3379 // point while the other is either fixed point or an int. This resulting type
3380 // should be determined by Sema::handleFixedPointConversions().
3381 QualType ResultTy = op.Ty;
3382 QualType LHSTy = BinOp->getLHS()->getType();
3383 QualType RHSTy = BinOp->getRHS()->getType();
3384 ASTContext &Ctx = CGF.getContext();
3385 Value *LHS = op.LHS;
3386 Value *RHS = op.RHS;
3387
3388 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
3389 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
3390 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
3391 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
3392
3393 // Convert the operands to the full precision type.
3394 Value *FullLHS = EmitFixedPointConversion(LHS, LHSFixedSema, CommonFixedSema,
3395 BinOp->getExprLoc());
3396 Value *FullRHS = EmitFixedPointConversion(RHS, RHSFixedSema, CommonFixedSema,
3397 BinOp->getExprLoc());
3398
3399 // Perform the actual operation.
3400 Value *Result;
3401 switch (BinOp->getOpcode()) {
3402 case BO_Add: {
3403 if (ResultFixedSema.isSaturated()) {
3404 llvm::Intrinsic::ID IID = ResultFixedSema.isSigned()
3405 ? llvm::Intrinsic::sadd_sat
3406 : llvm::Intrinsic::uadd_sat;
3407 Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS);
3408 } else {
3409 Result = Builder.CreateAdd(FullLHS, FullRHS);
3410 }
3411 break;
3412 }
3413 case BO_Sub: {
3414 if (ResultFixedSema.isSaturated()) {
3415 llvm::Intrinsic::ID IID = ResultFixedSema.isSigned()
3416 ? llvm::Intrinsic::ssub_sat
3417 : llvm::Intrinsic::usub_sat;
3418 Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS);
3419 } else {
3420 Result = Builder.CreateSub(FullLHS, FullRHS);
3421 }
3422 break;
3423 }
3424 case BO_LT:
3425 return CommonFixedSema.isSigned() ? Builder.CreateICmpSLT(FullLHS, FullRHS)
3426 : Builder.CreateICmpULT(FullLHS, FullRHS);
3427 case BO_GT:
3428 return CommonFixedSema.isSigned() ? Builder.CreateICmpSGT(FullLHS, FullRHS)
3429 : Builder.CreateICmpUGT(FullLHS, FullRHS);
3430 case BO_LE:
3431 return CommonFixedSema.isSigned() ? Builder.CreateICmpSLE(FullLHS, FullRHS)
3432 : Builder.CreateICmpULE(FullLHS, FullRHS);
3433 case BO_GE:
3434 return CommonFixedSema.isSigned() ? Builder.CreateICmpSGE(FullLHS, FullRHS)
3435 : Builder.CreateICmpUGE(FullLHS, FullRHS);
3436 case BO_EQ:
3437 // For equality operations, we assume any padding bits on unsigned types are
3438 // zero'd out. They could be overwritten through non-saturating operations
3439 // that cause overflow, but this leads to undefined behavior.
3440 return Builder.CreateICmpEQ(FullLHS, FullRHS);
3441 case BO_NE:
3442 return Builder.CreateICmpNE(FullLHS, FullRHS);
3443 case BO_Mul:
3444 case BO_Div:
3445 case BO_Shl:
3446 case BO_Shr:
3447 case BO_Cmp:
3448 case BO_LAnd:
3449 case BO_LOr:
3450 case BO_MulAssign:
3451 case BO_DivAssign:
3452 case BO_AddAssign:
3453 case BO_SubAssign:
3454 case BO_ShlAssign:
3455 case BO_ShrAssign:
3456 llvm_unreachable("Found unimplemented fixed point binary operation");
3457 case BO_PtrMemD:
3458 case BO_PtrMemI:
3459 case BO_Rem:
3460 case BO_Xor:
3461 case BO_And:
3462 case BO_Or:
3463 case BO_Assign:
3464 case BO_RemAssign:
3465 case BO_AndAssign:
3466 case BO_XorAssign:
3467 case BO_OrAssign:
3468 case BO_Comma:
3469 llvm_unreachable("Found unsupported binary operation for fixed point types.");
3470 }
3471
3472 // Convert to the result type.
3473 return EmitFixedPointConversion(Result, CommonFixedSema, ResultFixedSema,
3474 BinOp->getExprLoc());
3475}
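Illustrative fixed-point operands (Embedded C types, enabled in Clang with -ffixed-point; not code from this file) that reach EmitFixedPointBinOp above; a saturating result type selects the llvm.sadd.sat/ssub.sat intrinsics, a plain one an ordinary add in the common semantics:

/* Illustrative only: plain and saturating fixed-point addition. */
_Accum      add_accum(_Accum a, _Fract b)      { return a + b; }
_Sat _Accum add_sat(_Sat _Accum a, _Accum b)   { return a + b; }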
3476
3477Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
3478 // The LHS is always a pointer if either side is.
3479 if (!op.LHS->getType()->isPointerTy()) {
3480 if (op.Ty->isSignedIntegerOrEnumerationType()) {
3481 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3482 case LangOptions::SOB_Defined:
3483 return Builder.CreateSub(op.LHS, op.RHS, "sub");
3484 case LangOptions::SOB_Undefined:
3485 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3486 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3487 LLVM_FALLTHROUGH;
3488 case LangOptions::SOB_Trapping:
3489 if (CanElideOverflowCheck(CGF.getContext(), op))
3490 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3491 return EmitOverflowCheckedBinOp(op);
3492 }
3493 }
3494
3495 if (op.Ty->isUnsignedIntegerType() &&
3496 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3497 !CanElideOverflowCheck(CGF.getContext(), op))
3498 return EmitOverflowCheckedBinOp(op);
3499
3500 if (op.LHS->getType()->isFPOrFPVectorTy()) {
3501 // Try to form an fmuladd.
3502 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
3503 return FMulAdd;
3504 Value *V = Builder.CreateFSub(op.LHS, op.RHS, "sub");
3505 return propagateFMFlags(V, op);
3506 }
3507
3508 if (op.isFixedPointBinOp())
3509 return EmitFixedPointBinOp(op);
3510
3511 return Builder.CreateSub(op.LHS, op.RHS, "sub");
3512 }
3513
3514 // If the RHS is not a pointer, then we have normal pointer
3515 // arithmetic.
3516 if (!op.RHS->getType()->isPointerTy())
3517 return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);
3518
3519 // Otherwise, this is a pointer subtraction.
3520
3521 // Do the raw subtraction part.
3522 llvm::Value *LHS
3523 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
3524 llvm::Value *RHS
3525 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
3526 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
3527
3528 // Okay, figure out the element size.
3529 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
3530 QualType elementType = expr->getLHS()->getType()->getPointeeType();
3531
3532 llvm::Value *divisor = nullptr;
3533
3534 // For a variable-length array, this is going to be non-constant.
3535 if (const VariableArrayType *vla
3536 = CGF.getContext().getAsVariableArrayType(elementType)) {
3537 auto VlaSize = CGF.getVLASize(vla);
3538 elementType = VlaSize.Type;
3539 divisor = VlaSize.NumElts;
3540
3541 // Scale the number of non-VLA elements by the non-VLA element size.
3542 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
3543 if (!eltSize.isOne())
3544 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
3545
3546 // For everything else, we can just compute it, safe in the
3547 // assumption that Sema won't let anything through that we can't
3548 // safely compute the size of.
3549 } else {
3550 CharUnits elementSize;
3551 // Handle GCC extension for pointer arithmetic on void* and
3552 // function pointer types.
3553 if (elementType->isVoidType() || elementType->isFunctionType())
3554 elementSize = CharUnits::One();
3555 else
3556 elementSize = CGF.getContext().getTypeSizeInChars(elementType);
3557
3558 // Don't even emit the divide for element size of 1.
3559 if (elementSize.isOne())
3560 return diffInChars;
3561
3562 divisor = CGF.CGM.getSize(elementSize);
3563 }
3564
3565 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
3566 // pointer difference in C is only defined in the case where both operands
3567 // are pointing to elements of an array.
3568 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
3569}
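
For reference, a hedged sketch of the pointer-difference lowering above (hypothetical example, not taken from this file): for int* operands the raw byte difference is divided by the element size with an exact sdiv.

  #include <cstddef>

  // Hypothetical input hitting the pointer-subtraction path above. For int*
  // operands the emitted IR is roughly:
  //   %l = ptrtoint i32* %p to i64   ; "sub.ptr.lhs.cast"
  //   %r = ptrtoint i32* %q to i64   ; "sub.ptr.rhs.cast"
  //   %d = sub i64 %l, %r            ; "sub.ptr.sub"
  //   %n = sdiv exact i64 %d, 4      ; "sub.ptr.div", element size of int
  ptrdiff_t elems(int *p, int *q) { return p - q; }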
3570
3571Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) {
3572 llvm::IntegerType *Ty;
3573 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
3574 Ty = cast<llvm::IntegerType>(VT->getElementType());
3575 else
3576 Ty = cast<llvm::IntegerType>(LHS->getType());
3577 return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
3578}
3579
3580Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
3581 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3582 // RHS to the same size as the LHS.
3583 Value *RHS = Ops.RHS;
3584 if (Ops.LHS->getType() != RHS->getType())
3585 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
3586
3587 bool SanitizeBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
3588 Ops.Ty->hasSignedIntegerRepresentation() &&
3589 !CGF.getLangOpts().isSignedOverflowDefined() &&
3590 !CGF.getLangOpts().CPlusPlus2a;
3591 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
3592 // OpenCL 6.3j: shift values are effectively % word size of LHS.
3593 if (CGF.getLangOpts().OpenCL)
3594 RHS =
3595 Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shl.mask");
3596 else if ((SanitizeBase || SanitizeExponent) &&
3597 isa<llvm::IntegerType>(Ops.LHS->getType())) {
3598 CodeGenFunction::SanitizerScope SanScope(&CGF);
3599 SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
3600 llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
3601 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
3602
3603 if (SanitizeExponent) {
3604 Checks.push_back(
3605 std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
3606 }
3607
3608 if (SanitizeBase) {
3609 // Check whether we are shifting any non-zero bits off the top of the
3610 // integer. We only emit this check if exponent is valid - otherwise
3611 // instructions below will have undefined behavior themselves.
3612 llvm::BasicBlock *Orig = Builder.GetInsertBlock();
3613 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
3614 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
3615 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
3616 llvm::Value *PromotedWidthMinusOne =
3617 (RHS == Ops.RHS) ? WidthMinusOne
3618 : GetWidthMinusOneValue(Ops.LHS, RHS);
3619 CGF.EmitBlock(CheckShiftBase);
3620 llvm::Value *BitsShiftedOff = Builder.CreateLShr(
3621 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
3622 /*NUW*/ true, /*NSW*/ true),
3623 "shl.check");
3624 if (CGF.getLangOpts().CPlusPlus) {
3625 // In C99, we are not permitted to shift a 1 bit into the sign bit.
3626 // Under C++11's rules, shifting a 1 bit into the sign bit is
3627 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
3628 // define signed left shifts, so we use the C99 and C++11 rules there).
3629 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
3630 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
3631 }
3632 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
3633 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
3634 CGF.EmitBlock(Cont);
3635 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
3636 BaseCheck->addIncoming(Builder.getTrue(), Orig);
3637 BaseCheck->addIncoming(ValidBase, CheckShiftBase);
3638 Checks.push_back(std::make_pair(BaseCheck, SanitizerKind::ShiftBase));
3639 }
3640
3641 assert(!Checks.empty());
3642 EmitBinOpCheck(Checks, Ops);
3643 }
3644
3645 return Builder.CreateShl(Ops.LHS, RHS, "shl");
3646}
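
To make the OpenCL masking above concrete (a sketch assuming a 32-bit int LHS; the function below is hypothetical): the shift count is ANDed with the bit width minus one before the shl, so an out-of-range count wraps instead of being undefined.

  // Hypothetical OpenCL-mode example of the "shl.mask" path in EmitShl above:
  // with a 32-bit LHS the count is masked first, so this behaves like
  // (x << (n & 31)) rather than a plain (x << n).
  int shifted(int x, int n) { return x << n; }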
3647
3648Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
3649 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3650 // RHS to the same size as the LHS.
3651 Value *RHS = Ops.RHS;
3652 if (Ops.LHS->getType() != RHS->getType())
3653 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
3654
3655 // OpenCL 6.3j: shift values are effectively % word size of LHS.
3656 if (CGF.getLangOpts().OpenCL)
3657 RHS =
3658 Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shr.mask");
3659 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
3660 isa<llvm::IntegerType>(Ops.LHS->getType())) {
3661 CodeGenFunction::SanitizerScope SanScope(&CGF);
3662 llvm::Value *Valid =
3663 Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
3664 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
3665 }
3666
3667 if (Ops.Ty->hasUnsignedIntegerRepresentation())
3668 return Builder.CreateLShr(Ops.LHS, RHS, "shr");
3669 return Builder.CreateAShr(Ops.LHS, RHS, "shr");
3670}
3671
3672enum IntrinsicType { VCMPEQ, VCMPGT };
3673// return corresponding comparison intrinsic for given vector type
3674static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
3675 BuiltinType::Kind ElemKind) {
3676 switch (ElemKind) {
3677 default: llvm_unreachable("unexpected element type");
3678 case BuiltinType::Char_U:
3679 case BuiltinType::UChar:
3680 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
3681 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
3682 case BuiltinType::Char_S:
3683 case BuiltinType::SChar:
3684 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
3685 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
3686 case BuiltinType::UShort:
3687 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
3688 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
3689 case BuiltinType::Short:
3690 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
3691 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
3692 case BuiltinType::UInt:
3693 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
3694 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
3695 case BuiltinType::Int:
3696 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
3697 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
3698 case BuiltinType::ULong:
3699 case BuiltinType::ULongLong:
3700 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
3701 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
3702 case BuiltinType::Long:
3703 case BuiltinType::LongLong:
3704 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
3705 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
3706 case BuiltinType::Float:
3707 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
3708 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
3709 case BuiltinType::Double:
3710 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
3711 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
3712 }
3713}
3714
3715Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
3716 llvm::CmpInst::Predicate UICmpOpc,
3717 llvm::CmpInst::Predicate SICmpOpc,
3718 llvm::CmpInst::Predicate FCmpOpc) {
3719 TestAndClearIgnoreResultAssign();
3720 Value *Result;
3721 QualType LHSTy = E->getLHS()->getType();
3722 QualType RHSTy = E->getRHS()->getType();
3723 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
3724 assert(E->getOpcode() == BO_EQ ||
3725 E->getOpcode() == BO_NE);
3726 Value *LHS = CGF.EmitScalarExpr(E->getLHS());
3727 Value *RHS = CGF.EmitScalarExpr(E->getRHS());
3728 Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
3729 CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
3730 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
3731 BinOpInfo BOInfo = EmitBinOps(E);
3732 Value *LHS = BOInfo.LHS;
3733 Value *RHS = BOInfo.RHS;
3734
3735 // If AltiVec, the comparison results in a numeric type, so we use
3736 // intrinsics comparing vectors and giving 0 or 1 as a result
3737 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
3738 // constants for mapping CR6 register bits to predicate result
3739 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
3740
3741 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
3742
3743 // in several cases the vector argument order will be reversed
3744 Value *FirstVecArg = LHS,
3745 *SecondVecArg = RHS;
3746
3747 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
3748 const BuiltinType *BTy = ElTy->getAs<BuiltinType>();
3749 BuiltinType::Kind ElementKind = BTy->getKind();
3750
3751 switch(E->getOpcode()) {
3752 default: llvm_unreachable("is not a comparison operation");
3753 case BO_EQ:
3754 CR6 = CR6_LT;
3755 ID = GetIntrinsic(VCMPEQ, ElementKind);
3756 break;
3757 case BO_NE:
3758 CR6 = CR6_EQ;
3759 ID = GetIntrinsic(VCMPEQ, ElementKind);
3760 break;
3761 case BO_LT:
3762 CR6 = CR6_LT;
3763 ID = GetIntrinsic(VCMPGT, ElementKind);
3764 std::swap(FirstVecArg, SecondVecArg);
3765 break;
3766 case BO_GT:
3767 CR6 = CR6_LT;
3768 ID = GetIntrinsic(VCMPGT, ElementKind);
3769 break;
3770 case BO_LE:
3771 if (ElementKind == BuiltinType::Float) {
3772 CR6 = CR6_LT;
3773 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
3774 std::swap(FirstVecArg, SecondVecArg);
3775 }
3776 else {
3777 CR6 = CR6_EQ;
3778 ID = GetIntrinsic(VCMPGT, ElementKind);
3779 }
3780 break;
3781 case BO_GE:
3782 if (ElementKind == BuiltinType::Float) {
3783 CR6 = CR6_LT;
3784 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
3785 }
3786 else {
3787 CR6 = CR6_EQ;
3788 ID = GetIntrinsic(VCMPGT, ElementKind);
3789 std::swap(FirstVecArg, SecondVecArg);
3790 }
3791 break;
3792 }
3793
3794 Value *CR6Param = Builder.getInt32(CR6);
3795 llvm::Function *F = CGF.CGM.getIntrinsic(ID);
3796 Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
3797
3798 // The result type of the intrinsic may not be the same as E->getType().
3799 // If E->getType() is not BoolTy, EmitScalarConversion will do the
3800 // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
3801 // do nothing; if ResultTy is not i1 at the same time, it will cause a
3802 // crash later.
3803 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
3804 if (ResultTy->getBitWidth() > 1 &&
3805 E->getType() == CGF.getContext().BoolTy)
3806 Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
3807 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
3808 E->getExprLoc());
3809 }
3810
3811 if (BOInfo.isFixedPointBinOp()) {
3812 Result = EmitFixedPointBinOp(BOInfo);
3813 } else if (LHS->getType()->isFPOrFPVectorTy()) {
3814 Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
3815 } else if (LHSTy->hasSignedIntegerRepresentation()) {
3816 Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
3817 } else {
3818 // Unsigned integers and pointers.
3819
3820 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
3821 !isa<llvm::ConstantPointerNull>(LHS) &&
3822 !isa<llvm::ConstantPointerNull>(RHS)) {
3823
3824 // Dynamic information is required to be stripped for comparisons,
3825 // because it could leak the dynamic information. Based on comparisons
3826 // of pointers to dynamic objects, the optimizer can replace one pointer
3827 // with another, which might be incorrect in presence of invariant
3828 // groups. Comparison with null is safe because null does not carry any
3829 // dynamic information.
3830 if (LHSTy.mayBeDynamicClass())
3831 LHS = Builder.CreateStripInvariantGroup(LHS);
3832 if (RHSTy.mayBeDynamicClass())
3833 RHS = Builder.CreateStripInvariantGroup(RHS);
3834 }
3835
3836 Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
3837 }
3838
3839 // If this is a vector comparison, sign extend the result to the appropriate
3840 // vector integer type and return it (don't convert to bool).
3841 if (LHSTy->isVectorType())
3842 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
3843
3844 } else {
3845 // Complex Comparison: can only be an equality comparison.
3846 CodeGenFunction::ComplexPairTy LHS, RHS;
3847 QualType CETy;
3848 if (auto *CTy = LHSTy->getAs<ComplexType>()) {
3849 LHS = CGF.EmitComplexExpr(E->getLHS());
3850 CETy = CTy->getElementType();
3851 } else {
3852 LHS.first = Visit(E->getLHS());
3853 LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
3854 CETy = LHSTy;
3855 }
3856 if (auto *CTy = RHSTy->getAs<ComplexType>()) {
3857 RHS = CGF.EmitComplexExpr(E->getRHS());
3858 assert(CGF.getContext().hasSameUnqualifiedType(CETy,
3859 CTy->getElementType()) &&
3860 "The element types must always match.");
3861 (void)CTy;
3862 } else {
3863 RHS.first = Visit(E->getRHS());
3864 RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
3865 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
3866 "The element types must always match.");
3867 }
3868
3869 Value *ResultR, *ResultI;
3870 if (CETy->isRealFloatingType()) {
3871 ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
3872 ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
3873 } else {
3874 // Complex comparisons can only be equality comparisons. As such, signed
3875 // and unsigned opcodes are the same.
3876 ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
3877 ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
3878 }
3879
3880 if (E->getOpcode() == BO_EQ) {
3881 Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
3882 } else {
3883 assert(E->getOpcode() == BO_NE &&
3884 "Complex comparison other than == or != ?");
3885 Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
3886 }
3887 }
3888
3889 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
3890 E->getExprLoc());
3891}
3892
3893Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
3894 bool Ignore = TestAndClearIgnoreResultAssign();
3895
3896 Value *RHS;
3897 LValue LHS;
3898
3899 switch (E->getLHS()->getType().getObjCLifetime()) {
3900 case Qualifiers::OCL_Strong:
3901 std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
3902 break;
3903
3904 case Qualifiers::OCL_Autoreleasing:
3905 std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
3906 break;
3907
3908 case Qualifiers::OCL_ExplicitNone:
3909 std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
3910 break;
3911
3912 case Qualifiers::OCL_Weak:
3913 RHS = Visit(E->getRHS());
3914 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3915 RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
3916 break;
3917
3918 case Qualifiers::OCL_None:
3919 // __block variables need to have the rhs evaluated first, plus
3920 // this should improve codegen just a little.
3921 RHS = Visit(E->getRHS());
3922 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3923
3924 // Store the value into the LHS. Bit-fields are handled specially
3925 // because the result is altered by the store, i.e., [C99 6.5.16p1]
3926 // 'An assignment expression has the value of the left operand after
3927 // the assignment...'.
3928 if (LHS.isBitField()) {
3929 CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
3930 } else {
3931 CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
3932 CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
3933 }
3934 }
3935
3936 // If the result is clearly ignored, return now.
3937 if (Ignore)
3938 return nullptr;
3939
3940 // The result of an assignment in C is the assigned r-value.
3941 if (!CGF.getLangOpts().CPlusPlus)
3942 return RHS;
3943
3944 // If the lvalue is non-volatile, return the computed value of the assignment.
3945 if (!LHS.isVolatileQualified())
3946 return RHS;
3947
3948 // Otherwise, reload the value.
3949 return EmitLoadOfLValue(LHS, E->getExprLoc());
3950}
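
A small hedged illustration of the C vs. C++ difference handled at the end of VisitBinAssign above (hypothetical example): in C the assignment expression yields the stored r-value, while in C++ it is an lvalue of the left operand, so a volatile-qualified LHS forces a reload of the just-stored value.

  // Hypothetical example of the volatile-reload path above (C++ mode):
  // '(v = x)' is an lvalue of 'v', so using its value reads 'v' again,
  // which EmitLoadOfLValue performs; in C the stored value is returned as-is.
  volatile int v;
  int use(int x) { return (v = x); }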
3951
3952Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
3953 // Perform vector logical and on comparisons with zero vectors.
3954 if (E->getType()->isVectorType()) {
3955 CGF.incrementProfileCounter(E);
3956
3957 Value *LHS = Visit(E->getLHS());
3958 Value *RHS = Visit(E->getRHS());
3959 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
3960 if (LHS->getType()->isFPOrFPVectorTy()) {
3961 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
3962 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
3963 } else {
3964 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
3965 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
3966 }
3967 Value *And = Builder.CreateAnd(LHS, RHS);
3968 return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
3969 }
3970
3971 llvm::Type *ResTy = ConvertType(E->getType());
3972
3973 // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
3974 // If we have 1 && X, just emit X without inserting the control flow.
3975 bool LHSCondVal;
3976 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
3977 if (LHSCondVal) { // If we have 1 && X, just emit X.
3978 CGF.incrementProfileCounter(E);
3979
3980 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
3981 // ZExt result to int or bool.
3982 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
3983 }
3984
3985 // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
3986 if (!CGF.ContainsLabel(E->getRHS()))
3987 return llvm::Constant::getNullValue(ResTy);
3988 }
3989
3990 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
3991 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
3992
3993 CodeGenFunction::ConditionalEvaluation eval(CGF);
3994
3995 // Branch on the LHS first. If it is false, go to the failure (cont) block.
3996 CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
3997 CGF.getProfileCount(E->getRHS()));
3998
3999 // Any edges into the ContBlock are now from an (indeterminate number of)
4000 // edges from this first condition. All of these values will be false. Start
4001 // setting up the PHI node in the Cont Block for this.
4002 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4003 "", ContBlock);
4004 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4005 PI != PE; ++PI)
4006 PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
4007
4008 eval.begin(CGF);
4009 CGF.EmitBlock(RHSBlock);
4010 CGF.incrementProfileCounter(E);
4011 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4012 eval.end(CGF);
4013
4014 // Reacquire the RHS block, as there may be subblocks inserted.
4015 RHSBlock = Builder.GetInsertBlock();
4016
4017 // Emit an unconditional branch from this block to ContBlock.
4018 {
4019 // There is no need to emit line number for unconditional branch.
4020 auto NL = ApplyDebugLocation::CreateEmpty(CGF);
4021 CGF.EmitBlock(ContBlock);
4022 }
4023 // Insert an entry into the phi node for the edge with the value of RHSCond.
4024 PN->addIncoming(RHSCond, RHSBlock);
4025
4026 // Artificial location to preserve the scope information
4027 {
4028 auto NL = ApplyDebugLocation::CreateArtificial(CGF);
4029 PN->setDebugLoc(Builder.getCurrentDebugLocation());
4030 }
4031
4032 // ZExt result to int.
4033 return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
4034}
4035
4036Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
4037 // Perform vector logical or on comparisons with zero vectors.
4038 if (E->getType()->isVectorType()) {
4039 CGF.incrementProfileCounter(E);
4040
4041 Value *LHS = Visit(E->getLHS());
4042 Value *RHS = Visit(E->getRHS());
4043 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4044 if (LHS->getType()->isFPOrFPVectorTy()) {
4045 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4046 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4047 } else {
4048 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4049 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4050 }
4051 Value *Or = Builder.CreateOr(LHS, RHS);
4052 return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
4053 }
4054
4055 llvm::Type *ResTy = ConvertType(E->getType());
4056
4057 // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
4058 // If we have 0 || X, just emit X without inserting the control flow.
4059 bool LHSCondVal;
4060 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4061 if (!LHSCondVal) { // If we have 0 || X, just emit X.
4062 CGF.incrementProfileCounter(E);
4063
4064 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4065 // ZExt result to int or bool.
4066 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
4067 }
4068
4069 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
4070 if (!CGF.ContainsLabel(E->getRHS()))
4071 return llvm::ConstantInt::get(ResTy, 1);
4072 }
4073
4074 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
4075 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
4076
4077 CodeGenFunction::ConditionalEvaluation eval(CGF);
4078
4079 // Branch on the LHS first. If it is true, go to the success (cont) block.
4080 CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
4081 CGF.getCurrentProfileCount() -
4082 CGF.getProfileCount(E->getRHS()));
4083
4084 // Any edges into the ContBlock are now from an (indeterminate number of)
4085 // edges from this first condition. All of these values will be true. Start
4086 // setting up the PHI node in the Cont Block for this.
4087 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4088 "", ContBlock);
4089 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4090 PI != PE; ++PI)
4091 PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
4092
4093 eval.begin(CGF);
4094
4095 // Emit the RHS condition as a bool value.
4096 CGF.EmitBlock(RHSBlock);
4097 CGF.incrementProfileCounter(E);
4098 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4099
4100 eval.end(CGF);
4101
4102 // Reacquire the RHS block, as there may be subblocks inserted.
4103 RHSBlock = Builder.GetInsertBlock();
4104
4105 // Emit an unconditional branch from this block to ContBlock. Insert an entry
4106 // into the phi node for the edge with the value of RHSCond.
4107 CGF.EmitBlock(ContBlock);
4108 PN->addIncoming(RHSCond, RHSBlock);
4109
4110 // ZExt result to int.
4111 return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
4112}
4113
4114Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
4115 CGF.EmitIgnoredExpr(E->getLHS());
4116 CGF.EnsureInsertPoint();
4117 return Visit(E->getRHS());
4118}
4119
4120//===----------------------------------------------------------------------===//
4121// Other Operators
4122//===----------------------------------------------------------------------===//
4123
4124/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
4125/// expression is cheap enough and side-effect-free enough to evaluate
4126/// unconditionally instead of conditionally. This is used to convert control
4127/// flow into selects in some cases.
4128static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
4129 CodeGenFunction &CGF) {
4130 // Anything that is an integer or floating point constant is fine.
4131 return E->IgnoreParens()->isEvaluatable(CGF.getContext());
4132
4133 // Even non-volatile automatic variables can't be evaluated unconditionally.
4134 // Referencing a thread_local may cause non-trivial initialization work to
4135 // occur. If we're inside a lambda and one of the variables is from the scope
4136 // outside the lambda, that function may have returned already. Reading its
4137 // locals is a bad idea. Also, these reads may introduce races that didn't
4138 // exist in the source-level program.
4139}
4140
4141
4142Value *ScalarExprEmitter::
4143VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
4144 TestAndClearIgnoreResultAssign();
4145
4146 // Bind the common expression if necessary.
4147 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
4148
4149 Expr *condExpr = E->getCond();
4150 Expr *lhsExpr = E->getTrueExpr();
4151 Expr *rhsExpr = E->getFalseExpr();
4152
4153 // If the condition constant folds and can be elided, try to avoid emitting
4154 // the condition and the dead arm.
4155 bool CondExprBool;
4156 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
4157 Expr *live = lhsExpr, *dead = rhsExpr;
4158 if (!CondExprBool) std::swap(live, dead);
4159
4160 // If the dead side doesn't have labels we need, just emit the Live part.
4161 if (!CGF.ContainsLabel(dead)) {
4162 if (CondExprBool)
4163 CGF.incrementProfileCounter(E);
4164 Value *Result = Visit(live);
4165
4166 // If the live part is a throw expression, it acts like it has a void
4167 // type, so evaluating it returns a null Value*. However, a conditional
4168 // with non-void type must return a non-null Value*.
4169 if (!Result && !E->getType()->isVoidType())
4170 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
4171
4172 return Result;
4173 }
4174 }
4175
4176 // OpenCL: If the condition is a vector, we can treat this condition like
4177 // the select function.
4178 if (CGF.getLangOpts().OpenCL
4179 && condExpr->getType()->isVectorType()) {
4180 CGF.incrementProfileCounter(E);
4181
4182 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4183 llvm::Value *LHS = Visit(lhsExpr);
4184 llvm::Value *RHS = Visit(rhsExpr);
4185
4186 llvm::Type *condType = ConvertType(condExpr->getType());
4187 llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
4188
4189 unsigned numElem = vecTy->getNumElements();
4190 llvm::Type *elemType = vecTy->getElementType();
4191
4192 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
4193 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
4194 llvm::Value *tmp = Builder.CreateSExt(TestMSB,
4195 llvm::VectorType::get(elemType,
4196 numElem),
4197 "sext");
4198 llvm::Value *tmp2 = Builder.CreateNot(tmp);
4199
4200 // Cast float to int to perform ANDs if necessary.
4201 llvm::Value *RHSTmp = RHS;
4202 llvm::Value *LHSTmp = LHS;
4203 bool wasCast = false;
4204 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
4205 if (rhsVTy->getElementType()->isFloatingPointTy()) {
4206 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
4207 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
4208 wasCast = true;
4209 }
4210
4211 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
4212 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
4213 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
4214 if (wasCast)
4215 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
4216
4217 return tmp5;
4218 }
4219
4220 // If this is a really simple expression (like x ? 4 : 5), emit this as a
4221 // select instead of as control flow. We can only do this if it is cheap and
4222 // safe to evaluate the LHS and RHS unconditionally.
4223 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
4224 isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
4225 llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
4226 llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
4227
4228 CGF.incrementProfileCounter(E, StepV);
4229
4230 llvm::Value *LHS = Visit(lhsExpr);
4231 llvm::Value *RHS = Visit(rhsExpr);
4232 if (!LHS) {
4233 // If the conditional has void type, make sure we return a null Value*.
4234 assert(!RHS && "LHS and RHS types must match");
4235 return nullptr;
4236 }
4237 return Builder.CreateSelect(CondV, LHS, RHS, "cond");
4238 }
4239
4240 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
4241 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
4242 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
4243
4244 CodeGenFunction::ConditionalEvaluation eval(CGF);
4245 CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
4246 CGF.getProfileCount(lhsExpr));
4247
4248 CGF.EmitBlock(LHSBlock);
4249 CGF.incrementProfileCounter(E);
4250 eval.begin(CGF);
4251 Value *LHS = Visit(lhsExpr);
4252 eval.end(CGF);
4253
4254 LHSBlock = Builder.GetInsertBlock();
4255 Builder.CreateBr(ContBlock);
4256
4257 CGF.EmitBlock(RHSBlock);
4258 eval.begin(CGF);
4259 Value *RHS = Visit(rhsExpr);
4260 eval.end(CGF);
4261
4262 RHSBlock = Builder.GetInsertBlock();
4263 CGF.EmitBlock(ContBlock);
4264
4265 // If the LHS or RHS is a throw expression, it will be legitimately null.
4266 if (!LHS)
4267 return RHS;
4268 if (!RHS)
4269 return LHS;
4270
4271 // Create a PHI node for the real part.
4272 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
4273 PN->addIncoming(LHS, LHSBlock);
4274 PN->addIncoming(RHS, RHSBlock);
4275 return PN;
4276}
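
For reference, a minimal sketch of the select fast path described above (hypothetical source): when both arms are cheap, side-effect-free constants, the conditional lowers to a single select rather than cond.true/cond.false/cond.end control flow with a phi.

  // Hypothetical input hitting the isCheapEnoughToEvaluateUnconditionally path:
  // both arms fold to integer constants, so this becomes roughly
  //   %cond = select i1 %tobool, i32 4, i32 5
  // instead of branches and a phi node.
  int pick(bool b) { return b ? 4 : 5; }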
4277
4278Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
4279 return Visit(E->getChosenSubExpr());
4280}
4281
4282Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
4283 QualType Ty = VE->getType();
4284
4285 if (Ty->isVariablyModifiedType())
4286 CGF.EmitVariablyModifiedType(Ty);
4287
4288 Address ArgValue = Address::invalid();
4289 Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
4290
4291 llvm::Type *ArgTy = ConvertType(VE->getType());
4292
4293 // If EmitVAArg fails, emit an error.
4294 if (!ArgPtr.isValid()) {
4295 CGF.ErrorUnsupported(VE, "va_arg expression");
4296 return llvm::UndefValue::get(ArgTy);
4297 }
4298
4299 // FIXME Volatility.
4300 llvm::Value *Val = Builder.CreateLoad(ArgPtr);
4301
4302 // If EmitVAArg promoted the type, we must truncate it.
4303 if (ArgTy != Val->getType()) {
4304 if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
4305 Val = Builder.CreateIntToPtr(Val, ArgTy);
4306 else
4307 Val = Builder.CreateTrunc(Val, ArgTy);
4308 }
4309
4310 return Val;
4311}
4312
4313Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
4314 return CGF.EmitBlockLiteral(block);
4315}
4316
4317// Convert a vec3 to vec4, or vice versa.
4318static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
4319 Value *Src, unsigned NumElementsDst) {
4320 llvm::Value *UnV = llvm::UndefValue::get(Src->getType());
4321 SmallVector<llvm::Constant*, 4> Args;
4322 Args.push_back(Builder.getInt32(0));
4323 Args.push_back(Builder.getInt32(1));
4324 Args.push_back(Builder.getInt32(2));
4325 if (NumElementsDst == 4)
4326 Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
4327 llvm::Constant *Mask = llvm::ConstantVector::get(Args);
4328 return Builder.CreateShuffleVector(Src, UnV, Mask);
4329}
4330
4331// Create cast instructions for converting LLVM value \p Src to LLVM type \p
4332// DstTy. \p Src has the same size as \p DstTy. Both are single value types
4333// but could be scalar or vectors of different lengths, and either can be
4334// pointer.
4335// There are 4 cases:
4336// 1. non-pointer -> non-pointer : needs 1 bitcast
4337// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
4338// 3. pointer -> non-pointer
4339// a) pointer -> intptr_t : needs 1 ptrtoint
4340// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
4341// 4. non-pointer -> pointer
4342// a) intptr_t -> pointer : needs 1 inttoptr
4343// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
4344// Note: for cases 3b and 4b two casts are required since LLVM casts do not
4345// allow casting directly between pointer types and non-integer non-pointer
4346// types.
4347static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
4348 const llvm::DataLayout &DL,
4349 Value *Src, llvm::Type *DstTy,
4350 StringRef Name = "") {
4351 auto SrcTy = Src->getType();
4352
4353 // Case 1.
4354 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
4355 return Builder.CreateBitCast(Src, DstTy, Name);
4356
4357 // Case 2.
4358 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
4359 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
4360
4361 // Case 3.
4362 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
4363 // Case 3b.
4364 if (!DstTy->isIntegerTy())
4365 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
4366 // Cases 3a and 3b.
4367 return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
4368 }
4369
4370 // Case 4b.
4371 if (!SrcTy->isIntegerTy())
4372 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
4373 // Cases 4a and 4b.
4374 return Builder.CreateIntToPtr(Src, DstTy, Name);
4375}
4376
4377Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
4378 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
4379 llvm::Type *DstTy = ConvertType(E->getType());
4380
4381 llvm::Type *SrcTy = Src->getType();
4382 unsigned NumElementsSrc = isa<llvm::VectorType>(SrcTy) ?
4383 cast<llvm::VectorType>(SrcTy)->getNumElements() : 0;
4384 unsigned NumElementsDst = isa<llvm::VectorType>(DstTy) ?
4385 cast<llvm::VectorType>(DstTy)->getNumElements() : 0;
4386
4387 // Going from vec3 to non-vec3 is a special case and requires a shuffle
4388 // vector to get a vec4, then a bitcast if the target type is different.
4389 if (NumElementsSrc == 3 && NumElementsDst != 3) {
4390 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
4391
4392 if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4393 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
4394 DstTy);
4395 }
4396
4397 Src->setName("astype");
4398 return Src;
4399 }
4400
4401 // Going from non-vec3 to vec3 is a special case and requires a bitcast
4402 // to vec4 if the original type is not vec4, then a shuffle vector to
4403 // get a vec3.
4404 if (NumElementsSrc != 3 && NumElementsDst == 3) {
4405 if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4406 auto Vec4Ty = llvm::VectorType::get(DstTy->getVectorElementType(), 4);
4407 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
4408 Vec4Ty);
4409 }
4410
4411 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
4412 Src->setName("astype");
4413 return Src;
4414 }
4415
4416 return Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
Although the value stored to 'Src' is used in the enclosing expression, the value is never actually read from 'Src'
4417 Src, DstTy, "astype");
4418}
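
The dead-store warning flagged above points at the 'return Src = ...' pattern: the value assigned back into 'Src' is returned, but 'Src' itself is never read again. One possible cleanup, shown only as a sketch rather than the actual upstream fix, is to return the cast directly:

  // Possible rewrite of the final return in VisitAsTypeExpr (sketch only):
  // drop the redundant assignment into 'Src', which is never read afterwards.
  return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
                                      Src, DstTy, "astype");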
4419
4420Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
4421 return CGF.EmitAtomicExpr(E).getScalarVal();
4422}
4423
4424//===----------------------------------------------------------------------===//
4425// Entry Point into this File
4426//===----------------------------------------------------------------------===//
4427
4428/// Emit the computation of the specified expression of scalar type, ignoring
4429/// the result.
4430Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
4431 assert(E && hasScalarEvaluationKind(E->getType()) &&
4432 "Invalid scalar expression to emit");
4433
4434 return ScalarExprEmitter(*this, IgnoreResultAssign)
4435 .Visit(const_cast<Expr *>(E));
4436}
4437
4438/// Emit a conversion from the specified type to the specified destination type,
4439/// both of which are LLVM scalar types.
4440Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
4441 QualType DstTy,
4442 SourceLocation Loc) {
4443 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
4444 "Invalid scalar expression to emit");
4445 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
4446}
4447
4448/// Emit a conversion from the specified complex type to the specified
4449/// destination type, where the destination type is an LLVM scalar type.
4450Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
4451 QualType SrcTy,
4452 QualType DstTy,
4453 SourceLocation Loc) {
4454 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
4455 "Invalid complex -> scalar conversion");
4456 return ScalarExprEmitter(*this)
4457 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
4458}
4459
4460
4461llvm::Value *CodeGenFunction::
4462EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
4463 bool isInc, bool isPre) {
4464 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
4465}
4466
4467LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
4468 // object->isa or (*object).isa
4469 // Generate code as for: *(Class*)object
4470
4471 Expr *BaseExpr = E->getBase();
4472 Address Addr = Address::invalid();
4473 if (BaseExpr->isRValue()) {
4474 Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
4475 } else {
4476 Addr = EmitLValue(BaseExpr).getAddress();
4477 }
4478
4479 // Cast the address to Class*.
4480 Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
4481 return MakeAddrLValue(Addr, E->getType());
4482}
4483
4484
4485LValue CodeGenFunction::EmitCompoundAssignmentLValue(
4486 const CompoundAssignOperator *E) {
4487 ScalarExprEmitter Scalar(*this);
4488 Value *Result = nullptr;
4489 switch (E->getOpcode()) {
4490#define COMPOUND_OP(Op) \
4491 case BO_##Op##Assign: \
4492 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
4493 Result)
4494 COMPOUND_OP(Mul);
4495 COMPOUND_OP(Div);
4496 COMPOUND_OP(Rem);
4497 COMPOUND_OP(Add);
4498 COMPOUND_OP(Sub);
4499 COMPOUND_OP(Shl);
4500 COMPOUND_OP(Shr);
4501 COMPOUND_OP(And);
4502 COMPOUND_OP(Xor);
4503 COMPOUND_OP(Or);
4504#undef COMPOUND_OP
4505
4506 case BO_PtrMemD:
4507 case BO_PtrMemI:
4508 case BO_Mul:
4509 case BO_Div:
4510 case BO_Rem:
4511 case BO_Add:
4512 case BO_Sub:
4513 case BO_Shl:
4514 case BO_Shr:
4515 case BO_LT:
4516 case BO_GT:
4517 case BO_LE:
4518 case BO_GE:
4519 case BO_EQ:
4520 case BO_NE:
4521 case BO_Cmp:
4522 case BO_And:
4523 case BO_Xor:
4524 case BO_Or:
4525 case BO_LAnd:
4526 case BO_LOr:
4527 case BO_Assign:
4528 case BO_Comma:
4529 llvm_unreachable("Not valid compound assignment operators");
4530 }
4531
4532 llvm_unreachable("Unhandled compound assignment operator");
4533}
4534
4535struct GEPOffsetAndOverflow {
4536 // The total (signed) byte offset for the GEP.
4537 llvm::Value *TotalOffset;
4538 // The offset overflow flag - true if the total offset overflows.
4539 llvm::Value *OffsetOverflows;
4540};
4541
4542/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
4543 /// and compute the total offset it applies from its base pointer BasePtr.
4544/// Returns offset in bytes and a boolean flag whether an overflow happened
4545/// during evaluation.
4546static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
4547 llvm::LLVMContext &VMContext,
4548 CodeGenModule &CGM,
4549 CGBuilderTy Builder) {
4550 const auto &DL = CGM.getDataLayout();
4551
4552 // The total (signed) byte offset for the GEP.
4553 llvm::Value *TotalOffset = nullptr;
4554
4555 // Was the GEP already reduced to a constant?
4556 if (isa<llvm::Constant>(GEPVal)) {
4557 // Compute the offset by casting both pointers to integers and subtracting:
4558 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
4559 Value *BasePtr_int =
4560 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
4561 Value *GEPVal_int =
4562 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
4563 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
4564 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
4565 }
4566
4567 auto *GEP = cast<llvm::GEPOperator>(GEPVal);
4568 assert(GEP->getPointerOperand() == BasePtr &&
4569 "BasePtr must be the the base of the GEP.");
4570 assert(GEP->isInBounds() && "Expected inbounds GEP");
4571
4572 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
4573
4574 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
4575 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
4576 auto *SAddIntrinsic =
4577 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
4578 auto *SMulIntrinsic =
4579 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
4580
4581 // The offset overflow flag - true if the total offset overflows.
4582 llvm::Value *OffsetOverflows = Builder.getFalse();
4583
4584 /// Return the result of the given binary operation.
4585 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
4586 llvm::Value *RHS) -> llvm::Value * {
4587 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
4588
4589 // If the operands are constants, return a constant result.
4590 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
4591 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
4592 llvm::APInt N;
4593 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
4594 /*Signed=*/true, N);
4595 if (HasOverflow)
4596 OffsetOverflows = Builder.getTrue();
4597 return llvm::ConstantInt::get(VMContext, N);
4598 }
4599 }
4600
4601 // Otherwise, compute the result with checked arithmetic.
4602 auto *ResultAndOverflow = Builder.CreateCall(
4603 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
4604 OffsetOverflows = Builder.CreateOr(
4605 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
4606 return Builder.CreateExtractValue(ResultAndOverflow, 0);
4607 };
4608
4609 // Determine the total byte offset by looking at each GEP operand.
4610 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
4611 GTI != GTE; ++GTI) {
4612 llvm::Value *LocalOffset;
4613 auto *Index = GTI.getOperand();
4614 // Compute the local offset contributed by this indexing step:
4615 if (auto *STy = GTI.getStructTypeOrNull()) {
4616 // For struct indexing, the local offset is the byte position of the
4617 // specified field.
4618 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
4619 LocalOffset = llvm::ConstantInt::get(
4620 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
4621 } else {
4622 // Otherwise this is array-like indexing. The local offset is the index
4623 // multiplied by the element size.
4624 auto *ElementSize = llvm::ConstantInt::get(
4625 IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
4626 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
4627 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
4628 }
4629
4630 // If this is the first offset, set it as the total offset. Otherwise, add
4631 // the local offset into the running total.
4632 if (!TotalOffset || TotalOffset == Zero)
4633 TotalOffset = LocalOffset;
4634 else
4635 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
4636 }
4637
4638 return {TotalOffset, OffsetOverflows};
4639}
4640
4641Value *
4642CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
4643 bool SignedIndices, bool IsSubtraction,
4644 SourceLocation Loc, const Twine &Name) {
4645 Value *GEPVal = Builder.CreateInBoundsGEP(Ptr, IdxList, Name);
4646
4647 // If the pointer overflow sanitizer isn't enabled, do nothing.
4648 if (!SanOpts.has(SanitizerKind::PointerOverflow))
4649 return GEPVal;
4650
4651 llvm::Type *PtrTy = Ptr->getType();
4652
4653 // Perform nullptr-and-offset check unless the nullptr is defined.
4654 bool PerformNullCheck = !NullPointerIsDefined(
4655 Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
4656 // Check for overflows unless the GEP got constant-folded,
4657 // and only in the default address space
4658 bool PerformOverflowCheck =
4659 !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
4660
4661 if (!(PerformNullCheck || PerformOverflowCheck))
4662 return GEPVal;
4663
4664 const auto &DL = CGM.getDataLayout();
4665
4666 SanitizerScope SanScope(this);
4667 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
4668
4669 GEPOffsetAndOverflow EvaluatedGEP =
4670 EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
4671
4672 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
4673 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
4674 "If the offset got constant-folded, we don't expect that there was an "
4675 "overflow.");
4676
4677 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
4678
4679 // Common case: if the total offset is zero, and we are using C++ semantics,
4680 // where nullptr+0 is defined, don't emit a check.
4681 if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
4682 return GEPVal;
4683
4684 // Now that we've computed the total offset, add it to the base pointer (with
4685 // wrapping semantics).
4686 auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
4687 auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);
4688
4689 llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
4690
4691 if (PerformNullCheck) {
4692 // In C++, if the base pointer evaluates to a null pointer value,
4693 // the only valid pointer this inbounds GEP can produce is also
4694 // a null pointer, so the offset must also evaluate to zero.
4695 // Likewise, if we have a non-zero base pointer, we cannot get a null pointer
4696 // as a result, so the offset cannot be -intptr_t(BasePtr).
4697 // In other words, both pointers are either null, or both are non-null,
4698 // or the behaviour is undefined.
4699 //
4700 // C, however, is more strict in this regard, and gives more
4701 // optimization opportunities: in C, additionally, nullptr+0 is undefined.
4702 // So both the input to the 'gep inbounds' AND the output must not be null.
4703 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
4704 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
4705 auto *Valid =
4706 CGM.getLangOpts().CPlusPlus
4707 ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
4708 : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
4709 Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
4710 }
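  // Editor's note (illustrative only, not part of CGExprScalar.cpp): the check
  // built above reduces to two shapes of IR predicate, depending on language:
  //   C++:  icmp eq (icmp ne %base, null), (icmp ne %result, null)
  //   C:    and     (icmp ne %base, null), (icmp ne %result, null)
  // So under -fsanitize=pointer-overflow, `(char *)0 + 0` is diagnosed when
  // compiled as C (both operands of the 'and' are false), but is accepted as
  // C++ (base and result agree on null-ness, since nullptr + 0 == nullptr).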
4711
4712 if (PerformOverflowCheck) {
4713 // The GEP is valid if:
4714 // 1) The total offset doesn't overflow, and
4715 // 2) The sign of the difference between the computed address and the base
4716 // pointer matches the sign of the total offset.
4717 llvm::Value *ValidGEP;
4718 auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
4719 if (SignedIndices) {
4720 // GEP is computed as `unsigned base + signed offset`, therefore:
4721 // * If offset was positive, then the computed pointer can not be
4722 // [unsigned] less than the base pointer, unless it overflowed.
4723 // * If offset was negative, then the computed pointer can not be
4724 // [unsigned] greater than the base pointer, unless it overflowed.
4725 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
4726 auto *PosOrZeroOffset =
4727 Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
4728 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
4729 ValidGEP =
4730 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
4731 } else if (!IsSubtraction) {
4732 // GEP is computed as `unsigned base + unsigned offset`, therefore the
4733 // computed pointer can not be [unsigned] less than base pointer,
4734 // unless there was an overflow.
4735 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
4736 ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
4737 } else {
4738 // GEP is computed as `unsigned base - unsigned offset`, therefore the
4739 // computed pointer can not be [unsigned] greater than base pointer,
4740 // unless there was an overflow.
4741 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
4742 ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
4743 }
4744 ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
4745 Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
4746 }
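  // Editor's note (worked example, not part of the original source): on a
  // 64-bit target, for the unsigned, non-subtraction case, take
  //   IntPtr      = 0xfffffffffffffff8   (base pointer as an integer)
  //   TotalOffset = 0x10                 (offset in bytes)
  //   ComputedGEP = IntPtr + TotalOffset = 0x8   (wrapped)
  // Then `icmp uge ComputedGEP, IntPtr` is false, ValidGEP is false, and the
  // PointerOverflow check fires, matching the carry that
  // @llvm.uadd.with.overflow would report for the same operands.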
4747
4748  assert(!Checks.empty() && "Should have produced some checks.");
4749
4750 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
4751 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
4752 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
4753 EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
4754
4755 return GEPVal;
4756}
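For context (not part of the annotated file): a minimal translation unit that exercises the code path above might look like the sketch below, assuming it is compiled with clang and `-fsanitize=pointer-overflow`. The file name and variable names are invented for illustration; only the sanitizer flag and the wrap-around behaviour are taken from the function above.

// pointer_overflow_demo.cpp -- illustrative only.
#include <cstdint>
#include <cstdio>

int main() {
  char buf[4];
  char *base = buf;
  // An offset this large makes `base + off` wrap around the address space,
  // so the `icmp uge ComputedGEP, IntPtr` comparison emitted by
  // EmitCheckedInBoundsGEP is false and UBSan reports a pointer-overflow
  // runtime error instead of silently producing a wrapped pointer.
  std::uintptr_t off = UINTPTR_MAX - 1;
  char *p = base + off;
  std::printf("%p\n", static_cast<void *>(p));
  return 0;
}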