Bug Summary

File: clang/lib/CodeGen/CGExprScalar.cpp
Warning: line 2791, column 51
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGExprScalar.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -relaxed-aliasing -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-11/lib/clang/11.0.0 -D CLANG_VENDOR="Debian " -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/include -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/include -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 
-internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-11/lib/clang/11.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/tools/clang/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fno-common -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-03-09-184146-41876-1 -x c++ /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp
1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGObjCRuntime.h"
17#include "CGOpenMPRuntime.h"
18#include "CodeGenFunction.h"
19#include "CodeGenModule.h"
20#include "ConstantEmitter.h"
21#include "TargetInfo.h"
22#include "clang/AST/ASTContext.h"
23#include "clang/AST/Attr.h"
24#include "clang/AST/DeclObjC.h"
25#include "clang/AST/Expr.h"
26#include "clang/AST/RecordLayout.h"
27#include "clang/AST/StmtVisitor.h"
28#include "clang/Basic/CodeGenOptions.h"
29#include "clang/Basic/FixedPoint.h"
30#include "clang/Basic/TargetInfo.h"
31#include "llvm/ADT/Optional.h"
32#include "llvm/IR/CFG.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/Function.h"
36#include "llvm/IR/GetElementPtrTypeIterator.h"
37#include "llvm/IR/GlobalVariable.h"
38#include "llvm/IR/Intrinsics.h"
39#include "llvm/IR/IntrinsicsPowerPC.h"
40#include "llvm/IR/Module.h"
41#include <cstdarg>
42
43using namespace clang;
44using namespace CodeGen;
45using llvm::Value;
46
47//===----------------------------------------------------------------------===//
48// Scalar Expression Emitter
49//===----------------------------------------------------------------------===//
50
51namespace {
52
53/// Determine whether the given binary operation may overflow.
54/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
55/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
56/// the returned overflow check is precise. The returned value is 'true' for
57/// all other opcodes, to be conservative.
58bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
59 BinaryOperator::Opcode Opcode, bool Signed,
60 llvm::APInt &Result) {
61 // Assume overflow is possible, unless we can prove otherwise.
62 bool Overflow = true;
63 const auto &LHSAP = LHS->getValue();
64 const auto &RHSAP = RHS->getValue();
65 if (Opcode == BO_Add) {
66 if (Signed)
67 Result = LHSAP.sadd_ov(RHSAP, Overflow);
68 else
69 Result = LHSAP.uadd_ov(RHSAP, Overflow);
70 } else if (Opcode == BO_Sub) {
71 if (Signed)
72 Result = LHSAP.ssub_ov(RHSAP, Overflow);
73 else
74 Result = LHSAP.usub_ov(RHSAP, Overflow);
75 } else if (Opcode == BO_Mul) {
76 if (Signed)
77 Result = LHSAP.smul_ov(RHSAP, Overflow);
78 else
79 Result = LHSAP.umul_ov(RHSAP, Overflow);
80 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
81 if (Signed && !RHS->isZero())
82 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
83 else
84 return false;
85 }
86 return Overflow;
87}
88
89struct BinOpInfo {
90 Value *LHS;
91 Value *RHS;
92 QualType Ty; // Computation Type.
93 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
94 FPOptions FPFeatures;
95 const Expr *E; // Entire expr, for error unsupported. May not be binop.
96
97 /// Check if the binop can result in integer overflow.
98 bool mayHaveIntegerOverflow() const {
99 // Without constant input, we can't rule out overflow.
100 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
101 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
102 if (!LHSCI || !RHSCI)
103 return true;
104
105 llvm::APInt Result;
106 return ::mayHaveIntegerOverflow(
107 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
108 }
109
110 /// Check if the binop computes a division or a remainder.
111 bool isDivremOp() const {
112 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
113 Opcode == BO_RemAssign;
114 }
115
116 /// Check if the binop can result in an integer division by zero.
117 bool mayHaveIntegerDivisionByZero() const {
118 if (isDivremOp())
119 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
120 return CI->isZero();
121 return true;
122 }
123
124 /// Check if the binop can result in a float division by zero.
125 bool mayHaveFloatDivisionByZero() const {
126 if (isDivremOp())
127 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
128 return CFP->isZero();
129 return true;
130 }
131
132 /// Check if either operand is a fixed point type or integer type, with at
133 /// least one being a fixed point type. In any case, this
134 /// operation did not follow usual arithmetic conversion and both operands may
135 /// not be the same.
136 bool isFixedPointBinOp() const {
137 // We cannot simply check the result type since comparison operations return
138 // an int.
139 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
140 QualType LHSType = BinOp->getLHS()->getType();
141 QualType RHSType = BinOp->getRHS()->getType();
142 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
143 }
144 return false;
145 }
146};
147
148static bool MustVisitNullValue(const Expr *E) {
149 // If a null pointer expression's type is the C++0x nullptr_t, then
150 // it's not necessarily a simple constant and it must be evaluated
151 // for its potential side effects.
152 return E->getType()->isNullPtrType();
153}
154
155/// If \p E is a widened promoted integer, get its base (unpromoted) type.
156static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
157 const Expr *E) {
158 const Expr *Base = E->IgnoreImpCasts();
159 if (E == Base)
160 return llvm::None;
161
162 QualType BaseTy = Base->getType();
163 if (!BaseTy->isPromotableIntegerType() ||
164 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
165 return llvm::None;
166
167 return BaseTy;
168}
169
170/// Check if \p E is a widened promoted integer.
171static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
172 return getUnwidenedIntegerType(Ctx, E).hasValue();
173}
174
175/// Check if we can skip the overflow check for \p Op.
176static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
177 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&(((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>
(Op.E)) && "Expected a unary or binary operator") ? static_cast
<void> (0) : __assert_fail ("(isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) && \"Expected a unary or binary operator\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 178, __PRETTY_FUNCTION__))
178 "Expected a unary or binary operator")(((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>
(Op.E)) && "Expected a unary or binary operator") ? static_cast
<void> (0) : __assert_fail ("(isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) && \"Expected a unary or binary operator\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 178, __PRETTY_FUNCTION__))
;
179
180 // If the binop has constant inputs and we can prove there is no overflow,
181 // we can elide the overflow check.
182 if (!Op.mayHaveIntegerOverflow())
183 return true;
184
185 // If a unary op has a widened operand, the op cannot overflow.
186 if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
187 return !UO->canOverflow();
188
189 // We usually don't need overflow checks for binops with widened operands.
190 // Multiplication with promoted unsigned operands is a special case.
191 const auto *BO = cast<BinaryOperator>(Op.E);
192 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
193 if (!OptionalLHSTy)
194 return false;
195
196 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
197 if (!OptionalRHSTy)
198 return false;
199
200 QualType LHSTy = *OptionalLHSTy;
201 QualType RHSTy = *OptionalRHSTy;
202
203 // This is the simple case: binops without unsigned multiplication, and with
204 // widened operands. No overflow check is needed here.
205 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
206 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
207 return true;
208
209 // For unsigned multiplication the overflow check can be elided if either one
210 // of the unpromoted types are less than half the size of the promoted type.
211 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
212 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
213 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
214}
215
216/// Update the FastMathFlags of LLVM IR from the FPOptions in LangOptions.
217static void updateFastMathFlags(llvm::FastMathFlags &FMF,
218 FPOptions FPFeatures) {
219 FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
220}
221
222/// Propagate fast-math flags from \p Op to the instruction in \p V.
223static Value *propagateFMFlags(Value *V, const BinOpInfo &Op) {
224 if (auto *I = dyn_cast<llvm::Instruction>(V)) {
225 llvm::FastMathFlags FMF = I->getFastMathFlags();
226 updateFastMathFlags(FMF, Op.FPFeatures);
227 I->setFastMathFlags(FMF);
228 }
229 return V;
230}
231
232class ScalarExprEmitter
233 : public StmtVisitor<ScalarExprEmitter, Value*> {
234 CodeGenFunction &CGF;
235 CGBuilderTy &Builder;
236 bool IgnoreResultAssign;
237 llvm::LLVMContext &VMContext;
238public:
239
240 ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
241 : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
242 VMContext(cgf.getLLVMContext()) {
243 }
244
245 //===--------------------------------------------------------------------===//
246 // Utilities
247 //===--------------------------------------------------------------------===//
248
249 bool TestAndClearIgnoreResultAssign() {
250 bool I = IgnoreResultAssign;
251 IgnoreResultAssign = false;
252 return I;
253 }
254
255 llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
256 LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
257 LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
258 return CGF.EmitCheckedLValue(E, TCK);
259 }
260
261 void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
262 const BinOpInfo &Info);
263
264 Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
265 return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
266 }
267
268 void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
269 const AlignValueAttr *AVAttr = nullptr;
270 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
271 const ValueDecl *VD = DRE->getDecl();
272
273 if (VD->getType()->isReferenceType()) {
274 if (const auto *TTy =
275 dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
276 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
277 } else {
278 // Assumptions for function parameters are emitted at the start of the
279 // function, so there is no need to repeat that here,
280 // unless the alignment-assumption sanitizer is enabled,
281 // then we prefer the assumption over alignment attribute
282 // on IR function param.
283 if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
284 return;
285
286 AVAttr = VD->getAttr<AlignValueAttr>();
287 }
288 }
289
290 if (!AVAttr)
291 if (const auto *TTy =
292 dyn_cast<TypedefType>(E->getType()))
293 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
294
295 if (!AVAttr)
296 return;
297
298 Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
299 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
300 CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
301 }
302
303 /// EmitLoadOfLValue - Given an expression with complex type that represents a
304 /// value l-value, this method emits the address of the l-value, then loads
305 /// and returns the result.
306 Value *EmitLoadOfLValue(const Expr *E) {
307 Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
308 E->getExprLoc());
309
310 EmitLValueAlignmentAssumption(E, V);
311 return V;
312 }
313
314 /// EmitConversionToBool - Convert the specified expression value to a
315 /// boolean (i1) truth value. This is equivalent to "Val != 0".
316 Value *EmitConversionToBool(Value *Src, QualType DstTy);
317
318 /// Emit a check that a conversion from a floating-point type does not
319 /// overflow.
320 void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
321 Value *Src, QualType SrcType, QualType DstType,
322 llvm::Type *DstTy, SourceLocation Loc);
323
324 /// Known implicit conversion check kinds.
325 /// Keep in sync with the enum of the same name in ubsan_handlers.h
326 enum ImplicitConversionCheckKind : unsigned char {
327 ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
328 ICCK_UnsignedIntegerTruncation = 1,
329 ICCK_SignedIntegerTruncation = 2,
330 ICCK_IntegerSignChange = 3,
331 ICCK_SignedIntegerTruncationOrSignChange = 4,
332 };
333
334 /// Emit a check that an [implicit] truncation of an integer does not
335 /// discard any bits. It is not UB, so we use the value after truncation.
336 void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
337 QualType DstType, SourceLocation Loc);
338
339 /// Emit a check that an [implicit] conversion of an integer does not change
340 /// the sign of the value. It is not UB, so we use the value after conversion.
341 /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
342 void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
343 QualType DstType, SourceLocation Loc);
344
345 /// Emit a conversion from the specified type to the specified destination
346 /// type, both of which are LLVM scalar types.
347 struct ScalarConversionOpts {
348 bool TreatBooleanAsSigned;
349 bool EmitImplicitIntegerTruncationChecks;
350 bool EmitImplicitIntegerSignChangeChecks;
351
352 ScalarConversionOpts()
353 : TreatBooleanAsSigned(false),
354 EmitImplicitIntegerTruncationChecks(false),
355 EmitImplicitIntegerSignChangeChecks(false) {}
356
357 ScalarConversionOpts(clang::SanitizerSet SanOpts)
358 : TreatBooleanAsSigned(false),
359 EmitImplicitIntegerTruncationChecks(
360 SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
361 EmitImplicitIntegerSignChangeChecks(
362 SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
363 };
364 Value *
365 EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
366 SourceLocation Loc,
367 ScalarConversionOpts Opts = ScalarConversionOpts());
368
369 /// Convert between either a fixed point and other fixed point or fixed point
370 /// and an integer.
371 Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
372 SourceLocation Loc);
373 Value *EmitFixedPointConversion(Value *Src, FixedPointSemantics &SrcFixedSema,
374 FixedPointSemantics &DstFixedSema,
375 SourceLocation Loc,
376 bool DstIsInteger = false);
377
378 /// Emit a conversion from the specified complex type to the specified
379 /// destination type, where the destination type is an LLVM scalar type.
380 Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
381 QualType SrcTy, QualType DstTy,
382 SourceLocation Loc);
383
384 /// EmitNullValue - Emit a value that corresponds to null for the given type.
385 Value *EmitNullValue(QualType Ty);
386
387 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
388 Value *EmitFloatToBoolConversion(Value *V) {
389 // Compare against 0.0 for fp scalars.
390 llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
391 return Builder.CreateFCmpUNE(V, Zero, "tobool");
392 }
393
394 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
395 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
396 Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
397
398 return Builder.CreateICmpNE(V, Zero, "tobool");
399 }
400
401 Value *EmitIntToBoolConversion(Value *V) {
402 // Because of the type rules of C, we often end up computing a
403 // logical value, then zero extending it to int, then wanting it
404 // as a logical value again. Optimize this common case.
405 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
406 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
407 Value *Result = ZI->getOperand(0);
408 // If there aren't any more uses, zap the instruction to save space.
409 // Note that there can be more uses, for example if this
410 // is the result of an assignment.
411 if (ZI->use_empty())
412 ZI->eraseFromParent();
413 return Result;
414 }
415 }
416
417 return Builder.CreateIsNotNull(V, "tobool");
418 }
419
420 //===--------------------------------------------------------------------===//
421 // Visitor Methods
422 //===--------------------------------------------------------------------===//
423
424 Value *Visit(Expr *E) {
425 ApplyDebugLocation DL(CGF, E);
426 return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
427 }
428
429 Value *VisitStmt(Stmt *S) {
430 S->dump(CGF.getContext().getSourceManager());
431 llvm_unreachable("Stmt can't have complex result type!")::llvm::llvm_unreachable_internal("Stmt can't have complex result type!"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 431)
;
432 }
433 Value *VisitExpr(Expr *S);
434
435 Value *VisitConstantExpr(ConstantExpr *E) {
436 return Visit(E->getSubExpr());
437 }
438 Value *VisitParenExpr(ParenExpr *PE) {
439 return Visit(PE->getSubExpr());
440 }
441 Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
442 return Visit(E->getReplacement());
443 }
444 Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
445 return Visit(GE->getResultExpr());
446 }
447 Value *VisitCoawaitExpr(CoawaitExpr *S) {
448 return CGF.EmitCoawaitExpr(*S).getScalarVal();
449 }
450 Value *VisitCoyieldExpr(CoyieldExpr *S) {
451 return CGF.EmitCoyieldExpr(*S).getScalarVal();
452 }
453 Value *VisitUnaryCoawait(const UnaryOperator *E) {
454 return Visit(E->getSubExpr());
455 }
456
457 // Leaves.
458 Value *VisitIntegerLiteral(const IntegerLiteral *E) {
459 return Builder.getInt(E->getValue());
460 }
461 Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
462 return Builder.getInt(E->getValue());
463 }
464 Value *VisitFloatingLiteral(const FloatingLiteral *E) {
465 return llvm::ConstantFP::get(VMContext, E->getValue());
466 }
467 Value *VisitCharacterLiteral(const CharacterLiteral *E) {
468 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
469 }
470 Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
471 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
472 }
473 Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
474 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
475 }
476 Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
477 return EmitNullValue(E->getType());
478 }
479 Value *VisitGNUNullExpr(const GNUNullExpr *E) {
480 return EmitNullValue(E->getType());
481 }
482 Value *VisitOffsetOfExpr(OffsetOfExpr *E);
483 Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
484 Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
485 llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
486 return Builder.CreateBitCast(V, ConvertType(E->getType()));
487 }
488
489 Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
490 return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
491 }
492
493 Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
494 return CGF.EmitPseudoObjectRValue(E).getScalarVal();
495 }
496
497 Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
498 if (E->isGLValue())
499 return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
500 E->getExprLoc());
501
502 // Otherwise, assume the mapping is the scalar directly.
503 return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
504 }
505
506 // l-values.
507 Value *VisitDeclRefExpr(DeclRefExpr *E) {
508 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
509 return CGF.emitScalarConstant(Constant, E);
510 return EmitLoadOfLValue(E);
511 }
512
513 Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
514 return CGF.EmitObjCSelectorExpr(E);
515 }
516 Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
517 return CGF.EmitObjCProtocolExpr(E);
518 }
519 Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
520 return EmitLoadOfLValue(E);
521 }
522 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
523 if (E->getMethodDecl() &&
524 E->getMethodDecl()->getReturnType()->isReferenceType())
525 return EmitLoadOfLValue(E);
526 return CGF.EmitObjCMessageExpr(E).getScalarVal();
527 }
528
529 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
530 LValue LV = CGF.EmitObjCIsaExpr(E);
531 Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
532 return V;
533 }
534
535 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
536 VersionTuple Version = E->getVersion();
537
538 // If we're checking for a platform older than our minimum deployment
539 // target, we can fold the check away.
540 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
541 return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
542
543 Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
544 llvm::Value *Args[] = {
545 llvm::ConstantInt::get(CGF.CGM.Int32Ty, Version.getMajor()),
546 llvm::ConstantInt::get(CGF.CGM.Int32Ty, Min ? *Min : 0),
547 llvm::ConstantInt::get(CGF.CGM.Int32Ty, SMin ? *SMin : 0),
548 };
549
550 return CGF.EmitBuiltinAvailable(Args);
551 }
552
553 Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
554 Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
555 Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
556 Value *VisitMemberExpr(MemberExpr *E);
557 Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
558 Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
559 return EmitLoadOfLValue(E);
560 }
561
562 Value *VisitInitListExpr(InitListExpr *E);
563
564 Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
565 assert(CGF.getArrayInitIndex() &&((CGF.getArrayInitIndex() && "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?"
) ? static_cast<void> (0) : __assert_fail ("CGF.getArrayInitIndex() && \"ArrayInitIndexExpr not inside an ArrayInitLoopExpr?\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 566, __PRETTY_FUNCTION__))
566 "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?")((CGF.getArrayInitIndex() && "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?"
) ? static_cast<void> (0) : __assert_fail ("CGF.getArrayInitIndex() && \"ArrayInitIndexExpr not inside an ArrayInitLoopExpr?\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 566, __PRETTY_FUNCTION__))
;
567 return CGF.getArrayInitIndex();
568 }
569
570 Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
571 return EmitNullValue(E->getType());
572 }
573 Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
574 CGF.CGM.EmitExplicitCastExprType(E, &CGF);
575 return VisitCastExpr(E);
576 }
577 Value *VisitCastExpr(CastExpr *E);
578
579 Value *VisitCallExpr(const CallExpr *E) {
580 if (E->getCallReturnType(CGF.getContext())->isReferenceType())
581 return EmitLoadOfLValue(E);
582
583 Value *V = CGF.EmitCallExpr(E).getScalarVal();
584
585 EmitLValueAlignmentAssumption(E, V);
586 return V;
587 }
588
589 Value *VisitStmtExpr(const StmtExpr *E);
590
591 // Unary Operators.
592 Value *VisitUnaryPostDec(const UnaryOperator *E) {
593 LValue LV = EmitLValue(E->getSubExpr());
594 return EmitScalarPrePostIncDec(E, LV, false, false);
595 }
596 Value *VisitUnaryPostInc(const UnaryOperator *E) {
597 LValue LV = EmitLValue(E->getSubExpr());
598 return EmitScalarPrePostIncDec(E, LV, true, false);
599 }
600 Value *VisitUnaryPreDec(const UnaryOperator *E) {
601 LValue LV = EmitLValue(E->getSubExpr());
602 return EmitScalarPrePostIncDec(E, LV, false, true);
603 }
604 Value *VisitUnaryPreInc(const UnaryOperator *E) {
605 LValue LV = EmitLValue(E->getSubExpr());
606 return EmitScalarPrePostIncDec(E, LV, true, true);
607 }
608
609 llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
610 llvm::Value *InVal,
611 bool IsInc);
612
613 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
614 bool isInc, bool isPre);
615
616
617 Value *VisitUnaryAddrOf(const UnaryOperator *E) {
618 if (isa<MemberPointerType>(E->getType())) // never sugared
619 return CGF.CGM.getMemberPointerConstant(E);
620
621 return EmitLValue(E->getSubExpr()).getPointer(CGF);
622 }
623 Value *VisitUnaryDeref(const UnaryOperator *E) {
624 if (E->getType()->isVoidType())
625 return Visit(E->getSubExpr()); // the actual value should be unused
626 return EmitLoadOfLValue(E);
627 }
628 Value *VisitUnaryPlus(const UnaryOperator *E) {
629 // This differs from gcc, though, most likely due to a bug in gcc.
630 TestAndClearIgnoreResultAssign();
631 return Visit(E->getSubExpr());
632 }
633 Value *VisitUnaryMinus (const UnaryOperator *E);
634 Value *VisitUnaryNot (const UnaryOperator *E);
635 Value *VisitUnaryLNot (const UnaryOperator *E);
636 Value *VisitUnaryReal (const UnaryOperator *E);
637 Value *VisitUnaryImag (const UnaryOperator *E);
638 Value *VisitUnaryExtension(const UnaryOperator *E) {
639 return Visit(E->getSubExpr());
640 }
641
642 // C++
643 Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
644 return EmitLoadOfLValue(E);
645 }
646 Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
647 auto &Ctx = CGF.getContext();
648 APValue Evaluated =
649 SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
650 return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
651 SLE->getType());
652 }
653
654 Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
655 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
656 return Visit(DAE->getExpr());
657 }
658 Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
659 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
660 return Visit(DIE->getExpr());
661 }
662 Value *VisitCXXThisExpr(CXXThisExpr *TE) {
663 return CGF.LoadCXXThis();
664 }
665
666 Value *VisitExprWithCleanups(ExprWithCleanups *E);
667 Value *VisitCXXNewExpr(const CXXNewExpr *E) {
668 return CGF.EmitCXXNewExpr(E);
669 }
670 Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
671 CGF.EmitCXXDeleteExpr(E);
672 return nullptr;
673 }
674
675 Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
676 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
677 }
678
679 Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
680 return Builder.getInt1(E->isSatisfied());
681 }
682
683 Value *VisitRequiresExpr(const RequiresExpr *E) {
684 return Builder.getInt1(E->isSatisfied());
685 }
686
687 Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
688 return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
689 }
690
691 Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
692 return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
693 }
694
695 Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
696 // C++ [expr.pseudo]p1:
697 // The result shall only be used as the operand for the function call
698 // operator (), and the result of such a call has type void. The only
699 // effect is the evaluation of the postfix-expression before the dot or
700 // arrow.
701 CGF.EmitScalarExpr(E->getBase());
702 return nullptr;
703 }
704
705 Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
706 return EmitNullValue(E->getType());
707 }
708
709 Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
710 CGF.EmitCXXThrowExpr(E);
711 return nullptr;
712 }
713
714 Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
715 return Builder.getInt1(E->getValue());
716 }
717
718 // Binary Operators.
719 Value *EmitMul(const BinOpInfo &Ops) {
720 if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
721 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
722 case LangOptions::SOB_Defined:
723 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
724 case LangOptions::SOB_Undefined:
725 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
726 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
727 LLVM_FALLTHROUGH[[gnu::fallthrough]];
728 case LangOptions::SOB_Trapping:
729 if (CanElideOverflowCheck(CGF.getContext(), Ops))
730 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
731 return EmitOverflowCheckedBinOp(Ops);
732 }
733 }
734
735 if (Ops.Ty->isUnsignedIntegerType() &&
736 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
737 !CanElideOverflowCheck(CGF.getContext(), Ops))
738 return EmitOverflowCheckedBinOp(Ops);
739
740 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
741 Value *V = Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
742 return propagateFMFlags(V, Ops);
743 }
744 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
745 }
746 /// Create a binary op that checks for overflow.
747 /// Currently only supports +, - and *.
748 Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
749
750 // Check for undefined division and modulus behaviors.
751 void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
752 llvm::Value *Zero,bool isDiv);
753 // Common helper for getting how wide LHS of shift is.
754 static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS);
// Out-of-line emitters for the arithmetic and shift binary operators;
// each one is invoked through the HANDLEBINOP-generated visitors below.
755 Value *EmitDiv(const BinOpInfo &Ops);
756 Value *EmitRem(const BinOpInfo &Ops);
757 Value *EmitAdd(const BinOpInfo &Ops);
758 Value *EmitSub(const BinOpInfo &Ops);
759 Value *EmitShl(const BinOpInfo &Ops);
760 Value *EmitShr(const BinOpInfo &Ops);
761 Value *EmitAnd(const BinOpInfo &Ops) {
762 return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
763 }
764 Value *EmitXor(const BinOpInfo &Ops) {
765 return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
766 }
767 Value *EmitOr (const BinOpInfo &Ops) {
768 return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
769 }
770
771 // Helper functions for fixed point binary operations.
772 Value *EmitFixedPointBinOp(const BinOpInfo &Ops);
773
// Gathers the two emitted operands plus type/FP-feature info for a
// binary operator into a BinOpInfo for the Emit* helpers above.
774 BinOpInfo EmitBinOps(const BinaryOperator *E);
775 LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
776 Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
777 Value *&Result);
778
779 Value *EmitCompoundAssign(const CompoundAssignOperator *E,
780 Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
781
782 // Binary operators and binary compound assignment operators.
// For each operator OP this macro stamps out VisitBinOP (plain form,
// forwarding to EmitOP) and VisitBinOPAssign (compound-assignment form,
// forwarding to EmitCompoundAssign with EmitOP as the combiner).
783#define HANDLEBINOP(OP) \
784 Value *VisitBin ## OP(const BinaryOperator *E) { \
785 return Emit ## OP(EmitBinOps(E)); \
786 } \
787 Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
788 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
789 }
790 HANDLEBINOP(Mul)
791 HANDLEBINOP(Div)
792 HANDLEBINOP(Rem)
793 HANDLEBINOP(Add)
794 HANDLEBINOP(Sub)
795 HANDLEBINOP(Shl)
796 HANDLEBINOP(Shr)
797 HANDLEBINOP(And)
798 HANDLEBINOP(Xor)
799 HANDLEBINOP(Or)
800#undef HANDLEBINOP
801
802 // Comparisons.
// Shared implementation for all six relational/equality visitors; picks
// the unsigned-int, signed-int, or float predicate based on operand type.
803 Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
804 llvm::CmpInst::Predicate SICmpOpc,
805 llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
// Stamps out VisitBinCODE forwarding to EmitCompare with the three
// candidate predicates; SIG marks the FP compare as signaling.
806#define VISITCOMP(CODE, UI, SI, FP, SIG) \
807 Value *VisitBin##CODE(const BinaryOperator *E) { \
808 return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
809 llvm::FCmpInst::FP, SIG); }
810 VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
811 VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
812 VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
813 VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
814 VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
815 VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
816#undef VISITCOMP
817
// Assignment and the short-circuiting/comma operators need custom control
// flow, so they are defined out of line rather than via HANDLEBINOP.
818 Value *VisitBinAssign (const BinaryOperator *E);
819
820 Value *VisitBinLAnd (const BinaryOperator *E);
821 Value *VisitBinLOr (const BinaryOperator *E);
822 Value *VisitBinComma (const BinaryOperator *E);
823
824 Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
825 Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
826
827 Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
828 return Visit(E->getSemanticForm());
829 }
830
831 // Other Operators.
// Remaining scalar expression kinds with out-of-line implementations.
832 Value *VisitBlockExpr(const BlockExpr *BE);
833 Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
834 Value *VisitChooseExpr(ChooseExpr *CE);
835 Value *VisitVAArgExpr(VAArgExpr *VE);
// Objective-C literal expressions: each one delegates to CodeGenFunction,
// which knows how to materialize the corresponding runtime object.
836 Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
837 return CGF.EmitObjCStringLiteral(E);
838 }
839 Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
840 return CGF.EmitObjCBoxedExpr(E);
841 }
842 Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
843 return CGF.EmitObjCArrayLiteral(E);
844 }
845 Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
846 return CGF.EmitObjCDictionaryLiteral(E);
847 }
// OpenCL as_type() reinterpretation and C11 _Atomic expressions.
848 Value *VisitAsTypeExpr(AsTypeExpr *CE);
849 Value *VisitAtomicExpr(AtomicExpr *AE);
850};
851} // end anonymous namespace.
852
853//===----------------------------------------------------------------------===//
854// Utilities
855//===----------------------------------------------------------------------===//
856
857/// EmitConversionToBool - Convert the specified expression value to a
858/// boolean (i1) truth value. This is equivalent to "Val != 0".
859Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
860 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs")((SrcType.isCanonical() && "EmitScalarConversion strips typedefs"
) ? static_cast<void> (0) : __assert_fail ("SrcType.isCanonical() && \"EmitScalarConversion strips typedefs\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 860, __PRETTY_FUNCTION__))
;
861
862 if (SrcType->isRealFloatingType())
863 return EmitFloatToBoolConversion(Src);
864
865 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
866 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
867
868 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&(((SrcType->isIntegerType() || isa<llvm::PointerType>
(Src->getType())) && "Unknown scalar type to convert"
) ? static_cast<void> (0) : __assert_fail ("(SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) && \"Unknown scalar type to convert\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 869, __PRETTY_FUNCTION__))
869 "Unknown scalar type to convert")(((SrcType->isIntegerType() || isa<llvm::PointerType>
(Src->getType())) && "Unknown scalar type to convert"
) ? static_cast<void> (0) : __assert_fail ("(SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) && \"Unknown scalar type to convert\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 869, __PRETTY_FUNCTION__))
;
870
871 if (isa<llvm::IntegerType>(Src->getType()))
872 return EmitIntToBoolConversion(Src);
873
874 assert(isa<llvm::PointerType>(Src->getType()))((isa<llvm::PointerType>(Src->getType())) ? static_cast
<void> (0) : __assert_fail ("isa<llvm::PointerType>(Src->getType())"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 874, __PRETTY_FUNCTION__))
;
875 return EmitPointerToBoolConversion(Src, SrcType);
876}
877
878void ScalarExprEmitter::EmitFloatConversionCheck(
879 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
880 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
881 assert(SrcType->isFloatingType() && "not a conversion from floating point")((SrcType->isFloatingType() && "not a conversion from floating point"
) ? static_cast<void> (0) : __assert_fail ("SrcType->isFloatingType() && \"not a conversion from floating point\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 881, __PRETTY_FUNCTION__))
;
882 if (!isa<llvm::IntegerType>(DstTy))
883 return;
884
885 CodeGenFunction::SanitizerScope SanScope(&CGF);
886 using llvm::APFloat;
887 using llvm::APSInt;
888
889 llvm::Value *Check = nullptr;
890 const llvm::fltSemantics &SrcSema =
891 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
892
893 // Floating-point to integer. This has undefined behavior if the source is
894 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
895 // to an integer).
896 unsigned Width = CGF.getContext().getIntWidth(DstType);
897 bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
898
899 APSInt Min = APSInt::getMinValue(Width, Unsigned);
900 APFloat MinSrc(SrcSema, APFloat::uninitialized);
901 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
902 APFloat::opOverflow)
903 // Don't need an overflow check for lower bound. Just check for
904 // -Inf/NaN.
905 MinSrc = APFloat::getInf(SrcSema, true);
906 else
907 // Find the largest value which is too small to represent (before
908 // truncation toward zero).
909 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
910
911 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
912 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
913 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
914 APFloat::opOverflow)
915 // Don't need an overflow check for upper bound. Just check for
916 // +Inf/NaN.
917 MaxSrc = APFloat::getInf(SrcSema, false);
918 else
919 // Find the smallest value which is too large to represent (before
920 // truncation toward zero).
921 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
922
923 // If we're converting from __half, convert the range to float to match
924 // the type of src.
925 if (OrigSrcType->isHalfType()) {
926 const llvm::fltSemantics &Sema =
927 CGF.getContext().getFloatTypeSemantics(SrcType);
928 bool IsInexact;
929 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
930 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
931 }
932
933 llvm::Value *GE =
934 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
935 llvm::Value *LE =
936 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
937 Check = Builder.CreateAnd(GE, LE);
938
939 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
940 CGF.EmitCheckTypeDescriptor(OrigSrcType),
941 CGF.EmitCheckTypeDescriptor(DstType)};
942 CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
943 SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
944}
945
946// Should be called within CodeGenFunction::SanitizerScope RAII scope.
947// Returns 'i1 false' when the truncation Src -> Dst was lossy.
948static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
949 std::pair<llvm::Value *, SanitizerMask>>
950EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
951 QualType DstType, CGBuilderTy &Builder) {
952 llvm::Type *SrcTy = Src->getType();
953 llvm::Type *DstTy = Dst->getType();
954 (void)DstTy; // Only used in assert()
955
956 // This should be truncation of integral types.
957 assert(Src != Dst)((Src != Dst) ? static_cast<void> (0) : __assert_fail (
"Src != Dst", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 957, __PRETTY_FUNCTION__))
;
958 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits())((SrcTy->getScalarSizeInBits() > Dst->getType()->
getScalarSizeInBits()) ? static_cast<void> (0) : __assert_fail
("SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits()"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 958, __PRETTY_FUNCTION__))
;
959 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&((isa<llvm::IntegerType>(SrcTy) && isa<llvm::
IntegerType>(DstTy) && "non-integer llvm type") ? static_cast
<void> (0) : __assert_fail ("isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) && \"non-integer llvm type\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 960, __PRETTY_FUNCTION__))
960 "non-integer llvm type")((isa<llvm::IntegerType>(SrcTy) && isa<llvm::
IntegerType>(DstTy) && "non-integer llvm type") ? static_cast
<void> (0) : __assert_fail ("isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) && \"non-integer llvm type\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 960, __PRETTY_FUNCTION__))
;
961
962 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
963 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
964
965 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
966 // Else, it is a signed truncation.
967 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
968 SanitizerMask Mask;
969 if (!SrcSigned && !DstSigned) {
970 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
971 Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
972 } else {
973 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
974 Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
975 }
976
977 llvm::Value *Check = nullptr;
978 // 1. Extend the truncated value back to the same width as the Src.
979 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
980 // 2. Equality-compare with the original source value
981 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
982 // If the comparison result is 'i1 false', then the truncation was lossy.
983 return std::make_pair(Kind, std::make_pair(Check, Mask));
984}
985
986static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
987 QualType SrcType, QualType DstType) {
988 return SrcType->isIntegerType() && DstType->isIntegerType();
989}
990
991void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
992 Value *Dst, QualType DstType,
993 SourceLocation Loc) {
994 if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
995 return;
996
997 // We only care about int->int conversions here.
998 // We ignore conversions to/from pointer and/or bool.
999 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1000 DstType))
1001 return;
1002
1003 unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1004 unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1005 // This must be truncation. Else we do not care.
1006 if (SrcBits <= DstBits)
1007 return;
1008
1009 assert(!DstType->isBooleanType() && "we should not get here with booleans.")((!DstType->isBooleanType() && "we should not get here with booleans."
) ? static_cast<void> (0) : __assert_fail ("!DstType->isBooleanType() && \"we should not get here with booleans.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1009, __PRETTY_FUNCTION__))
;
1010
1011 // If the integer sign change sanitizer is enabled,
1012 // and we are truncating from larger unsigned type to smaller signed type,
1013 // let that next sanitizer deal with it.
1014 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1015 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1016 if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1017 (!SrcSigned && DstSigned))
1018 return;
1019
1020 CodeGenFunction::SanitizerScope SanScope(&CGF);
1021
1022 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1023 std::pair<llvm::Value *, SanitizerMask>>
1024 Check =
1025 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1026 // If the comparison result is 'i1 false', then the truncation was lossy.
1027
1028 // Do we care about this type of truncation?
1029 if (!CGF.SanOpts.has(Check.second.second))
1030 return;
1031
1032 llvm::Constant *StaticArgs[] = {
1033 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1034 CGF.EmitCheckTypeDescriptor(DstType),
1035 llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
1036 CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
1037 {Src, Dst});
1038}
1039
1040// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1041// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1042static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1043 std::pair<llvm::Value *, SanitizerMask>>
1044EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1045 QualType DstType, CGBuilderTy &Builder) {
1046 llvm::Type *SrcTy = Src->getType();
1047 llvm::Type *DstTy = Dst->getType();
1048
1049 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&((isa<llvm::IntegerType>(SrcTy) && isa<llvm::
IntegerType>(DstTy) && "non-integer llvm type") ? static_cast
<void> (0) : __assert_fail ("isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) && \"non-integer llvm type\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1050, __PRETTY_FUNCTION__))
1050 "non-integer llvm type")((isa<llvm::IntegerType>(SrcTy) && isa<llvm::
IntegerType>(DstTy) && "non-integer llvm type") ? static_cast
<void> (0) : __assert_fail ("isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) && \"non-integer llvm type\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1050, __PRETTY_FUNCTION__))
;
1051
1052 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1053 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1054 (void)SrcSigned; // Only used in assert()
1055 (void)DstSigned; // Only used in assert()
1056 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1057 unsigned DstBits = DstTy->getScalarSizeInBits();
1058 (void)SrcBits; // Only used in assert()
1059 (void)DstBits; // Only used in assert()
1060
1061 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&((((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
"either the widths should be different, or the signednesses."
) ? static_cast<void> (0) : __assert_fail ("((SrcBits != DstBits) || (SrcSigned != DstSigned)) && \"either the widths should be different, or the signednesses.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1062, __PRETTY_FUNCTION__))
1062 "either the widths should be different, or the signednesses.")((((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
"either the widths should be different, or the signednesses."
) ? static_cast<void> (0) : __assert_fail ("((SrcBits != DstBits) || (SrcSigned != DstSigned)) && \"either the widths should be different, or the signednesses.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1062, __PRETTY_FUNCTION__))
;
1063
1064 // NOTE: zero value is considered to be non-negative.
1065 auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
1066 const char *Name) -> Value * {
1067 // Is this value a signed type?
1068 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1069 llvm::Type *VTy = V->getType();
1070 if (!VSigned) {
1071 // If the value is unsigned, then it is never negative.
1072 // FIXME: can we encounter non-scalar VTy here?
1073 return llvm::ConstantInt::getFalse(VTy->getContext());
1074 }
1075 // Get the zero of the same type with which we will be comparing.
1076 llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1077 // %V.isnegative = icmp slt %V, 0
1078 // I.e is %V *strictly* less than zero, does it have negative value?
1079 return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1080 llvm::Twine(Name) + "." + V->getName() +
1081 ".negativitycheck");
1082 };
1083
1084 // 1. Was the old Value negative?
1085 llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
1086 // 2. Is the new Value negative?
1087 llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
1088 // 3. Now, was the 'negativity status' preserved during the conversion?
1089 // NOTE: conversion from negative to zero is considered to change the sign.
1090 // (We want to get 'false' when the conversion changed the sign)
1091 // So we should just equality-compare the negativity statuses.
1092 llvm::Value *Check = nullptr;
1093 Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1094 // If the comparison result is 'false', then the conversion changed the sign.
1095 return std::make_pair(
1096 ScalarExprEmitter::ICCK_IntegerSignChange,
1097 std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
1098}
1099
1100void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1101 Value *Dst, QualType DstType,
1102 SourceLocation Loc) {
1103 if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
1104 return;
1105
1106 llvm::Type *SrcTy = Src->getType();
1107 llvm::Type *DstTy = Dst->getType();
1108
1109 // We only care about int->int conversions here.
1110 // We ignore conversions to/from pointer and/or bool.
1111 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1112 DstType))
1113 return;
1114
1115 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1116 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1117 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1118 unsigned DstBits = DstTy->getScalarSizeInBits();
1119
1120 // Now, we do not need to emit the check in *all* of the cases.
1121 // We can avoid emitting it in some obvious cases where it would have been
1122 // dropped by the opt passes (instcombine) always anyways.
1123 // If it's a cast between effectively the same type, no check.
1124 // NOTE: this is *not* equivalent to checking the canonical types.
1125 if (SrcSigned == DstSigned && SrcBits == DstBits)
1126 return;
1127 // At least one of the values needs to have signed type.
1128 // If both are unsigned, then obviously, neither of them can be negative.
1129 if (!SrcSigned && !DstSigned)
1130 return;
1131 // If the conversion is to *larger* *signed* type, then no check is needed.
1132 // Because either sign-extension happens (so the sign will remain),
1133 // or zero-extension will happen (the sign bit will be zero.)
1134 if ((DstBits > SrcBits) && DstSigned)
1135 return;
1136 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1137 (SrcBits > DstBits) && SrcSigned) {
1138 // If the signed integer truncation sanitizer is enabled,
1139 // and this is a truncation from signed type, then no check is needed.
1140 // Because here sign change check is interchangeable with truncation check.
1141 return;
1142 }
1143 // That's it. We can't rule out any more cases with the data we have.
1144
1145 CodeGenFunction::SanitizerScope SanScope(&CGF);
1146
1147 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1148 std::pair<llvm::Value *, SanitizerMask>>
1149 Check;
1150
1151 // Each of these checks needs to return 'false' when an issue was detected.
1152 ImplicitConversionCheckKind CheckKind;
1153 llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
1154 // So we can 'and' all the checks together, and still get 'false',
1155 // if at least one of the checks detected an issue.
1156
1157 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1158 CheckKind = Check.first;
1159 Checks.emplace_back(Check.second);
1160
1161 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1162 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1163 // If the signed integer truncation sanitizer was enabled,
1164 // and we are truncating from larger unsigned type to smaller signed type,
1165 // let's handle the case we skipped in that check.
1166 Check =
1167 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1168 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1169 Checks.emplace_back(Check.second);
1170 // If the comparison result is 'i1 false', then the truncation was lossy.
1171 }
1172
1173 llvm::Constant *StaticArgs[] = {
1174 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1175 CGF.EmitCheckTypeDescriptor(DstType),
1176 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
1177 // EmitCheck() will 'and' all the checks together.
1178 CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
1179 {Src, Dst});
1180}
1181
1182/// Emit a conversion from the specified type to the specified destination type,
1183/// both of which are LLVM scalar types.
1184Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1185 QualType DstType,
1186 SourceLocation Loc,
1187 ScalarConversionOpts Opts) {
1188 // All conversions involving fixed point types should be handled by the
1189 // EmitFixedPoint family functions. This is done to prevent bloating up this
1190 // function more, and although fixed point numbers are represented by
1191 // integers, we do not want to follow any logic that assumes they should be
1192 // treated as integers.
1193 // TODO(leonardchan): When necessary, add another if statement checking for
1194 // conversions to fixed point types from other types.
1195 if (SrcType->isFixedPointType()) {
1196 if (DstType->isBooleanType())
1197 // It is important that we check this before checking if the dest type is
1198 // an integer because booleans are technically integer types.
1199 // We do not need to check the padding bit on unsigned types if unsigned
1200 // padding is enabled because overflow into this bit is undefined
1201 // behavior.
1202 return Builder.CreateIsNotNull(Src, "tobool");
1203 if (DstType->isFixedPointType() || DstType->isIntegerType())
1204 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1205
1206 llvm_unreachable(::llvm::llvm_unreachable_internal("Unhandled scalar conversion from a fixed point type to another type."
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1207)
1207 "Unhandled scalar conversion from a fixed point type to another type.")::llvm::llvm_unreachable_internal("Unhandled scalar conversion from a fixed point type to another type."
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1207)
;
1208 } else if (DstType->isFixedPointType()) {
1209 if (SrcType->isIntegerType())
1210 // This also includes converting booleans and enums to fixed point types.
1211 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1212
1213 llvm_unreachable(::llvm::llvm_unreachable_internal("Unhandled scalar conversion to a fixed point type from another type."
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1214)
1214 "Unhandled scalar conversion to a fixed point type from another type.")::llvm::llvm_unreachable_internal("Unhandled scalar conversion to a fixed point type from another type."
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1214)
;
1215 }
1216
1217 QualType NoncanonicalSrcType = SrcType;
1218 QualType NoncanonicalDstType = DstType;
1219
1220 SrcType = CGF.getContext().getCanonicalType(SrcType);
1221 DstType = CGF.getContext().getCanonicalType(DstType);
1222 if (SrcType == DstType) return Src;
1223
1224 if (DstType->isVoidType()) return nullptr;
1225
1226 llvm::Value *OrigSrc = Src;
1227 QualType OrigSrcType = SrcType;
1228 llvm::Type *SrcTy = Src->getType();
1229
1230 // Handle conversions to bool first, they are special: comparisons against 0.
1231 if (DstType->isBooleanType())
1232 return EmitConversionToBool(Src, SrcType);
1233
1234 llvm::Type *DstTy = ConvertType(DstType);
1235
1236 // Cast from half through float if half isn't a native type.
1237 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1238 // Cast to FP using the intrinsic if the half type itself isn't supported.
1239 if (DstTy->isFloatingPointTy()) {
1240 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1241 return Builder.CreateCall(
1242 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1243 Src);
1244 } else {
1245 // Cast to other types through float, using either the intrinsic or FPExt,
1246 // depending on whether the half type itself is supported
1247 // (as opposed to operations on half, available with NativeHalfType).
1248 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1249 Src = Builder.CreateCall(
1250 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1251 CGF.CGM.FloatTy),
1252 Src);
1253 } else {
1254 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1255 }
1256 SrcType = CGF.getContext().FloatTy;
1257 SrcTy = CGF.FloatTy;
1258 }
1259 }
1260
1261 // Ignore conversions like int -> uint.
1262 if (SrcTy == DstTy) {
1263 if (Opts.EmitImplicitIntegerSignChangeChecks)
1264 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1265 NoncanonicalDstType, Loc);
1266
1267 return Src;
1268 }
1269
1270 // Handle pointer conversions next: pointers can only be converted to/from
1271 // other pointers and integers. Check for pointer types in terms of LLVM, as
1272 // some native types (like Obj-C id) may map to a pointer type.
1273 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1274 // The source value may be an integer, or a pointer.
1275 if (isa<llvm::PointerType>(SrcTy))
1276 return Builder.CreateBitCast(Src, DstTy, "conv");
1277
1278 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?")((SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?"
) ? static_cast<void> (0) : __assert_fail ("SrcType->isIntegerType() && \"Not ptr->ptr or int->ptr conversion?\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1278, __PRETTY_FUNCTION__))
;
1279 // First, convert to the correct width so that we control the kind of
1280 // extension.
1281 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1282 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1283 llvm::Value* IntResult =
1284 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1285 // Then, cast to pointer.
1286 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1287 }
1288
1289 if (isa<llvm::PointerType>(SrcTy)) {
1290 // Must be an ptr to int cast.
1291 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?")((isa<llvm::IntegerType>(DstTy) && "not ptr->int?"
) ? static_cast<void> (0) : __assert_fail ("isa<llvm::IntegerType>(DstTy) && \"not ptr->int?\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1291, __PRETTY_FUNCTION__))
;
1292 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1293 }
1294
1295 // A scalar can be splatted to an extended vector of the same element type
1296 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1297 // Sema should add casts to make sure that the source expression's type is
1298 // the same as the vector's element type (sans qualifiers)
1299 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==((DstType->castAs<ExtVectorType>()->getElementType
().getTypePtr() == SrcType.getTypePtr() && "Splatted expr doesn't match with vector element type?"
) ? static_cast<void> (0) : __assert_fail ("DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() == SrcType.getTypePtr() && \"Splatted expr doesn't match with vector element type?\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1301, __PRETTY_FUNCTION__))
1300 SrcType.getTypePtr() &&((DstType->castAs<ExtVectorType>()->getElementType
().getTypePtr() == SrcType.getTypePtr() && "Splatted expr doesn't match with vector element type?"
) ? static_cast<void> (0) : __assert_fail ("DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() == SrcType.getTypePtr() && \"Splatted expr doesn't match with vector element type?\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1301, __PRETTY_FUNCTION__))
1301 "Splatted expr doesn't match with vector element type?")((DstType->castAs<ExtVectorType>()->getElementType
().getTypePtr() == SrcType.getTypePtr() && "Splatted expr doesn't match with vector element type?"
) ? static_cast<void> (0) : __assert_fail ("DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() == SrcType.getTypePtr() && \"Splatted expr doesn't match with vector element type?\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1301, __PRETTY_FUNCTION__))
;
1302
1303 // Splat the element across to all elements
1304 unsigned NumElements = DstTy->getVectorNumElements();
1305 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1306 }
1307
1308 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1309 // Allow bitcast from vector to integer/fp of the same size.
1310 unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
1311 unsigned DstSize = DstTy->getPrimitiveSizeInBits();
1312 if (SrcSize == DstSize)
1313 return Builder.CreateBitCast(Src, DstTy, "conv");
1314
1315 // Conversions between vectors of different sizes are not allowed except
1316 // when vectors of half are involved. Operations on storage-only half
1317 // vectors require promoting half vector operands to float vectors and
1318 // truncating the result, which is either an int or float vector, to a
1319 // short or half vector.
1320
1321 // Source and destination are both expected to be vectors.
1322 llvm::Type *SrcElementTy = SrcTy->getVectorElementType();
1323 llvm::Type *DstElementTy = DstTy->getVectorElementType();
1324 (void)DstElementTy;
1325
1326 assert(((SrcElementTy->isIntegerTy() &&((((SrcElementTy->isIntegerTy() && DstElementTy->
isIntegerTy()) || (SrcElementTy->isFloatingPointTy() &&
DstElementTy->isFloatingPointTy())) && "unexpected conversion between a floating-point vector and an "
"integer vector") ? static_cast<void> (0) : __assert_fail
("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1331, __PRETTY_FUNCTION__))
1327 DstElementTy->isIntegerTy()) ||((((SrcElementTy->isIntegerTy() && DstElementTy->
isIntegerTy()) || (SrcElementTy->isFloatingPointTy() &&
DstElementTy->isFloatingPointTy())) && "unexpected conversion between a floating-point vector and an "
"integer vector") ? static_cast<void> (0) : __assert_fail
("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1331, __PRETTY_FUNCTION__))
1328 (SrcElementTy->isFloatingPointTy() &&((((SrcElementTy->isIntegerTy() && DstElementTy->
isIntegerTy()) || (SrcElementTy->isFloatingPointTy() &&
DstElementTy->isFloatingPointTy())) && "unexpected conversion between a floating-point vector and an "
"integer vector") ? static_cast<void> (0) : __assert_fail
("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1331, __PRETTY_FUNCTION__))
1329 DstElementTy->isFloatingPointTy())) &&((((SrcElementTy->isIntegerTy() && DstElementTy->
isIntegerTy()) || (SrcElementTy->isFloatingPointTy() &&
DstElementTy->isFloatingPointTy())) && "unexpected conversion between a floating-point vector and an "
"integer vector") ? static_cast<void> (0) : __assert_fail
("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1331, __PRETTY_FUNCTION__))
1330 "unexpected conversion between a floating-point vector and an "((((SrcElementTy->isIntegerTy() && DstElementTy->
isIntegerTy()) || (SrcElementTy->isFloatingPointTy() &&
DstElementTy->isFloatingPointTy())) && "unexpected conversion between a floating-point vector and an "
"integer vector") ? static_cast<void> (0) : __assert_fail
("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1331, __PRETTY_FUNCTION__))
1331 "integer vector")((((SrcElementTy->isIntegerTy() && DstElementTy->
isIntegerTy()) || (SrcElementTy->isFloatingPointTy() &&
DstElementTy->isFloatingPointTy())) && "unexpected conversion between a floating-point vector and an "
"integer vector") ? static_cast<void> (0) : __assert_fail
("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1331, __PRETTY_FUNCTION__))
;
1332
1333 // Truncate an i32 vector to an i16 vector.
1334 if (SrcElementTy->isIntegerTy())
1335 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1336
1337 // Truncate a float vector to a half vector.
1338 if (SrcSize > DstSize)
1339 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1340
1341 // Promote a half vector to a float vector.
1342 return Builder.CreateFPExt(Src, DstTy, "conv");
1343 }
1344
1345 // Finally, we have the arithmetic types: real int/float.
1346 Value *Res = nullptr;
1347 llvm::Type *ResTy = DstTy;
1348
1349 // An overflowing conversion has undefined behavior if either the source type
1350 // or the destination type is a floating-point type. However, we consider the
1351 // range of representable values for all floating-point types to be
1352 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1353 // floating-point type.
1354 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1355 OrigSrcType->isFloatingType())
1356 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1357 Loc);
1358
1359 // Cast to half through float if half isn't a native type.
1360 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1361 // Make sure we cast in a single step if from another FP type.
1362 if (SrcTy->isFloatingPointTy()) {
1363 // Use the intrinsic if the half type itself isn't supported
1364 // (as opposed to operations on half, available with NativeHalfType).
1365 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1366 return Builder.CreateCall(
1367 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1368 // If the half type is supported, just use an fptrunc.
1369 return Builder.CreateFPTrunc(Src, DstTy);
1370 }
1371 DstTy = CGF.FloatTy;
1372 }
1373
1374 if (isa<llvm::IntegerType>(SrcTy)) {
1375 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1376 if (SrcType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1377 InputSigned = true;
1378 }
1379 if (isa<llvm::IntegerType>(DstTy))
1380 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1381 else if (InputSigned)
1382 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1383 else
1384 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1385 } else if (isa<llvm::IntegerType>(DstTy)) {
1386 assert(SrcTy->isFloatingPointTy() && "Unknown real conversion")((SrcTy->isFloatingPointTy() && "Unknown real conversion"
) ? static_cast<void> (0) : __assert_fail ("SrcTy->isFloatingPointTy() && \"Unknown real conversion\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1386, __PRETTY_FUNCTION__))
;
1387 if (DstType->isSignedIntegerOrEnumerationType())
1388 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1389 else
1390 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1391 } else {
1392 assert(SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() &&((SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy
() && "Unknown real conversion") ? static_cast<void
> (0) : __assert_fail ("SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() && \"Unknown real conversion\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1393, __PRETTY_FUNCTION__))
1393 "Unknown real conversion")((SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy
() && "Unknown real conversion") ? static_cast<void
> (0) : __assert_fail ("SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() && \"Unknown real conversion\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1393, __PRETTY_FUNCTION__))
;
1394 if (DstTy->getTypeID() < SrcTy->getTypeID())
1395 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1396 else
1397 Res = Builder.CreateFPExt(Src, DstTy, "conv");
1398 }
1399
1400 if (DstTy != ResTy) {
1401 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1402 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion")((ResTy->isIntegerTy(16) && "Only half FP requires extra conversion"
) ? static_cast<void> (0) : __assert_fail ("ResTy->isIntegerTy(16) && \"Only half FP requires extra conversion\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1402, __PRETTY_FUNCTION__))
;
1403 Res = Builder.CreateCall(
1404 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1405 Res);
1406 } else {
1407 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1408 }
1409 }
1410
1411 if (Opts.EmitImplicitIntegerTruncationChecks)
1412 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1413 NoncanonicalDstType, Loc);
1414
1415 if (Opts.EmitImplicitIntegerSignChangeChecks)
1416 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1417 NoncanonicalDstType, Loc);
1418
1419 return Res;
1420}
1421
1422Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1423 QualType DstTy,
1424 SourceLocation Loc) {
1425 FixedPointSemantics SrcFPSema =
1426 CGF.getContext().getFixedPointSemantics(SrcTy);
1427 FixedPointSemantics DstFPSema =
1428 CGF.getContext().getFixedPointSemantics(DstTy);
1429 return EmitFixedPointConversion(Src, SrcFPSema, DstFPSema, Loc,
1430 DstTy->isIntegerType());
1431}
1432
1433Value *ScalarExprEmitter::EmitFixedPointConversion(
1434 Value *Src, FixedPointSemantics &SrcFPSema, FixedPointSemantics &DstFPSema,
1435 SourceLocation Loc, bool DstIsInteger) {
1436 using llvm::APInt;
1437 using llvm::ConstantInt;
1438 using llvm::Value;
1439
1440 unsigned SrcWidth = SrcFPSema.getWidth();
1441 unsigned DstWidth = DstFPSema.getWidth();
1442 unsigned SrcScale = SrcFPSema.getScale();
1443 unsigned DstScale = DstFPSema.getScale();
1444 bool SrcIsSigned = SrcFPSema.isSigned();
1445 bool DstIsSigned = DstFPSema.isSigned();
1446
1447 llvm::Type *DstIntTy = Builder.getIntNTy(DstWidth);
1448
1449 Value *Result = Src;
1450 unsigned ResultWidth = SrcWidth;
1451
1452 // Downscale.
1453 if (DstScale < SrcScale) {
1454 // When converting to integers, we round towards zero. For negative numbers,
1455 // right shifting rounds towards negative infinity. In this case, we can
1456 // just round up before shifting.
1457 if (DstIsInteger && SrcIsSigned) {
1458 Value *Zero = llvm::Constant::getNullValue(Result->getType());
1459 Value *IsNegative = Builder.CreateICmpSLT(Result, Zero);
1460 Value *LowBits = ConstantInt::get(
1461 CGF.getLLVMContext(), APInt::getLowBitsSet(ResultWidth, SrcScale));
1462 Value *Rounded = Builder.CreateAdd(Result, LowBits);
1463 Result = Builder.CreateSelect(IsNegative, Rounded, Result);
1464 }
1465
1466 Result = SrcIsSigned
1467 ? Builder.CreateAShr(Result, SrcScale - DstScale, "downscale")
1468 : Builder.CreateLShr(Result, SrcScale - DstScale, "downscale");
1469 }
1470
1471 if (!DstFPSema.isSaturated()) {
1472 // Resize.
1473 Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
1474
1475 // Upscale.
1476 if (DstScale > SrcScale)
1477 Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
1478 } else {
1479 // Adjust the number of fractional bits.
1480 if (DstScale > SrcScale) {
1481 // Compare to DstWidth to prevent resizing twice.
1482 ResultWidth = std::max(SrcWidth + DstScale - SrcScale, DstWidth);
1483 llvm::Type *UpscaledTy = Builder.getIntNTy(ResultWidth);
1484 Result = Builder.CreateIntCast(Result, UpscaledTy, SrcIsSigned, "resize");
1485 Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
1486 }
1487
1488 // Handle saturation.
1489 bool LessIntBits = DstFPSema.getIntegralBits() < SrcFPSema.getIntegralBits();
1490 if (LessIntBits) {
1491 Value *Max = ConstantInt::get(
1492 CGF.getLLVMContext(),
1493 APFixedPoint::getMax(DstFPSema).getValue().extOrTrunc(ResultWidth));
1494 Value *TooHigh = SrcIsSigned ? Builder.CreateICmpSGT(Result, Max)
1495 : Builder.CreateICmpUGT(Result, Max);
1496 Result = Builder.CreateSelect(TooHigh, Max, Result, "satmax");
1497 }
1498 // Cannot overflow min to dest type if src is unsigned since all fixed
1499 // point types can cover the unsigned min of 0.
1500 if (SrcIsSigned && (LessIntBits || !DstIsSigned)) {
1501 Value *Min = ConstantInt::get(
1502 CGF.getLLVMContext(),
1503 APFixedPoint::getMin(DstFPSema).getValue().extOrTrunc(ResultWidth));
1504 Value *TooLow = Builder.CreateICmpSLT(Result, Min);
1505 Result = Builder.CreateSelect(TooLow, Min, Result, "satmin");
1506 }
1507
1508 // Resize the integer part to get the final destination size.
1509 if (ResultWidth != DstWidth)
1510 Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
1511 }
1512 return Result;
1513}
1514
1515/// Emit a conversion from the specified complex type to the specified
1516/// destination type, where the destination type is an LLVM scalar type.
1517Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1518 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1519 SourceLocation Loc) {
1520 // Get the source element type.
1521 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1522
1523 // Handle conversions to bool first, they are special: comparisons against 0.
1524 if (DstTy->isBooleanType()) {
1525 // Complex != 0 -> (Real != 0) | (Imag != 0)
1526 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1527 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1528 return Builder.CreateOr(Src.first, Src.second, "tobool");
1529 }
1530
1531 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1532 // the imaginary part of the complex value is discarded and the value of the
1533 // real part is converted according to the conversion rules for the
1534 // corresponding real type.
1535 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1536}
1537
1538Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1539 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1540}
1541
1542/// Emit a sanitization check for the given "binary" operation (which
1543/// might actually be a unary increment which has been lowered to a binary
1544/// operation). The check passes if all values in \p Checks (which are \c i1),
1545/// are \c true.
1546void ScalarExprEmitter::EmitBinOpCheck(
1547 ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
1548 assert(CGF.IsSanitizerScope)((CGF.IsSanitizerScope) ? static_cast<void> (0) : __assert_fail
("CGF.IsSanitizerScope", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1548, __PRETTY_FUNCTION__))
;
1549 SanitizerHandler Check;
1550 SmallVector<llvm::Constant *, 4> StaticData;
1551 SmallVector<llvm::Value *, 2> DynamicData;
1552
1553 BinaryOperatorKind Opcode = Info.Opcode;
1554 if (BinaryOperator::isCompoundAssignmentOp(Opcode))
1555 Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
1556
1557 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1558 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1559 if (UO && UO->getOpcode() == UO_Minus) {
1560 Check = SanitizerHandler::NegateOverflow;
1561 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1562 DynamicData.push_back(Info.RHS);
1563 } else {
1564 if (BinaryOperator::isShiftOp(Opcode)) {
1565 // Shift LHS negative or too large, or RHS out of bounds.
1566 Check = SanitizerHandler::ShiftOutOfBounds;
1567 const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1568 StaticData.push_back(
1569 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1570 StaticData.push_back(
1571 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1572 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1573 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1574 Check = SanitizerHandler::DivremOverflow;
1575 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1576 } else {
1577 // Arithmetic overflow (+, -, *).
1578 switch (Opcode) {
1579 case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1580 case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1581 case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1582 default: llvm_unreachable("unexpected opcode for bin op check")::llvm::llvm_unreachable_internal("unexpected opcode for bin op check"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1582)
;
1583 }
1584 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1585 }
1586 DynamicData.push_back(Info.LHS);
1587 DynamicData.push_back(Info.RHS);
1588 }
1589
1590 CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
1591}
1592
1593//===----------------------------------------------------------------------===//
1594// Visitor Methods
1595//===----------------------------------------------------------------------===//
1596
1597Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1598 CGF.ErrorUnsupported(E, "scalar expression");
1599 if (E->getType()->isVoidType())
1600 return nullptr;
1601 return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
1602}
1603
1604Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1605 // Vector Mask Case
1606 if (E->getNumSubExprs() == 2) {
1607 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1608 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1609 Value *Mask;
1610
1611 llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
1612 unsigned LHSElts = LTy->getNumElements();
1613
1614 Mask = RHS;
1615
1616 llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
1617
1618 // Mask off the high bits of each shuffle index.
1619 Value *MaskBits =
1620 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1621 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1622
1623 // newv = undef
1624 // mask = mask & maskbits
1625 // for each elt
1626 // n = extract mask i
1627 // x = extract val n
1628 // newv = insert newv, x, i
1629 llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
1630 MTy->getNumElements());
1631 Value* NewV = llvm::UndefValue::get(RTy);
1632 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1633 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1634 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1635
1636 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1637 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1638 }
1639 return NewV;
1640 }
1641
1642 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1643 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1644
1645 SmallVector<llvm::Constant*, 32> indices;
1646 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1647 llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
1648 // Check for -1 and output it as undef in the IR.
1649 if (Idx.isSigned() && Idx.isAllOnesValue())
1650 indices.push_back(llvm::UndefValue::get(CGF.Int32Ty));
1651 else
1652 indices.push_back(Builder.getInt32(Idx.getZExtValue()));
1653 }
1654
1655 Value *SV = llvm::ConstantVector::get(indices);
1656 return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
1657}
1658
1659Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1660 QualType SrcType = E->getSrcExpr()->getType(),
1661 DstType = E->getType();
1662
1663 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1664
1665 SrcType = CGF.getContext().getCanonicalType(SrcType);
1666 DstType = CGF.getContext().getCanonicalType(DstType);
1667 if (SrcType == DstType) return Src;
1668
1669 assert(SrcType->isVectorType() &&((SrcType->isVectorType() && "ConvertVector source type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("SrcType->isVectorType() && \"ConvertVector source type must be a vector\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1670, __PRETTY_FUNCTION__))
1670 "ConvertVector source type must be a vector")((SrcType->isVectorType() && "ConvertVector source type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("SrcType->isVectorType() && \"ConvertVector source type must be a vector\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1670, __PRETTY_FUNCTION__))
;
1671 assert(DstType->isVectorType() &&((DstType->isVectorType() && "ConvertVector destination type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("DstType->isVectorType() && \"ConvertVector destination type must be a vector\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1672, __PRETTY_FUNCTION__))
1672 "ConvertVector destination type must be a vector")((DstType->isVectorType() && "ConvertVector destination type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("DstType->isVectorType() && \"ConvertVector destination type must be a vector\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1672, __PRETTY_FUNCTION__))
;
1673
1674 llvm::Type *SrcTy = Src->getType();
1675 llvm::Type *DstTy = ConvertType(DstType);
1676
1677 // Ignore conversions like int -> uint.
1678 if (SrcTy == DstTy)
1679 return Src;
1680
1681 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1682 DstEltType = DstType->castAs<VectorType>()->getElementType();
1683
1684 assert(SrcTy->isVectorTy() &&((SrcTy->isVectorTy() && "ConvertVector source IR type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("SrcTy->isVectorTy() && \"ConvertVector source IR type must be a vector\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1685, __PRETTY_FUNCTION__))
1685 "ConvertVector source IR type must be a vector")((SrcTy->isVectorTy() && "ConvertVector source IR type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("SrcTy->isVectorTy() && \"ConvertVector source IR type must be a vector\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1685, __PRETTY_FUNCTION__))
;
1686 assert(DstTy->isVectorTy() &&((DstTy->isVectorTy() && "ConvertVector destination IR type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("DstTy->isVectorTy() && \"ConvertVector destination IR type must be a vector\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1687, __PRETTY_FUNCTION__))
1687 "ConvertVector destination IR type must be a vector")((DstTy->isVectorTy() && "ConvertVector destination IR type must be a vector"
) ? static_cast<void> (0) : __assert_fail ("DstTy->isVectorTy() && \"ConvertVector destination IR type must be a vector\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1687, __PRETTY_FUNCTION__))
;
1688
1689 llvm::Type *SrcEltTy = SrcTy->getVectorElementType(),
1690 *DstEltTy = DstTy->getVectorElementType();
1691
1692 if (DstEltType->isBooleanType()) {
1693 assert((SrcEltTy->isFloatingPointTy() ||(((SrcEltTy->isFloatingPointTy() || isa<llvm::IntegerType
>(SrcEltTy)) && "Unknown boolean conversion") ? static_cast
<void> (0) : __assert_fail ("(SrcEltTy->isFloatingPointTy() || isa<llvm::IntegerType>(SrcEltTy)) && \"Unknown boolean conversion\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1694, __PRETTY_FUNCTION__))
1694 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion")(((SrcEltTy->isFloatingPointTy() || isa<llvm::IntegerType
>(SrcEltTy)) && "Unknown boolean conversion") ? static_cast
<void> (0) : __assert_fail ("(SrcEltTy->isFloatingPointTy() || isa<llvm::IntegerType>(SrcEltTy)) && \"Unknown boolean conversion\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1694, __PRETTY_FUNCTION__))
;
1695
1696 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
1697 if (SrcEltTy->isFloatingPointTy()) {
1698 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
1699 } else {
1700 return Builder.CreateICmpNE(Src, Zero, "tobool");
1701 }
1702 }
1703
1704 // We have the arithmetic types: real int/float.
1705 Value *Res = nullptr;
1706
1707 if (isa<llvm::IntegerType>(SrcEltTy)) {
1708 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1709 if (isa<llvm::IntegerType>(DstEltTy))
1710 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1711 else if (InputSigned)
1712 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1713 else
1714 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1715 } else if (isa<llvm::IntegerType>(DstEltTy)) {
1716 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion")((SrcEltTy->isFloatingPointTy() && "Unknown real conversion"
) ? static_cast<void> (0) : __assert_fail ("SrcEltTy->isFloatingPointTy() && \"Unknown real conversion\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1716, __PRETTY_FUNCTION__))
;
1717 if (DstEltType->isSignedIntegerOrEnumerationType())
1718 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1719 else
1720 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1721 } else {
1722 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&((SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy
() && "Unknown real conversion") ? static_cast<void
> (0) : __assert_fail ("SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() && \"Unknown real conversion\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1723, __PRETTY_FUNCTION__))
1723 "Unknown real conversion")((SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy
() && "Unknown real conversion") ? static_cast<void
> (0) : __assert_fail ("SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() && \"Unknown real conversion\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1723, __PRETTY_FUNCTION__))
;
1724 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1725 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1726 else
1727 Res = Builder.CreateFPExt(Src, DstTy, "conv");
1728 }
1729
1730 return Res;
1731}
1732
1733Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1734 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
1735 CGF.EmitIgnoredExpr(E->getBase());
1736 return CGF.emitScalarConstant(Constant, E);
1737 } else {
1738 Expr::EvalResult Result;
1739 if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1740 llvm::APSInt Value = Result.Val.getInt();
1741 CGF.EmitIgnoredExpr(E->getBase());
1742 return Builder.getInt(Value);
1743 }
1744 }
1745
1746 return EmitLoadOfLValue(E);
1747}
1748
1749Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1750 TestAndClearIgnoreResultAssign();
1751
1752 // Emit subscript expressions in rvalue context's. For most cases, this just
1753 // loads the lvalue formed by the subscript expr. However, we have to be
1754 // careful, because the base of a vector subscript is occasionally an rvalue,
1755 // so we can't get it as an lvalue.
1756 if (!E->getBase()->getType()->isVectorType())
1757 return EmitLoadOfLValue(E);
1758
1759 // Handle the vector case. The base must be a vector, the index must be an
1760 // integer value.
1761 Value *Base = Visit(E->getBase());
1762 Value *Idx = Visit(E->getIdx());
1763 QualType IdxTy = E->getIdx()->getType();
1764
1765 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
1766 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1767
1768 return Builder.CreateExtractElement(Base, Idx, "vecext");
1769}
1770
1771static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1772 unsigned Off, llvm::Type *I32Ty) {
1773 int MV = SVI->getMaskValue(Idx);
1774 if (MV == -1)
1775 return llvm::UndefValue::get(I32Ty);
1776 return llvm::ConstantInt::get(I32Ty, Off+MV);
1777}
1778
1779static llvm::Constant *getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1780 if (C->getBitWidth() != 32) {
1781 assert(llvm::ConstantInt::isValueValidForType(I32Ty,((llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue
()) && "Index operand too large for shufflevector mask!"
) ? static_cast<void> (0) : __assert_fail ("llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) && \"Index operand too large for shufflevector mask!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1783, __PRETTY_FUNCTION__))
1782 C->getZExtValue()) &&((llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue
()) && "Index operand too large for shufflevector mask!"
) ? static_cast<void> (0) : __assert_fail ("llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) && \"Index operand too large for shufflevector mask!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1783, __PRETTY_FUNCTION__))
1783 "Index operand too large for shufflevector mask!")((llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue
()) && "Index operand too large for shufflevector mask!"
) ? static_cast<void> (0) : __assert_fail ("llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) && \"Index operand too large for shufflevector mask!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1783, __PRETTY_FUNCTION__))
;
1784 return llvm::ConstantInt::get(I32Ty, C->getZExtValue());
1785 }
1786 return C;
1787}
1788
/// Emit a scalar (or vector) value for an initializer list.
///
/// For non-vector types this either value-initializes (empty list) or emits
/// the single brace-enclosed scalar.  For vector types it builds the result
/// incrementally, preferring shufflevector over extract/insert pairs so that
/// swizzle sources (ExtVectorElementExpr) fold into a single shuffle.
Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  (void)Ignore;
  ((Ignore == false && "init list ignored") ? static_cast<void> (0) : __assert_fail ("Ignore == false && \"init list ignored\"", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp", 1792, __PRETTY_FUNCTION__));
  unsigned NumInitElements = E->getNumInits();

  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  // Null when the destination type is not a vector; handled as a scalar below.
  llvm::VectorType *VType =
    dyn_cast<llvm::VectorType>(ConvertType(E->getType()));

  if (!VType) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the scalar.
      return EmitNullValue(E->getType());
    }
    // We have a scalar in braces. Just use the first element.
    return Visit(E->getInit(0));
  }

  unsigned ResElts = VType->getNumElements();

  // Loop over initializers collecting the Value for each, and remembering
  // whether the source was swizzle (ExtVectorElementExpr). This will allow
  // us to fold the shuffle for the swizzle into the shuffle for the vector
  // initializer, since LLVM optimizers generally do not want to touch
  // shuffles.
  unsigned CurIdx = 0;                 // next destination lane to fill
  bool VIsUndefShuffle = false;        // true iff V is a shuffle-of-undef we can merge into
  llvm::Value *V = llvm::UndefValue::get(VType);
  for (unsigned i = 0; i != NumInitElements; ++i) {
    Expr *IE = E->getInit(i);
    Value *Init = Visit(IE);
    // Accumulates the i32 shuffle mask for this step (empty => use insertelement).
    SmallVector<llvm::Constant*, 16> Args;

    llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());

    // Handle scalar elements.  If the scalar initializer is actually one
    // element of a different vector of the same width, use shuffle instead of
    // extract+insert.
    if (!VVT) {
      if (isa<ExtVectorElementExpr>(IE)) {
        llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);

        if (EI->getVectorOperandType()->getNumElements() == ResElts) {
          llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
          Value *LHS = nullptr, *RHS = nullptr;
          if (CurIdx == 0) {
            // insert into undef -> shuffle (src, undef)
            // shufflemask must use an i32
            Args.push_back(getAsInt32(C, CGF.Int32Ty));
            Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));

            LHS = EI->getVectorOperand();
            RHS = V;
            VIsUndefShuffle = true;
          } else if (VIsUndefShuffle) {
            // insert into undefshuffle && size match -> shuffle (v, src)
            llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
            for (unsigned j = 0; j != CurIdx; ++j)
              Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
            // Lane comes from the second shuffle operand, hence the +ResElts.
            Args.push_back(Builder.getInt32(ResElts + C->getZExtValue()));
            Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));

            LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
            RHS = EI->getVectorOperand();
            VIsUndefShuffle = false;
          }
          if (!Args.empty()) {
            llvm::Constant *Mask = llvm::ConstantVector::get(Args);
            V = Builder.CreateShuffleVector(LHS, RHS, Mask);
            ++CurIdx;
            continue;
          }
        }
      }
      // Fallback for plain scalars: a single insertelement.
      V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
                                      "vecinit");
      VIsUndefShuffle = false;
      ++CurIdx;
      continue;
    }

    unsigned InitElts = VVT->getNumElements();

    // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
    // input is the same width as the vector being constructed, generate an
    // optimized shuffle of the swizzle input into the result.
    unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
    if (isa<ExtVectorElementExpr>(IE)) {
      llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
      Value *SVOp = SVI->getOperand(0);
      llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());

      if (OpTy->getNumElements() == ResElts) {
        for (unsigned j = 0; j != CurIdx; ++j) {
          // If the current vector initializer is a shuffle with undef, merge
          // this shuffle directly into it.
          if (VIsUndefShuffle) {
            Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
                                      CGF.Int32Ty));
          } else {
            Args.push_back(Builder.getInt32(j));
          }
        }
        for (unsigned j = 0, je = InitElts; j != je; ++j)
          Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
        Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));

        if (VIsUndefShuffle)
          V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);

        // Shuffle straight from the swizzle's source vector.
        Init = SVOp;
      }
    }

    // Extend init to result vector length, and then shuffle its contribution
    // to the vector initializer into V.
    if (Args.empty()) {
      // First widen Init to ResElts lanes (mask past InitElts is undef)...
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(Builder.getInt32(j));
      Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
      llvm::Constant *Mask = llvm::ConstantVector::get(Args);
      Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
                                         Mask, "vext");

      // ...then build the mask that keeps V's lanes and appends Init's.
      Args.clear();
      for (unsigned j = 0; j != CurIdx; ++j)
        Args.push_back(Builder.getInt32(j));
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(Builder.getInt32(j+Offset));
      Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
    }

    // If V is undef, make sure it ends up on the RHS of the shuffle to aid
    // merging subsequent shuffles into this one.
    if (CurIdx == 0)
      std::swap(V, Init);
    llvm::Constant *Mask = llvm::ConstantVector::get(Args);
    V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
    VIsUndefShuffle = isa<llvm::UndefValue>(Init);
    CurIdx += InitElts;
  }

  // FIXME: evaluate codegen vs. shuffling against constant null vector.
  // Emit remaining default initializers.
  llvm::Type *EltTy = VType->getElementType();

  // Emit remaining default initializers (zero-fill any lanes the list left
  // uncovered).
  for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
    Value *Idx = Builder.getInt32(CurIdx);
    llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
    V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
  }
  return V;
}
1946
1947bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
1948 const Expr *E = CE->getSubExpr();
1949
1950 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
1951 return false;
1952
1953 if (isa<CXXThisExpr>(E->IgnoreParens())) {
1954 // We always assume that 'this' is never null.
1955 return false;
1956 }
1957
1958 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
1959 // And that glvalue casts are never null.
1960 if (ICE->getValueKind() != VK_RValue)
1961 return false;
1962 }
1963
1964 return true;
1965}
1966
1967// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
1968// have to handle a more broad range of conversions than explicit casts, as they
1969// handle things like function to ptr-to-function decay etc.
1970Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
1971 Expr *E = CE->getSubExpr();
1972 QualType DestTy = CE->getType();
1973 CastKind Kind = CE->getCastKind();
1974
1975 // These cases are generally not written to ignore the result of
1976 // evaluating their sub-expressions, so we clear this now.
1977 bool Ignored = TestAndClearIgnoreResultAssign();
1978
1979 // Since almost all cast kinds apply to scalars, this switch doesn't have
1980 // a default case, so the compiler will warn on a missing case. The cases
1981 // are in the same order as in the CastKind enum.
1982 switch (Kind) {
1983 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!")::llvm::llvm_unreachable_internal("dependent cast kind in IR gen!"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1983)
;
1984 case CK_BuiltinFnToFnPtr:
1985 llvm_unreachable("builtin functions are handled elsewhere")::llvm::llvm_unreachable_internal("builtin functions are handled elsewhere"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 1985)
;
1986
1987 case CK_LValueBitCast:
1988 case CK_ObjCObjectLValueCast: {
1989 Address Addr = EmitLValue(E).getAddress(CGF);
1990 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
1991 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
1992 return EmitLoadOfLValue(LV, CE->getExprLoc());
1993 }
1994
1995 case CK_LValueToRValueBitCast: {
1996 LValue SourceLVal = CGF.EmitLValue(E);
1997 Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
1998 CGF.ConvertTypeForMem(DestTy));
1999 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2000 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2001 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2002 }
2003
2004 case CK_CPointerToObjCPointerCast:
2005 case CK_BlockPointerToObjCPointerCast:
2006 case CK_AnyPointerToBlockPointerCast:
2007 case CK_BitCast: {
2008 Value *Src = Visit(const_cast<Expr*>(E));
2009 llvm::Type *SrcTy = Src->getType();
2010 llvm::Type *DstTy = ConvertType(DestTy);
2011 if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
2012 SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
2013 llvm_unreachable("wrong cast for pointers in different address spaces"::llvm::llvm_unreachable_internal("wrong cast for pointers in different address spaces"
"(must be an address space cast)!", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2014)
2014 "(must be an address space cast)!")::llvm::llvm_unreachable_internal("wrong cast for pointers in different address spaces"
"(must be an address space cast)!", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2014)
;
2015 }
2016
2017 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2018 if (auto PT = DestTy->getAs<PointerType>())
2019 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
2020 /*MayBeNull=*/true,
2021 CodeGenFunction::CFITCK_UnrelatedCast,
2022 CE->getBeginLoc());
2023 }
2024
2025 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2026 const QualType SrcType = E->getType();
2027
2028 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2029 // Casting to pointer that could carry dynamic information (provided by
2030 // invariant.group) requires launder.
2031 Src = Builder.CreateLaunderInvariantGroup(Src);
2032 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2033 // Casting to pointer that does not carry dynamic information (provided
2034 // by invariant.group) requires stripping it. Note that we don't do it
2035 // if the source could not be dynamic type and destination could be
2036 // dynamic because dynamic information is already laundered. It is
2037 // because launder(strip(src)) == launder(src), so there is no need to
2038 // add extra strip before launder.
2039 Src = Builder.CreateStripInvariantGroup(Src);
2040 }
2041 }
2042
2043 // Update heapallocsite metadata when there is an explicit cast.
2044 if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(Src))
2045 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE))
2046 CGF.getDebugInfo()->
2047 addHeapAllocSiteMetadata(CI, CE->getType(), CE->getExprLoc());
2048
2049 return Builder.CreateBitCast(Src, DstTy);
2050 }
2051 case CK_AddressSpaceConversion: {
2052 Expr::EvalResult Result;
2053 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2054 Result.Val.isNullPointer()) {
2055 // If E has side effect, it is emitted even if its final result is a
2056 // null pointer. In that case, a DCE pass should be able to
2057 // eliminate the useless instructions emitted during translating E.
2058 if (Result.HasSideEffects)
2059 Visit(E);
2060 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
2061 ConvertType(DestTy)), DestTy);
2062 }
2063 // Since target may map different address spaces in AST to the same address
2064 // space, an address space conversion may end up as a bitcast.
2065 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2066 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2067 DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
2068 }
2069 case CK_AtomicToNonAtomic:
2070 case CK_NonAtomicToAtomic:
2071 case CK_NoOp:
2072 case CK_UserDefinedConversion:
2073 return Visit(const_cast<Expr*>(E));
2074
2075 case CK_BaseToDerived: {
2076 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2077 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!")((DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!"
) ? static_cast<void> (0) : __assert_fail ("DerivedClassDecl && \"BaseToDerived arg isn't a C++ object pointer!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2077, __PRETTY_FUNCTION__))
;
2078
2079 Address Base = CGF.EmitPointerWithAlignment(E);
2080 Address Derived =
2081 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2082 CE->path_begin(), CE->path_end(),
2083 CGF.ShouldNullCheckClassCastValue(CE));
2084
2085 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2086 // performed and the object is not of the derived type.
2087 if (CGF.sanitizePerformTypeCheck())
2088 CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
2089 Derived.getPointer(), DestTy->getPointeeType());
2090
2091 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2092 CGF.EmitVTablePtrCheckForCast(
2093 DestTy->getPointeeType(), Derived.getPointer(),
2094 /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast,
2095 CE->getBeginLoc());
2096
2097 return Derived.getPointer();
2098 }
2099 case CK_UncheckedDerivedToBase:
2100 case CK_DerivedToBase: {
2101 // The EmitPointerWithAlignment path does this fine; just discard
2102 // the alignment.
2103 return CGF.EmitPointerWithAlignment(CE).getPointer();
2104 }
2105
2106 case CK_Dynamic: {
2107 Address V = CGF.EmitPointerWithAlignment(E);
2108 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2109 return CGF.EmitDynamicCast(V, DCE);
2110 }
2111
2112 case CK_ArrayToPointerDecay:
2113 return CGF.EmitArrayToPointerDecay(E).getPointer();
2114 case CK_FunctionToPointerDecay:
2115 return EmitLValue(E).getPointer(CGF);
2116
2117 case CK_NullToPointer:
2118 if (MustVisitNullValue(E))
2119 CGF.EmitIgnoredExpr(E);
2120
2121 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2122 DestTy);
2123
2124 case CK_NullToMemberPointer: {
2125 if (MustVisitNullValue(E))
2126 CGF.EmitIgnoredExpr(E);
2127
2128 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2129 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2130 }
2131
2132 case CK_ReinterpretMemberPointer:
2133 case CK_BaseToDerivedMemberPointer:
2134 case CK_DerivedToBaseMemberPointer: {
2135 Value *Src = Visit(E);
2136
2137 // Note that the AST doesn't distinguish between checked and
2138 // unchecked member pointer conversions, so we always have to
2139 // implement checked conversions here. This is inefficient when
2140 // actual control flow may be required in order to perform the
2141 // check, which it is for data member pointers (but not member
2142 // function pointers on Itanium and ARM).
2143 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2144 }
2145
2146 case CK_ARCProduceObject:
2147 return CGF.EmitARCRetainScalarExpr(E);
2148 case CK_ARCConsumeObject:
2149 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2150 case CK_ARCReclaimReturnedObject:
2151 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2152 case CK_ARCExtendBlockObject:
2153 return CGF.EmitARCExtendBlockObject(E);
2154
2155 case CK_CopyAndAutoreleaseBlockObject:
2156 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2157
2158 case CK_FloatingRealToComplex:
2159 case CK_FloatingComplexCast:
2160 case CK_IntegralRealToComplex:
2161 case CK_IntegralComplexCast:
2162 case CK_IntegralComplexToFloatingComplex:
2163 case CK_FloatingComplexToIntegralComplex:
2164 case CK_ConstructorConversion:
2165 case CK_ToUnion:
2166 llvm_unreachable("scalar cast to non-scalar value")::llvm::llvm_unreachable_internal("scalar cast to non-scalar value"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2166)
;
2167
2168 case CK_LValueToRValue:
2169 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy))((CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy
)) ? static_cast<void> (0) : __assert_fail ("CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy)"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2169, __PRETTY_FUNCTION__))
;
2170 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!")((E->isGLValue() && "lvalue-to-rvalue applied to r-value!"
) ? static_cast<void> (0) : __assert_fail ("E->isGLValue() && \"lvalue-to-rvalue applied to r-value!\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2170, __PRETTY_FUNCTION__))
;
2171 return Visit(const_cast<Expr*>(E));
2172
2173 case CK_IntegralToPointer: {
2174 Value *Src = Visit(const_cast<Expr*>(E));
2175
2176 // First, convert to the correct width so that we control the kind of
2177 // extension.
2178 auto DestLLVMTy = ConvertType(DestTy);
2179 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2180 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2181 llvm::Value* IntResult =
2182 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2183
2184 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2185
2186 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2187 // Going from integer to pointer that could be dynamic requires reloading
2188 // dynamic information from invariant.group.
2189 if (DestTy.mayBeDynamicClass())
2190 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2191 }
2192 return IntToPtr;
2193 }
2194 case CK_PointerToIntegral: {
2195 assert(!DestTy->isBooleanType() && "bool should use PointerToBool")((!DestTy->isBooleanType() && "bool should use PointerToBool"
) ? static_cast<void> (0) : __assert_fail ("!DestTy->isBooleanType() && \"bool should use PointerToBool\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2195, __PRETTY_FUNCTION__))
;
2196 auto *PtrExpr = Visit(E);
2197
2198 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2199 const QualType SrcType = E->getType();
2200
2201 // Casting to integer requires stripping dynamic information as it does
2202 // not carries it.
2203 if (SrcType.mayBeDynamicClass())
2204 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2205 }
2206
2207 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2208 }
2209 case CK_ToVoid: {
2210 CGF.EmitIgnoredExpr(E);
2211 return nullptr;
2212 }
2213 case CK_VectorSplat: {
2214 llvm::Type *DstTy = ConvertType(DestTy);
2215 Value *Elt = Visit(const_cast<Expr*>(E));
2216 // Splat the element across to all elements
2217 unsigned NumElements = DstTy->getVectorNumElements();
2218 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2219 }
2220
2221 case CK_FixedPointCast:
2222 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2223 CE->getExprLoc());
2224
2225 case CK_FixedPointToBoolean:
2226 assert(E->getType()->isFixedPointType() &&((E->getType()->isFixedPointType() && "Expected src type to be fixed point type"
) ? static_cast<void> (0) : __assert_fail ("E->getType()->isFixedPointType() && \"Expected src type to be fixed point type\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2227, __PRETTY_FUNCTION__))
2227 "Expected src type to be fixed point type")((E->getType()->isFixedPointType() && "Expected src type to be fixed point type"
) ? static_cast<void> (0) : __assert_fail ("E->getType()->isFixedPointType() && \"Expected src type to be fixed point type\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2227, __PRETTY_FUNCTION__))
;
2228 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type")((DestTy->isBooleanType() && "Expected dest type to be boolean type"
) ? static_cast<void> (0) : __assert_fail ("DestTy->isBooleanType() && \"Expected dest type to be boolean type\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2228, __PRETTY_FUNCTION__))
;
2229 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2230 CE->getExprLoc());
2231
2232 case CK_FixedPointToIntegral:
2233 assert(E->getType()->isFixedPointType() &&((E->getType()->isFixedPointType() && "Expected src type to be fixed point type"
) ? static_cast<void> (0) : __assert_fail ("E->getType()->isFixedPointType() && \"Expected src type to be fixed point type\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2234, __PRETTY_FUNCTION__))
2234 "Expected src type to be fixed point type")((E->getType()->isFixedPointType() && "Expected src type to be fixed point type"
) ? static_cast<void> (0) : __assert_fail ("E->getType()->isFixedPointType() && \"Expected src type to be fixed point type\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2234, __PRETTY_FUNCTION__))
;
2235 assert(DestTy->isIntegerType() && "Expected dest type to be an integer")((DestTy->isIntegerType() && "Expected dest type to be an integer"
) ? static_cast<void> (0) : __assert_fail ("DestTy->isIntegerType() && \"Expected dest type to be an integer\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2235, __PRETTY_FUNCTION__))
;
2236 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2237 CE->getExprLoc());
2238
2239 case CK_IntegralToFixedPoint:
2240 assert(E->getType()->isIntegerType() &&((E->getType()->isIntegerType() && "Expected src type to be an integer"
) ? static_cast<void> (0) : __assert_fail ("E->getType()->isIntegerType() && \"Expected src type to be an integer\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2241, __PRETTY_FUNCTION__))
2241 "Expected src type to be an integer")((E->getType()->isIntegerType() && "Expected src type to be an integer"
) ? static_cast<void> (0) : __assert_fail ("E->getType()->isIntegerType() && \"Expected src type to be an integer\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2241, __PRETTY_FUNCTION__))
;
2242 assert(DestTy->isFixedPointType() &&((DestTy->isFixedPointType() && "Expected dest type to be fixed point type"
) ? static_cast<void> (0) : __assert_fail ("DestTy->isFixedPointType() && \"Expected dest type to be fixed point type\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2243, __PRETTY_FUNCTION__))
2243 "Expected dest type to be fixed point type")((DestTy->isFixedPointType() && "Expected dest type to be fixed point type"
) ? static_cast<void> (0) : __assert_fail ("DestTy->isFixedPointType() && \"Expected dest type to be fixed point type\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2243, __PRETTY_FUNCTION__))
;
2244 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2245 CE->getExprLoc());
2246
2247 case CK_IntegralCast: {
2248 ScalarConversionOpts Opts;
2249 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2250 if (!ICE->isPartOfExplicitCast())
2251 Opts = ScalarConversionOpts(CGF.SanOpts);
2252 }
2253 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2254 CE->getExprLoc(), Opts);
2255 }
2256 case CK_IntegralToFloating:
2257 case CK_FloatingToIntegral:
2258 case CK_FloatingCast:
2259 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2260 CE->getExprLoc());
2261 case CK_BooleanToSignedIntegral: {
2262 ScalarConversionOpts Opts;
2263 Opts.TreatBooleanAsSigned = true;
2264 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2265 CE->getExprLoc(), Opts);
2266 }
2267 case CK_IntegralToBoolean:
2268 return EmitIntToBoolConversion(Visit(E));
2269 case CK_PointerToBoolean:
2270 return EmitPointerToBoolConversion(Visit(E), E->getType());
2271 case CK_FloatingToBoolean:
2272 return EmitFloatToBoolConversion(Visit(E));
2273 case CK_MemberPointerToBoolean: {
2274 llvm::Value *MemPtr = Visit(E);
2275 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2276 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2277 }
2278
2279 case CK_FloatingComplexToReal:
2280 case CK_IntegralComplexToReal:
2281 return CGF.EmitComplexExpr(E, false, true).first;
2282
2283 case CK_FloatingComplexToBoolean:
2284 case CK_IntegralComplexToBoolean: {
2285 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2286
2287 // TODO: kill this function off, inline appropriate case here
2288 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2289 CE->getExprLoc());
2290 }
2291
2292 case CK_ZeroToOCLOpaqueType: {
2293 assert((DestTy->isEventT() || DestTy->isQueueT() ||(((DestTy->isEventT() || DestTy->isQueueT() || DestTy->
isOCLIntelSubgroupAVCType()) && "CK_ZeroToOCLEvent cast on non-event type"
) ? static_cast<void> (0) : __assert_fail ("(DestTy->isEventT() || DestTy->isQueueT() || DestTy->isOCLIntelSubgroupAVCType()) && \"CK_ZeroToOCLEvent cast on non-event type\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2295, __PRETTY_FUNCTION__))
2294 DestTy->isOCLIntelSubgroupAVCType()) &&(((DestTy->isEventT() || DestTy->isQueueT() || DestTy->
isOCLIntelSubgroupAVCType()) && "CK_ZeroToOCLEvent cast on non-event type"
) ? static_cast<void> (0) : __assert_fail ("(DestTy->isEventT() || DestTy->isQueueT() || DestTy->isOCLIntelSubgroupAVCType()) && \"CK_ZeroToOCLEvent cast on non-event type\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2295, __PRETTY_FUNCTION__))
2295 "CK_ZeroToOCLEvent cast on non-event type")(((DestTy->isEventT() || DestTy->isQueueT() || DestTy->
isOCLIntelSubgroupAVCType()) && "CK_ZeroToOCLEvent cast on non-event type"
) ? static_cast<void> (0) : __assert_fail ("(DestTy->isEventT() || DestTy->isQueueT() || DestTy->isOCLIntelSubgroupAVCType()) && \"CK_ZeroToOCLEvent cast on non-event type\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2295, __PRETTY_FUNCTION__))
;
2296 return llvm::Constant::getNullValue(ConvertType(DestTy));
2297 }
2298
2299 case CK_IntToOCLSampler:
2300 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2301
2302 } // end of switch
2303
2304 llvm_unreachable("unknown scalar cast")::llvm::llvm_unreachable_internal("unknown scalar cast", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2304)
;
2305}
2306
2307Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2308 CodeGenFunction::StmtExprEvaluation eval(CGF);
2309 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2310 !E->getType()->isVoidType());
2311 if (!RetAlloca.isValid())
2312 return nullptr;
2313 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2314 E->getExprLoc());
2315}
2316
2317Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2318 CGF.enterFullExpression(E);
2319 CodeGenFunction::RunCleanupsScope Scope(CGF);
2320 Value *V = Visit(E->getSubExpr());
2321 // Defend against dominance problems caused by jumps out of expression
2322 // evaluation through the shared cleanup block.
2323 Scope.ForceCleanup({&V});
2324 return V;
2325}
2326
2327//===----------------------------------------------------------------------===//
2328// Unary Operators
2329//===----------------------------------------------------------------------===//
2330
2331static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2332 llvm::Value *InVal, bool IsInc) {
2333 BinOpInfo BinOp;
2334 BinOp.LHS = InVal;
2335 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2336 BinOp.Ty = E->getType();
2337 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2338 // FIXME: once UnaryOperator carries FPFeatures, copy it here.
2339 BinOp.E = E;
2340 return BinOp;
2341}
2342
/// Emit an increment/decrement of \p InVal, choosing the IR form according to
/// the language's signed-overflow mode:
///  - SOB_Defined:   plain wrapping add.
///  - SOB_Undefined: nsw add, unless the signed-overflow sanitizer is on,
///                   in which case fall through to the checked path.
///  - SOB_Trapping:  overflow-checked binop, unless the operation provably
///                   cannot overflow.
llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
    const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
  // +1 or -1 of the operand's type; decrement is modelled as adding -1.
  llvm::Value *Amount =
      llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
  StringRef Name = IsInc ? "inc" : "dec";
  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
  case LangOptions::SOB_Defined:
    return Builder.CreateAdd(InVal, Amount, Name);
  case LangOptions::SOB_Undefined:
    if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
      return Builder.CreateNSWAdd(InVal, Amount, Name);
    // Sanitizer enabled: deliberately fall through to the checked emission.
    [[gnu::fallthrough]];
  case LangOptions::SOB_Trapping:
    if (!E->canOverflow())
      return Builder.CreateNSWAdd(InVal, Amount, Name);
    return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, InVal, IsInc));
  }
  ::llvm::llvm_unreachable_internal("Unknown SignedOverflowBehaviorTy", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp", 2360);
}
2362
namespace {
/// Handles check and update for lastprivate conditional variables.
///
/// RAII guard: construct it before emitting an increment/decrement; on scope
/// exit (i.e. after the update has been emitted) it notifies the OpenMP
/// runtime so a lastprivate-conditional variable's tracked value is refreshed.
/// It is a no-op when OpenMP is not enabled.
class OMPLastprivateConditionalUpdateRAII {
private:
  CodeGenFunction &CGF;   // function being emitted
  const UnaryOperator *E; // the ++/-- expression whose operand may be tracked

public:
  OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
                                      const UnaryOperator *E)
      : CGF(CGF), E(E) {}
  ~OMPLastprivateConditionalUpdateRAII() {
    // Run after the enclosing emission completes, so the runtime sees the
    // post-update value of the sub-expression.
    if (CGF.getLangOpts().OpenMP)
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
          CGF, E->getSubExpr());
  }
};
} // namespace
2381
2382llvm::Value *
2383ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2384 bool isInc, bool isPre) {
2385 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
2386 QualType type = E->getSubExpr()->getType();
2387 llvm::PHINode *atomicPHI = nullptr;
2388 llvm::Value *value;
2389 llvm::Value *input;
2390
2391 int amount = (isInc ? 1 : -1);
2392 bool isSubtraction = !isInc;
2393
2394 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2395 type = atomicTy->getValueType();
2396 if (isInc && type->isBooleanType()) {
2397 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
2398 if (isPre) {
2399 Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified())
2400 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
2401 return Builder.getTrue();
2402 }
2403 // For atomic bool increment, we just store true and return it for
2404 // preincrement, do an atomic swap with true for postincrement
2405 return Builder.CreateAtomicRMW(
2406 llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True,
2407 llvm::AtomicOrdering::SequentiallyConsistent);
2408 }
2409 // Special case for atomic increment / decrement on integers, emit
2410 // atomicrmw instructions. We skip this if we want to be doing overflow
2411 // checking, and fall into the slow path with the atomic cmpxchg loop.
2412 if (!type->isBooleanType() && type->isIntegerType() &&
2413 !(type->isUnsignedIntegerType() &&
2414 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2415 CGF.getLangOpts().getSignedOverflowBehavior() !=
2416 LangOptions::SOB_Trapping) {
2417 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2418 llvm::AtomicRMWInst::Sub;
2419 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2420 llvm::Instruction::Sub;
2421 llvm::Value *amt = CGF.EmitToMemory(
2422 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
2423 llvm::Value *old =
2424 Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt,
2425 llvm::AtomicOrdering::SequentiallyConsistent);
2426 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
2427 }
2428 value = EmitLoadOfLValue(LV, E->getExprLoc());
2429 input = value;
2430 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2431 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2432 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2433 value = CGF.EmitToMemory(value, type);
2434 Builder.CreateBr(opBB);
2435 Builder.SetInsertPoint(opBB);
2436 atomicPHI = Builder.CreatePHI(value->getType(), 2);
2437 atomicPHI->addIncoming(value, startBB);
2438 value = atomicPHI;
2439 } else {
2440 value = EmitLoadOfLValue(LV, E->getExprLoc());
2441 input = value;
2442 }
2443
2444 // Special case of integer increment that we have to check first: bool++.
2445 // Due to promotion rules, we get:
2446 // bool++ -> bool = bool + 1
2447 // -> bool = (int)bool + 1
2448 // -> bool = ((int)bool + 1 != 0)
2449 // An interesting aspect of this is that increment is always true.
2450 // Decrement does not have this property.
2451 if (isInc && type->isBooleanType()) {
2452 value = Builder.getTrue();
2453
2454 // Most common case by far: integer increment.
2455 } else if (type->isIntegerType()) {
2456 QualType promotedType;
2457 bool canPerformLossyDemotionCheck = false;
2458 if (type->isPromotableIntegerType()) {
2459 promotedType = CGF.getContext().getPromotedIntegerType(type);
2460 assert(promotedType != type && "Shouldn't promote to the same type.")((promotedType != type && "Shouldn't promote to the same type."
) ? static_cast<void> (0) : __assert_fail ("promotedType != type && \"Shouldn't promote to the same type.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2460, __PRETTY_FUNCTION__))
;
2461 canPerformLossyDemotionCheck = true;
2462 canPerformLossyDemotionCheck &=
2463 CGF.getContext().getCanonicalType(type) !=
2464 CGF.getContext().getCanonicalType(promotedType);
2465 canPerformLossyDemotionCheck &=
2466 PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
2467 type, promotedType);
2468 assert((!canPerformLossyDemotionCheck ||(((!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType
() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType
(type)->getScalarSizeInBits() == ConvertType(promotedType)
->getScalarSizeInBits()) && "The following check expects that if we do promotion to different "
"underlying canonical type, at least one of the types (either "
"base or promoted) will be signed, or the bitwidths will match."
) ? static_cast<void> (0) : __assert_fail ("(!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType(type)->getScalarSizeInBits() == ConvertType(promotedType)->getScalarSizeInBits()) && \"The following check expects that if we do promotion to different \" \"underlying canonical type, at least one of the types (either \" \"base or promoted) will be signed, or the bitwidths will match.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2475, __PRETTY_FUNCTION__))
2469 type->isSignedIntegerOrEnumerationType() ||(((!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType
() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType
(type)->getScalarSizeInBits() == ConvertType(promotedType)
->getScalarSizeInBits()) && "The following check expects that if we do promotion to different "
"underlying canonical type, at least one of the types (either "
"base or promoted) will be signed, or the bitwidths will match."
) ? static_cast<void> (0) : __assert_fail ("(!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType(type)->getScalarSizeInBits() == ConvertType(promotedType)->getScalarSizeInBits()) && \"The following check expects that if we do promotion to different \" \"underlying canonical type, at least one of the types (either \" \"base or promoted) will be signed, or the bitwidths will match.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2475, __PRETTY_FUNCTION__))
2470 promotedType->isSignedIntegerOrEnumerationType() ||(((!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType
() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType
(type)->getScalarSizeInBits() == ConvertType(promotedType)
->getScalarSizeInBits()) && "The following check expects that if we do promotion to different "
"underlying canonical type, at least one of the types (either "
"base or promoted) will be signed, or the bitwidths will match."
) ? static_cast<void> (0) : __assert_fail ("(!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType(type)->getScalarSizeInBits() == ConvertType(promotedType)->getScalarSizeInBits()) && \"The following check expects that if we do promotion to different \" \"underlying canonical type, at least one of the types (either \" \"base or promoted) will be signed, or the bitwidths will match.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2475, __PRETTY_FUNCTION__))
2471 ConvertType(type)->getScalarSizeInBits() ==(((!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType
() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType
(type)->getScalarSizeInBits() == ConvertType(promotedType)
->getScalarSizeInBits()) && "The following check expects that if we do promotion to different "
"underlying canonical type, at least one of the types (either "
"base or promoted) will be signed, or the bitwidths will match."
) ? static_cast<void> (0) : __assert_fail ("(!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType(type)->getScalarSizeInBits() == ConvertType(promotedType)->getScalarSizeInBits()) && \"The following check expects that if we do promotion to different \" \"underlying canonical type, at least one of the types (either \" \"base or promoted) will be signed, or the bitwidths will match.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2475, __PRETTY_FUNCTION__))
2472 ConvertType(promotedType)->getScalarSizeInBits()) &&(((!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType
() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType
(type)->getScalarSizeInBits() == ConvertType(promotedType)
->getScalarSizeInBits()) && "The following check expects that if we do promotion to different "
"underlying canonical type, at least one of the types (either "
"base or promoted) will be signed, or the bitwidths will match."
) ? static_cast<void> (0) : __assert_fail ("(!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType(type)->getScalarSizeInBits() == ConvertType(promotedType)->getScalarSizeInBits()) && \"The following check expects that if we do promotion to different \" \"underlying canonical type, at least one of the types (either \" \"base or promoted) will be signed, or the bitwidths will match.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2475, __PRETTY_FUNCTION__))
2473 "The following check expects that if we do promotion to different "(((!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType
() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType
(type)->getScalarSizeInBits() == ConvertType(promotedType)
->getScalarSizeInBits()) && "The following check expects that if we do promotion to different "
"underlying canonical type, at least one of the types (either "
"base or promoted) will be signed, or the bitwidths will match."
) ? static_cast<void> (0) : __assert_fail ("(!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType(type)->getScalarSizeInBits() == ConvertType(promotedType)->getScalarSizeInBits()) && \"The following check expects that if we do promotion to different \" \"underlying canonical type, at least one of the types (either \" \"base or promoted) will be signed, or the bitwidths will match.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2475, __PRETTY_FUNCTION__))
2474 "underlying canonical type, at least one of the types (either "(((!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType
() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType
(type)->getScalarSizeInBits() == ConvertType(promotedType)
->getScalarSizeInBits()) && "The following check expects that if we do promotion to different "
"underlying canonical type, at least one of the types (either "
"base or promoted) will be signed, or the bitwidths will match."
) ? static_cast<void> (0) : __assert_fail ("(!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType(type)->getScalarSizeInBits() == ConvertType(promotedType)->getScalarSizeInBits()) && \"The following check expects that if we do promotion to different \" \"underlying canonical type, at least one of the types (either \" \"base or promoted) will be signed, or the bitwidths will match.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2475, __PRETTY_FUNCTION__))
2475 "base or promoted) will be signed, or the bitwidths will match.")(((!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType
() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType
(type)->getScalarSizeInBits() == ConvertType(promotedType)
->getScalarSizeInBits()) && "The following check expects that if we do promotion to different "
"underlying canonical type, at least one of the types (either "
"base or promoted) will be signed, or the bitwidths will match."
) ? static_cast<void> (0) : __assert_fail ("(!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType(type)->getScalarSizeInBits() == ConvertType(promotedType)->getScalarSizeInBits()) && \"The following check expects that if we do promotion to different \" \"underlying canonical type, at least one of the types (either \" \"base or promoted) will be signed, or the bitwidths will match.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2475, __PRETTY_FUNCTION__))
;
2476 }
2477 if (CGF.SanOpts.hasOneOf(
2478 SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
2479 canPerformLossyDemotionCheck) {
2480 // While `x += 1` (for `x` with width less than int) is modeled as
2481 // promotion+arithmetics+demotion, and we can catch lossy demotion with
2482 // ease; inc/dec with width less than int can't overflow because of
2483 // promotion rules, so we omit promotion+demotion, which means that we can
2484 // not catch lossy "demotion". Because we still want to catch these cases
2485 // when the sanitizer is enabled, we perform the promotion, then perform
2486 // the increment/decrement in the wider type, and finally
2487 // perform the demotion. This will catch lossy demotions.
2488
2489 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
2490 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2491 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2492 // Do pass non-default ScalarConversionOpts so that sanitizer check is
2493 // emitted.
2494 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
2495 ScalarConversionOpts(CGF.SanOpts));
2496
2497 // Note that signed integer inc/dec with width less than int can't
2498 // overflow because of promotion rules; we're just eliding a few steps
2499 // here.
2500 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2501 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
2502 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2503 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
2504 value =
2505 EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, value, isInc));
2506 } else {
2507 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2508 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2509 }
2510
2511 // Next most common: pointer increment.
2512 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2513 QualType type = ptr->getPointeeType();
2514
2515 // VLA types don't have constant size.
2516 if (const VariableArrayType *vla
2517 = CGF.getContext().getAsVariableArrayType(type)) {
2518 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
2519 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
2520 if (CGF.getLangOpts().isSignedOverflowDefined())
2521 value = Builder.CreateGEP(value, numElts, "vla.inc");
2522 else
2523 value = CGF.EmitCheckedInBoundsGEP(
2524 value, numElts, /*SignedIndices=*/false, isSubtraction,
2525 E->getExprLoc(), "vla.inc");
2526
2527 // Arithmetic on function pointers (!) is just +-1.
2528 } else if (type->isFunctionType()) {
2529 llvm::Value *amt = Builder.getInt32(amount);
2530
2531 value = CGF.EmitCastToVoidPtr(value);
2532 if (CGF.getLangOpts().isSignedOverflowDefined())
2533 value = Builder.CreateGEP(value, amt, "incdec.funcptr");
2534 else
2535 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2536 isSubtraction, E->getExprLoc(),
2537 "incdec.funcptr");
2538 value = Builder.CreateBitCast(value, input->getType());
2539
2540 // For everything else, we can just do a simple increment.
2541 } else {
2542 llvm::Value *amt = Builder.getInt32(amount);
2543 if (CGF.getLangOpts().isSignedOverflowDefined())
2544 value = Builder.CreateGEP(value, amt, "incdec.ptr");
2545 else
2546 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2547 isSubtraction, E->getExprLoc(),
2548 "incdec.ptr");
2549 }
2550
2551 // Vector increment/decrement.
2552 } else if (type->isVectorType()) {
2553 if (type->hasIntegerRepresentation()) {
2554 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
2555
2556 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2557 } else {
2558 value = Builder.CreateFAdd(
2559 value,
2560 llvm::ConstantFP::get(value->getType(), amount),
2561 isInc ? "inc" : "dec");
2562 }
2563
2564 // Floating point.
2565 } else if (type->isRealFloatingType()) {
2566 // Add the inc/dec to the real part.
2567 llvm::Value *amt;
2568
2569 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2570 // Another special case: half FP increment should be done via float
2571 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2572 value = Builder.CreateCall(
2573 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
2574 CGF.CGM.FloatTy),
2575 input, "incdec.conv");
2576 } else {
2577 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
2578 }
2579 }
2580
2581 if (value->getType()->isFloatTy())
2582 amt = llvm::ConstantFP::get(VMContext,
2583 llvm::APFloat(static_cast<float>(amount)));
2584 else if (value->getType()->isDoubleTy())
2585 amt = llvm::ConstantFP::get(VMContext,
2586 llvm::APFloat(static_cast<double>(amount)));
2587 else {
2588 // Remaining types are Half, LongDouble or __float128. Convert from float.
2589 llvm::APFloat F(static_cast<float>(amount));
2590 bool ignored;
2591 const llvm::fltSemantics *FS;
2592 // Don't use getFloatTypeSemantics because Half isn't
2593 // necessarily represented using the "half" LLVM type.
2594 if (value->getType()->isFP128Ty())
2595 FS = &CGF.getTarget().getFloat128Format();
2596 else if (value->getType()->isHalfTy())
2597 FS = &CGF.getTarget().getHalfFormat();
2598 else
2599 FS = &CGF.getTarget().getLongDoubleFormat();
2600 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
2601 amt = llvm::ConstantFP::get(VMContext, F);
2602 }
2603 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
2604
2605 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2606 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2607 value = Builder.CreateCall(
2608 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
2609 CGF.CGM.FloatTy),
2610 value, "incdec.conv");
2611 } else {
2612 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
2613 }
2614 }
2615
2616 // Objective-C pointer types.
2617 } else {
2618 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
2619 value = CGF.EmitCastToVoidPtr(value);
2620
2621 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
2622 if (!isInc) size = -size;
2623 llvm::Value *sizeValue =
2624 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
2625
2626 if (CGF.getLangOpts().isSignedOverflowDefined())
2627 value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
2628 else
2629 value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
2630 /*SignedIndices=*/false, isSubtraction,
2631 E->getExprLoc(), "incdec.objptr");
2632 value = Builder.CreateBitCast(value, input->getType());
2633 }
2634
2635 if (atomicPHI) {
2636 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
2637 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2638 auto Pair = CGF.EmitAtomicCompareExchange(
2639 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
2640 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
2641 llvm::Value *success = Pair.second;
2642 atomicPHI->addIncoming(old, curBlock);
2643 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
2644 Builder.SetInsertPoint(contBB);
2645 return isPre ? value : input;
2646 }
2647
2648 // Store the updated result through the lvalue.
2649 if (LV.isBitField())
2650 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
2651 else
2652 CGF.EmitStoreThroughLValue(RValue::get(value), LV);
2653
2654 // If this is a postinc, return the value read from memory, otherwise use the
2655 // updated value.
2656 return isPre ? value : input;
2657}
2658
2659
2660
2661Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
2662 TestAndClearIgnoreResultAssign();
2663 Value *Op = Visit(E->getSubExpr());
2664
2665 // Generate a unary FNeg for FP ops.
2666 if (Op->getType()->isFPOrFPVectorTy())
2667 return Builder.CreateFNeg(Op, "fneg");
2668
2669 // Emit unary minus with EmitSub so we handle overflow cases etc.
2670 BinOpInfo BinOp;
2671 BinOp.RHS = Op;
2672 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
2673 BinOp.Ty = E->getType();
2674 BinOp.Opcode = BO_Sub;
2675 // FIXME: once UnaryOperator carries FPFeatures, copy it here.
2676 BinOp.E = E;
2677 return EmitSub(BinOp);
2678}
2679
2680Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
2681 TestAndClearIgnoreResultAssign();
2682 Value *Op = Visit(E->getSubExpr());
2683 return Builder.CreateNot(Op, "neg");
2684}
2685
2686Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
2687 // Perform vector logical not on comparison with zero vector.
2688 if (E->getType()->isExtVectorType()) {
2689 Value *Oper = Visit(E->getSubExpr());
2690 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
2691 Value *Result;
2692 if (Oper->getType()->isFPOrFPVectorTy())
2693 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
2694 else
2695 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
2696 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
2697 }
2698
2699 // Compare operand to zero.
2700 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
2701
2702 // Invert value.
2703 // TODO: Could dynamically modify easy computations here. For example, if
2704 // the operand is an icmp ne, turn into icmp eq.
2705 BoolVal = Builder.CreateNot(BoolVal, "lnot");
2706
2707 // ZExt result to the expr type.
2708 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
2709}
2710
2711Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
2712 // Try folding the offsetof to a constant.
2713 Expr::EvalResult EVResult;
2714 if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
1
Assuming the condition is false
2
Taking false branch
2715 llvm::APSInt Value = EVResult.Val.getInt();
2716 return Builder.getInt(Value);
2717 }
2718
2719 // Loop over the components of the offsetof to compute the value.
2720 unsigned n = E->getNumComponents();
2721 llvm::Type* ResultType = ConvertType(E->getType());
2722 llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
2723 QualType CurrentType = E->getTypeSourceInfo()->getType();
2724 for (unsigned i = 0; i != n; ++i) {
3
Assuming 'i' is not equal to 'n'
4
Loop condition is true. Entering loop body
2725 OffsetOfNode ON = E->getComponent(i);
2726 llvm::Value *Offset = nullptr;
2727 switch (ON.getKind()) {
5
Control jumps to 'case Base:' at line 2777
2728 case OffsetOfNode::Array: {
2729 // Compute the index
2730 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
2731 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
2732 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
2733 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
2734
2735 // Save the element type
2736 CurrentType =
2737 CGF.getContext().getAsArrayType(CurrentType)->getElementType();
2738
2739 // Compute the element size
2740 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
2741 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
2742
2743 // Multiply out to compute the result
2744 Offset = Builder.CreateMul(Idx, ElemSize);
2745 break;
2746 }
2747
2748 case OffsetOfNode::Field: {
2749 FieldDecl *MemberDecl = ON.getField();
2750 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
2751 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2752
2753 // Compute the index of the field in its parent.
2754 unsigned i = 0;
2755 // FIXME: It would be nice if we didn't have to loop here!
2756 for (RecordDecl::field_iterator Field = RD->field_begin(),
2757 FieldEnd = RD->field_end();
2758 Field != FieldEnd; ++Field, ++i) {
2759 if (*Field == MemberDecl)
2760 break;
2761 }
2762 assert(i < RL.getFieldCount() && "offsetof field in wrong type")((i < RL.getFieldCount() && "offsetof field in wrong type"
) ? static_cast<void> (0) : __assert_fail ("i < RL.getFieldCount() && \"offsetof field in wrong type\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2762, __PRETTY_FUNCTION__))
;
2763
2764 // Compute the offset to the field
2765 int64_t OffsetInt = RL.getFieldOffset(i) /
2766 CGF.getContext().getCharWidth();
2767 Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
2768
2769 // Save the element type.
2770 CurrentType = MemberDecl->getType();
2771 break;
2772 }
2773
2774 case OffsetOfNode::Identifier:
2775 llvm_unreachable("dependent __builtin_offsetof")::llvm::llvm_unreachable_internal("dependent __builtin_offsetof"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 2775)
;
2776
2777 case OffsetOfNode::Base: {
2778 if (ON.getBase()->isVirtual()) {
6
Assuming the condition is false
7
Taking false branch
2779 CGF.ErrorUnsupported(E, "virtual base in offsetof");
2780 continue;
2781 }
2782
2783 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
8
The object is a 'RecordType'
2784 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2785
2786 // Save the element type.
2787 CurrentType = ON.getBase()->getType();
2788
2789 // Compute the offset to the base.
2790 const RecordType *BaseRT = CurrentType->getAs<RecordType>();
9
Assuming the object is not a 'RecordType'
10
'BaseRT' initialized to a null pointer value
2791 CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
11
Called C++ object pointer is null
2792 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
2793 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
2794 break;
2795 }
2796 }
2797 Result = Builder.CreateAdd(Result, Offset);
2798 }
2799 return Result;
2800}
2801
2802/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
2803/// argument of the sizeof expression as an integer.
2804Value *
2805ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2806 const UnaryExprOrTypeTraitExpr *E) {
2807 QualType TypeToSize = E->getTypeOfArgument();
2808 if (E->getKind() == UETT_SizeOf) {
2809 if (const VariableArrayType *VAT =
2810 CGF.getContext().getAsVariableArrayType(TypeToSize)) {
2811 if (E->isArgumentType()) {
2812 // sizeof(type) - make sure to emit the VLA size.
2813 CGF.EmitVariablyModifiedType(TypeToSize);
2814 } else {
2815 // C99 6.5.3.4p2: If the argument is an expression of type
2816 // VLA, it is evaluated.
2817 CGF.EmitIgnoredExpr(E->getArgumentExpr());
2818 }
2819
2820 auto VlaSize = CGF.getVLASize(VAT);
2821 llvm::Value *size = VlaSize.NumElts;
2822
2823 // Scale the number of non-VLA elements by the non-VLA element size.
2824 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
2825 if (!eltSize.isOne())
2826 size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);
2827
2828 return size;
2829 }
2830 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
2831 auto Alignment =
2832 CGF.getContext()
2833 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
2834 E->getTypeOfArgument()->getPointeeType()))
2835 .getQuantity();
2836 return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
2837 }
2838
2839 // If this isn't sizeof(vla), the result must be constant; use the constant
2840 // folding logic so we don't have to duplicate it here.
2841 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
2842}
2843
2844Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
2845 Expr *Op = E->getSubExpr();
2846 if (Op->getType()->isAnyComplexType()) {
2847 // If it's an l-value, load through the appropriate subobject l-value.
2848 // Note that we have to ask E because Op might be an l-value that
2849 // this won't work for, e.g. an Obj-C property.
2850 if (E->isGLValue())
2851 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2852 E->getExprLoc()).getScalarVal();
2853
2854 // Otherwise, calculate and project.
2855 return CGF.EmitComplexExpr(Op, false, true).first;
2856 }
2857
2858 return Visit(Op);
2859}
2860
2861Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
2862 Expr *Op = E->getSubExpr();
2863 if (Op->getType()->isAnyComplexType()) {
2864 // If it's an l-value, load through the appropriate subobject l-value.
2865 // Note that we have to ask E because Op might be an l-value that
2866 // this won't work for, e.g. an Obj-C property.
2867 if (Op->isGLValue())
2868 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2869 E->getExprLoc()).getScalarVal();
2870
2871 // Otherwise, calculate and project.
2872 return CGF.EmitComplexExpr(Op, true, false).second;
2873 }
2874
2875 // __imag on a scalar returns zero. Emit the subexpr to ensure side
2876 // effects are evaluated, but not the actual value.
2877 if (Op->isGLValue())
2878 CGF.EmitLValue(Op);
2879 else
2880 CGF.EmitScalarExpr(Op, true);
2881 return llvm::Constant::getNullValue(ConvertType(E->getType()));
2882}
2883
2884//===----------------------------------------------------------------------===//
2885// Binary Operators
2886//===----------------------------------------------------------------------===//
2887
2888BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
2889 TestAndClearIgnoreResultAssign();
2890 BinOpInfo Result;
2891 Result.LHS = Visit(E->getLHS());
2892 Result.RHS = Visit(E->getRHS());
2893 Result.Ty = E->getType();
2894 Result.Opcode = E->getOpcode();
2895 Result.FPFeatures = E->getFPFeatures();
2896 Result.E = E;
2897 return Result;
2898}
2899
// Emit a compound assignment ("lhs op= rhs"). Returns the LValue of the LHS
// and stores the computed (post-conversion) value into Result. Handles three
// shapes: complex computation types (delegated), atomic LHS (atomicrmw fast
// path or a cmpxchg retry loop), and the plain scalar load/op/store path.
LValue ScalarExprEmitter::EmitCompoundAssignLValue(
          const CompoundAssignOperator *E,
          Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
          Value *&Result) {
  QualType LHSTy = E->getLHS()->getType();
  BinOpInfo OpInfo;

  if (E->getComputationResultType()->isAnyComplexType())
    return CGF.EmitScalarCompoundAssignWithComplex(E, Result);

  // Emit the RHS first. __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.
  OpInfo.RHS = Visit(E->getRHS());
  OpInfo.Ty = E->getComputationResultType();
  OpInfo.Opcode = E->getOpcode();
  OpInfo.FPFeatures = E->getFPFeatures();
  OpInfo.E = E;
  // Load/convert the LHS.
  LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

  llvm::PHINode *atomicPHI = nullptr;
  if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
    QualType type = atomicTy->getValueType();
    // Fast path: non-bool integer compound assignments can map directly to a
    // single atomicrmw — unless overflow checking or trapping semantics force
    // us into the cmpxchg loop below.
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
            LangOptions::SOB_Trapping) {
      llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
      // Op is only assigned (and only read) when AtomicOp is set below.
      llvm::Instruction::BinaryOps Op;
      switch (OpInfo.Opcode) {
        // We don't have atomicrmw operands for *, %, /, <<, >>
        case BO_MulAssign: case BO_DivAssign:
        case BO_RemAssign:
        case BO_ShlAssign:
        case BO_ShrAssign:
          break;
        case BO_AddAssign:
          AtomicOp = llvm::AtomicRMWInst::Add;
          Op = llvm::Instruction::Add;
          break;
        case BO_SubAssign:
          AtomicOp = llvm::AtomicRMWInst::Sub;
          Op = llvm::Instruction::Sub;
          break;
        case BO_AndAssign:
          AtomicOp = llvm::AtomicRMWInst::And;
          Op = llvm::Instruction::And;
          break;
        case BO_XorAssign:
          AtomicOp = llvm::AtomicRMWInst::Xor;
          Op = llvm::Instruction::Xor;
          break;
        case BO_OrAssign:
          AtomicOp = llvm::AtomicRMWInst::Or;
          Op = llvm::Instruction::Or;
          break;
        default:
          llvm_unreachable("Invalid compound assignment type");
      }
      if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
        // Convert the RHS to the LHS type, then perform the atomic op.
        llvm::Value *Amt = CGF.EmitToMemory(
            EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
                                 E->getExprLoc()),
            LHSTy);
        Value *OldVal = Builder.CreateAtomicRMW(
            AtomicOp, LHSLV.getPointer(CGF), Amt,
            llvm::AtomicOrdering::SequentiallyConsistent);

        // Since operation is atomic, the result type is guaranteed to be the
        // same as the input in LLVM terms.
        Result = Builder.CreateBinOp(Op, OldVal, Amt);
        return LHSLV;
      }
    }
    // Slow path: load the current value, run the operation, and retry with
    // cmpxchg until no other thread has intervened. The PHI carries the
    // observed old value around the loop.
    // FIXME: For floating point types, we should be saving and restoring the
    // floating point environment in the loop.
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
    OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
    atomicPHI->addIncoming(OpInfo.LHS, startBB);
    OpInfo.LHS = atomicPHI;
  }
  else
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());

  SourceLocation Loc = E->getExprLoc();
  // Promote the LHS to the computation type before applying the operator.
  OpInfo.LHS =
      EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);

  // Expand the binary operator.
  Result = (this->*Func)(OpInfo);

  // Convert the result back to the LHS type,
  // potentially with Implicit Conversion sanitizer check.
  Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy,
                                Loc, ScalarConversionOpts(CGF.SanOpts));

  if (atomicPHI) {
    // Close the cmpxchg retry loop: attempt to publish Result; on failure,
    // feed the freshly observed value back into the PHI and loop.
    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(old, curBlock);
    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
    return LHSLV;
  }

  // Store the result value into the LHS lvalue. Bit-fields are handled
  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after the
  // assignment...'.
  if (LHSLV.isBitField())
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
  else
    CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);

  if (CGF.getLangOpts().OpenMP)
    CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
                                                                  E->getLHS());
  return LHSLV;
}
3029
3030Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3031 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3032 bool Ignore = TestAndClearIgnoreResultAssign();
3033 Value *RHS = nullptr;
3034 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
3035
3036 // If the result is clearly ignored, return now.
3037 if (Ignore)
3038 return nullptr;
3039
3040 // The result of an assignment in C is the assigned r-value.
3041 if (!CGF.getLangOpts().CPlusPlus)
3042 return RHS;
3043
3044 // If the lvalue is non-volatile, return the computed value of the assignment.
3045 if (!LHS.isVolatileQualified())
3046 return RHS;
3047
3048 // Otherwise, reload the value.
3049 return EmitLoadOfLValue(LHS, E->getExprLoc());
3050}
3051
3052void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
3053 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
3054 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
3055
3056 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
3057 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
3058 SanitizerKind::IntegerDivideByZero));
3059 }
3060
3061 const auto *BO = cast<BinaryOperator>(Ops.E);
3062 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
3063 Ops.Ty->hasSignedIntegerRepresentation() &&
3064 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
3065 Ops.mayHaveIntegerOverflow()) {
3066 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
3067
3068 llvm::Value *IntMin =
3069 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
3070 llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
3071
3072 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
3073 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
3074 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
3075 Checks.push_back(
3076 std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
3077 }
3078
3079 if (Checks.size() > 0)
3080 EmitBinOpCheck(Checks, Ops);
3081}
3082
3083Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
3084 {
3085 CodeGenFunction::SanitizerScope SanScope(&CGF);
3086 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3087 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3088 Ops.Ty->isIntegerType() &&
3089 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3090 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3091 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
3092 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
3093 Ops.Ty->isRealFloatingType() &&
3094 Ops.mayHaveFloatDivisionByZero()) {
3095 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3096 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
3097 EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
3098 Ops);
3099 }
3100 }
3101
3102 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
3103 llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
3104 if (CGF.getLangOpts().OpenCL &&
3105 !CGF.CGM.getCodeGenOpts().CorrectlyRoundedDivSqrt) {
3106 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
3107 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
3108 // build option allows an application to specify that single precision
3109 // floating-point divide (x/y and 1/x) and sqrt used in the program
3110 // source are correctly rounded.
3111 llvm::Type *ValTy = Val->getType();
3112 if (ValTy->isFloatTy() ||
3113 (isa<llvm::VectorType>(ValTy) &&
3114 cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
3115 CGF.SetFPAccuracy(Val, 2.5);
3116 }
3117 return Val;
3118 }
3119 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
3120 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
3121 else
3122 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
3123}
3124
3125Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
3126 // Rem in C can't be a floating point type: C99 6.5.5p2.
3127 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3128 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3129 Ops.Ty->isIntegerType() &&
3130 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3131 CodeGenFunction::SanitizerScope SanScope(&CGF);
3132 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3133 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
3134 }
3135
3136 if (Ops.Ty->hasUnsignedIntegerRepresentation())
3137 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
3138 else
3139 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
3140}
3141
3142Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
3143 unsigned IID;
3144 unsigned OpID = 0;
3145
3146 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
3147 switch (Ops.Opcode) {
3148 case BO_Add:
3149 case BO_AddAssign:
3150 OpID = 1;
3151 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
3152 llvm::Intrinsic::uadd_with_overflow;
3153 break;
3154 case BO_Sub:
3155 case BO_SubAssign:
3156 OpID = 2;
3157 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
3158 llvm::Intrinsic::usub_with_overflow;
3159 break;
3160 case BO_Mul:
3161 case BO_MulAssign:
3162 OpID = 3;
3163 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
3164 llvm::Intrinsic::umul_with_overflow;
3165 break;
3166 default:
3167 llvm_unreachable("Unsupported operation for overflow detection")::llvm::llvm_unreachable_internal("Unsupported operation for overflow detection"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3167)
;
3168 }
3169 OpID <<= 1;
3170 if (isSigned)
3171 OpID |= 1;
3172
3173 CodeGenFunction::SanitizerScope SanScope(&CGF);
3174 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
3175
3176 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
3177
3178 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
3179 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
3180 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
3181
3182 // Handle overflow with llvm.trap if no custom handler has been specified.
3183 const std::string *handlerName =
3184 &CGF.getLangOpts().OverflowHandler;
3185 if (handlerName->empty()) {
3186 // If the signed-integer-overflow sanitizer is enabled, emit a call to its
3187 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
3188 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
3189 llvm::Value *NotOverflow = Builder.CreateNot(overflow);
3190 SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
3191 : SanitizerKind::UnsignedIntegerOverflow;
3192 EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
3193 } else
3194 CGF.EmitTrapCheck(Builder.CreateNot(overflow));
3195 return result;
3196 }
3197
3198 // Branch in case of overflow.
3199 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
3200 llvm::BasicBlock *continueBB =
3201 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
3202 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
3203
3204 Builder.CreateCondBr(overflow, overflowBB, continueBB);
3205
3206 // If an overflow handler is set, then we want to call it and then use its
3207 // result, if it returns.
3208 Builder.SetInsertPoint(overflowBB);
3209
3210 // Get the overflow handler.
3211 llvm::Type *Int8Ty = CGF.Int8Ty;
3212 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
3213 llvm::FunctionType *handlerTy =
3214 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
3215 llvm::FunctionCallee handler =
3216 CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
3217
3218 // Sign extend the args to 64-bit, so that we can use the same handler for
3219 // all types of overflow.
3220 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
3221 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
3222
3223 // Call the handler with the two arguments, the operation, and the size of
3224 // the result.
3225 llvm::Value *handlerArgs[] = {
3226 lhs,
3227 rhs,
3228 Builder.getInt8(OpID),
3229 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
3230 };
3231 llvm::Value *handlerResult =
3232 CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
3233
3234 // Truncate the result back to the desired size.
3235 handlerResult = Builder.CreateTrunc(handlerResult, opTy);
3236 Builder.CreateBr(continueBB);
3237
3238 Builder.SetInsertPoint(continueBB);
3239 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
3240 phi->addIncoming(result, initialBB);
3241 phi->addIncoming(handlerResult, overflowBB);
3242
3243 return phi;
3244}
3245
3246/// Emit pointer + index arithmetic.
3247static Value *emitPointerArithmetic(CodeGenFunction &CGF,
3248 const BinOpInfo &op,
3249 bool isSubtraction) {
3250 // Must have binary (not unary) expr here. Unary pointer
3251 // increment/decrement doesn't use this path.
3252 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
3253
3254 Value *pointer = op.LHS;
3255 Expr *pointerOperand = expr->getLHS();
3256 Value *index = op.RHS;
3257 Expr *indexOperand = expr->getRHS();
3258
3259 // In a subtraction, the LHS is always the pointer.
3260 if (!isSubtraction && !pointer->getType()->isPointerTy()) {
3261 std::swap(pointer, index);
3262 std::swap(pointerOperand, indexOperand);
3263 }
3264
3265 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
3266
3267 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
3268 auto &DL = CGF.CGM.getDataLayout();
3269 auto PtrTy = cast<llvm::PointerType>(pointer->getType());
3270
3271 // Some versions of glibc and gcc use idioms (particularly in their malloc
3272 // routines) that add a pointer-sized integer (known to be a pointer value)
3273 // to a null pointer in order to cast the value back to an integer or as
3274 // part of a pointer alignment algorithm. This is undefined behavior, but
3275 // we'd like to be able to compile programs that use it.
3276 //
3277 // Normally, we'd generate a GEP with a null-pointer base here in response
3278 // to that code, but it's also UB to dereference a pointer created that
3279 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
3280 // generate a direct cast of the integer value to a pointer.
3281 //
3282 // The idiom (p = nullptr + N) is not met if any of the following are true:
3283 //
3284 // The operation is subtraction.
3285 // The index is not pointer-sized.
3286 // The pointer type is not byte-sized.
3287 //
3288 if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
3289 op.Opcode,
3290 expr->getLHS(),
3291 expr->getRHS()))
3292 return CGF.Builder.CreateIntToPtr(index, pointer->getType());
3293
3294 if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
3295 // Zero-extend or sign-extend the pointer value according to
3296 // whether the index is signed or not.
3297 index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
3298 "idx.ext");
3299 }
3300
3301 // If this is subtraction, negate the index.
3302 if (isSubtraction)
3303 index = CGF.Builder.CreateNeg(index, "idx.neg");
3304
3305 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
3306 CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
3307 /*Accessed*/ false);
3308
3309 const PointerType *pointerType
3310 = pointerOperand->getType()->getAs<PointerType>();
3311 if (!pointerType) {
3312 QualType objectType = pointerOperand->getType()
3313 ->castAs<ObjCObjectPointerType>()
3314 ->getPointeeType();
3315 llvm::Value *objectSize
3316 = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
3317
3318 index = CGF.Builder.CreateMul(index, objectSize);
3319
3320 Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
3321 result = CGF.Builder.CreateGEP(result, index, "add.ptr");
3322 return CGF.Builder.CreateBitCast(result, pointer->getType());
3323 }
3324
3325 QualType elementType = pointerType->getPointeeType();
3326 if (const VariableArrayType *vla
3327 = CGF.getContext().getAsVariableArrayType(elementType)) {
3328 // The element count here is the total number of non-VLA elements.
3329 llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
3330
3331 // Effectively, the multiply by the VLA size is part of the GEP.
3332 // GEP indexes are signed, and scaling an index isn't permitted to
3333 // signed-overflow, so we use the same semantics for our explicit
3334 // multiply. We suppress this if overflow is not undefined behavior.
3335 if (CGF.getLangOpts().isSignedOverflowDefined()) {
3336 index = CGF.Builder.CreateMul(index, numElements, "vla.index");
3337 pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
3338 } else {
3339 index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
3340 pointer =
3341 CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
3342 op.E->getExprLoc(), "add.ptr");
3343 }
3344 return pointer;
3345 }
3346
3347 // Explicitly handle GNU void* and function pointer arithmetic extensions. The
3348 // GNU void* casts amount to no-ops since our void* type is i8*, but this is
3349 // future proof.
3350 if (elementType->isVoidType() || elementType->isFunctionType()) {
3351 Value *result = CGF.EmitCastToVoidPtr(pointer);
3352 result = CGF.Builder.CreateGEP(result, index, "add.ptr");
3353 return CGF.Builder.CreateBitCast(result, pointer->getType());
3354 }
3355
3356 if (CGF.getLangOpts().isSignedOverflowDefined())
3357 return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
3358
3359 return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
3360 op.E->getExprLoc(), "add.ptr");
3361}
3362
3363// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
3364// Addend. Use negMul and negAdd to negate the first operand of the Mul or
3365// the add operand respectively. This allows fmuladd to represent a*b-c, or
3366// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
3367// efficient operations.
3368static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
3369 const CodeGenFunction &CGF, CGBuilderTy &Builder,
3370 bool negMul, bool negAdd) {
3371 assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.")((!(negMul && negAdd) && "Only one of negMul and negAdd should be set."
) ? static_cast<void> (0) : __assert_fail ("!(negMul && negAdd) && \"Only one of negMul and negAdd should be set.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3371, __PRETTY_FUNCTION__))
;
3372
3373 Value *MulOp0 = MulOp->getOperand(0);
3374 Value *MulOp1 = MulOp->getOperand(1);
3375 if (negMul)
3376 MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
3377 if (negAdd)
3378 Addend = Builder.CreateFNeg(Addend, "neg");
3379
3380 Value *FMulAdd = nullptr;
3381 if (Builder.getIsFPConstrained()) {
3382 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&((isa<llvm::ConstrainedFPIntrinsic>(MulOp) && "Only constrained operation should be created when Builder is in FP "
"constrained mode") ? static_cast<void> (0) : __assert_fail
("isa<llvm::ConstrainedFPIntrinsic>(MulOp) && \"Only constrained operation should be created when Builder is in FP \" \"constrained mode\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3384, __PRETTY_FUNCTION__))
3383 "Only constrained operation should be created when Builder is in FP "((isa<llvm::ConstrainedFPIntrinsic>(MulOp) && "Only constrained operation should be created when Builder is in FP "
"constrained mode") ? static_cast<void> (0) : __assert_fail
("isa<llvm::ConstrainedFPIntrinsic>(MulOp) && \"Only constrained operation should be created when Builder is in FP \" \"constrained mode\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3384, __PRETTY_FUNCTION__))
3384 "constrained mode")((isa<llvm::ConstrainedFPIntrinsic>(MulOp) && "Only constrained operation should be created when Builder is in FP "
"constrained mode") ? static_cast<void> (0) : __assert_fail
("isa<llvm::ConstrainedFPIntrinsic>(MulOp) && \"Only constrained operation should be created when Builder is in FP \" \"constrained mode\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3384, __PRETTY_FUNCTION__))
;
3385 FMulAdd = Builder.CreateConstrainedFPCall(
3386 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
3387 Addend->getType()),
3388 {MulOp0, MulOp1, Addend});
3389 } else {
3390 FMulAdd = Builder.CreateCall(
3391 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
3392 {MulOp0, MulOp1, Addend});
3393 }
3394 MulOp->eraseFromParent();
3395
3396 return FMulAdd;
3397}
3398
3399// Check whether it would be legal to emit an fmuladd intrinsic call to
3400// represent op and if so, build the fmuladd.
3401//
3402// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
3403// Does NOT check the type of the operation - it's assumed that this function
3404// will be called from contexts where it's known that the type is contractable.
3405static Value* tryEmitFMulAdd(const BinOpInfo &op,
3406 const CodeGenFunction &CGF, CGBuilderTy &Builder,
3407 bool isSub=false) {
3408
3409 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||(((op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode
== BO_Sub || op.Opcode == BO_SubAssign) && "Only fadd/fsub can be the root of an fmuladd."
) ? static_cast<void> (0) : __assert_fail ("(op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) && \"Only fadd/fsub can be the root of an fmuladd.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3411, __PRETTY_FUNCTION__))
3410 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&(((op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode
== BO_Sub || op.Opcode == BO_SubAssign) && "Only fadd/fsub can be the root of an fmuladd."
) ? static_cast<void> (0) : __assert_fail ("(op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) && \"Only fadd/fsub can be the root of an fmuladd.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3411, __PRETTY_FUNCTION__))
3411 "Only fadd/fsub can be the root of an fmuladd.")(((op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode
== BO_Sub || op.Opcode == BO_SubAssign) && "Only fadd/fsub can be the root of an fmuladd."
) ? static_cast<void> (0) : __assert_fail ("(op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) && \"Only fadd/fsub can be the root of an fmuladd.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3411, __PRETTY_FUNCTION__))
;
3412
3413 // Check whether this op is marked as fusable.
3414 if (!op.FPFeatures.allowFPContractWithinStatement())
3415 return nullptr;
3416
3417 // We have a potentially fusable op. Look for a mul on one of the operands.
3418 // Also, make sure that the mul result isn't used directly. In that case,
3419 // there's no point creating a muladd operation.
3420 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
3421 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3422 LHSBinOp->use_empty())
3423 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
3424 }
3425 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
3426 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3427 RHSBinOp->use_empty())
3428 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
3429 }
3430
3431 if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) {
3432 if (LHSBinOp->getIntrinsicID() ==
3433 llvm::Intrinsic::experimental_constrained_fmul &&
3434 LHSBinOp->use_empty())
3435 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
3436 }
3437 if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) {
3438 if (RHSBinOp->getIntrinsicID() ==
3439 llvm::Intrinsic::experimental_constrained_fmul &&
3440 RHSBinOp->use_empty())
3441 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
3442 }
3443
3444 return nullptr;
3445}
3446
3447Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
3448 if (op.LHS->getType()->isPointerTy() ||
3449 op.RHS->getType()->isPointerTy())
3450 return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);
3451
3452 if (op.Ty->isSignedIntegerOrEnumerationType()) {
3453 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3454 case LangOptions::SOB_Defined:
3455 return Builder.CreateAdd(op.LHS, op.RHS, "add");
3456 case LangOptions::SOB_Undefined:
3457 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3458 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3459 LLVM_FALLTHROUGH[[gnu::fallthrough]];
3460 case LangOptions::SOB_Trapping:
3461 if (CanElideOverflowCheck(CGF.getContext(), op))
3462 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3463 return EmitOverflowCheckedBinOp(op);
3464 }
3465 }
3466
3467 if (op.Ty->isUnsignedIntegerType() &&
3468 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3469 !CanElideOverflowCheck(CGF.getContext(), op))
3470 return EmitOverflowCheckedBinOp(op);
3471
3472 if (op.LHS->getType()->isFPOrFPVectorTy()) {
3473 // Try to form an fmuladd.
3474 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
3475 return FMulAdd;
3476
3477 Value *V = Builder.CreateFAdd(op.LHS, op.RHS, "add");
3478 return propagateFMFlags(V, op);
3479 }
3480
3481 if (op.isFixedPointBinOp())
3482 return EmitFixedPointBinOp(op);
3483
3484 return Builder.CreateAdd(op.LHS, op.RHS, "add");
3485}
3486
3487/// The resulting value must be calculated with exact precision, so the operands
3488/// may not be the same type.
3489Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
3490 using llvm::APSInt;
3491 using llvm::ConstantInt;
3492
3493 const auto *BinOp = cast<BinaryOperator>(op.E);
3494
3495 // The result is a fixed point type and at least one of the operands is fixed
3496 // point while the other is either fixed point or an int. This resulting type
3497 // should be determined by Sema::handleFixedPointConversions().
3498 QualType ResultTy = op.Ty;
3499 QualType LHSTy = BinOp->getLHS()->getType();
3500 QualType RHSTy = BinOp->getRHS()->getType();
3501 ASTContext &Ctx = CGF.getContext();
3502 Value *LHS = op.LHS;
3503 Value *RHS = op.RHS;
3504
3505 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
3506 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
3507 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
3508 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
3509
3510 // Convert the operands to the full precision type.
3511 Value *FullLHS = EmitFixedPointConversion(LHS, LHSFixedSema, CommonFixedSema,
3512 BinOp->getExprLoc());
3513 Value *FullRHS = EmitFixedPointConversion(RHS, RHSFixedSema, CommonFixedSema,
3514 BinOp->getExprLoc());
3515
3516 // Perform the actual addition.
3517 Value *Result;
3518 switch (BinOp->getOpcode()) {
3519 case BO_Add: {
3520 if (ResultFixedSema.isSaturated()) {
3521 llvm::Intrinsic::ID IID = ResultFixedSema.isSigned()
3522 ? llvm::Intrinsic::sadd_sat
3523 : llvm::Intrinsic::uadd_sat;
3524 Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS);
3525 } else {
3526 Result = Builder.CreateAdd(FullLHS, FullRHS);
3527 }
3528 break;
3529 }
3530 case BO_Sub: {
3531 if (ResultFixedSema.isSaturated()) {
3532 llvm::Intrinsic::ID IID = ResultFixedSema.isSigned()
3533 ? llvm::Intrinsic::ssub_sat
3534 : llvm::Intrinsic::usub_sat;
3535 Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS);
3536 } else {
3537 Result = Builder.CreateSub(FullLHS, FullRHS);
3538 }
3539 break;
3540 }
3541 case BO_LT:
3542 return CommonFixedSema.isSigned() ? Builder.CreateICmpSLT(FullLHS, FullRHS)
3543 : Builder.CreateICmpULT(FullLHS, FullRHS);
3544 case BO_GT:
3545 return CommonFixedSema.isSigned() ? Builder.CreateICmpSGT(FullLHS, FullRHS)
3546 : Builder.CreateICmpUGT(FullLHS, FullRHS);
3547 case BO_LE:
3548 return CommonFixedSema.isSigned() ? Builder.CreateICmpSLE(FullLHS, FullRHS)
3549 : Builder.CreateICmpULE(FullLHS, FullRHS);
3550 case BO_GE:
3551 return CommonFixedSema.isSigned() ? Builder.CreateICmpSGE(FullLHS, FullRHS)
3552 : Builder.CreateICmpUGE(FullLHS, FullRHS);
3553 case BO_EQ:
3554 // For equality operations, we assume any padding bits on unsigned types are
3555 // zero'd out. They could be overwritten through non-saturating operations
3556 // that cause overflow, but this leads to undefined behavior.
3557 return Builder.CreateICmpEQ(FullLHS, FullRHS);
3558 case BO_NE:
3559 return Builder.CreateICmpNE(FullLHS, FullRHS);
3560 case BO_Mul:
3561 case BO_Div:
3562 case BO_Shl:
3563 case BO_Shr:
3564 case BO_Cmp:
3565 case BO_LAnd:
3566 case BO_LOr:
3567 case BO_MulAssign:
3568 case BO_DivAssign:
3569 case BO_AddAssign:
3570 case BO_SubAssign:
3571 case BO_ShlAssign:
3572 case BO_ShrAssign:
3573 llvm_unreachable("Found unimplemented fixed point binary operation")::llvm::llvm_unreachable_internal("Found unimplemented fixed point binary operation"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3573)
;
3574 case BO_PtrMemD:
3575 case BO_PtrMemI:
3576 case BO_Rem:
3577 case BO_Xor:
3578 case BO_And:
3579 case BO_Or:
3580 case BO_Assign:
3581 case BO_RemAssign:
3582 case BO_AndAssign:
3583 case BO_XorAssign:
3584 case BO_OrAssign:
3585 case BO_Comma:
3586 llvm_unreachable("Found unsupported binary operation for fixed point types.")::llvm::llvm_unreachable_internal("Found unsupported binary operation for fixed point types."
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3586)
;
3587 }
3588
3589 // Convert to the result type.
3590 return EmitFixedPointConversion(Result, CommonFixedSema, ResultFixedSema,
3591 BinOp->getExprLoc());
3592}
3593
3594Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
3595 // The LHS is always a pointer if either side is.
3596 if (!op.LHS->getType()->isPointerTy()) {
3597 if (op.Ty->isSignedIntegerOrEnumerationType()) {
3598 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3599 case LangOptions::SOB_Defined:
3600 return Builder.CreateSub(op.LHS, op.RHS, "sub");
3601 case LangOptions::SOB_Undefined:
3602 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3603 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3604 LLVM_FALLTHROUGH[[gnu::fallthrough]];
3605 case LangOptions::SOB_Trapping:
3606 if (CanElideOverflowCheck(CGF.getContext(), op))
3607 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3608 return EmitOverflowCheckedBinOp(op);
3609 }
3610 }
3611
3612 if (op.Ty->isUnsignedIntegerType() &&
3613 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3614 !CanElideOverflowCheck(CGF.getContext(), op))
3615 return EmitOverflowCheckedBinOp(op);
3616
3617 if (op.LHS->getType()->isFPOrFPVectorTy()) {
3618 // Try to form an fmuladd.
3619 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
3620 return FMulAdd;
3621 Value *V = Builder.CreateFSub(op.LHS, op.RHS, "sub");
3622 return propagateFMFlags(V, op);
3623 }
3624
3625 if (op.isFixedPointBinOp())
3626 return EmitFixedPointBinOp(op);
3627
3628 return Builder.CreateSub(op.LHS, op.RHS, "sub");
3629 }
3630
3631 // If the RHS is not a pointer, then we have normal pointer
3632 // arithmetic.
3633 if (!op.RHS->getType()->isPointerTy())
3634 return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);
3635
3636 // Otherwise, this is a pointer subtraction.
3637
3638 // Do the raw subtraction part.
3639 llvm::Value *LHS
3640 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
3641 llvm::Value *RHS
3642 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
3643 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
3644
3645 // Okay, figure out the element size.
3646 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
3647 QualType elementType = expr->getLHS()->getType()->getPointeeType();
3648
3649 llvm::Value *divisor = nullptr;
3650
3651 // For a variable-length array, this is going to be non-constant.
3652 if (const VariableArrayType *vla
3653 = CGF.getContext().getAsVariableArrayType(elementType)) {
3654 auto VlaSize = CGF.getVLASize(vla);
3655 elementType = VlaSize.Type;
3656 divisor = VlaSize.NumElts;
3657
3658 // Scale the number of non-VLA elements by the non-VLA element size.
3659 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
3660 if (!eltSize.isOne())
3661 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
3662
3663 // For everything elese, we can just compute it, safe in the
3664 // assumption that Sema won't let anything through that we can't
3665 // safely compute the size of.
3666 } else {
3667 CharUnits elementSize;
3668 // Handle GCC extension for pointer arithmetic on void* and
3669 // function pointer types.
3670 if (elementType->isVoidType() || elementType->isFunctionType())
3671 elementSize = CharUnits::One();
3672 else
3673 elementSize = CGF.getContext().getTypeSizeInChars(elementType);
3674
3675 // Don't even emit the divide for element size of 1.
3676 if (elementSize.isOne())
3677 return diffInChars;
3678
3679 divisor = CGF.CGM.getSize(elementSize);
3680 }
3681
3682 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
3683 // pointer difference in C is only defined in the case where both operands
3684 // are pointing to elements of an array.
3685 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
3686}
3687
3688Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) {
3689 llvm::IntegerType *Ty;
3690 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
3691 Ty = cast<llvm::IntegerType>(VT->getElementType());
3692 else
3693 Ty = cast<llvm::IntegerType>(LHS->getType());
3694 return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
3695}
3696
3697Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
3698 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3699 // RHS to the same size as the LHS.
3700 Value *RHS = Ops.RHS;
3701 if (Ops.LHS->getType() != RHS->getType())
3702 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
3703
3704 bool SanitizeBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
3705 Ops.Ty->hasSignedIntegerRepresentation() &&
3706 !CGF.getLangOpts().isSignedOverflowDefined() &&
3707 !CGF.getLangOpts().CPlusPlus2a;
3708 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
3709 // OpenCL 6.3j: shift values are effectively % word size of LHS.
3710 if (CGF.getLangOpts().OpenCL)
3711 RHS =
3712 Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shl.mask");
3713 else if ((SanitizeBase || SanitizeExponent) &&
3714 isa<llvm::IntegerType>(Ops.LHS->getType())) {
3715 CodeGenFunction::SanitizerScope SanScope(&CGF);
3716 SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
3717 llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
3718 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
3719
3720 if (SanitizeExponent) {
3721 Checks.push_back(
3722 std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
3723 }
3724
3725 if (SanitizeBase) {
3726 // Check whether we are shifting any non-zero bits off the top of the
3727 // integer. We only emit this check if exponent is valid - otherwise
3728 // instructions below will have undefined behavior themselves.
3729 llvm::BasicBlock *Orig = Builder.GetInsertBlock();
3730 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
3731 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
3732 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
3733 llvm::Value *PromotedWidthMinusOne =
3734 (RHS == Ops.RHS) ? WidthMinusOne
3735 : GetWidthMinusOneValue(Ops.LHS, RHS);
3736 CGF.EmitBlock(CheckShiftBase);
3737 llvm::Value *BitsShiftedOff = Builder.CreateLShr(
3738 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
3739 /*NUW*/ true, /*NSW*/ true),
3740 "shl.check");
3741 if (CGF.getLangOpts().CPlusPlus) {
3742 // In C99, we are not permitted to shift a 1 bit into the sign bit.
3743 // Under C++11's rules, shifting a 1 bit into the sign bit is
3744 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
3745 // define signed left shifts, so we use the C99 and C++11 rules there).
3746 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
3747 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
3748 }
3749 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
3750 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
3751 CGF.EmitBlock(Cont);
3752 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
3753 BaseCheck->addIncoming(Builder.getTrue(), Orig);
3754 BaseCheck->addIncoming(ValidBase, CheckShiftBase);
3755 Checks.push_back(std::make_pair(BaseCheck, SanitizerKind::ShiftBase));
3756 }
3757
3758 assert(!Checks.empty())((!Checks.empty()) ? static_cast<void> (0) : __assert_fail
("!Checks.empty()", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3758, __PRETTY_FUNCTION__))
;
3759 EmitBinOpCheck(Checks, Ops);
3760 }
3761
3762 return Builder.CreateShl(Ops.LHS, RHS, "shl");
3763}
3764
3765Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
3766 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3767 // RHS to the same size as the LHS.
3768 Value *RHS = Ops.RHS;
3769 if (Ops.LHS->getType() != RHS->getType())
3770 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
3771
3772 // OpenCL 6.3j: shift values are effectively % word size of LHS.
3773 if (CGF.getLangOpts().OpenCL)
3774 RHS =
3775 Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shr.mask");
3776 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
3777 isa<llvm::IntegerType>(Ops.LHS->getType())) {
3778 CodeGenFunction::SanitizerScope SanScope(&CGF);
3779 llvm::Value *Valid =
3780 Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
3781 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
3782 }
3783
3784 if (Ops.Ty->hasUnsignedIntegerRepresentation())
3785 return Builder.CreateLShr(Ops.LHS, RHS, "shr");
3786 return Builder.CreateAShr(Ops.LHS, RHS, "shr");
3787}
3788
3789enum IntrinsicType { VCMPEQ, VCMPGT };
3790// return corresponding comparison intrinsic for given vector type
3791static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
3792 BuiltinType::Kind ElemKind) {
3793 switch (ElemKind) {
3794 default: llvm_unreachable("unexpected element type")::llvm::llvm_unreachable_internal("unexpected element type", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3794)
;
3795 case BuiltinType::Char_U:
3796 case BuiltinType::UChar:
3797 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
3798 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
3799 case BuiltinType::Char_S:
3800 case BuiltinType::SChar:
3801 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
3802 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
3803 case BuiltinType::UShort:
3804 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
3805 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
3806 case BuiltinType::Short:
3807 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
3808 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
3809 case BuiltinType::UInt:
3810 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
3811 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
3812 case BuiltinType::Int:
3813 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
3814 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
3815 case BuiltinType::ULong:
3816 case BuiltinType::ULongLong:
3817 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
3818 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
3819 case BuiltinType::Long:
3820 case BuiltinType::LongLong:
3821 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
3822 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
3823 case BuiltinType::Float:
3824 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
3825 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
3826 case BuiltinType::Double:
3827 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
3828 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
3829 }
3830}
3831
3832Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
3833 llvm::CmpInst::Predicate UICmpOpc,
3834 llvm::CmpInst::Predicate SICmpOpc,
3835 llvm::CmpInst::Predicate FCmpOpc,
3836 bool IsSignaling) {
3837 TestAndClearIgnoreResultAssign();
3838 Value *Result;
3839 QualType LHSTy = E->getLHS()->getType();
3840 QualType RHSTy = E->getRHS()->getType();
3841 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
3842 assert(E->getOpcode() == BO_EQ ||((E->getOpcode() == BO_EQ || E->getOpcode() == BO_NE) ?
static_cast<void> (0) : __assert_fail ("E->getOpcode() == BO_EQ || E->getOpcode() == BO_NE"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3843, __PRETTY_FUNCTION__))
3843 E->getOpcode() == BO_NE)((E->getOpcode() == BO_EQ || E->getOpcode() == BO_NE) ?
static_cast<void> (0) : __assert_fail ("E->getOpcode() == BO_EQ || E->getOpcode() == BO_NE"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3843, __PRETTY_FUNCTION__))
;
3844 Value *LHS = CGF.EmitScalarExpr(E->getLHS());
3845 Value *RHS = CGF.EmitScalarExpr(E->getRHS());
3846 Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
3847 CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
3848 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
3849 BinOpInfo BOInfo = EmitBinOps(E);
3850 Value *LHS = BOInfo.LHS;
3851 Value *RHS = BOInfo.RHS;
3852
3853 // If AltiVec, the comparison results in a numeric type, so we use
3854 // intrinsics comparing vectors and giving 0 or 1 as a result
3855 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
3856 // constants for mapping CR6 register bits to predicate result
3857 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
3858
3859 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
3860
3861 // in several cases vector arguments order will be reversed
3862 Value *FirstVecArg = LHS,
3863 *SecondVecArg = RHS;
3864
3865 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
3866 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
3867
3868 switch(E->getOpcode()) {
3869 default: llvm_unreachable("is not a comparison operation")::llvm::llvm_unreachable_internal("is not a comparison operation"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3869)
;
3870 case BO_EQ:
3871 CR6 = CR6_LT;
3872 ID = GetIntrinsic(VCMPEQ, ElementKind);
3873 break;
3874 case BO_NE:
3875 CR6 = CR6_EQ;
3876 ID = GetIntrinsic(VCMPEQ, ElementKind);
3877 break;
3878 case BO_LT:
3879 CR6 = CR6_LT;
3880 ID = GetIntrinsic(VCMPGT, ElementKind);
3881 std::swap(FirstVecArg, SecondVecArg);
3882 break;
3883 case BO_GT:
3884 CR6 = CR6_LT;
3885 ID = GetIntrinsic(VCMPGT, ElementKind);
3886 break;
3887 case BO_LE:
3888 if (ElementKind == BuiltinType::Float) {
3889 CR6 = CR6_LT;
3890 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
3891 std::swap(FirstVecArg, SecondVecArg);
3892 }
3893 else {
3894 CR6 = CR6_EQ;
3895 ID = GetIntrinsic(VCMPGT, ElementKind);
3896 }
3897 break;
3898 case BO_GE:
3899 if (ElementKind == BuiltinType::Float) {
3900 CR6 = CR6_LT;
3901 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
3902 }
3903 else {
3904 CR6 = CR6_EQ;
3905 ID = GetIntrinsic(VCMPGT, ElementKind);
3906 std::swap(FirstVecArg, SecondVecArg);
3907 }
3908 break;
3909 }
3910
3911 Value *CR6Param = Builder.getInt32(CR6);
3912 llvm::Function *F = CGF.CGM.getIntrinsic(ID);
3913 Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
3914
3915 // The result type of intrinsic may not be same as E->getType().
3916 // If E->getType() is not BoolTy, EmitScalarConversion will do the
3917 // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
3918 // do nothing, if ResultTy is not i1 at the same time, it will cause
3919 // crash later.
3920 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
3921 if (ResultTy->getBitWidth() > 1 &&
3922 E->getType() == CGF.getContext().BoolTy)
3923 Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
3924 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
3925 E->getExprLoc());
3926 }
3927
3928 if (BOInfo.isFixedPointBinOp()) {
3929 Result = EmitFixedPointBinOp(BOInfo);
3930 } else if (LHS->getType()->isFPOrFPVectorTy()) {
3931 if (!IsSignaling)
3932 Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
3933 else
3934 Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
3935 } else if (LHSTy->hasSignedIntegerRepresentation()) {
3936 Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
3937 } else {
3938 // Unsigned integers and pointers.
3939
3940 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
3941 !isa<llvm::ConstantPointerNull>(LHS) &&
3942 !isa<llvm::ConstantPointerNull>(RHS)) {
3943
3944 // Dynamic information is required to be stripped for comparisons,
3945 // because it could leak the dynamic information. Based on comparisons
3946 // of pointers to dynamic objects, the optimizer can replace one pointer
3947 // with another, which might be incorrect in presence of invariant
3948 // groups. Comparison with null is safe because null does not carry any
3949 // dynamic information.
3950 if (LHSTy.mayBeDynamicClass())
3951 LHS = Builder.CreateStripInvariantGroup(LHS);
3952 if (RHSTy.mayBeDynamicClass())
3953 RHS = Builder.CreateStripInvariantGroup(RHS);
3954 }
3955
3956 Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
3957 }
3958
3959 // If this is a vector comparison, sign extend the result to the appropriate
3960 // vector integer type and return it (don't convert to bool).
3961 if (LHSTy->isVectorType())
3962 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
3963
3964 } else {
3965 // Complex Comparison: can only be an equality comparison.
3966 CodeGenFunction::ComplexPairTy LHS, RHS;
3967 QualType CETy;
3968 if (auto *CTy = LHSTy->getAs<ComplexType>()) {
3969 LHS = CGF.EmitComplexExpr(E->getLHS());
3970 CETy = CTy->getElementType();
3971 } else {
3972 LHS.first = Visit(E->getLHS());
3973 LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
3974 CETy = LHSTy;
3975 }
3976 if (auto *CTy = RHSTy->getAs<ComplexType>()) {
3977 RHS = CGF.EmitComplexExpr(E->getRHS());
3978 assert(CGF.getContext().hasSameUnqualifiedType(CETy,((CGF.getContext().hasSameUnqualifiedType(CETy, CTy->getElementType
()) && "The element types must always match.") ? static_cast
<void> (0) : __assert_fail ("CGF.getContext().hasSameUnqualifiedType(CETy, CTy->getElementType()) && \"The element types must always match.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3980, __PRETTY_FUNCTION__))
3979 CTy->getElementType()) &&((CGF.getContext().hasSameUnqualifiedType(CETy, CTy->getElementType
()) && "The element types must always match.") ? static_cast
<void> (0) : __assert_fail ("CGF.getContext().hasSameUnqualifiedType(CETy, CTy->getElementType()) && \"The element types must always match.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3980, __PRETTY_FUNCTION__))
3980 "The element types must always match.")((CGF.getContext().hasSameUnqualifiedType(CETy, CTy->getElementType
()) && "The element types must always match.") ? static_cast
<void> (0) : __assert_fail ("CGF.getContext().hasSameUnqualifiedType(CETy, CTy->getElementType()) && \"The element types must always match.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3980, __PRETTY_FUNCTION__))
;
3981 (void)CTy;
3982 } else {
3983 RHS.first = Visit(E->getRHS());
3984 RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
3985 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&((CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
"The element types must always match.") ? static_cast<void
> (0) : __assert_fail ("CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) && \"The element types must always match.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3986, __PRETTY_FUNCTION__))
3986 "The element types must always match.")((CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
"The element types must always match.") ? static_cast<void
> (0) : __assert_fail ("CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) && \"The element types must always match.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 3986, __PRETTY_FUNCTION__))
;
3987 }
3988
3989 Value *ResultR, *ResultI;
3990 if (CETy->isRealFloatingType()) {
3991 // As complex comparisons can only be equality comparisons, they
3992 // are never signaling comparisons.
3993 ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
3994 ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
3995 } else {
3996 // Complex comparisons can only be equality comparisons. As such, signed
3997 // and unsigned opcodes are the same.
3998 ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
3999 ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
4000 }
4001
4002 if (E->getOpcode() == BO_EQ) {
4003 Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
4004 } else {
4005 assert(E->getOpcode() == BO_NE &&((E->getOpcode() == BO_NE && "Complex comparison other than == or != ?"
) ? static_cast<void> (0) : __assert_fail ("E->getOpcode() == BO_NE && \"Complex comparison other than == or != ?\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4006, __PRETTY_FUNCTION__))
4006 "Complex comparison other than == or != ?")((E->getOpcode() == BO_NE && "Complex comparison other than == or != ?"
) ? static_cast<void> (0) : __assert_fail ("E->getOpcode() == BO_NE && \"Complex comparison other than == or != ?\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4006, __PRETTY_FUNCTION__))
;
4007 Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
4008 }
4009 }
4010
4011 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
4012 E->getExprLoc());
4013}
4014
4015Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
4016 bool Ignore = TestAndClearIgnoreResultAssign();
4017
4018 Value *RHS;
4019 LValue LHS;
4020
4021 switch (E->getLHS()->getType().getObjCLifetime()) {
4022 case Qualifiers::OCL_Strong:
4023 std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
4024 break;
4025
4026 case Qualifiers::OCL_Autoreleasing:
4027 std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
4028 break;
4029
4030 case Qualifiers::OCL_ExplicitNone:
4031 std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
4032 break;
4033
4034 case Qualifiers::OCL_Weak:
4035 RHS = Visit(E->getRHS());
4036 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
4037 RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore);
4038 break;
4039
4040 case Qualifiers::OCL_None:
4041 // __block variables need to have the rhs evaluated first, plus
4042 // this should improve codegen just a little.
4043 RHS = Visit(E->getRHS());
4044 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
4045
4046 // Store the value into the LHS. Bit-fields are handled specially
4047 // because the result is altered by the store, i.e., [C99 6.5.16p1]
4048 // 'An assignment expression has the value of the left operand after
4049 // the assignment...'.
4050 if (LHS.isBitField()) {
4051 CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
4052 } else {
4053 CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
4054 CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
4055 }
4056 }
4057
4058 // If the result is clearly ignored, return now.
4059 if (Ignore)
4060 return nullptr;
4061
4062 // The result of an assignment in C is the assigned r-value.
4063 if (!CGF.getLangOpts().CPlusPlus)
4064 return RHS;
4065
4066 // If the lvalue is non-volatile, return the computed value of the assignment.
4067 if (!LHS.isVolatileQualified())
4068 return RHS;
4069
4070 // Otherwise, reload the value.
4071 return EmitLoadOfLValue(LHS, E->getExprLoc());
4072}
4073
4074Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
4075 // Perform vector logical and on comparisons with zero vectors.
4076 if (E->getType()->isVectorType()) {
4077 CGF.incrementProfileCounter(E);
4078
4079 Value *LHS = Visit(E->getLHS());
4080 Value *RHS = Visit(E->getRHS());
4081 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4082 if (LHS->getType()->isFPOrFPVectorTy()) {
4083 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4084 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4085 } else {
4086 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4087 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4088 }
4089 Value *And = Builder.CreateAnd(LHS, RHS);
4090 return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
4091 }
4092
4093 llvm::Type *ResTy = ConvertType(E->getType());
4094
4095 // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
4096 // If we have 1 && X, just emit X without inserting the control flow.
4097 bool LHSCondVal;
4098 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4099 if (LHSCondVal) { // If we have 1 && X, just emit X.
4100 CGF.incrementProfileCounter(E);
4101
4102 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4103 // ZExt result to int or bool.
4104 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
4105 }
4106
4107 // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
4108 if (!CGF.ContainsLabel(E->getRHS()))
4109 return llvm::Constant::getNullValue(ResTy);
4110 }
4111
4112 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
4113 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
4114
4115 CodeGenFunction::ConditionalEvaluation eval(CGF);
4116
4117 // Branch on the LHS first. If it is false, go to the failure (cont) block.
4118 CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
4119 CGF.getProfileCount(E->getRHS()));
4120
4121 // Any edges into the ContBlock are now from an (indeterminate number of)
4122 // edges from this first condition. All of these values will be false. Start
4123 // setting up the PHI node in the Cont Block for this.
4124 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4125 "", ContBlock);
4126 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4127 PI != PE; ++PI)
4128 PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
4129
4130 eval.begin(CGF);
4131 CGF.EmitBlock(RHSBlock);
4132 CGF.incrementProfileCounter(E);
4133 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4134 eval.end(CGF);
4135
4136 // Reaquire the RHS block, as there may be subblocks inserted.
4137 RHSBlock = Builder.GetInsertBlock();
4138
4139 // Emit an unconditional branch from this block to ContBlock.
4140 {
4141 // There is no need to emit line number for unconditional branch.
4142 auto NL = ApplyDebugLocation::CreateEmpty(CGF);
4143 CGF.EmitBlock(ContBlock);
4144 }
4145 // Insert an entry into the phi node for the edge with the value of RHSCond.
4146 PN->addIncoming(RHSCond, RHSBlock);
4147
4148 // Artificial location to preserve the scope information
4149 {
4150 auto NL = ApplyDebugLocation::CreateArtificial(CGF);
4151 PN->setDebugLoc(Builder.getCurrentDebugLocation());
4152 }
4153
4154 // ZExt result to int.
4155 return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
4156}
4157
4158Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
4159 // Perform vector logical or on comparisons with zero vectors.
4160 if (E->getType()->isVectorType()) {
4161 CGF.incrementProfileCounter(E);
4162
4163 Value *LHS = Visit(E->getLHS());
4164 Value *RHS = Visit(E->getRHS());
4165 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4166 if (LHS->getType()->isFPOrFPVectorTy()) {
4167 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4168 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4169 } else {
4170 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4171 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4172 }
4173 Value *Or = Builder.CreateOr(LHS, RHS);
4174 return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
4175 }
4176
4177 llvm::Type *ResTy = ConvertType(E->getType());
4178
4179 // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
4180 // If we have 0 || X, just emit X without inserting the control flow.
4181 bool LHSCondVal;
4182 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4183 if (!LHSCondVal) { // If we have 0 || X, just emit X.
4184 CGF.incrementProfileCounter(E);
4185
4186 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4187 // ZExt result to int or bool.
4188 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
4189 }
4190
4191 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
4192 if (!CGF.ContainsLabel(E->getRHS()))
4193 return llvm::ConstantInt::get(ResTy, 1);
4194 }
4195
4196 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
4197 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
4198
4199 CodeGenFunction::ConditionalEvaluation eval(CGF);
4200
4201 // Branch on the LHS first. If it is true, go to the success (cont) block.
4202 CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
4203 CGF.getCurrentProfileCount() -
4204 CGF.getProfileCount(E->getRHS()));
4205
4206 // Any edges into the ContBlock are now from an (indeterminate number of)
4207 // edges from this first condition. All of these values will be true. Start
4208 // setting up the PHI node in the Cont Block for this.
4209 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4210 "", ContBlock);
4211 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4212 PI != PE; ++PI)
4213 PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
4214
4215 eval.begin(CGF);
4216
4217 // Emit the RHS condition as a bool value.
4218 CGF.EmitBlock(RHSBlock);
4219 CGF.incrementProfileCounter(E);
4220 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4221
4222 eval.end(CGF);
4223
4224 // Reaquire the RHS block, as there may be subblocks inserted.
4225 RHSBlock = Builder.GetInsertBlock();
4226
4227 // Emit an unconditional branch from this block to ContBlock. Insert an entry
4228 // into the phi node for the edge with the value of RHSCond.
4229 CGF.EmitBlock(ContBlock);
4230 PN->addIncoming(RHSCond, RHSBlock);
4231
4232 // ZExt result to int.
4233 return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
4234}
4235
4236Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
4237 CGF.EmitIgnoredExpr(E->getLHS());
4238 CGF.EnsureInsertPoint();
4239 return Visit(E->getRHS());
4240}
4241
4242//===----------------------------------------------------------------------===//
4243// Other Operators
4244//===----------------------------------------------------------------------===//
4245
4246/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
4247/// expression is cheap enough and side-effect-free enough to evaluate
4248/// unconditionally instead of conditionally. This is used to convert control
4249/// flow into selects in some cases.
4250static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
4251 CodeGenFunction &CGF) {
4252 // Anything that is an integer or floating point constant is fine.
4253 return E->IgnoreParens()->isEvaluatable(CGF.getContext());
4254
4255 // Even non-volatile automatic variables can't be evaluated unconditionally.
4256 // Referencing a thread_local may cause non-trivial initialization work to
4257 // occur. If we're inside a lambda and one of the variables is from the scope
4258 // outside the lambda, that function may have returned already. Reading its
4259 // locals is a bad idea. Also, these reads may introduce races there didn't
4260 // exist in the source-level program.
4261}
4262
4263
4264Value *ScalarExprEmitter::
4265VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
4266 TestAndClearIgnoreResultAssign();
4267
4268 // Bind the common expression if necessary.
4269 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
4270
4271 Expr *condExpr = E->getCond();
4272 Expr *lhsExpr = E->getTrueExpr();
4273 Expr *rhsExpr = E->getFalseExpr();
4274
4275 // If the condition constant folds and can be elided, try to avoid emitting
4276 // the condition and the dead arm.
4277 bool CondExprBool;
4278 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
4279 Expr *live = lhsExpr, *dead = rhsExpr;
4280 if (!CondExprBool) std::swap(live, dead);
4281
4282 // If the dead side doesn't have labels we need, just emit the Live part.
4283 if (!CGF.ContainsLabel(dead)) {
4284 if (CondExprBool)
4285 CGF.incrementProfileCounter(E);
4286 Value *Result = Visit(live);
4287
4288 // If the live part is a throw expression, it acts like it has a void
4289 // type, so evaluating it returns a null Value*. However, a conditional
4290 // with non-void type must return a non-null Value*.
4291 if (!Result && !E->getType()->isVoidType())
4292 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
4293
4294 return Result;
4295 }
4296 }
4297
4298 // OpenCL: If the condition is a vector, we can treat this condition like
4299 // the select function.
4300 if (CGF.getLangOpts().OpenCL
4301 && condExpr->getType()->isVectorType()) {
4302 CGF.incrementProfileCounter(E);
4303
4304 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4305 llvm::Value *LHS = Visit(lhsExpr);
4306 llvm::Value *RHS = Visit(rhsExpr);
4307
4308 llvm::Type *condType = ConvertType(condExpr->getType());
4309 llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
4310
4311 unsigned numElem = vecTy->getNumElements();
4312 llvm::Type *elemType = vecTy->getElementType();
4313
4314 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
4315 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
4316 llvm::Value *tmp = Builder.CreateSExt(TestMSB,
4317 llvm::VectorType::get(elemType,
4318 numElem),
4319 "sext");
4320 llvm::Value *tmp2 = Builder.CreateNot(tmp);
4321
4322 // Cast float to int to perform ANDs if necessary.
4323 llvm::Value *RHSTmp = RHS;
4324 llvm::Value *LHSTmp = LHS;
4325 bool wasCast = false;
4326 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
4327 if (rhsVTy->getElementType()->isFloatingPointTy()) {
4328 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
4329 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
4330 wasCast = true;
4331 }
4332
4333 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
4334 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
4335 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
4336 if (wasCast)
4337 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
4338
4339 return tmp5;
4340 }
4341
4342 if (condExpr->getType()->isVectorType()) {
4343 CGF.incrementProfileCounter(E);
4344
4345 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4346 llvm::Value *LHS = Visit(lhsExpr);
4347 llvm::Value *RHS = Visit(rhsExpr);
4348
4349 llvm::Type *CondType = ConvertType(condExpr->getType());
4350 auto *VecTy = cast<llvm::VectorType>(CondType);
4351 llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
4352
4353 CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
4354 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
4355 }
4356
4357 // If this is a really simple expression (like x ? 4 : 5), emit this as a
4358 // select instead of as control flow. We can only do this if it is cheap and
4359 // safe to evaluate the LHS and RHS unconditionally.
4360 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
4361 isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
4362 llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
4363 llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
4364
4365 CGF.incrementProfileCounter(E, StepV);
4366
4367 llvm::Value *LHS = Visit(lhsExpr);
4368 llvm::Value *RHS = Visit(rhsExpr);
4369 if (!LHS) {
4370 // If the conditional has void type, make sure we return a null Value*.
4371 assert(!RHS && "LHS and RHS types must match")((!RHS && "LHS and RHS types must match") ? static_cast
<void> (0) : __assert_fail ("!RHS && \"LHS and RHS types must match\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4371, __PRETTY_FUNCTION__))
;
4372 return nullptr;
4373 }
4374 return Builder.CreateSelect(CondV, LHS, RHS, "cond");
4375 }
4376
4377 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
4378 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
4379 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
4380
4381 CodeGenFunction::ConditionalEvaluation eval(CGF);
4382 CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
4383 CGF.getProfileCount(lhsExpr));
4384
4385 CGF.EmitBlock(LHSBlock);
4386 CGF.incrementProfileCounter(E);
4387 eval.begin(CGF);
4388 Value *LHS = Visit(lhsExpr);
4389 eval.end(CGF);
4390
4391 LHSBlock = Builder.GetInsertBlock();
4392 Builder.CreateBr(ContBlock);
4393
4394 CGF.EmitBlock(RHSBlock);
4395 eval.begin(CGF);
4396 Value *RHS = Visit(rhsExpr);
4397 eval.end(CGF);
4398
4399 RHSBlock = Builder.GetInsertBlock();
4400 CGF.EmitBlock(ContBlock);
4401
4402 // If the LHS or RHS is a throw expression, it will be legitimately null.
4403 if (!LHS)
4404 return RHS;
4405 if (!RHS)
4406 return LHS;
4407
4408 // Create a PHI node for the real part.
4409 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
4410 PN->addIncoming(LHS, LHSBlock);
4411 PN->addIncoming(RHS, RHSBlock);
4412 return PN;
4413}
4414
4415Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
4416 return Visit(E->getChosenSubExpr());
4417}
4418
4419Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
4420 QualType Ty = VE->getType();
4421
4422 if (Ty->isVariablyModifiedType())
4423 CGF.EmitVariablyModifiedType(Ty);
4424
4425 Address ArgValue = Address::invalid();
4426 Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
4427
4428 llvm::Type *ArgTy = ConvertType(VE->getType());
4429
4430 // If EmitVAArg fails, emit an error.
4431 if (!ArgPtr.isValid()) {
4432 CGF.ErrorUnsupported(VE, "va_arg expression");
4433 return llvm::UndefValue::get(ArgTy);
4434 }
4435
4436 // FIXME Volatility.
4437 llvm::Value *Val = Builder.CreateLoad(ArgPtr);
4438
4439 // If EmitVAArg promoted the type, we must truncate it.
4440 if (ArgTy != Val->getType()) {
4441 if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
4442 Val = Builder.CreateIntToPtr(Val, ArgTy);
4443 else
4444 Val = Builder.CreateTrunc(Val, ArgTy);
4445 }
4446
4447 return Val;
4448}
4449
4450Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
4451 return CGF.EmitBlockLiteral(block);
4452}
4453
4454// Convert a vec3 to vec4, or vice versa.
4455static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
4456 Value *Src, unsigned NumElementsDst) {
4457 llvm::Value *UnV = llvm::UndefValue::get(Src->getType());
4458 SmallVector<llvm::Constant*, 4> Args;
4459 Args.push_back(Builder.getInt32(0));
4460 Args.push_back(Builder.getInt32(1));
4461 Args.push_back(Builder.getInt32(2));
4462 if (NumElementsDst == 4)
4463 Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
4464 llvm::Constant *Mask = llvm::ConstantVector::get(Args);
4465 return Builder.CreateShuffleVector(Src, UnV, Mask);
4466}
4467
4468// Create cast instructions for converting LLVM value \p Src to LLVM type \p
4469// DstTy. \p Src has the same size as \p DstTy. Both are single value types
4470// but could be scalar or vectors of different lengths, and either can be
4471// pointer.
4472// There are 4 cases:
4473// 1. non-pointer -> non-pointer : needs 1 bitcast
4474// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
4475// 3. pointer -> non-pointer
4476// a) pointer -> intptr_t : needs 1 ptrtoint
4477// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
4478// 4. non-pointer -> pointer
4479// a) intptr_t -> pointer : needs 1 inttoptr
4480// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
4481// Note: for cases 3b and 4b two casts are required since LLVM casts do not
4482// allow casting directly between pointer types and non-integer non-pointer
4483// types.
4484static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
4485 const llvm::DataLayout &DL,
4486 Value *Src, llvm::Type *DstTy,
4487 StringRef Name = "") {
4488 auto SrcTy = Src->getType();
4489
4490 // Case 1.
4491 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
4492 return Builder.CreateBitCast(Src, DstTy, Name);
4493
4494 // Case 2.
4495 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
4496 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
4497
4498 // Case 3.
4499 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
4500 // Case 3b.
4501 if (!DstTy->isIntegerTy())
4502 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
4503 // Cases 3a and 3b.
4504 return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
4505 }
4506
4507 // Case 4b.
4508 if (!SrcTy->isIntegerTy())
4509 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
4510 // Cases 4a and 4b.
4511 return Builder.CreateIntToPtr(Src, DstTy, Name);
4512}
4513
4514Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
4515 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
4516 llvm::Type *DstTy = ConvertType(E->getType());
4517
4518 llvm::Type *SrcTy = Src->getType();
4519 unsigned NumElementsSrc = isa<llvm::VectorType>(SrcTy) ?
4520 cast<llvm::VectorType>(SrcTy)->getNumElements() : 0;
4521 unsigned NumElementsDst = isa<llvm::VectorType>(DstTy) ?
4522 cast<llvm::VectorType>(DstTy)->getNumElements() : 0;
4523
4524 // Going from vec3 to non-vec3 is a special case and requires a shuffle
4525 // vector to get a vec4, then a bitcast if the target type is different.
4526 if (NumElementsSrc == 3 && NumElementsDst != 3) {
4527 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
4528
4529 if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4530 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
4531 DstTy);
4532 }
4533
4534 Src->setName("astype");
4535 return Src;
4536 }
4537
4538 // Going from non-vec3 to vec3 is a special case and requires a bitcast
4539 // to vec4 if the original type is not vec4, then a shuffle vector to
4540 // get a vec3.
4541 if (NumElementsSrc != 3 && NumElementsDst == 3) {
4542 if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4543 auto Vec4Ty = llvm::VectorType::get(DstTy->getVectorElementType(), 4);
4544 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
4545 Vec4Ty);
4546 }
4547
4548 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
4549 Src->setName("astype");
4550 return Src;
4551 }
4552
4553 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
4554 Src, DstTy, "astype");
4555}
4556
4557Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
4558 return CGF.EmitAtomicExpr(E).getScalarVal();
4559}
4560
4561//===----------------------------------------------------------------------===//
4562// Entry Point into this File
4563//===----------------------------------------------------------------------===//
4564
4565/// Emit the computation of the specified expression of scalar type, ignoring
4566/// the result.
4567Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
4568 assert(E && hasScalarEvaluationKind(E->getType()) &&((E && hasScalarEvaluationKind(E->getType()) &&
"Invalid scalar expression to emit") ? static_cast<void>
(0) : __assert_fail ("E && hasScalarEvaluationKind(E->getType()) && \"Invalid scalar expression to emit\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4569, __PRETTY_FUNCTION__))
4569 "Invalid scalar expression to emit")((E && hasScalarEvaluationKind(E->getType()) &&
"Invalid scalar expression to emit") ? static_cast<void>
(0) : __assert_fail ("E && hasScalarEvaluationKind(E->getType()) && \"Invalid scalar expression to emit\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4569, __PRETTY_FUNCTION__))
;
4570
4571 return ScalarExprEmitter(*this, IgnoreResultAssign)
4572 .Visit(const_cast<Expr *>(E));
4573}
4574
4575/// Emit a conversion from the specified type to the specified destination type,
4576/// both of which are LLVM scalar types.
4577Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
4578 QualType DstTy,
4579 SourceLocation Loc) {
4580 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&((hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind
(DstTy) && "Invalid scalar expression to emit") ? static_cast
<void> (0) : __assert_fail ("hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) && \"Invalid scalar expression to emit\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4581, __PRETTY_FUNCTION__))
4581 "Invalid scalar expression to emit")((hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind
(DstTy) && "Invalid scalar expression to emit") ? static_cast
<void> (0) : __assert_fail ("hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) && \"Invalid scalar expression to emit\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4581, __PRETTY_FUNCTION__))
;
4582 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
4583}
4584
4585/// Emit a conversion from the specified complex type to the specified
4586/// destination type, where the destination type is an LLVM scalar type.
4587Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
4588 QualType SrcTy,
4589 QualType DstTy,
4590 SourceLocation Loc) {
4591 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&((SrcTy->isAnyComplexType() && hasScalarEvaluationKind
(DstTy) && "Invalid complex -> scalar conversion")
? static_cast<void> (0) : __assert_fail ("SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) && \"Invalid complex -> scalar conversion\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4592, __PRETTY_FUNCTION__))
4592 "Invalid complex -> scalar conversion")((SrcTy->isAnyComplexType() && hasScalarEvaluationKind
(DstTy) && "Invalid complex -> scalar conversion")
? static_cast<void> (0) : __assert_fail ("SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) && \"Invalid complex -> scalar conversion\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4592, __PRETTY_FUNCTION__))
;
4593 return ScalarExprEmitter(*this)
4594 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
4595}
4596
4597
4598llvm::Value *CodeGenFunction::
4599EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
4600 bool isInc, bool isPre) {
4601 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
4602}
4603
4604LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
4605 // object->isa or (*object).isa
4606 // Generate code as for: *(Class*)object
4607
4608 Expr *BaseExpr = E->getBase();
4609 Address Addr = Address::invalid();
4610 if (BaseExpr->isRValue()) {
4611 Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
4612 } else {
4613 Addr = EmitLValue(BaseExpr).getAddress(*this);
4614 }
4615
4616 // Cast the address to Class*.
4617 Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
4618 return MakeAddrLValue(Addr, E->getType());
4619}
4620
4621
4622LValue CodeGenFunction::EmitCompoundAssignmentLValue(
4623 const CompoundAssignOperator *E) {
4624 ScalarExprEmitter Scalar(*this);
4625 Value *Result = nullptr;
4626 switch (E->getOpcode()) {
4627#define COMPOUND_OP(Op) \
4628 case BO_##Op##Assign: \
4629 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
4630 Result)
4631 COMPOUND_OP(Mul);
4632 COMPOUND_OP(Div);
4633 COMPOUND_OP(Rem);
4634 COMPOUND_OP(Add);
4635 COMPOUND_OP(Sub);
4636 COMPOUND_OP(Shl);
4637 COMPOUND_OP(Shr);
4638 COMPOUND_OP(And);
4639 COMPOUND_OP(Xor);
4640 COMPOUND_OP(Or);
4641#undef COMPOUND_OP
4642
4643 case BO_PtrMemD:
4644 case BO_PtrMemI:
4645 case BO_Mul:
4646 case BO_Div:
4647 case BO_Rem:
4648 case BO_Add:
4649 case BO_Sub:
4650 case BO_Shl:
4651 case BO_Shr:
4652 case BO_LT:
4653 case BO_GT:
4654 case BO_LE:
4655 case BO_GE:
4656 case BO_EQ:
4657 case BO_NE:
4658 case BO_Cmp:
4659 case BO_And:
4660 case BO_Xor:
4661 case BO_Or:
4662 case BO_LAnd:
4663 case BO_LOr:
4664 case BO_Assign:
4665 case BO_Comma:
4666 llvm_unreachable("Not valid compound assignment operators")::llvm::llvm_unreachable_internal("Not valid compound assignment operators"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4666)
;
4667 }
4668
4669 llvm_unreachable("Unhandled compound assignment operator")::llvm::llvm_unreachable_internal("Unhandled compound assignment operator"
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4669)
;
4670}
4671
4672struct GEPOffsetAndOverflow {
4673 // The total (signed) byte offset for the GEP.
4674 llvm::Value *TotalOffset;
4675 // The offset overflow flag - true if the total offset overflows.
4676 llvm::Value *OffsetOverflows;
4677};
4678
4679/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
4680/// and compute the total offset it applies from it's base pointer BasePtr.
4681/// Returns offset in bytes and a boolean flag whether an overflow happened
4682/// during evaluation.
4683static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
4684 llvm::LLVMContext &VMContext,
4685 CodeGenModule &CGM,
4686 CGBuilderTy &Builder) {
4687 const auto &DL = CGM.getDataLayout();
4688
4689 // The total (signed) byte offset for the GEP.
4690 llvm::Value *TotalOffset = nullptr;
4691
4692 // Was the GEP already reduced to a constant?
4693 if (isa<llvm::Constant>(GEPVal)) {
4694 // Compute the offset by casting both pointers to integers and subtracting:
4695 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
4696 Value *BasePtr_int =
4697 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
4698 Value *GEPVal_int =
4699 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
4700 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
4701 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
4702 }
4703
4704 auto *GEP = cast<llvm::GEPOperator>(GEPVal);
4705 assert(GEP->getPointerOperand() == BasePtr &&((GEP->getPointerOperand() == BasePtr && "BasePtr must be the the base of the GEP."
) ? static_cast<void> (0) : __assert_fail ("GEP->getPointerOperand() == BasePtr && \"BasePtr must be the the base of the GEP.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4706, __PRETTY_FUNCTION__))
4706 "BasePtr must be the the base of the GEP.")((GEP->getPointerOperand() == BasePtr && "BasePtr must be the the base of the GEP."
) ? static_cast<void> (0) : __assert_fail ("GEP->getPointerOperand() == BasePtr && \"BasePtr must be the the base of the GEP.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4706, __PRETTY_FUNCTION__))
;
4707 assert(GEP->isInBounds() && "Expected inbounds GEP")((GEP->isInBounds() && "Expected inbounds GEP") ? static_cast
<void> (0) : __assert_fail ("GEP->isInBounds() && \"Expected inbounds GEP\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4707, __PRETTY_FUNCTION__))
;
4708
4709 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
4710
4711 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
4712 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
4713 auto *SAddIntrinsic =
4714 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
4715 auto *SMulIntrinsic =
4716 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
4717
4718 // The offset overflow flag - true if the total offset overflows.
4719 llvm::Value *OffsetOverflows = Builder.getFalse();
4720
4721 /// Return the result of the given binary operation.
4722 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
4723 llvm::Value *RHS) -> llvm::Value * {
4724 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop")(((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop"
) ? static_cast<void> (0) : __assert_fail ("(Opcode == BO_Add || Opcode == BO_Mul) && \"Can't eval binop\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4724, __PRETTY_FUNCTION__))
;
4725
4726 // If the operands are constants, return a constant result.
4727 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
4728 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
4729 llvm::APInt N;
4730 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
4731 /*Signed=*/true, N);
4732 if (HasOverflow)
4733 OffsetOverflows = Builder.getTrue();
4734 return llvm::ConstantInt::get(VMContext, N);
4735 }
4736 }
4737
4738 // Otherwise, compute the result with checked arithmetic.
4739 auto *ResultAndOverflow = Builder.CreateCall(
4740 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
4741 OffsetOverflows = Builder.CreateOr(
4742 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
4743 return Builder.CreateExtractValue(ResultAndOverflow, 0);
4744 };
4745
4746 // Determine the total byte offset by looking at each GEP operand.
4747 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
4748 GTI != GTE; ++GTI) {
4749 llvm::Value *LocalOffset;
4750 auto *Index = GTI.getOperand();
4751 // Compute the local offset contributed by this indexing step:
4752 if (auto *STy = GTI.getStructTypeOrNull()) {
4753 // For struct indexing, the local offset is the byte position of the
4754 // specified field.
4755 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
4756 LocalOffset = llvm::ConstantInt::get(
4757 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
4758 } else {
4759 // Otherwise this is array-like indexing. The local offset is the index
4760 // multiplied by the element size.
4761 auto *ElementSize = llvm::ConstantInt::get(
4762 IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
4763 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
4764 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
4765 }
4766
4767 // If this is the first offset, set it as the total offset. Otherwise, add
4768 // the local offset into the running total.
4769 if (!TotalOffset || TotalOffset == Zero)
4770 TotalOffset = LocalOffset;
4771 else
4772 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
4773 }
4774
4775 return {TotalOffset, OffsetOverflows};
4776}
4777
4778Value *
4779CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
4780 bool SignedIndices, bool IsSubtraction,
4781 SourceLocation Loc, const Twine &Name) {
4782 Value *GEPVal = Builder.CreateInBoundsGEP(Ptr, IdxList, Name);
4783
4784 // If the pointer overflow sanitizer isn't enabled, do nothing.
4785 if (!SanOpts.has(SanitizerKind::PointerOverflow))
4786 return GEPVal;
4787
4788 llvm::Type *PtrTy = Ptr->getType();
4789
4790 // Perform nullptr-and-offset check unless the nullptr is defined.
4791 bool PerformNullCheck = !NullPointerIsDefined(
4792 Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
4793 // Check for overflows unless the GEP got constant-folded,
4794 // and only in the default address space
4795 bool PerformOverflowCheck =
4796 !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
4797
4798 if (!(PerformNullCheck || PerformOverflowCheck))
4799 return GEPVal;
4800
4801 const auto &DL = CGM.getDataLayout();
4802
4803 SanitizerScope SanScope(this);
4804 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
4805
4806 GEPOffsetAndOverflow EvaluatedGEP =
4807 EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
4808
4809 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||(((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP
.OffsetOverflows == Builder.getFalse()) && "If the offset got constant-folded, we don't expect that there was an "
"overflow.") ? static_cast<void> (0) : __assert_fail (
"(!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP.OffsetOverflows == Builder.getFalse()) && \"If the offset got constant-folded, we don't expect that there was an \" \"overflow.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4812, __PRETTY_FUNCTION__))
4810 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&(((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP
.OffsetOverflows == Builder.getFalse()) && "If the offset got constant-folded, we don't expect that there was an "
"overflow.") ? static_cast<void> (0) : __assert_fail (
"(!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP.OffsetOverflows == Builder.getFalse()) && \"If the offset got constant-folded, we don't expect that there was an \" \"overflow.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4812, __PRETTY_FUNCTION__))
4811 "If the offset got constant-folded, we don't expect that there was an "(((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP
.OffsetOverflows == Builder.getFalse()) && "If the offset got constant-folded, we don't expect that there was an "
"overflow.") ? static_cast<void> (0) : __assert_fail (
"(!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP.OffsetOverflows == Builder.getFalse()) && \"If the offset got constant-folded, we don't expect that there was an \" \"overflow.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4812, __PRETTY_FUNCTION__))
4812 "overflow.")(((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP
.OffsetOverflows == Builder.getFalse()) && "If the offset got constant-folded, we don't expect that there was an "
"overflow.") ? static_cast<void> (0) : __assert_fail (
"(!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP.OffsetOverflows == Builder.getFalse()) && \"If the offset got constant-folded, we don't expect that there was an \" \"overflow.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4812, __PRETTY_FUNCTION__))
;
4813
4814 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
4815
4816 // Common case: if the total offset is zero, and we are using C++ semantics,
4817 // where nullptr+0 is defined, don't emit a check.
4818 if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
4819 return GEPVal;
4820
4821 // Now that we've computed the total offset, add it to the base pointer (with
4822 // wrapping semantics).
4823 auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
4824 auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);
4825
4826 llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
4827
4828 if (PerformNullCheck) {
4829 // In C++, if the base pointer evaluates to a null pointer value,
4830 // the only valid pointer this inbounds GEP can produce is also
4831 // a null pointer, so the offset must also evaluate to zero.
4832 // Likewise, if we have non-zero base pointer, we can not get null pointer
4833 // as a result, so the offset can not be -intptr_t(BasePtr).
4834 // In other words, both pointers are either null, or both are non-null,
4835 // or the behaviour is undefined.
4836 //
4837 // C, however, is more strict in this regard, and gives more
4838 // optimization opportunities: in C, additionally, nullptr+0 is undefined.
4839 // So both the input to the 'gep inbounds' AND the output must not be null.
4840 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
4841 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
4842 auto *Valid =
4843 CGM.getLangOpts().CPlusPlus
4844 ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
4845 : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
4846 Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
4847 }
4848
4849 if (PerformOverflowCheck) {
4850 // The GEP is valid if:
4851 // 1) The total offset doesn't overflow, and
4852 // 2) The sign of the difference between the computed address and the base
4853 // pointer matches the sign of the total offset.
4854 llvm::Value *ValidGEP;
4855 auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
4856 if (SignedIndices) {
4857 // GEP is computed as `unsigned base + signed offset`, therefore:
4858 // * If offset was positive, then the computed pointer can not be
4859 // [unsigned] less than the base pointer, unless it overflowed.
4860 // * If offset was negative, then the computed pointer can not be
4861 // [unsigned] greater than the bas pointere, unless it overflowed.
4862 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
4863 auto *PosOrZeroOffset =
4864 Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
4865 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
4866 ValidGEP =
4867 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
4868 } else if (!IsSubtraction) {
4869 // GEP is computed as `unsigned base + unsigned offset`, therefore the
4870 // computed pointer can not be [unsigned] less than base pointer,
4871 // unless there was an overflow.
4872 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
4873 ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
4874 } else {
4875 // GEP is computed as `unsigned base - unsigned offset`, therefore the
4876 // computed pointer can not be [unsigned] greater than base pointer,
4877 // unless there was an overflow.
4878 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
4879 ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
4880 }
4881 ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
4882 Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
4883 }
4884
4885 assert(!Checks.empty() && "Should have produced some checks.")((!Checks.empty() && "Should have produced some checks."
) ? static_cast<void> (0) : __assert_fail ("!Checks.empty() && \"Should have produced some checks.\""
, "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/clang/lib/CodeGen/CGExprScalar.cpp"
, 4885, __PRETTY_FUNCTION__))
;
4886
4887 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
4888 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
4889 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
4890 EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
4891
4892 return GEPVal;
4893}