Bug Summary

File: clang/lib/CodeGen/CGExprScalar.cpp
Warning: line 2098, column 43
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name CGExprScalar.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -relaxed-aliasing -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/clang/include -I tools/clang/include -I include -I /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem 
/usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-01-19-134126-35450-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/clang/lib/CodeGen/CGExprScalar.cpp

/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/clang/lib/CodeGen/CGExprScalar.cpp

1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGObjCRuntime.h"
17#include "CGOpenMPRuntime.h"
18#include "CodeGenFunction.h"
19#include "CodeGenModule.h"
20#include "ConstantEmitter.h"
21#include "TargetInfo.h"
22#include "clang/AST/ASTContext.h"
23#include "clang/AST/Attr.h"
24#include "clang/AST/DeclObjC.h"
25#include "clang/AST/Expr.h"
26#include "clang/AST/RecordLayout.h"
27#include "clang/AST/StmtVisitor.h"
28#include "clang/Basic/CodeGenOptions.h"
29#include "clang/Basic/TargetInfo.h"
30#include "llvm/ADT/APFixedPoint.h"
31#include "llvm/ADT/Optional.h"
32#include "llvm/IR/CFG.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/FixedPointBuilder.h"
36#include "llvm/IR/Function.h"
37#include "llvm/IR/GetElementPtrTypeIterator.h"
38#include "llvm/IR/GlobalVariable.h"
39#include "llvm/IR/Intrinsics.h"
40#include "llvm/IR/IntrinsicsPowerPC.h"
41#include "llvm/IR/MatrixBuilder.h"
42#include "llvm/IR/Module.h"
43#include <cstdarg>
44
45using namespace clang;
46using namespace CodeGen;
47using llvm::Value;
48
49//===----------------------------------------------------------------------===//
50// Scalar Expression Emitter
51//===----------------------------------------------------------------------===//
52
53namespace {
54
55/// Determine whether the given binary operation may overflow.
56/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
57/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
58/// the returned overflow check is precise. The returned value is 'true' for
59/// all other opcodes, to be conservative.
60bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
61 BinaryOperator::Opcode Opcode, bool Signed,
62 llvm::APInt &Result) {
63 // Assume overflow is possible, unless we can prove otherwise.
64 bool Overflow = true;
65 const auto &LHSAP = LHS->getValue();
66 const auto &RHSAP = RHS->getValue();
67 if (Opcode == BO_Add) {
68 if (Signed)
69 Result = LHSAP.sadd_ov(RHSAP, Overflow);
70 else
71 Result = LHSAP.uadd_ov(RHSAP, Overflow);
72 } else if (Opcode == BO_Sub) {
73 if (Signed)
74 Result = LHSAP.ssub_ov(RHSAP, Overflow);
75 else
76 Result = LHSAP.usub_ov(RHSAP, Overflow);
77 } else if (Opcode == BO_Mul) {
78 if (Signed)
79 Result = LHSAP.smul_ov(RHSAP, Overflow);
80 else
81 Result = LHSAP.umul_ov(RHSAP, Overflow);
82 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
83 if (Signed && !RHS->isZero())
84 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
85 else
86 return false;
87 }
88 return Overflow;
89}
90
91struct BinOpInfo {
92 Value *LHS;
93 Value *RHS;
94 QualType Ty; // Computation Type.
95 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
96 FPOptions FPFeatures;
97 const Expr *E; // Entire expr, for error unsupported. May not be binop.
98
99 /// Check if the binop can result in integer overflow.
100 bool mayHaveIntegerOverflow() const {
101 // Without constant input, we can't rule out overflow.
102 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
103 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
104 if (!LHSCI || !RHSCI)
105 return true;
106
107 llvm::APInt Result;
108 return ::mayHaveIntegerOverflow(
109 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
110 }
111
112 /// Check if the binop computes a division or a remainder.
113 bool isDivremOp() const {
114 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
115 Opcode == BO_RemAssign;
116 }
117
118 /// Check if the binop can result in an integer division by zero.
119 bool mayHaveIntegerDivisionByZero() const {
120 if (isDivremOp())
121 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
122 return CI->isZero();
123 return true;
124 }
125
126 /// Check if the binop can result in a float division by zero.
127 bool mayHaveFloatDivisionByZero() const {
128 if (isDivremOp())
129 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
130 return CFP->isZero();
131 return true;
132 }
133
134 /// Check if at least one operand is a fixed point type. In such cases, this
135 /// operation did not follow usual arithmetic conversion and both operands
136 /// might not be of the same type.
137 bool isFixedPointOp() const {
138 // We cannot simply check the result type since comparison operations return
139 // an int.
140 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
141 QualType LHSType = BinOp->getLHS()->getType();
142 QualType RHSType = BinOp->getRHS()->getType();
143 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
144 }
145 if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
146 return UnOp->getSubExpr()->getType()->isFixedPointType();
147 return false;
148 }
149};
150
151static bool MustVisitNullValue(const Expr *E) {
152 // If a null pointer expression's type is the C++0x nullptr_t, then
153 // it's not necessarily a simple constant and it must be evaluated
154 // for its potential side effects.
155 return E->getType()->isNullPtrType();
156}
157
158/// If \p E is a widened promoted integer, get its base (unpromoted) type.
159static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
160 const Expr *E) {
161 const Expr *Base = E->IgnoreImpCasts();
162 if (E == Base)
163 return llvm::None;
164
165 QualType BaseTy = Base->getType();
166 if (!BaseTy->isPromotableIntegerType() ||
167 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
168 return llvm::None;
169
170 return BaseTy;
171}
172
173/// Check if \p E is a widened promoted integer.
174static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
175 return getUnwidenedIntegerType(Ctx, E).hasValue();
176}
177
178/// Check if we can skip the overflow check for \p Op.
179static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
180 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&(static_cast <bool> ((isa<UnaryOperator>(Op.E) ||
isa<BinaryOperator>(Op.E)) && "Expected a unary or binary operator"
) ? void (0) : __assert_fail ("(isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) && \"Expected a unary or binary operator\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 181, __extension__ __PRETTY_FUNCTION__
))
181 "Expected a unary or binary operator")(static_cast <bool> ((isa<UnaryOperator>(Op.E) ||
isa<BinaryOperator>(Op.E)) && "Expected a unary or binary operator"
) ? void (0) : __assert_fail ("(isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) && \"Expected a unary or binary operator\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 181, __extension__ __PRETTY_FUNCTION__
))
;
182
183 // If the binop has constant inputs and we can prove there is no overflow,
184 // we can elide the overflow check.
185 if (!Op.mayHaveIntegerOverflow())
186 return true;
187
188 // If a unary op has a widened operand, the op cannot overflow.
189 if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
190 return !UO->canOverflow();
191
192 // We usually don't need overflow checks for binops with widened operands.
193 // Multiplication with promoted unsigned operands is a special case.
194 const auto *BO = cast<BinaryOperator>(Op.E);
195 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
196 if (!OptionalLHSTy)
197 return false;
198
199 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
200 if (!OptionalRHSTy)
201 return false;
202
203 QualType LHSTy = *OptionalLHSTy;
204 QualType RHSTy = *OptionalRHSTy;
205
206 // This is the simple case: binops without unsigned multiplication, and with
207 // widened operands. No overflow check is needed here.
208 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
209 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
210 return true;
211
212 // For unsigned multiplication the overflow check can be elided if either one
213 // of the unpromoted types are less than half the size of the promoted type.
214 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
215 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
216 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
217}
218
219class ScalarExprEmitter
220 : public StmtVisitor<ScalarExprEmitter, Value*> {
221 CodeGenFunction &CGF;
222 CGBuilderTy &Builder;
223 bool IgnoreResultAssign;
224 llvm::LLVMContext &VMContext;
225public:
226
227 ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
228 : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
229 VMContext(cgf.getLLVMContext()) {
230 }
231
232 //===--------------------------------------------------------------------===//
233 // Utilities
234 //===--------------------------------------------------------------------===//
235
236 bool TestAndClearIgnoreResultAssign() {
237 bool I = IgnoreResultAssign;
238 IgnoreResultAssign = false;
239 return I;
240 }
241
242 llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
243 LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
244 LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
245 return CGF.EmitCheckedLValue(E, TCK);
246 }
247
248 void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
249 const BinOpInfo &Info);
250
251 Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
252 return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
253 }
254
255 void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
256 const AlignValueAttr *AVAttr = nullptr;
257 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
258 const ValueDecl *VD = DRE->getDecl();
259
260 if (VD->getType()->isReferenceType()) {
261 if (const auto *TTy =
262 dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
263 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
264 } else {
265 // Assumptions for function parameters are emitted at the start of the
266 // function, so there is no need to repeat that here,
267 // unless the alignment-assumption sanitizer is enabled,
268 // then we prefer the assumption over alignment attribute
269 // on IR function param.
270 if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
271 return;
272
273 AVAttr = VD->getAttr<AlignValueAttr>();
274 }
275 }
276
277 if (!AVAttr)
278 if (const auto *TTy =
279 dyn_cast<TypedefType>(E->getType()))
280 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
281
282 if (!AVAttr)
283 return;
284
285 Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
286 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
287 CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
288 }
289
290 /// EmitLoadOfLValue - Given an expression with complex type that represents a
291 /// value l-value, this method emits the address of the l-value, then loads
292 /// and returns the result.
293 Value *EmitLoadOfLValue(const Expr *E) {
294 Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
295 E->getExprLoc());
296
297 EmitLValueAlignmentAssumption(E, V);
298 return V;
299 }
300
301 /// EmitConversionToBool - Convert the specified expression value to a
302 /// boolean (i1) truth value. This is equivalent to "Val != 0".
303 Value *EmitConversionToBool(Value *Src, QualType DstTy);
304
305 /// Emit a check that a conversion from a floating-point type does not
306 /// overflow.
307 void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
308 Value *Src, QualType SrcType, QualType DstType,
309 llvm::Type *DstTy, SourceLocation Loc);
310
311 /// Known implicit conversion check kinds.
312 /// Keep in sync with the enum of the same name in ubsan_handlers.h
313 enum ImplicitConversionCheckKind : unsigned char {
314 ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
315 ICCK_UnsignedIntegerTruncation = 1,
316 ICCK_SignedIntegerTruncation = 2,
317 ICCK_IntegerSignChange = 3,
318 ICCK_SignedIntegerTruncationOrSignChange = 4,
319 };
320
321 /// Emit a check that an [implicit] truncation of an integer does not
322 /// discard any bits. It is not UB, so we use the value after truncation.
323 void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
324 QualType DstType, SourceLocation Loc);
325
326 /// Emit a check that an [implicit] conversion of an integer does not change
327 /// the sign of the value. It is not UB, so we use the value after conversion.
328 /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
329 void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
330 QualType DstType, SourceLocation Loc);
331
332 /// Emit a conversion from the specified type to the specified destination
333 /// type, both of which are LLVM scalar types.
334 struct ScalarConversionOpts {
335 bool TreatBooleanAsSigned;
336 bool EmitImplicitIntegerTruncationChecks;
337 bool EmitImplicitIntegerSignChangeChecks;
338
339 ScalarConversionOpts()
340 : TreatBooleanAsSigned(false),
341 EmitImplicitIntegerTruncationChecks(false),
342 EmitImplicitIntegerSignChangeChecks(false) {}
343
344 ScalarConversionOpts(clang::SanitizerSet SanOpts)
345 : TreatBooleanAsSigned(false),
346 EmitImplicitIntegerTruncationChecks(
347 SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
348 EmitImplicitIntegerSignChangeChecks(
349 SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
350 };
351 Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
352 llvm::Type *SrcTy, llvm::Type *DstTy,
353 ScalarConversionOpts Opts);
354 Value *
355 EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
356 SourceLocation Loc,
357 ScalarConversionOpts Opts = ScalarConversionOpts());
358
359 /// Convert between either a fixed point and other fixed point or fixed point
360 /// and an integer.
361 Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
362 SourceLocation Loc);
363
364 /// Emit a conversion from the specified complex type to the specified
365 /// destination type, where the destination type is an LLVM scalar type.
366 Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
367 QualType SrcTy, QualType DstTy,
368 SourceLocation Loc);
369
370 /// EmitNullValue - Emit a value that corresponds to null for the given type.
371 Value *EmitNullValue(QualType Ty);
372
373 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
374 Value *EmitFloatToBoolConversion(Value *V) {
375 // Compare against 0.0 for fp scalars.
376 llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
377 return Builder.CreateFCmpUNE(V, Zero, "tobool");
378 }
379
380 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
381 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
382 Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
383
384 return Builder.CreateICmpNE(V, Zero, "tobool");
385 }
386
387 Value *EmitIntToBoolConversion(Value *V) {
388 // Because of the type rules of C, we often end up computing a
389 // logical value, then zero extending it to int, then wanting it
390 // as a logical value again. Optimize this common case.
391 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
392 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
393 Value *Result = ZI->getOperand(0);
394 // If there aren't any more uses, zap the instruction to save space.
395 // Note that there can be more uses, for example if this
396 // is the result of an assignment.
397 if (ZI->use_empty())
398 ZI->eraseFromParent();
399 return Result;
400 }
401 }
402
403 return Builder.CreateIsNotNull(V, "tobool");
404 }
405
406 //===--------------------------------------------------------------------===//
407 // Visitor Methods
408 //===--------------------------------------------------------------------===//
409
410 Value *Visit(Expr *E) {
411 ApplyDebugLocation DL(CGF, E);
412 return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
413 }
414
415 Value *VisitStmt(Stmt *S) {
416 S->dump(llvm::errs(), CGF.getContext());
417 llvm_unreachable("Stmt can't have complex result type!")::llvm::llvm_unreachable_internal("Stmt can't have complex result type!"
, "clang/lib/CodeGen/CGExprScalar.cpp", 417)
;
418 }
419 Value *VisitExpr(Expr *S);
420
421 Value *VisitConstantExpr(ConstantExpr *E) {
422 // A constant expression of type 'void' generates no code and produces no
423 // value.
424 if (E->getType()->isVoidType())
425 return nullptr;
426
427 if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
428 if (E->isGLValue())
429 return CGF.Builder.CreateLoad(Address(
430 Result, CGF.getContext().getTypeAlignInChars(E->getType())));
431 return Result;
432 }
433 return Visit(E->getSubExpr());
434 }
435 Value *VisitParenExpr(ParenExpr *PE) {
436 return Visit(PE->getSubExpr());
437 }
438 Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
439 return Visit(E->getReplacement());
440 }
441 Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
442 return Visit(GE->getResultExpr());
443 }
444 Value *VisitCoawaitExpr(CoawaitExpr *S) {
445 return CGF.EmitCoawaitExpr(*S).getScalarVal();
446 }
447 Value *VisitCoyieldExpr(CoyieldExpr *S) {
448 return CGF.EmitCoyieldExpr(*S).getScalarVal();
449 }
450 Value *VisitUnaryCoawait(const UnaryOperator *E) {
451 return Visit(E->getSubExpr());
452 }
453
454 // Leaves.
455 Value *VisitIntegerLiteral(const IntegerLiteral *E) {
456 return Builder.getInt(E->getValue());
457 }
458 Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
459 return Builder.getInt(E->getValue());
460 }
461 Value *VisitFloatingLiteral(const FloatingLiteral *E) {
462 return llvm::ConstantFP::get(VMContext, E->getValue());
463 }
464 Value *VisitCharacterLiteral(const CharacterLiteral *E) {
465 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
466 }
467 Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
468 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
469 }
470 Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
471 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
472 }
473 Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
474 return EmitNullValue(E->getType());
475 }
476 Value *VisitGNUNullExpr(const GNUNullExpr *E) {
477 return EmitNullValue(E->getType());
478 }
479 Value *VisitOffsetOfExpr(OffsetOfExpr *E);
480 Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
481 Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
482 llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
483 return Builder.CreateBitCast(V, ConvertType(E->getType()));
484 }
485
486 Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
487 return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
488 }
489
490 Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
491 return CGF.EmitPseudoObjectRValue(E).getScalarVal();
492 }
493
494 Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
495
496 Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
497 if (E->isGLValue())
498 return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
499 E->getExprLoc());
500
501 // Otherwise, assume the mapping is the scalar directly.
502 return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
503 }
504
505 // l-values.
506 Value *VisitDeclRefExpr(DeclRefExpr *E) {
507 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
508 return CGF.emitScalarConstant(Constant, E);
509 return EmitLoadOfLValue(E);
510 }
511
512 Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
513 return CGF.EmitObjCSelectorExpr(E);
514 }
515 Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
516 return CGF.EmitObjCProtocolExpr(E);
517 }
518 Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
519 return EmitLoadOfLValue(E);
520 }
521 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
522 if (E->getMethodDecl() &&
523 E->getMethodDecl()->getReturnType()->isReferenceType())
524 return EmitLoadOfLValue(E);
525 return CGF.EmitObjCMessageExpr(E).getScalarVal();
526 }
527
528 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
529 LValue LV = CGF.EmitObjCIsaExpr(E);
530 Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
531 return V;
532 }
533
534 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
535 VersionTuple Version = E->getVersion();
536
537 // If we're checking for a platform older than our minimum deployment
538 // target, we can fold the check away.
539 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
540 return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
541
542 return CGF.EmitBuiltinAvailable(Version);
543 }
544
545 Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
546 Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
547 Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
548 Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
549 Value *VisitMemberExpr(MemberExpr *E);
550 Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
551 Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
552 // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
553 // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
554 // literals aren't l-values in C++. We do so simply because that's the
555 // cleanest way to handle compound literals in C++.
556 // See the discussion here: https://reviews.llvm.org/D64464
557 return EmitLoadOfLValue(E);
558 }
559
560 Value *VisitInitListExpr(InitListExpr *E);
561
562 Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
563 assert(CGF.getArrayInitIndex() &&(static_cast <bool> (CGF.getArrayInitIndex() &&
"ArrayInitIndexExpr not inside an ArrayInitLoopExpr?") ? void
(0) : __assert_fail ("CGF.getArrayInitIndex() && \"ArrayInitIndexExpr not inside an ArrayInitLoopExpr?\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 564, __extension__ __PRETTY_FUNCTION__
))
564 "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?")(static_cast <bool> (CGF.getArrayInitIndex() &&
"ArrayInitIndexExpr not inside an ArrayInitLoopExpr?") ? void
(0) : __assert_fail ("CGF.getArrayInitIndex() && \"ArrayInitIndexExpr not inside an ArrayInitLoopExpr?\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 564, __extension__ __PRETTY_FUNCTION__
))
;
565 return CGF.getArrayInitIndex();
566 }
567
568 Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
569 return EmitNullValue(E->getType());
570 }
571 Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
572 CGF.CGM.EmitExplicitCastExprType(E, &CGF);
573 return VisitCastExpr(E);
574 }
575 Value *VisitCastExpr(CastExpr *E);
576
577 Value *VisitCallExpr(const CallExpr *E) {
578 if (E->getCallReturnType(CGF.getContext())->isReferenceType())
579 return EmitLoadOfLValue(E);
580
581 Value *V = CGF.EmitCallExpr(E).getScalarVal();
582
583 EmitLValueAlignmentAssumption(E, V);
584 return V;
585 }
586
587 Value *VisitStmtExpr(const StmtExpr *E);
588
589 // Unary Operators.
590 Value *VisitUnaryPostDec(const UnaryOperator *E) {
591 LValue LV = EmitLValue(E->getSubExpr());
592 return EmitScalarPrePostIncDec(E, LV, false, false);
593 }
594 Value *VisitUnaryPostInc(const UnaryOperator *E) {
595 LValue LV = EmitLValue(E->getSubExpr());
596 return EmitScalarPrePostIncDec(E, LV, true, false);
597 }
598 Value *VisitUnaryPreDec(const UnaryOperator *E) {
599 LValue LV = EmitLValue(E->getSubExpr());
600 return EmitScalarPrePostIncDec(E, LV, false, true);
601 }
602 Value *VisitUnaryPreInc(const UnaryOperator *E) {
603 LValue LV = EmitLValue(E->getSubExpr());
604 return EmitScalarPrePostIncDec(E, LV, true, true);
605 }
606
607 llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
608 llvm::Value *InVal,
609 bool IsInc);
610
611 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
612 bool isInc, bool isPre);
613
614
615 Value *VisitUnaryAddrOf(const UnaryOperator *E) {
616 if (isa<MemberPointerType>(E->getType())) // never sugared
617 return CGF.CGM.getMemberPointerConstant(E);
618
619 return EmitLValue(E->getSubExpr()).getPointer(CGF);
620 }
621 Value *VisitUnaryDeref(const UnaryOperator *E) {
622 if (E->getType()->isVoidType())
623 return Visit(E->getSubExpr()); // the actual value should be unused
624 return EmitLoadOfLValue(E);
625 }
626 Value *VisitUnaryPlus(const UnaryOperator *E) {
627 // This differs from gcc, though, most likely due to a bug in gcc.
628 TestAndClearIgnoreResultAssign();
629 return Visit(E->getSubExpr());
630 }
631 Value *VisitUnaryMinus (const UnaryOperator *E);
632 Value *VisitUnaryNot (const UnaryOperator *E);
633 Value *VisitUnaryLNot (const UnaryOperator *E);
634 Value *VisitUnaryReal (const UnaryOperator *E);
635 Value *VisitUnaryImag (const UnaryOperator *E);
636 Value *VisitUnaryExtension(const UnaryOperator *E) {
637 return Visit(E->getSubExpr());
638 }
639
640 // C++
641 Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
642 return EmitLoadOfLValue(E);
643 }
644 Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
645 auto &Ctx = CGF.getContext();
646 APValue Evaluated =
647 SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
648 return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
649 SLE->getType());
650 }
651
652 Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
653 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
654 return Visit(DAE->getExpr());
655 }
656 Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
657 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
658 return Visit(DIE->getExpr());
659 }
660 Value *VisitCXXThisExpr(CXXThisExpr *TE) {
661 return CGF.LoadCXXThis();
662 }
663
664 Value *VisitExprWithCleanups(ExprWithCleanups *E);
665 Value *VisitCXXNewExpr(const CXXNewExpr *E) {
666 return CGF.EmitCXXNewExpr(E);
667 }
668 Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
669 CGF.EmitCXXDeleteExpr(E);
670 return nullptr;
671 }
672
673 Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
674 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
675 }
676
677 Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
678 return Builder.getInt1(E->isSatisfied());
679 }
680
681 Value *VisitRequiresExpr(const RequiresExpr *E) {
682 return Builder.getInt1(E->isSatisfied());
683 }
684
685 Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
686 return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
687 }
688
689 Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
690 return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
691 }
692
693 Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
694 // C++ [expr.pseudo]p1:
695 // The result shall only be used as the operand for the function call
696 // operator (), and the result of such a call has type void. The only
697 // effect is the evaluation of the postfix-expression before the dot or
698 // arrow.
699 CGF.EmitScalarExpr(E->getBase());
700 return nullptr;
701 }
702
703 Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
704 return EmitNullValue(E->getType());
705 }
706
707 Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
708 CGF.EmitCXXThrowExpr(E);
709 return nullptr;
710 }
711
712 Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
713 return Builder.getInt1(E->getValue());
714 }
715
  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    // Signed integer multiplication: the lowering depends on the requested
    // signed-overflow behavior (-fwrapv / default UB / -ftrapv).
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        // -fwrapv: overflow is defined to wrap, so emit a plain 'mul'.
        return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
      case LangOptions::SOB_Undefined:
        // Default mode: overflow is UB, so 'nsw' is legal -- unless the
        // signed-overflow sanitizer wants to observe the overflow, in which
        // case fall through to the checked path.
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        LLVM_FALLTHROUGH;
      case LangOptions::SOB_Trapping:
        // Skip the runtime check when the operands provably cannot overflow.
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      // Otherwise one operand is a scalar: matrix-by-scalar multiply.
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    // Unsigned multiplication with the unsigned-overflow sanitizer enabled.
    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    // Remaining cases (plain unsigned integers, vectors) use a plain 'mul'.
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero, bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetWidthMinusOneValue(Value *LHS, Value *RHS);

  // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
  // non powers of two.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  // The bitwise operators cannot overflow, so they lower directly to a single
  // LLVM instruction with no sanitizer instrumentation.
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E);
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

  // Binary operators and binary compound assignment operators.
  // Each HANDLEBINOP(OP) expands to the Visit methods for both 'a OP b' and
  // 'a OP= b', dispatching to the corresponding Emit##OP above.
#define HANDLEBINOP(OP) \
  Value *VisitBin ## OP(const BinaryOperator *E) {                         \
    return Emit ## OP(EmitBinOps(E));                                      \
  }                                                                        \
  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) {       \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP);          \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP
824
  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
  // Each comparison supplies three predicates (unsigned-int, signed-int,
  // float); the relational operators pass IsSignaling=true, equality
  // operators pass false.
#define VISITCOMP(CODE, UI, SI, FP, SIG) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP

  Value *VisitBinAssign     (const BinaryOperator *E);

  Value *VisitBinLAnd       (const BinaryOperator *E);
  Value *VisitBinLOr        (const BinaryOperator *E);
  Value *VisitBinComma      (const BinaryOperator *E);

  // Pointer-to-member access yields an lvalue; load its scalar value.
  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  // Rewritten operators (e.g. comparisons rewritten in terms of <=>) are
  // emitted via their semantic form.
  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  // Objective-C literals forward to CodeGenFunction helpers, which emit the
  // corresponding runtime construction calls.
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
873};
874} // end anonymous namespace.
875
876//===----------------------------------------------------------------------===//
877// Utilities
878//===----------------------------------------------------------------------===//
879
880/// EmitConversionToBool - Convert the specified expression value to a
881/// boolean (i1) truth value. This is equivalent to "Val != 0".
882Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
883 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs")(static_cast <bool> (SrcType.isCanonical() && "EmitScalarConversion strips typedefs"
) ? void (0) : __assert_fail ("SrcType.isCanonical() && \"EmitScalarConversion strips typedefs\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 883, __extension__ __PRETTY_FUNCTION__
))
;
884
885 if (SrcType->isRealFloatingType())
886 return EmitFloatToBoolConversion(Src);
887
888 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
889 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
890
891 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&(static_cast <bool> ((SrcType->isIntegerType() || isa
<llvm::PointerType>(Src->getType())) && "Unknown scalar type to convert"
) ? void (0) : __assert_fail ("(SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) && \"Unknown scalar type to convert\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 892, __extension__ __PRETTY_FUNCTION__
))
892 "Unknown scalar type to convert")(static_cast <bool> ((SrcType->isIntegerType() || isa
<llvm::PointerType>(Src->getType())) && "Unknown scalar type to convert"
) ? void (0) : __assert_fail ("(SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) && \"Unknown scalar type to convert\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 892, __extension__ __PRETTY_FUNCTION__
))
;
893
894 if (isa<llvm::IntegerType>(Src->getType()))
895 return EmitIntToBoolConversion(Src);
896
897 assert(isa<llvm::PointerType>(Src->getType()))(static_cast <bool> (isa<llvm::PointerType>(Src->
getType())) ? void (0) : __assert_fail ("isa<llvm::PointerType>(Src->getType())"
, "clang/lib/CodeGen/CGExprScalar.cpp", 897, __extension__ __PRETTY_FUNCTION__
))
;
898 return EmitPointerToBoolConversion(Src, SrcType);
899}
900
/// Emit the -fsanitize=float-cast-overflow check for a floating-point to
/// integer conversion: the conversion has UB if the (truncated) source value
/// is +-Inf, NaN, or out of range for the destination integer type.
void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  // Only float -> integer conversions can overflow; float -> float is handled
  // elsewhere (and is at worst inexact, not UB).
  if (!isa<llvm::IntegerType>(DstTy))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Value *Check = nullptr;
  // Use the semantics of the *original* source type so the bounds are
  // computed in the precision the value actually started in (e.g. __half).
  const llvm::fltSemantics &SrcSema =
    CGF.getContext().getFloatTypeSemantics(OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // to an integer).
  unsigned Width = CGF.getContext().getIntWidth(DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(SrcSema, true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(SrcSema, false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);

  // If we're converting from __half, convert the range to float to match
  // the type of src.
  if (OrigSrcType->isHalfType()) {
    const llvm::fltSemantics &Sema =
      CGF.getContext().getFloatTypeSemantics(SrcType);
    bool IsInexact;
    MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
    MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
  }

  // Strict (exclusive, ordered) bounds: NaN fails both compares, so the
  // 'and' below is false for NaN as well as for out-of-range values.
  llvm::Value *GE =
    Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
  llvm::Value *LE =
    Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
  Check = Builder.CreateAnd(GE, LE);

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
                SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}
968
969// Should be called within CodeGenFunction::SanitizerScope RAII scope.
970// Returns 'i1 false' when the truncation Src -> Dst was lossy.
971static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
972 std::pair<llvm::Value *, SanitizerMask>>
973EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
974 QualType DstType, CGBuilderTy &Builder) {
975 llvm::Type *SrcTy = Src->getType();
976 llvm::Type *DstTy = Dst->getType();
977 (void)DstTy; // Only used in assert()
978
979 // This should be truncation of integral types.
980 assert(Src != Dst)(static_cast <bool> (Src != Dst) ? void (0) : __assert_fail
("Src != Dst", "clang/lib/CodeGen/CGExprScalar.cpp", 980, __extension__
__PRETTY_FUNCTION__))
;
981 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits())(static_cast <bool> (SrcTy->getScalarSizeInBits() >
Dst->getType()->getScalarSizeInBits()) ? void (0) : __assert_fail
("SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits()"
, "clang/lib/CodeGen/CGExprScalar.cpp", 981, __extension__ __PRETTY_FUNCTION__
))
;
982 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&(static_cast <bool> (isa<llvm::IntegerType>(SrcTy
) && isa<llvm::IntegerType>(DstTy) && "non-integer llvm type"
) ? void (0) : __assert_fail ("isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) && \"non-integer llvm type\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 983, __extension__ __PRETTY_FUNCTION__
))
983 "non-integer llvm type")(static_cast <bool> (isa<llvm::IntegerType>(SrcTy
) && isa<llvm::IntegerType>(DstTy) && "non-integer llvm type"
) ? void (0) : __assert_fail ("isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) && \"non-integer llvm type\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 983, __extension__ __PRETTY_FUNCTION__
))
;
984
985 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
986 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
987
988 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
989 // Else, it is a signed truncation.
990 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
991 SanitizerMask Mask;
992 if (!SrcSigned && !DstSigned) {
993 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
994 Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
995 } else {
996 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
997 Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
998 }
999
1000 llvm::Value *Check = nullptr;
1001 // 1. Extend the truncated value back to the same width as the Src.
1002 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
1003 // 2. Equality-compare with the original source value
1004 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1005 // If the comparison result is 'i1 false', then the truncation was lossy.
1006 return std::make_pair(Kind, std::make_pair(Check, Mask));
1007}
1008
1009static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
1010 QualType SrcType, QualType DstType) {
1011 return SrcType->isIntegerType() && DstType->isIntegerType();
1012}
1013
/// Emit the -fsanitize=implicit-{un,}signed-integer-truncation check for the
/// implicit conversion of Src (of SrcType) to Dst (of DstType), when that
/// conversion is an integer truncation the enabled sanitizers care about.
void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from larger unsigned type to smaller signed type,
  // let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check =
          EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  // If the comparison result is 'i1 false', then the truncation was lossy.

  // Do we care about this type of truncation? The helper classified the
  // truncation as signed or unsigned; the corresponding sanitizer may be off.
  if (!CGF.SanOpts.has(Check.second.second))
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}
1062
// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
// The check compares "was Src negative?" with "is Dst negative?".
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  (void)SrcSigned; // Only used in assert()
  (void)DstSigned; // Only used in assert()
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  (void)SrcBits; // Only used in assert()
  (void)DstBits; // Only used in assert()

  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
         "either the widths should be different, or the signednesses.");

  // NOTE: zero value is considered to be non-negative.
  auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
                                       const char *Name) -> Value * {
    // Is this value a signed type?
    bool VSigned = VType->isSignedIntegerOrEnumerationType();
    llvm::Type *VTy = V->getType();
    if (!VSigned) {
      // If the value is unsigned, then it is never negative.
      // FIXME: can we encounter non-scalar VTy here?
      return llvm::ConstantInt::getFalse(VTy->getContext());
    }
    // Get the zero of the same type with which we will be comparing.
    llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
    // %V.isnegative = icmp slt %V, 0
    // I.e is %V *strictly* less than zero, does it have negative value?
    return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
                              llvm::Twine(Name) + "." + V->getName() +
                                  ".negativitycheck");
  };

  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
  // 3. Now, was the 'negativity status' preserved during the conversion?
  // NOTE: conversion from negative to zero is considered to change the sign.
  // (We want to get 'false' when the conversion changed the sign)
  // So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
}
1122
/// Emit the -fsanitize=implicit-integer-sign-change check for the implicit
/// conversion of Src (of SrcType) to Dst (of DstType), unless the conversion
/// provably cannot change the sign or another sanitizer already covers it.
void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();

  // Now, we do not need to emit the check in *all* of the cases.
  // We can avoid emitting it in some obvious cases where it would have been
  // dropped by the opt passes (instcombine) always anyways.
  // If it's a cast between effectively the same type, no check.
  // NOTE: this is *not* equivalent to checking the canonical types.
  if (SrcSigned == DstSigned && SrcBits == DstBits)
    return;
  // At least one of the values needs to have signed type.
  // If both are unsigned, then obviously, neither of them can be negative.
  if (!SrcSigned && !DstSigned)
    return;
  // If the conversion is to *larger* *signed* type, then no check is needed.
  // Because either sign-extension happens (so the sign will remain),
  // or zero-extension will happen (the sign bit will be zero.)
  if ((DstBits > SrcBits) && DstSigned)
    return;
  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && SrcSigned) {
    // If the signed integer truncation sanitizer is enabled,
    // and this is a truncation from signed type, then no check is needed.
    // Because here sign change check is interchangeable with truncation check.
    return;
  }
  // That's it. We can't rule out any more cases with the data we have.

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Each of these checks needs to return 'false' when an issue was detected.
  ImplicitConversionCheckKind CheckKind;
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
  // So we can 'and' all the checks together, and still get 'false',
  // if at least one of the checks detected an issue.

  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  CheckKind = Check.first;
  Checks.emplace_back(Check.second);

  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
    // If the signed integer truncation sanitizer was enabled,
    // and we are truncating from larger unsigned type to smaller signed type,
    // let's handle the case we skipped in that check.
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
    Checks.emplace_back(Check.second);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
  // EmitCheck() will 'and' all the checks together.
  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}
1204
/// Emit the LLVM cast instruction for a scalar (or matrix element-wise)
/// conversion between SrcTy and DstTy, choosing among int/FP cast flavors
/// based on the *element* types and their signedness.
Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
                                         QualType DstType, llvm::Type *SrcTy,
                                         llvm::Type *DstTy,
                                         ScalarConversionOpts Opts) {
  // The Element types determine the type of cast to perform.
  llvm::Type *SrcElementTy;
  llvm::Type *DstElementTy;
  QualType SrcElementType;
  QualType DstElementType;
  if (SrcType->isMatrixType() && DstType->isMatrixType()) {
    // Matrices lower to LLVM vectors; cast element-wise on the whole vector.
    SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
    DstElementType = DstType->castAs<MatrixType>()->getElementType();
  } else {
    assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
           "cannot cast between matrix and non-matrix types");
    SrcElementTy = SrcTy;
    DstElementTy = DstTy;
    SrcElementType = SrcType;
    DstElementType = DstType;
  }

  if (isa<llvm::IntegerType>(SrcElementTy)) {
    bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
    // _Bool is unsigned, but some callers need it treated as signed
    // (e.g. when the value came from a signed bitfield).
    if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      InputSigned = true;
    }

    // int -> int, or int -> float, picking sign/zero extension by the
    // *source* signedness.
    if (isa<llvm::IntegerType>(DstElementTy))
      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    if (InputSigned)
      return Builder.CreateSIToFP(Src, DstTy, "conv");
    return Builder.CreateUIToFP(Src, DstTy, "conv");
  }

  if (isa<llvm::IntegerType>(DstElementTy)) {
    assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
    bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();

    // If we can't recognize overflow as undefined behavior, assume that
    // overflow saturates. This protects against normal optimizations if we are
    // compiling with non-standard FP semantics.
    if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
      llvm::Intrinsic::ID IID =
          IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
      return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
    }

    if (IsSigned)
      return Builder.CreateFPToSI(Src, DstTy, "conv");
    return Builder.CreateFPToUI(Src, DstTy, "conv");
  }

  // Remaining case is float -> float. The TypeID comparison distinguishes
  // narrowing from widening conversions (FP TypeIDs appear to be ordered by
  // increasing width -- NOTE(review): relies on llvm::Type::TypeID ordering).
  if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
    return Builder.CreateFPTrunc(Src, DstTy, "conv");
  return Builder.CreateFPExt(Src, DstTy, "conv");
}
1263
1264/// Emit a conversion from the specified type to the specified destination type,
1265/// both of which are LLVM scalar types.
1266Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1267 QualType DstType,
1268 SourceLocation Loc,
1269 ScalarConversionOpts Opts) {
1270 // All conversions involving fixed point types should be handled by the
1271 // EmitFixedPoint family functions. This is done to prevent bloating up this
1272 // function more, and although fixed point numbers are represented by
1273 // integers, we do not want to follow any logic that assumes they should be
1274 // treated as integers.
1275 // TODO(leonardchan): When necessary, add another if statement checking for
1276 // conversions to fixed point types from other types.
1277 if (SrcType->isFixedPointType()) {
1278 if (DstType->isBooleanType())
1279 // It is important that we check this before checking if the dest type is
1280 // an integer because booleans are technically integer types.
1281 // We do not need to check the padding bit on unsigned types if unsigned
1282 // padding is enabled because overflow into this bit is undefined
1283 // behavior.
1284 return Builder.CreateIsNotNull(Src, "tobool");
1285 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1286 DstType->isRealFloatingType())
1287 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1288
1289 llvm_unreachable(::llvm::llvm_unreachable_internal("Unhandled scalar conversion from a fixed point type to another type."
, "clang/lib/CodeGen/CGExprScalar.cpp", 1290)
1290 "Unhandled scalar conversion from a fixed point type to another type.")::llvm::llvm_unreachable_internal("Unhandled scalar conversion from a fixed point type to another type."
, "clang/lib/CodeGen/CGExprScalar.cpp", 1290)
;
1291 } else if (DstType->isFixedPointType()) {
1292 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1293 // This also includes converting booleans and enums to fixed point types.
1294 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1295
1296 llvm_unreachable(::llvm::llvm_unreachable_internal("Unhandled scalar conversion to a fixed point type from another type."
, "clang/lib/CodeGen/CGExprScalar.cpp", 1297)
1297 "Unhandled scalar conversion to a fixed point type from another type.")::llvm::llvm_unreachable_internal("Unhandled scalar conversion to a fixed point type from another type."
, "clang/lib/CodeGen/CGExprScalar.cpp", 1297)
;
1298 }
1299
1300 QualType NoncanonicalSrcType = SrcType;
1301 QualType NoncanonicalDstType = DstType;
1302
1303 SrcType = CGF.getContext().getCanonicalType(SrcType);
1304 DstType = CGF.getContext().getCanonicalType(DstType);
1305 if (SrcType == DstType) return Src;
1306
1307 if (DstType->isVoidType()) return nullptr;
1308
1309 llvm::Value *OrigSrc = Src;
1310 QualType OrigSrcType = SrcType;
1311 llvm::Type *SrcTy = Src->getType();
1312
1313 // Handle conversions to bool first, they are special: comparisons against 0.
1314 if (DstType->isBooleanType())
1315 return EmitConversionToBool(Src, SrcType);
1316
1317 llvm::Type *DstTy = ConvertType(DstType);
1318
1319 // Cast from half through float if half isn't a native type.
1320 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1321 // Cast to FP using the intrinsic if the half type itself isn't supported.
1322 if (DstTy->isFloatingPointTy()) {
1323 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1324 return Builder.CreateCall(
1325 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1326 Src);
1327 } else {
1328 // Cast to other types through float, using either the intrinsic or FPExt,
1329 // depending on whether the half type itself is supported
1330 // (as opposed to operations on half, available with NativeHalfType).
1331 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1332 Src = Builder.CreateCall(
1333 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1334 CGF.CGM.FloatTy),
1335 Src);
1336 } else {
1337 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1338 }
1339 SrcType = CGF.getContext().FloatTy;
1340 SrcTy = CGF.FloatTy;
1341 }
1342 }
1343
1344 // Ignore conversions like int -> uint.
1345 if (SrcTy == DstTy) {
1346 if (Opts.EmitImplicitIntegerSignChangeChecks)
1347 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1348 NoncanonicalDstType, Loc);
1349
1350 return Src;
1351 }
1352
1353 // Handle pointer conversions next: pointers can only be converted to/from
1354 // other pointers and integers. Check for pointer types in terms of LLVM, as
1355 // some native types (like Obj-C id) may map to a pointer type.
1356 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1357 // The source value may be an integer, or a pointer.
1358 if (isa<llvm::PointerType>(SrcTy))
1359 return Builder.CreateBitCast(Src, DstTy, "conv");
1360
1361 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?")(static_cast <bool> (SrcType->isIntegerType() &&
"Not ptr->ptr or int->ptr conversion?") ? void (0) : __assert_fail
("SrcType->isIntegerType() && \"Not ptr->ptr or int->ptr conversion?\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1361, __extension__ __PRETTY_FUNCTION__
))
;
1362 // First, convert to the correct width so that we control the kind of
1363 // extension.
1364 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1365 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1366 llvm::Value* IntResult =
1367 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1368 // Then, cast to pointer.
1369 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1370 }
1371
1372 if (isa<llvm::PointerType>(SrcTy)) {
1373 // Must be an ptr to int cast.
1374 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?")(static_cast <bool> (isa<llvm::IntegerType>(DstTy
) && "not ptr->int?") ? void (0) : __assert_fail (
"isa<llvm::IntegerType>(DstTy) && \"not ptr->int?\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1374, __extension__ __PRETTY_FUNCTION__
))
;
1375 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1376 }
1377
1378 // A scalar can be splatted to an extended vector of the same element type
1379 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1380 // Sema should add casts to make sure that the source expression's type is
1381 // the same as the vector's element type (sans qualifiers)
1382 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==(static_cast <bool> (DstType->castAs<ExtVectorType
>()->getElementType().getTypePtr() == SrcType.getTypePtr
() && "Splatted expr doesn't match with vector element type?"
) ? void (0) : __assert_fail ("DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() == SrcType.getTypePtr() && \"Splatted expr doesn't match with vector element type?\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1384, __extension__ __PRETTY_FUNCTION__
))
1383 SrcType.getTypePtr() &&(static_cast <bool> (DstType->castAs<ExtVectorType
>()->getElementType().getTypePtr() == SrcType.getTypePtr
() && "Splatted expr doesn't match with vector element type?"
) ? void (0) : __assert_fail ("DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() == SrcType.getTypePtr() && \"Splatted expr doesn't match with vector element type?\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1384, __extension__ __PRETTY_FUNCTION__
))
1384 "Splatted expr doesn't match with vector element type?")(static_cast <bool> (DstType->castAs<ExtVectorType
>()->getElementType().getTypePtr() == SrcType.getTypePtr
() && "Splatted expr doesn't match with vector element type?"
) ? void (0) : __assert_fail ("DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() == SrcType.getTypePtr() && \"Splatted expr doesn't match with vector element type?\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1384, __extension__ __PRETTY_FUNCTION__
))
;
1385
1386 // Splat the element across to all elements
1387 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1388 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1389 }
1390
1391 if (SrcType->isMatrixType() && DstType->isMatrixType())
1392 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1393
1394 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1395 // Allow bitcast from vector to integer/fp of the same size.
1396 unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
1397 unsigned DstSize = DstTy->getPrimitiveSizeInBits();
1398 if (SrcSize == DstSize)
1399 return Builder.CreateBitCast(Src, DstTy, "conv");
1400
1401 // Conversions between vectors of different sizes are not allowed except
1402 // when vectors of half are involved. Operations on storage-only half
1403 // vectors require promoting half vector operands to float vectors and
1404 // truncating the result, which is either an int or float vector, to a
1405 // short or half vector.
1406
1407 // Source and destination are both expected to be vectors.
1408 llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1409 llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1410 (void)DstElementTy;
1411
1412 assert(((SrcElementTy->isIntegerTy() &&(static_cast <bool> (((SrcElementTy->isIntegerTy() &&
DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy
() && DstElementTy->isFloatingPointTy())) &&
"unexpected conversion between a floating-point vector and an "
"integer vector") ? void (0) : __assert_fail ("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1417, __extension__ __PRETTY_FUNCTION__
))
1413 DstElementTy->isIntegerTy()) ||(static_cast <bool> (((SrcElementTy->isIntegerTy() &&
DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy
() && DstElementTy->isFloatingPointTy())) &&
"unexpected conversion between a floating-point vector and an "
"integer vector") ? void (0) : __assert_fail ("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1417, __extension__ __PRETTY_FUNCTION__
))
1414 (SrcElementTy->isFloatingPointTy() &&(static_cast <bool> (((SrcElementTy->isIntegerTy() &&
DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy
() && DstElementTy->isFloatingPointTy())) &&
"unexpected conversion between a floating-point vector and an "
"integer vector") ? void (0) : __assert_fail ("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1417, __extension__ __PRETTY_FUNCTION__
))
1415 DstElementTy->isFloatingPointTy())) &&(static_cast <bool> (((SrcElementTy->isIntegerTy() &&
DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy
() && DstElementTy->isFloatingPointTy())) &&
"unexpected conversion between a floating-point vector and an "
"integer vector") ? void (0) : __assert_fail ("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1417, __extension__ __PRETTY_FUNCTION__
))
1416 "unexpected conversion between a floating-point vector and an "(static_cast <bool> (((SrcElementTy->isIntegerTy() &&
DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy
() && DstElementTy->isFloatingPointTy())) &&
"unexpected conversion between a floating-point vector and an "
"integer vector") ? void (0) : __assert_fail ("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1417, __extension__ __PRETTY_FUNCTION__
))
1417 "integer vector")(static_cast <bool> (((SrcElementTy->isIntegerTy() &&
DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy
() && DstElementTy->isFloatingPointTy())) &&
"unexpected conversion between a floating-point vector and an "
"integer vector") ? void (0) : __assert_fail ("((SrcElementTy->isIntegerTy() && DstElementTy->isIntegerTy()) || (SrcElementTy->isFloatingPointTy() && DstElementTy->isFloatingPointTy())) && \"unexpected conversion between a floating-point vector and an \" \"integer vector\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1417, __extension__ __PRETTY_FUNCTION__
))
;
1418
1419 // Truncate an i32 vector to an i16 vector.
1420 if (SrcElementTy->isIntegerTy())
1421 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1422
1423 // Truncate a float vector to a half vector.
1424 if (SrcSize > DstSize)
1425 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1426
1427 // Promote a half vector to a float vector.
1428 return Builder.CreateFPExt(Src, DstTy, "conv");
1429 }
1430
1431 // Finally, we have the arithmetic types: real int/float.
1432 Value *Res = nullptr;
1433 llvm::Type *ResTy = DstTy;
1434
1435 // An overflowing conversion has undefined behavior if either the source type
1436 // or the destination type is a floating-point type. However, we consider the
1437 // range of representable values for all floating-point types to be
1438 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1439 // floating-point type.
1440 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1441 OrigSrcType->isFloatingType())
1442 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1443 Loc);
1444
1445 // Cast to half through float if half isn't a native type.
1446 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1447 // Make sure we cast in a single step if from another FP type.
1448 if (SrcTy->isFloatingPointTy()) {
1449 // Use the intrinsic if the half type itself isn't supported
1450 // (as opposed to operations on half, available with NativeHalfType).
1451 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1452 return Builder.CreateCall(
1453 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1454 // If the half type is supported, just use an fptrunc.
1455 return Builder.CreateFPTrunc(Src, DstTy);
1456 }
1457 DstTy = CGF.FloatTy;
1458 }
1459
1460 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1461
1462 if (DstTy != ResTy) {
1463 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1464 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion")(static_cast <bool> (ResTy->isIntegerTy(16) &&
"Only half FP requires extra conversion") ? void (0) : __assert_fail
("ResTy->isIntegerTy(16) && \"Only half FP requires extra conversion\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1464, __extension__ __PRETTY_FUNCTION__
))
;
1465 Res = Builder.CreateCall(
1466 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1467 Res);
1468 } else {
1469 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1470 }
1471 }
1472
1473 if (Opts.EmitImplicitIntegerTruncationChecks)
1474 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1475 NoncanonicalDstType, Loc);
1476
1477 if (Opts.EmitImplicitIntegerSignChangeChecks)
1478 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1479 NoncanonicalDstType, Loc);
1480
1481 return Res;
1482}
1483
1484Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1485 QualType DstTy,
1486 SourceLocation Loc) {
1487 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1488 llvm::Value *Result;
1489 if (SrcTy->isRealFloatingType())
1490 Result = FPBuilder.CreateFloatingToFixed(Src,
1491 CGF.getContext().getFixedPointSemantics(DstTy));
1492 else if (DstTy->isRealFloatingType())
1493 Result = FPBuilder.CreateFixedToFloating(Src,
1494 CGF.getContext().getFixedPointSemantics(SrcTy),
1495 ConvertType(DstTy));
1496 else {
1497 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1498 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1499
1500 if (DstTy->isIntegerType())
1501 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1502 DstFPSema.getWidth(),
1503 DstFPSema.isSigned());
1504 else if (SrcTy->isIntegerType())
1505 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1506 DstFPSema);
1507 else
1508 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1509 }
1510 return Result;
1511}
1512
1513/// Emit a conversion from the specified complex type to the specified
1514/// destination type, where the destination type is an LLVM scalar type.
1515Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1516 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1517 SourceLocation Loc) {
1518 // Get the source element type.
1519 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1520
1521 // Handle conversions to bool first, they are special: comparisons against 0.
1522 if (DstTy->isBooleanType()) {
1523 // Complex != 0 -> (Real != 0) | (Imag != 0)
1524 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1525 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1526 return Builder.CreateOr(Src.first, Src.second, "tobool");
1527 }
1528
1529 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1530 // the imaginary part of the complex value is discarded and the value of the
1531 // real part is converted according to the conversion rules for the
1532 // corresponding real type.
1533 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1534}
1535
1536Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1537 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1538}
1539
1540/// Emit a sanitization check for the given "binary" operation (which
1541/// might actually be a unary increment which has been lowered to a binary
1542/// operation). The check passes if all values in \p Checks (which are \c i1),
1543/// are \c true.
1544void ScalarExprEmitter::EmitBinOpCheck(
1545 ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
1546 assert(CGF.IsSanitizerScope)(static_cast <bool> (CGF.IsSanitizerScope) ? void (0) :
__assert_fail ("CGF.IsSanitizerScope", "clang/lib/CodeGen/CGExprScalar.cpp"
, 1546, __extension__ __PRETTY_FUNCTION__))
;
1547 SanitizerHandler Check;
1548 SmallVector<llvm::Constant *, 4> StaticData;
1549 SmallVector<llvm::Value *, 2> DynamicData;
1550
1551 BinaryOperatorKind Opcode = Info.Opcode;
1552 if (BinaryOperator::isCompoundAssignmentOp(Opcode))
1553 Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
1554
1555 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1556 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1557 if (UO && UO->getOpcode() == UO_Minus) {
1558 Check = SanitizerHandler::NegateOverflow;
1559 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1560 DynamicData.push_back(Info.RHS);
1561 } else {
1562 if (BinaryOperator::isShiftOp(Opcode)) {
1563 // Shift LHS negative or too large, or RHS out of bounds.
1564 Check = SanitizerHandler::ShiftOutOfBounds;
1565 const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1566 StaticData.push_back(
1567 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1568 StaticData.push_back(
1569 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1570 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1571 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1572 Check = SanitizerHandler::DivremOverflow;
1573 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1574 } else {
1575 // Arithmetic overflow (+, -, *).
1576 switch (Opcode) {
1577 case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1578 case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1579 case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1580 default: llvm_unreachable("unexpected opcode for bin op check")::llvm::llvm_unreachable_internal("unexpected opcode for bin op check"
, "clang/lib/CodeGen/CGExprScalar.cpp", 1580)
;
1581 }
1582 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1583 }
1584 DynamicData.push_back(Info.LHS);
1585 DynamicData.push_back(Info.RHS);
1586 }
1587
1588 CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
1589}
1590
1591//===----------------------------------------------------------------------===//
1592// Visitor Methods
1593//===----------------------------------------------------------------------===//
1594
1595Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1596 CGF.ErrorUnsupported(E, "scalar expression");
1597 if (E->getType()->isVoidType())
1598 return nullptr;
1599 return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
1600}
1601
1602Value *
1603ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
1604 ASTContext &Context = CGF.getContext();
1605 llvm::Optional<LangAS> GlobalAS =
1606 Context.getTargetInfo().getConstantAddressSpace();
1607 llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(
1608 E->ComputeName(Context), "__usn_str",
1609 static_cast<unsigned>(GlobalAS.getValueOr(LangAS::Default)));
1610
1611 unsigned ExprAS = Context.getTargetAddressSpace(E->getType());
1612
1613 if (GlobalConstStr->getType()->getPointerAddressSpace() == ExprAS)
1614 return GlobalConstStr;
1615
1616 llvm::Type *EltTy = GlobalConstStr->getType()->getPointerElementType();
1617 llvm::PointerType *NewPtrTy = llvm::PointerType::get(EltTy, ExprAS);
1618 return Builder.CreateAddrSpaceCast(GlobalConstStr, NewPtrTy, "usn_addr_cast");
1619}
1620
1621Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1622 // Vector Mask Case
1623 if (E->getNumSubExprs() == 2) {
1624 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1625 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1626 Value *Mask;
1627
1628 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
1629 unsigned LHSElts = LTy->getNumElements();
1630
1631 Mask = RHS;
1632
1633 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
1634
1635 // Mask off the high bits of each shuffle index.
1636 Value *MaskBits =
1637 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1638 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1639
1640 // newv = undef
1641 // mask = mask & maskbits
1642 // for each elt
1643 // n = extract mask i
1644 // x = extract val n
1645 // newv = insert newv, x, i
1646 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
1647 MTy->getNumElements());
1648 Value* NewV = llvm::UndefValue::get(RTy);
1649 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1650 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1651 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1652
1653 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1654 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1655 }
1656 return NewV;
1657 }
1658
1659 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1660 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1661
1662 SmallVector<int, 32> Indices;
1663 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1664 llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
1665 // Check for -1 and output it as undef in the IR.
1666 if (Idx.isSigned() && Idx.isAllOnes())
1667 Indices.push_back(-1);
1668 else
1669 Indices.push_back(Idx.getZExtValue());
1670 }
1671
1672 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
1673}
1674
1675Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1676 QualType SrcType = E->getSrcExpr()->getType(),
1677 DstType = E->getType();
1678
1679 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1680
1681 SrcType = CGF.getContext().getCanonicalType(SrcType);
1682 DstType = CGF.getContext().getCanonicalType(DstType);
1683 if (SrcType == DstType) return Src;
1684
1685 assert(SrcType->isVectorType() &&(static_cast <bool> (SrcType->isVectorType() &&
"ConvertVector source type must be a vector") ? void (0) : __assert_fail
("SrcType->isVectorType() && \"ConvertVector source type must be a vector\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1686, __extension__ __PRETTY_FUNCTION__
))
1686 "ConvertVector source type must be a vector")(static_cast <bool> (SrcType->isVectorType() &&
"ConvertVector source type must be a vector") ? void (0) : __assert_fail
("SrcType->isVectorType() && \"ConvertVector source type must be a vector\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1686, __extension__ __PRETTY_FUNCTION__
))
;
1687 assert(DstType->isVectorType() &&(static_cast <bool> (DstType->isVectorType() &&
"ConvertVector destination type must be a vector") ? void (0
) : __assert_fail ("DstType->isVectorType() && \"ConvertVector destination type must be a vector\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1688, __extension__ __PRETTY_FUNCTION__
))
1688 "ConvertVector destination type must be a vector")(static_cast <bool> (DstType->isVectorType() &&
"ConvertVector destination type must be a vector") ? void (0
) : __assert_fail ("DstType->isVectorType() && \"ConvertVector destination type must be a vector\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1688, __extension__ __PRETTY_FUNCTION__
))
;
1689
1690 llvm::Type *SrcTy = Src->getType();
1691 llvm::Type *DstTy = ConvertType(DstType);
1692
1693 // Ignore conversions like int -> uint.
1694 if (SrcTy == DstTy)
1695 return Src;
1696
1697 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1698 DstEltType = DstType->castAs<VectorType>()->getElementType();
1699
1700 assert(SrcTy->isVectorTy() &&(static_cast <bool> (SrcTy->isVectorTy() && "ConvertVector source IR type must be a vector"
) ? void (0) : __assert_fail ("SrcTy->isVectorTy() && \"ConvertVector source IR type must be a vector\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1701, __extension__ __PRETTY_FUNCTION__
))
1701 "ConvertVector source IR type must be a vector")(static_cast <bool> (SrcTy->isVectorTy() && "ConvertVector source IR type must be a vector"
) ? void (0) : __assert_fail ("SrcTy->isVectorTy() && \"ConvertVector source IR type must be a vector\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1701, __extension__ __PRETTY_FUNCTION__
))
;
1702 assert(DstTy->isVectorTy() &&(static_cast <bool> (DstTy->isVectorTy() && "ConvertVector destination IR type must be a vector"
) ? void (0) : __assert_fail ("DstTy->isVectorTy() && \"ConvertVector destination IR type must be a vector\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1703, __extension__ __PRETTY_FUNCTION__
))
1703 "ConvertVector destination IR type must be a vector")(static_cast <bool> (DstTy->isVectorTy() && "ConvertVector destination IR type must be a vector"
) ? void (0) : __assert_fail ("DstTy->isVectorTy() && \"ConvertVector destination IR type must be a vector\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1703, __extension__ __PRETTY_FUNCTION__
))
;
1704
1705 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
1706 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
1707
1708 if (DstEltType->isBooleanType()) {
1709 assert((SrcEltTy->isFloatingPointTy() ||(static_cast <bool> ((SrcEltTy->isFloatingPointTy() ||
isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion"
) ? void (0) : __assert_fail ("(SrcEltTy->isFloatingPointTy() || isa<llvm::IntegerType>(SrcEltTy)) && \"Unknown boolean conversion\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1710, __extension__ __PRETTY_FUNCTION__
))
1710 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion")(static_cast <bool> ((SrcEltTy->isFloatingPointTy() ||
isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion"
) ? void (0) : __assert_fail ("(SrcEltTy->isFloatingPointTy() || isa<llvm::IntegerType>(SrcEltTy)) && \"Unknown boolean conversion\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1710, __extension__ __PRETTY_FUNCTION__
))
;
1711
1712 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
1713 if (SrcEltTy->isFloatingPointTy()) {
1714 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
1715 } else {
1716 return Builder.CreateICmpNE(Src, Zero, "tobool");
1717 }
1718 }
1719
1720 // We have the arithmetic types: real int/float.
1721 Value *Res = nullptr;
1722
1723 if (isa<llvm::IntegerType>(SrcEltTy)) {
1724 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1725 if (isa<llvm::IntegerType>(DstEltTy))
1726 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1727 else if (InputSigned)
1728 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1729 else
1730 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1731 } else if (isa<llvm::IntegerType>(DstEltTy)) {
1732 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion")(static_cast <bool> (SrcEltTy->isFloatingPointTy() &&
"Unknown real conversion") ? void (0) : __assert_fail ("SrcEltTy->isFloatingPointTy() && \"Unknown real conversion\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1732, __extension__ __PRETTY_FUNCTION__
))
;
1733 if (DstEltType->isSignedIntegerOrEnumerationType())
1734 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1735 else
1736 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1737 } else {
1738 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&(static_cast <bool> (SrcEltTy->isFloatingPointTy() &&
DstEltTy->isFloatingPointTy() && "Unknown real conversion"
) ? void (0) : __assert_fail ("SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() && \"Unknown real conversion\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1739, __extension__ __PRETTY_FUNCTION__
))
1739 "Unknown real conversion")(static_cast <bool> (SrcEltTy->isFloatingPointTy() &&
DstEltTy->isFloatingPointTy() && "Unknown real conversion"
) ? void (0) : __assert_fail ("SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() && \"Unknown real conversion\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1739, __extension__ __PRETTY_FUNCTION__
))
;
1740 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1741 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1742 else
1743 Res = Builder.CreateFPExt(Src, DstTy, "conv");
1744 }
1745
1746 return Res;
1747}
1748
1749Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1750 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
1751 CGF.EmitIgnoredExpr(E->getBase());
1752 return CGF.emitScalarConstant(Constant, E);
1753 } else {
1754 Expr::EvalResult Result;
1755 if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1756 llvm::APSInt Value = Result.Val.getInt();
1757 CGF.EmitIgnoredExpr(E->getBase());
1758 return Builder.getInt(Value);
1759 }
1760 }
1761
1762 return EmitLoadOfLValue(E);
1763}
1764
1765Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1766 TestAndClearIgnoreResultAssign();
1767
1768 // Emit subscript expressions in rvalue context's. For most cases, this just
1769 // loads the lvalue formed by the subscript expr. However, we have to be
1770 // careful, because the base of a vector subscript is occasionally an rvalue,
1771 // so we can't get it as an lvalue.
1772 if (!E->getBase()->getType()->isVectorType())
1773 return EmitLoadOfLValue(E);
1774
1775 // Handle the vector case. The base must be a vector, the index must be an
1776 // integer value.
1777 Value *Base = Visit(E->getBase());
1778 Value *Idx = Visit(E->getIdx());
1779 QualType IdxTy = E->getIdx()->getType();
1780
1781 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
1782 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1783
1784 return Builder.CreateExtractElement(Base, Idx, "vecext");
1785}
1786
1787Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
1788 TestAndClearIgnoreResultAssign();
1789
1790 // Handle the vector case. The base must be a vector, the index must be an
1791 // integer value.
1792 Value *RowIdx = Visit(E->getRowIdx());
1793 Value *ColumnIdx = Visit(E->getColumnIdx());
1794
1795 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
1796 unsigned NumRows = MatrixTy->getNumRows();
1797 llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
1798 Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
1799 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
1800 MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());
1801
1802 Value *Matrix = Visit(E->getBase());
1803
1804 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
1805 return Builder.CreateExtractElement(Matrix, Idx, "matrixext");
1806}
1807
1808static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1809 unsigned Off) {
1810 int MV = SVI->getMaskValue(Idx);
1811 if (MV == -1)
1812 return -1;
1813 return Off + MV;
1814}
1815
1816static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1817 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&(static_cast <bool> (llvm::ConstantInt::isValueValidForType
(I32Ty, C->getZExtValue()) && "Index operand too large for shufflevector mask!"
) ? void (0) : __assert_fail ("llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) && \"Index operand too large for shufflevector mask!\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1818, __extension__ __PRETTY_FUNCTION__
))
1818 "Index operand too large for shufflevector mask!")(static_cast <bool> (llvm::ConstantInt::isValueValidForType
(I32Ty, C->getZExtValue()) && "Index operand too large for shufflevector mask!"
) ? void (0) : __assert_fail ("llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) && \"Index operand too large for shufflevector mask!\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 1818, __extension__ __PRETTY_FUNCTION__
))
;
1819 return C->getZExtValue();
1820}
1821
/// Emit the scalar (or vector) value denoted by an initializer list.
///
/// For scalar destinations this simply emits the single initializer (or a
/// null value for C++11 value-initialization).  For vector destinations the
/// result is built element by element; when an initializer is itself a
/// vector, or a swizzle (ExtVectorElementExpr) of a vector of the same width
/// as the result, the insertions are folded into shufflevector instructions
/// instead of extract+insert pairs, because LLVM optimizers generally do not
/// touch shuffles.
Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  (void)Ignore;
  assert (Ignore == false && "init list ignored");
  unsigned NumInitElements = E->getNumInits();

  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::VectorType *VType =
    dyn_cast<llvm::VectorType>(ConvertType(E->getType()));

  if (!VType) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the scalar.
      return EmitNullValue(E->getType());
    }
    // We have a scalar in braces. Just use the first element.
    return Visit(E->getInit(0));
  }

  unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();

  // Loop over initializers collecting the Value for each, and remembering
  // whether the source was swizzle (ExtVectorElementExpr). This will allow
  // us to fold the shuffle for the swizzle into the shuffle for the vector
  // initializer, since LLVM optimizers generally do not want to touch
  // shuffles.
  unsigned CurIdx = 0;            // next result element to be filled in
  bool VIsUndefShuffle = false;   // true when V is `shuffle(src, undef)`
  llvm::Value *V = llvm::UndefValue::get(VType);
  for (unsigned i = 0; i != NumInitElements; ++i) {
    Expr *IE = E->getInit(i);
    Value *Init = Visit(IE);
    SmallVector<int, 16> Args;  // shufflevector mask under construction

    llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());

    // Handle scalar elements.  If the scalar initializer is actually one
    // element of a different vector of the same width, use shuffle instead of
    // extract+insert.
    if (!VVT) {
      if (isa<ExtVectorElementExpr>(IE)) {
        llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);

        if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
                ->getNumElements() == ResElts) {
          llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
          Value *LHS = nullptr, *RHS = nullptr;
          if (CurIdx == 0) {
            // insert into undef -> shuffle (src, undef)
            // shufflemask must use an i32
            Args.push_back(getAsInt32(C, CGF.Int32Ty));
            Args.resize(ResElts, -1);

            LHS = EI->getVectorOperand();
            RHS = V;
            VIsUndefShuffle = true;
          } else if (VIsUndefShuffle) {
            // insert into undefshuffle && size match -> shuffle (v, src)
            llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
            // Keep the already-emitted elements of the running shuffle,
            // then append the freshly extracted lane from the RHS operand.
            for (unsigned j = 0; j != CurIdx; ++j)
              Args.push_back(getMaskElt(SVV, j, 0));
            Args.push_back(ResElts + C->getZExtValue());
            Args.resize(ResElts, -1);

            LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
            RHS = EI->getVectorOperand();
            VIsUndefShuffle = false;
          }
          if (!Args.empty()) {
            V = Builder.CreateShuffleVector(LHS, RHS, Args);
            ++CurIdx;
            continue;
          }
        }
      }
      // Fallback for scalars that cannot be folded into a shuffle.
      V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
                                      "vecinit");
      VIsUndefShuffle = false;
      ++CurIdx;
      continue;
    }

    unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();

    // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
    // input is the same width as the vector being constructed, generate an
    // optimized shuffle of the swizzle input into the result.
    unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
    if (isa<ExtVectorElementExpr>(IE)) {
      llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
      Value *SVOp = SVI->getOperand(0);
      auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());

      if (OpTy->getNumElements() == ResElts) {
        for (unsigned j = 0; j != CurIdx; ++j) {
          // If the current vector initializer is a shuffle with undef, merge
          // this shuffle directly into it.
          if (VIsUndefShuffle) {
            Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
          } else {
            Args.push_back(j);
          }
        }
        for (unsigned j = 0, je = InitElts; j != je; ++j)
          Args.push_back(getMaskElt(SVI, j, Offset));
        Args.resize(ResElts, -1);

        if (VIsUndefShuffle)
          V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);

        Init = SVOp;
      }
    }

    // Extend init to result vector length, and then shuffle its contribution
    // to the vector initializer into V.
    if (Args.empty()) {
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(j);
      Args.resize(ResElts, -1);
      Init = Builder.CreateShuffleVector(Init, Args, "vext");

      Args.clear();
      for (unsigned j = 0; j != CurIdx; ++j)
        Args.push_back(j);
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(j + Offset);
      Args.resize(ResElts, -1);
    }

    // If V is undef, make sure it ends up on the RHS of the shuffle to aid
    // merging subsequent shuffles into this one.
    if (CurIdx == 0)
      std::swap(V, Init);
    V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
    VIsUndefShuffle = isa<llvm::UndefValue>(Init);
    CurIdx += InitElts;
  }

  // FIXME: evaluate codegen vs. shuffling against constant null vector.
  // Emit remaining default initializers.
  llvm::Type *EltTy = VType->getElementType();

  // Emit remaining default initializers
  for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
    Value *Idx = Builder.getInt32(CurIdx);
    llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
    V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
  }
  return V;
}
1975
1976bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
1977 const Expr *E = CE->getSubExpr();
1978
1979 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
1980 return false;
1981
1982 if (isa<CXXThisExpr>(E->IgnoreParens())) {
1983 // We always assume that 'this' is never null.
1984 return false;
1985 }
1986
1987 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
1988 // And that glvalue casts are never null.
1989 if (ICE->isGLValue())
1990 return false;
1991 }
1992
1993 return true;
1994}
1995
1996// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
1997// have to handle a more broad range of conversions than explicit casts, as they
1998// handle things like function to ptr-to-function decay etc.
1999Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
2000 Expr *E = CE->getSubExpr();
2001 QualType DestTy = CE->getType();
2002 CastKind Kind = CE->getCastKind();
2003
2004 // These cases are generally not written to ignore the result of
2005 // evaluating their sub-expressions, so we clear this now.
2006 bool Ignored = TestAndClearIgnoreResultAssign();
2007
2008 // Since almost all cast kinds apply to scalars, this switch doesn't have
2009 // a default case, so the compiler will warn on a missing case. The cases
2010 // are in the same order as in the CastKind enum.
2011 switch (Kind) {
1
Control jumps to 'case CK_CPointerToObjCPointerCast:' at line 2033
2012 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!")::llvm::llvm_unreachable_internal("dependent cast kind in IR gen!"
, "clang/lib/CodeGen/CGExprScalar.cpp", 2012)
;
2013 case CK_BuiltinFnToFnPtr:
2014 llvm_unreachable("builtin functions are handled elsewhere")::llvm::llvm_unreachable_internal("builtin functions are handled elsewhere"
, "clang/lib/CodeGen/CGExprScalar.cpp", 2014)
;
2015
2016 case CK_LValueBitCast:
2017 case CK_ObjCObjectLValueCast: {
2018 Address Addr = EmitLValue(E).getAddress(CGF);
2019 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
2020 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
2021 return EmitLoadOfLValue(LV, CE->getExprLoc());
2022 }
2023
2024 case CK_LValueToRValueBitCast: {
2025 LValue SourceLVal = CGF.EmitLValue(E);
2026 Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
2027 CGF.ConvertTypeForMem(DestTy));
2028 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2029 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2030 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2031 }
2032
2033 case CK_CPointerToObjCPointerCast:
2034 case CK_BlockPointerToObjCPointerCast:
2035 case CK_AnyPointerToBlockPointerCast:
2036 case CK_BitCast: {
2037 Value *Src = Visit(const_cast<Expr*>(E));
2038 llvm::Type *SrcTy = Src->getType();
2039 llvm::Type *DstTy = ConvertType(DestTy);
2040 if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
2
Calling 'Type::isPtrOrPtrVectorTy'
8
Returning from 'Type::isPtrOrPtrVectorTy'
2041 SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
2042 llvm_unreachable("wrong cast for pointers in different address spaces"::llvm::llvm_unreachable_internal("wrong cast for pointers in different address spaces"
"(must be an address space cast)!", "clang/lib/CodeGen/CGExprScalar.cpp"
, 2043)
2043 "(must be an address space cast)!")::llvm::llvm_unreachable_internal("wrong cast for pointers in different address spaces"
"(must be an address space cast)!", "clang/lib/CodeGen/CGExprScalar.cpp"
, 2043)
;
2044 }
2045
2046 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
9
Assuming the condition is false
10
Taking false branch
2047 if (auto PT = DestTy->getAs<PointerType>())
2048 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
2049 /*MayBeNull=*/true,
2050 CodeGenFunction::CFITCK_UnrelatedCast,
2051 CE->getBeginLoc());
2052 }
2053
2054 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
11
Assuming field 'StrictVTablePointers' is 0
12
Taking false branch
2055 const QualType SrcType = E->getType();
2056
2057 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2058 // Casting to pointer that could carry dynamic information (provided by
2059 // invariant.group) requires launder.
2060 Src = Builder.CreateLaunderInvariantGroup(Src);
2061 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2062 // Casting to pointer that does not carry dynamic information (provided
2063 // by invariant.group) requires stripping it. Note that we don't do it
2064 // if the source could not be dynamic type and destination could be
2065 // dynamic because dynamic information is already laundered. It is
2066 // because launder(strip(src)) == launder(src), so there is no need to
2067 // add extra strip before launder.
2068 Src = Builder.CreateStripInvariantGroup(Src);
2069 }
2070 }
2071
2072 // Update heapallocsite metadata when there is an explicit pointer cast.
2073 if (auto *CI
13.1
'CI' is null
13.1
'CI' is null
= dyn_cast<llvm::CallBase>(Src)) {
13
Assuming 'Src' is not a 'CallBase'
14
Taking false branch
2074 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE)) {
2075 QualType PointeeType = DestTy->getPointeeType();
2076 if (!PointeeType.isNull())
2077 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
2078 CE->getExprLoc());
2079 }
2080 }
2081
2082 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2083 // same element type, use the llvm.experimental.vector.insert intrinsic to
2084 // perform the bitcast.
2085 if (const auto *FixedSrc
15.1
'FixedSrc' is non-null
15.1
'FixedSrc' is non-null
= dyn_cast<llvm::FixedVectorType>(SrcTy)) {
15
Assuming 'SrcTy' is a 'FixedVectorType'
16
Taking true branch
2086 if (const auto *ScalableDst
17.1
'ScalableDst' is non-null
17.1
'ScalableDst' is non-null
= dyn_cast<llvm::ScalableVectorType>(DstTy)) {
17
Assuming 'DstTy' is a 'ScalableVectorType'
18
Taking true branch
2087 // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
2088 // vector, use a vector insert and bitcast the result.
2089 bool NeedsBitCast = false;
2090 auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
2091 llvm::Type *OrigType = DstTy;
2092 if (ScalableDst == PredType &&
19
Assuming 'ScalableDst' is equal to 'PredType'
21
Taking true branch
2093 FixedSrc->getElementType() == Builder.getInt8Ty()) {
20
Assuming the condition is true
2094 DstTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
2095 ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy);
22
Assuming 'DstTy' is not a 'ScalableVectorType'
23
Null pointer value stored to 'ScalableDst'
2096 NeedsBitCast = true;
2097 }
2098 if (FixedSrc->getElementType() == ScalableDst->getElementType()) {
24
Called C++ object pointer is null
2099 llvm::Value *UndefVec = llvm::UndefValue::get(DstTy);
2100 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2101 llvm::Value *Result = Builder.CreateInsertVector(
2102 DstTy, UndefVec, Src, Zero, "castScalableSve");
2103 if (NeedsBitCast)
2104 Result = Builder.CreateBitCast(Result, OrigType);
2105 return Result;
2106 }
2107 }
2108 }
2109
2110 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2111 // same element type, use the llvm.experimental.vector.extract intrinsic to
2112 // perform the bitcast.
2113 if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
2114 if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2115 // If we are casting a scalable 16 x i1 predicate vector to a fixed i8
2116 // vector, bitcast the source and use a vector extract.
2117 auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
2118 if (ScalableSrc == PredType &&
2119 FixedDst->getElementType() == Builder.getInt8Ty()) {
2120 SrcTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
2121 ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy);
2122 Src = Builder.CreateBitCast(Src, SrcTy);
2123 }
2124 if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
2125 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2126 return Builder.CreateExtractVector(DstTy, Src, Zero, "castFixedSve");
2127 }
2128 }
2129 }
2130
2131 // Perform VLAT <-> VLST bitcast through memory.
2132 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
2133 // require the element types of the vectors to be the same, we
2134 // need to keep this around for bitcasts between VLAT <-> VLST where
2135 // the element types of the vectors are not the same, until we figure
2136 // out a better way of doing these casts.
2137 if ((isa<llvm::FixedVectorType>(SrcTy) &&
2138 isa<llvm::ScalableVectorType>(DstTy)) ||
2139 (isa<llvm::ScalableVectorType>(SrcTy) &&
2140 isa<llvm::FixedVectorType>(DstTy))) {
2141 Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
2142 LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
2143 CGF.EmitStoreOfScalar(Src, LV);
2144 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy),
2145 "castFixedSve");
2146 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2147 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2148 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2149 }
2150
2151 return Builder.CreateBitCast(Src, DstTy);
2152 }
2153 case CK_AddressSpaceConversion: {
2154 Expr::EvalResult Result;
2155 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2156 Result.Val.isNullPointer()) {
2157 // If E has side effect, it is emitted even if its final result is a
2158 // null pointer. In that case, a DCE pass should be able to
2159 // eliminate the useless instructions emitted during translating E.
2160 if (Result.HasSideEffects)
2161 Visit(E);
2162 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
2163 ConvertType(DestTy)), DestTy);
2164 }
2165 // Since target may map different address spaces in AST to the same address
2166 // space, an address space conversion may end up as a bitcast.
2167 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2168 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2169 DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
2170 }
2171 case CK_AtomicToNonAtomic:
2172 case CK_NonAtomicToAtomic:
2173 case CK_UserDefinedConversion:
2174 return Visit(const_cast<Expr*>(E));
2175
2176 case CK_NoOp: {
2177 llvm::Value *V = Visit(const_cast<Expr *>(E));
2178 if (V) {
2179 // CK_NoOp can model a pointer qualification conversion, which can remove
2180 // an array bound and change the IR type.
2181 // FIXME: Once pointee types are removed from IR, remove this.
2182 llvm::Type *T = ConvertType(DestTy);
2183 if (T != V->getType())
2184 V = Builder.CreateBitCast(V, T);
2185 }
2186 return V;
2187 }
2188
2189 case CK_BaseToDerived: {
2190 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2191 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!")(static_cast <bool> (DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!"
) ? void (0) : __assert_fail ("DerivedClassDecl && \"BaseToDerived arg isn't a C++ object pointer!\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2191, __extension__ __PRETTY_FUNCTION__
))
;
2192
2193 Address Base = CGF.EmitPointerWithAlignment(E);
2194 Address Derived =
2195 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2196 CE->path_begin(), CE->path_end(),
2197 CGF.ShouldNullCheckClassCastValue(CE));
2198
2199 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2200 // performed and the object is not of the derived type.
2201 if (CGF.sanitizePerformTypeCheck())
2202 CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
2203 Derived.getPointer(), DestTy->getPointeeType());
2204
2205 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2206 CGF.EmitVTablePtrCheckForCast(
2207 DestTy->getPointeeType(), Derived.getPointer(),
2208 /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast,
2209 CE->getBeginLoc());
2210
2211 return Derived.getPointer();
2212 }
2213 case CK_UncheckedDerivedToBase:
2214 case CK_DerivedToBase: {
2215 // The EmitPointerWithAlignment path does this fine; just discard
2216 // the alignment.
2217 return CGF.EmitPointerWithAlignment(CE).getPointer();
2218 }
2219
2220 case CK_Dynamic: {
2221 Address V = CGF.EmitPointerWithAlignment(E);
2222 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2223 return CGF.EmitDynamicCast(V, DCE);
2224 }
2225
2226 case CK_ArrayToPointerDecay:
2227 return CGF.EmitArrayToPointerDecay(E).getPointer();
2228 case CK_FunctionToPointerDecay:
2229 return EmitLValue(E).getPointer(CGF);
2230
2231 case CK_NullToPointer:
2232 if (MustVisitNullValue(E))
2233 CGF.EmitIgnoredExpr(E);
2234
2235 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2236 DestTy);
2237
2238 case CK_NullToMemberPointer: {
2239 if (MustVisitNullValue(E))
2240 CGF.EmitIgnoredExpr(E);
2241
2242 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2243 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2244 }
2245
2246 case CK_ReinterpretMemberPointer:
2247 case CK_BaseToDerivedMemberPointer:
2248 case CK_DerivedToBaseMemberPointer: {
2249 Value *Src = Visit(E);
2250
2251 // Note that the AST doesn't distinguish between checked and
2252 // unchecked member pointer conversions, so we always have to
2253 // implement checked conversions here. This is inefficient when
2254 // actual control flow may be required in order to perform the
2255 // check, which it is for data member pointers (but not member
2256 // function pointers on Itanium and ARM).
2257 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2258 }
2259
2260 case CK_ARCProduceObject:
2261 return CGF.EmitARCRetainScalarExpr(E);
2262 case CK_ARCConsumeObject:
2263 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2264 case CK_ARCReclaimReturnedObject:
2265 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2266 case CK_ARCExtendBlockObject:
2267 return CGF.EmitARCExtendBlockObject(E);
2268
2269 case CK_CopyAndAutoreleaseBlockObject:
2270 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2271
2272 case CK_FloatingRealToComplex:
2273 case CK_FloatingComplexCast:
2274 case CK_IntegralRealToComplex:
2275 case CK_IntegralComplexCast:
2276 case CK_IntegralComplexToFloatingComplex:
2277 case CK_FloatingComplexToIntegralComplex:
2278 case CK_ConstructorConversion:
2279 case CK_ToUnion:
2280 llvm_unreachable("scalar cast to non-scalar value")::llvm::llvm_unreachable_internal("scalar cast to non-scalar value"
, "clang/lib/CodeGen/CGExprScalar.cpp", 2280)
;
2281
2282 case CK_LValueToRValue:
2283 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy))(static_cast <bool> (CGF.getContext().hasSameUnqualifiedType
(E->getType(), DestTy)) ? void (0) : __assert_fail ("CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy)"
, "clang/lib/CodeGen/CGExprScalar.cpp", 2283, __extension__ __PRETTY_FUNCTION__
))
;
2284 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!")(static_cast <bool> (E->isGLValue() && "lvalue-to-rvalue applied to r-value!"
) ? void (0) : __assert_fail ("E->isGLValue() && \"lvalue-to-rvalue applied to r-value!\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2284, __extension__ __PRETTY_FUNCTION__
))
;
2285 return Visit(const_cast<Expr*>(E));
2286
2287 case CK_IntegralToPointer: {
2288 Value *Src = Visit(const_cast<Expr*>(E));
2289
2290 // First, convert to the correct width so that we control the kind of
2291 // extension.
2292 auto DestLLVMTy = ConvertType(DestTy);
2293 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2294 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2295 llvm::Value* IntResult =
2296 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2297
2298 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2299
2300 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2301 // Going from integer to pointer that could be dynamic requires reloading
2302 // dynamic information from invariant.group.
2303 if (DestTy.mayBeDynamicClass())
2304 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2305 }
2306 return IntToPtr;
2307 }
2308 case CK_PointerToIntegral: {
2309 assert(!DestTy->isBooleanType() && "bool should use PointerToBool")(static_cast <bool> (!DestTy->isBooleanType() &&
"bool should use PointerToBool") ? void (0) : __assert_fail (
"!DestTy->isBooleanType() && \"bool should use PointerToBool\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2309, __extension__ __PRETTY_FUNCTION__
))
;
2310 auto *PtrExpr = Visit(E);
2311
2312 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2313 const QualType SrcType = E->getType();
2314
2315 // Casting to integer requires stripping dynamic information as it does
2316 // not carries it.
2317 if (SrcType.mayBeDynamicClass())
2318 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2319 }
2320
2321 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2322 }
2323 case CK_ToVoid: {
2324 CGF.EmitIgnoredExpr(E);
2325 return nullptr;
2326 }
2327 case CK_MatrixCast: {
2328 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2329 CE->getExprLoc());
2330 }
2331 case CK_VectorSplat: {
2332 llvm::Type *DstTy = ConvertType(DestTy);
2333 Value *Elt = Visit(const_cast<Expr*>(E));
2334 // Splat the element across to all elements
2335 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
2336 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2337 }
2338
2339 case CK_FixedPointCast:
2340 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2341 CE->getExprLoc());
2342
2343 case CK_FixedPointToBoolean:
2344 assert(E->getType()->isFixedPointType() &&(static_cast <bool> (E->getType()->isFixedPointType
() && "Expected src type to be fixed point type") ? void
(0) : __assert_fail ("E->getType()->isFixedPointType() && \"Expected src type to be fixed point type\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2345, __extension__ __PRETTY_FUNCTION__
))
2345 "Expected src type to be fixed point type")(static_cast <bool> (E->getType()->isFixedPointType
() && "Expected src type to be fixed point type") ? void
(0) : __assert_fail ("E->getType()->isFixedPointType() && \"Expected src type to be fixed point type\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2345, __extension__ __PRETTY_FUNCTION__
))
;
2346 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type")(static_cast <bool> (DestTy->isBooleanType() &&
"Expected dest type to be boolean type") ? void (0) : __assert_fail
("DestTy->isBooleanType() && \"Expected dest type to be boolean type\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2346, __extension__ __PRETTY_FUNCTION__
))
;
2347 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2348 CE->getExprLoc());
2349
2350 case CK_FixedPointToIntegral:
2351 assert(E->getType()->isFixedPointType() &&(static_cast <bool> (E->getType()->isFixedPointType
() && "Expected src type to be fixed point type") ? void
(0) : __assert_fail ("E->getType()->isFixedPointType() && \"Expected src type to be fixed point type\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2352, __extension__ __PRETTY_FUNCTION__
))
2352 "Expected src type to be fixed point type")(static_cast <bool> (E->getType()->isFixedPointType
() && "Expected src type to be fixed point type") ? void
(0) : __assert_fail ("E->getType()->isFixedPointType() && \"Expected src type to be fixed point type\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2352, __extension__ __PRETTY_FUNCTION__
))
;
2353 assert(DestTy->isIntegerType() && "Expected dest type to be an integer")(static_cast <bool> (DestTy->isIntegerType() &&
"Expected dest type to be an integer") ? void (0) : __assert_fail
("DestTy->isIntegerType() && \"Expected dest type to be an integer\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2353, __extension__ __PRETTY_FUNCTION__
))
;
2354 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2355 CE->getExprLoc());
2356
2357 case CK_IntegralToFixedPoint:
2358 assert(E->getType()->isIntegerType() &&(static_cast <bool> (E->getType()->isIntegerType(
) && "Expected src type to be an integer") ? void (0)
: __assert_fail ("E->getType()->isIntegerType() && \"Expected src type to be an integer\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2359, __extension__ __PRETTY_FUNCTION__
))
2359 "Expected src type to be an integer")(static_cast <bool> (E->getType()->isIntegerType(
) && "Expected src type to be an integer") ? void (0)
: __assert_fail ("E->getType()->isIntegerType() && \"Expected src type to be an integer\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2359, __extension__ __PRETTY_FUNCTION__
))
;
2360 assert(DestTy->isFixedPointType() &&(static_cast <bool> (DestTy->isFixedPointType() &&
"Expected dest type to be fixed point type") ? void (0) : __assert_fail
("DestTy->isFixedPointType() && \"Expected dest type to be fixed point type\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2361, __extension__ __PRETTY_FUNCTION__
))
2361 "Expected dest type to be fixed point type")(static_cast <bool> (DestTy->isFixedPointType() &&
"Expected dest type to be fixed point type") ? void (0) : __assert_fail
("DestTy->isFixedPointType() && \"Expected dest type to be fixed point type\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2361, __extension__ __PRETTY_FUNCTION__
))
;
2362 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2363 CE->getExprLoc());
2364
2365 case CK_IntegralCast: {
2366 ScalarConversionOpts Opts;
2367 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2368 if (!ICE->isPartOfExplicitCast())
2369 Opts = ScalarConversionOpts(CGF.SanOpts);
2370 }
2371 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2372 CE->getExprLoc(), Opts);
2373 }
2374 case CK_IntegralToFloating:
2375 case CK_FloatingToIntegral:
2376 case CK_FloatingCast:
2377 case CK_FixedPointToFloating:
2378 case CK_FloatingToFixedPoint: {
2379 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2380 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2381 CE->getExprLoc());
2382 }
2383 case CK_BooleanToSignedIntegral: {
2384 ScalarConversionOpts Opts;
2385 Opts.TreatBooleanAsSigned = true;
2386 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2387 CE->getExprLoc(), Opts);
2388 }
2389 case CK_IntegralToBoolean:
2390 return EmitIntToBoolConversion(Visit(E));
2391 case CK_PointerToBoolean:
2392 return EmitPointerToBoolConversion(Visit(E), E->getType());
2393 case CK_FloatingToBoolean: {
2394 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2395 return EmitFloatToBoolConversion(Visit(E));
2396 }
2397 case CK_MemberPointerToBoolean: {
2398 llvm::Value *MemPtr = Visit(E);
2399 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2400 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2401 }
2402
2403 case CK_FloatingComplexToReal:
2404 case CK_IntegralComplexToReal:
2405 return CGF.EmitComplexExpr(E, false, true).first;
2406
2407 case CK_FloatingComplexToBoolean:
2408 case CK_IntegralComplexToBoolean: {
2409 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2410
2411 // TODO: kill this function off, inline appropriate case here
2412 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2413 CE->getExprLoc());
2414 }
2415
2416 case CK_ZeroToOCLOpaqueType: {
2417 assert((DestTy->isEventT() || DestTy->isQueueT() ||(static_cast <bool> ((DestTy->isEventT() || DestTy->
isQueueT() || DestTy->isOCLIntelSubgroupAVCType()) &&
"CK_ZeroToOCLEvent cast on non-event type") ? void (0) : __assert_fail
("(DestTy->isEventT() || DestTy->isQueueT() || DestTy->isOCLIntelSubgroupAVCType()) && \"CK_ZeroToOCLEvent cast on non-event type\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2419, __extension__ __PRETTY_FUNCTION__
))
2418 DestTy->isOCLIntelSubgroupAVCType()) &&(static_cast <bool> ((DestTy->isEventT() || DestTy->
isQueueT() || DestTy->isOCLIntelSubgroupAVCType()) &&
"CK_ZeroToOCLEvent cast on non-event type") ? void (0) : __assert_fail
("(DestTy->isEventT() || DestTy->isQueueT() || DestTy->isOCLIntelSubgroupAVCType()) && \"CK_ZeroToOCLEvent cast on non-event type\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2419, __extension__ __PRETTY_FUNCTION__
))
2419 "CK_ZeroToOCLEvent cast on non-event type")(static_cast <bool> ((DestTy->isEventT() || DestTy->
isQueueT() || DestTy->isOCLIntelSubgroupAVCType()) &&
"CK_ZeroToOCLEvent cast on non-event type") ? void (0) : __assert_fail
("(DestTy->isEventT() || DestTy->isQueueT() || DestTy->isOCLIntelSubgroupAVCType()) && \"CK_ZeroToOCLEvent cast on non-event type\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2419, __extension__ __PRETTY_FUNCTION__
))
;
2420 return llvm::Constant::getNullValue(ConvertType(DestTy));
2421 }
2422
2423 case CK_IntToOCLSampler:
2424 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2425
2426 } // end of switch
2427
2428 llvm_unreachable("unknown scalar cast")::llvm::llvm_unreachable_internal("unknown scalar cast", "clang/lib/CodeGen/CGExprScalar.cpp"
, 2428)
;
2429}
2430
2431Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2432 CodeGenFunction::StmtExprEvaluation eval(CGF);
2433 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2434 !E->getType()->isVoidType());
2435 if (!RetAlloca.isValid())
2436 return nullptr;
2437 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2438 E->getExprLoc());
2439}
2440
2441Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2442 CodeGenFunction::RunCleanupsScope Scope(CGF);
2443 Value *V = Visit(E->getSubExpr());
2444 // Defend against dominance problems caused by jumps out of expression
2445 // evaluation through the shared cleanup block.
2446 Scope.ForceCleanup({&V});
2447 return V;
2448}
2449
2450//===----------------------------------------------------------------------===//
2451// Unary Operators
2452//===----------------------------------------------------------------------===//
2453
2454static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2455 llvm::Value *InVal, bool IsInc,
2456 FPOptions FPFeatures) {
2457 BinOpInfo BinOp;
2458 BinOp.LHS = InVal;
2459 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2460 BinOp.Ty = E->getType();
2461 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2462 BinOp.FPFeatures = FPFeatures;
2463 BinOp.E = E;
2464 return BinOp;
2465}
2466
2467llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2468 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2469 llvm::Value *Amount =
2470 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
2471 StringRef Name = IsInc ? "inc" : "dec";
2472 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2473 case LangOptions::SOB_Defined:
2474 return Builder.CreateAdd(InVal, Amount, Name);
2475 case LangOptions::SOB_Undefined:
2476 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2477 return Builder.CreateNSWAdd(InVal, Amount, Name);
2478 LLVM_FALLTHROUGH[[gnu::fallthrough]];
2479 case LangOptions::SOB_Trapping:
2480 if (!E->canOverflow())
2481 return Builder.CreateNSWAdd(InVal, Amount, Name);
2482 return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2483 E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2484 }
2485 llvm_unreachable("Unknown SignedOverflowBehaviorTy")::llvm::llvm_unreachable_internal("Unknown SignedOverflowBehaviorTy"
, "clang/lib/CodeGen/CGExprScalar.cpp", 2485)
;
2486}
2487
2488namespace {
2489/// Handles check and update for lastprivate conditional variables.
2490class OMPLastprivateConditionalUpdateRAII {
2491private:
2492 CodeGenFunction &CGF;
2493 const UnaryOperator *E;
2494
2495public:
2496 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
2497 const UnaryOperator *E)
2498 : CGF(CGF), E(E) {}
2499 ~OMPLastprivateConditionalUpdateRAII() {
2500 if (CGF.getLangOpts().OpenMP)
2501 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
2502 CGF, E->getSubExpr());
2503 }
2504};
2505} // namespace
2506
2507llvm::Value *
2508ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2509 bool isInc, bool isPre) {
2510 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
2511 QualType type = E->getSubExpr()->getType();
2512 llvm::PHINode *atomicPHI = nullptr;
2513 llvm::Value *value;
2514 llvm::Value *input;
2515
2516 int amount = (isInc ? 1 : -1);
2517 bool isSubtraction = !isInc;
2518
2519 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2520 type = atomicTy->getValueType();
2521 if (isInc && type->isBooleanType()) {
2522 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
2523 if (isPre) {
2524 Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified())
2525 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
2526 return Builder.getTrue();
2527 }
2528 // For atomic bool increment, we just store true and return it for
2529 // preincrement, do an atomic swap with true for postincrement
2530 return Builder.CreateAtomicRMW(
2531 llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True,
2532 llvm::AtomicOrdering::SequentiallyConsistent);
2533 }
2534 // Special case for atomic increment / decrement on integers, emit
2535 // atomicrmw instructions. We skip this if we want to be doing overflow
2536 // checking, and fall into the slow path with the atomic cmpxchg loop.
2537 if (!type->isBooleanType() && type->isIntegerType() &&
2538 !(type->isUnsignedIntegerType() &&
2539 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2540 CGF.getLangOpts().getSignedOverflowBehavior() !=
2541 LangOptions::SOB_Trapping) {
2542 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2543 llvm::AtomicRMWInst::Sub;
2544 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2545 llvm::Instruction::Sub;
2546 llvm::Value *amt = CGF.EmitToMemory(
2547 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
2548 llvm::Value *old =
2549 Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt,
2550 llvm::AtomicOrdering::SequentiallyConsistent);
2551 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
2552 }
2553 value = EmitLoadOfLValue(LV, E->getExprLoc());
2554 input = value;
2555 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2556 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2557 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2558 value = CGF.EmitToMemory(value, type);
2559 Builder.CreateBr(opBB);
2560 Builder.SetInsertPoint(opBB);
2561 atomicPHI = Builder.CreatePHI(value->getType(), 2);
2562 atomicPHI->addIncoming(value, startBB);
2563 value = atomicPHI;
2564 } else {
2565 value = EmitLoadOfLValue(LV, E->getExprLoc());
2566 input = value;
2567 }
2568
2569 // Special case of integer increment that we have to check first: bool++.
2570 // Due to promotion rules, we get:
2571 // bool++ -> bool = bool + 1
2572 // -> bool = (int)bool + 1
2573 // -> bool = ((int)bool + 1 != 0)
2574 // An interesting aspect of this is that increment is always true.
2575 // Decrement does not have this property.
2576 if (isInc && type->isBooleanType()) {
2577 value = Builder.getTrue();
2578
2579 // Most common case by far: integer increment.
2580 } else if (type->isIntegerType()) {
2581 QualType promotedType;
2582 bool canPerformLossyDemotionCheck = false;
2583 if (type->isPromotableIntegerType()) {
2584 promotedType = CGF.getContext().getPromotedIntegerType(type);
2585 assert(promotedType != type && "Shouldn't promote to the same type.")(static_cast <bool> (promotedType != type && "Shouldn't promote to the same type."
) ? void (0) : __assert_fail ("promotedType != type && \"Shouldn't promote to the same type.\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2585, __extension__ __PRETTY_FUNCTION__
))
;
2586 canPerformLossyDemotionCheck = true;
2587 canPerformLossyDemotionCheck &=
2588 CGF.getContext().getCanonicalType(type) !=
2589 CGF.getContext().getCanonicalType(promotedType);
2590 canPerformLossyDemotionCheck &=
2591 PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
2592 type, promotedType);
2593 assert((!canPerformLossyDemotionCheck ||(static_cast <bool> ((!canPerformLossyDemotionCheck || type
->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType
() || ConvertType(type)->getScalarSizeInBits() == ConvertType
(promotedType)->getScalarSizeInBits()) && "The following check expects that if we do promotion to different "
"underlying canonical type, at least one of the types (either "
"base or promoted) will be signed, or the bitwidths will match."
) ? void (0) : __assert_fail ("(!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType(type)->getScalarSizeInBits() == ConvertType(promotedType)->getScalarSizeInBits()) && \"The following check expects that if we do promotion to different \" \"underlying canonical type, at least one of the types (either \" \"base or promoted) will be signed, or the bitwidths will match.\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2600, __extension__ __PRETTY_FUNCTION__
))
2594 type->isSignedIntegerOrEnumerationType() ||(static_cast <bool> ((!canPerformLossyDemotionCheck || type
->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType
() || ConvertType(type)->getScalarSizeInBits() == ConvertType
(promotedType)->getScalarSizeInBits()) && "The following check expects that if we do promotion to different "
"underlying canonical type, at least one of the types (either "
"base or promoted) will be signed, or the bitwidths will match."
) ? void (0) : __assert_fail ("(!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType(type)->getScalarSizeInBits() == ConvertType(promotedType)->getScalarSizeInBits()) && \"The following check expects that if we do promotion to different \" \"underlying canonical type, at least one of the types (either \" \"base or promoted) will be signed, or the bitwidths will match.\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2600, __extension__ __PRETTY_FUNCTION__
))
2595 promotedType->isSignedIntegerOrEnumerationType() ||(static_cast <bool> ((!canPerformLossyDemotionCheck || type
->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType
() || ConvertType(type)->getScalarSizeInBits() == ConvertType
(promotedType)->getScalarSizeInBits()) && "The following check expects that if we do promotion to different "
"underlying canonical type, at least one of the types (either "
"base or promoted) will be signed, or the bitwidths will match."
) ? void (0) : __assert_fail ("(!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType(type)->getScalarSizeInBits() == ConvertType(promotedType)->getScalarSizeInBits()) && \"The following check expects that if we do promotion to different \" \"underlying canonical type, at least one of the types (either \" \"base or promoted) will be signed, or the bitwidths will match.\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2600, __extension__ __PRETTY_FUNCTION__
))
2596 ConvertType(type)->getScalarSizeInBits() ==(static_cast <bool> ((!canPerformLossyDemotionCheck || type
->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType
() || ConvertType(type)->getScalarSizeInBits() == ConvertType
(promotedType)->getScalarSizeInBits()) && "The following check expects that if we do promotion to different "
"underlying canonical type, at least one of the types (either "
"base or promoted) will be signed, or the bitwidths will match."
) ? void (0) : __assert_fail ("(!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType(type)->getScalarSizeInBits() == ConvertType(promotedType)->getScalarSizeInBits()) && \"The following check expects that if we do promotion to different \" \"underlying canonical type, at least one of the types (either \" \"base or promoted) will be signed, or the bitwidths will match.\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2600, __extension__ __PRETTY_FUNCTION__
))
2597 ConvertType(promotedType)->getScalarSizeInBits()) &&(static_cast <bool> ((!canPerformLossyDemotionCheck || type
->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType
() || ConvertType(type)->getScalarSizeInBits() == ConvertType
(promotedType)->getScalarSizeInBits()) && "The following check expects that if we do promotion to different "
"underlying canonical type, at least one of the types (either "
"base or promoted) will be signed, or the bitwidths will match."
) ? void (0) : __assert_fail ("(!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType(type)->getScalarSizeInBits() == ConvertType(promotedType)->getScalarSizeInBits()) && \"The following check expects that if we do promotion to different \" \"underlying canonical type, at least one of the types (either \" \"base or promoted) will be signed, or the bitwidths will match.\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2600, __extension__ __PRETTY_FUNCTION__
))
2598 "The following check expects that if we do promotion to different "(static_cast <bool> ((!canPerformLossyDemotionCheck || type
->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType
() || ConvertType(type)->getScalarSizeInBits() == ConvertType
(promotedType)->getScalarSizeInBits()) && "The following check expects that if we do promotion to different "
"underlying canonical type, at least one of the types (either "
"base or promoted) will be signed, or the bitwidths will match."
) ? void (0) : __assert_fail ("(!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType(type)->getScalarSizeInBits() == ConvertType(promotedType)->getScalarSizeInBits()) && \"The following check expects that if we do promotion to different \" \"underlying canonical type, at least one of the types (either \" \"base or promoted) will be signed, or the bitwidths will match.\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2600, __extension__ __PRETTY_FUNCTION__
))
2599 "underlying canonical type, at least one of the types (either "(static_cast <bool> ((!canPerformLossyDemotionCheck || type
->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType
() || ConvertType(type)->getScalarSizeInBits() == ConvertType
(promotedType)->getScalarSizeInBits()) && "The following check expects that if we do promotion to different "
"underlying canonical type, at least one of the types (either "
"base or promoted) will be signed, or the bitwidths will match."
) ? void (0) : __assert_fail ("(!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType(type)->getScalarSizeInBits() == ConvertType(promotedType)->getScalarSizeInBits()) && \"The following check expects that if we do promotion to different \" \"underlying canonical type, at least one of the types (either \" \"base or promoted) will be signed, or the bitwidths will match.\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2600, __extension__ __PRETTY_FUNCTION__
))
2600 "base or promoted) will be signed, or the bitwidths will match.")(static_cast <bool> ((!canPerformLossyDemotionCheck || type
->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType
() || ConvertType(type)->getScalarSizeInBits() == ConvertType
(promotedType)->getScalarSizeInBits()) && "The following check expects that if we do promotion to different "
"underlying canonical type, at least one of the types (either "
"base or promoted) will be signed, or the bitwidths will match."
) ? void (0) : __assert_fail ("(!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType(type)->getScalarSizeInBits() == ConvertType(promotedType)->getScalarSizeInBits()) && \"The following check expects that if we do promotion to different \" \"underlying canonical type, at least one of the types (either \" \"base or promoted) will be signed, or the bitwidths will match.\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2600, __extension__ __PRETTY_FUNCTION__
))
;
2601 }
2602 if (CGF.SanOpts.hasOneOf(
2603 SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
2604 canPerformLossyDemotionCheck) {
2605 // While `x += 1` (for `x` with width less than int) is modeled as
2606 // promotion+arithmetics+demotion, and we can catch lossy demotion with
2607 // ease; inc/dec with width less than int can't overflow because of
2608 // promotion rules, so we omit promotion+demotion, which means that we can
2609 // not catch lossy "demotion". Because we still want to catch these cases
2610 // when the sanitizer is enabled, we perform the promotion, then perform
2611 // the increment/decrement in the wider type, and finally
2612 // perform the demotion. This will catch lossy demotions.
2613
2614 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
2615 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2616 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2617 // Do pass non-default ScalarConversionOpts so that sanitizer check is
2618 // emitted.
2619 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
2620 ScalarConversionOpts(CGF.SanOpts));
2621
2622 // Note that signed integer inc/dec with width less than int can't
2623 // overflow because of promotion rules; we're just eliding a few steps
2624 // here.
2625 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2626 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
2627 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2628 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
2629 value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2630 E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2631 } else {
2632 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2633 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2634 }
2635
2636 // Next most common: pointer increment.
2637 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2638 QualType type = ptr->getPointeeType();
2639
2640 // VLA types don't have constant size.
2641 if (const VariableArrayType *vla
2642 = CGF.getContext().getAsVariableArrayType(type)) {
2643 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
2644 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
2645 llvm::Type *elemTy = value->getType()->getPointerElementType();
2646 if (CGF.getLangOpts().isSignedOverflowDefined())
2647 value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
2648 else
2649 value = CGF.EmitCheckedInBoundsGEP(
2650 elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
2651 E->getExprLoc(), "vla.inc");
2652
2653 // Arithmetic on function pointers (!) is just +-1.
2654 } else if (type->isFunctionType()) {
2655 llvm::Value *amt = Builder.getInt32(amount);
2656
2657 value = CGF.EmitCastToVoidPtr(value);
2658 if (CGF.getLangOpts().isSignedOverflowDefined())
2659 value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
2660 else
2661 value = CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
2662 /*SignedIndices=*/false,
2663 isSubtraction, E->getExprLoc(),
2664 "incdec.funcptr");
2665 value = Builder.CreateBitCast(value, input->getType());
2666
2667 // For everything else, we can just do a simple increment.
2668 } else {
2669 llvm::Value *amt = Builder.getInt32(amount);
2670 llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
2671 if (CGF.getLangOpts().isSignedOverflowDefined())
2672 value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");
2673 else
2674 value = CGF.EmitCheckedInBoundsGEP(
2675 elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,
2676 E->getExprLoc(), "incdec.ptr");
2677 }
2678
2679 // Vector increment/decrement.
2680 } else if (type->isVectorType()) {
2681 if (type->hasIntegerRepresentation()) {
2682 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
2683
2684 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2685 } else {
2686 value = Builder.CreateFAdd(
2687 value,
2688 llvm::ConstantFP::get(value->getType(), amount),
2689 isInc ? "inc" : "dec");
2690 }
2691
2692 // Floating point.
2693 } else if (type->isRealFloatingType()) {
2694 // Add the inc/dec to the real part.
2695 llvm::Value *amt;
2696 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
2697
2698 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2699 // Another special case: half FP increment should be done via float
2700 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2701 value = Builder.CreateCall(
2702 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
2703 CGF.CGM.FloatTy),
2704 input, "incdec.conv");
2705 } else {
2706 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
2707 }
2708 }
2709
2710 if (value->getType()->isFloatTy())
2711 amt = llvm::ConstantFP::get(VMContext,
2712 llvm::APFloat(static_cast<float>(amount)));
2713 else if (value->getType()->isDoubleTy())
2714 amt = llvm::ConstantFP::get(VMContext,
2715 llvm::APFloat(static_cast<double>(amount)));
2716 else {
2717 // Remaining types are Half, LongDouble, __ibm128 or __float128. Convert
2718 // from float.
2719 llvm::APFloat F(static_cast<float>(amount));
2720 bool ignored;
2721 const llvm::fltSemantics *FS;
2722 // Don't use getFloatTypeSemantics because Half isn't
2723 // necessarily represented using the "half" LLVM type.
2724 if (value->getType()->isFP128Ty())
2725 FS = &CGF.getTarget().getFloat128Format();
2726 else if (value->getType()->isHalfTy())
2727 FS = &CGF.getTarget().getHalfFormat();
2728 else if (value->getType()->isPPC_FP128Ty())
2729 FS = &CGF.getTarget().getIbm128Format();
2730 else
2731 FS = &CGF.getTarget().getLongDoubleFormat();
2732 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
2733 amt = llvm::ConstantFP::get(VMContext, F);
2734 }
2735 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
2736
2737 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2738 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2739 value = Builder.CreateCall(
2740 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
2741 CGF.CGM.FloatTy),
2742 value, "incdec.conv");
2743 } else {
2744 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
2745 }
2746 }
2747
2748 // Fixed-point types.
2749 } else if (type->isFixedPointType()) {
2750 // Fixed-point types are tricky. In some cases, it isn't possible to
2751 // represent a 1 or a -1 in the type at all. Piggyback off of
2752 // EmitFixedPointBinOp to avoid having to reimplement saturation.
2753 BinOpInfo Info;
2754 Info.E = E;
2755 Info.Ty = E->getType();
2756 Info.Opcode = isInc ? BO_Add : BO_Sub;
2757 Info.LHS = value;
2758 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
2759 // If the type is signed, it's better to represent this as +(-1) or -(-1),
2760 // since -1 is guaranteed to be representable.
2761 if (type->isSignedFixedPointType()) {
2762 Info.Opcode = isInc ? BO_Sub : BO_Add;
2763 Info.RHS = Builder.CreateNeg(Info.RHS);
2764 }
2765 // Now, convert from our invented integer literal to the type of the unary
2766 // op. This will upscale and saturate if necessary. This value can become
2767 // undef in some cases.
2768 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
2769 auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
2770 Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
2771 value = EmitFixedPointBinOp(Info);
2772
2773 // Objective-C pointer types.
2774 } else {
2775 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
2776 value = CGF.EmitCastToVoidPtr(value);
2777
2778 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
2779 if (!isInc) size = -size;
2780 llvm::Value *sizeValue =
2781 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
2782
2783 if (CGF.getLangOpts().isSignedOverflowDefined())
2784 value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
2785 else
2786 value = CGF.EmitCheckedInBoundsGEP(
2787 CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,
2788 E->getExprLoc(), "incdec.objptr");
2789 value = Builder.CreateBitCast(value, input->getType());
2790 }
2791
2792 if (atomicPHI) {
2793 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
2794 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2795 auto Pair = CGF.EmitAtomicCompareExchange(
2796 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
2797 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
2798 llvm::Value *success = Pair.second;
2799 atomicPHI->addIncoming(old, curBlock);
2800 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
2801 Builder.SetInsertPoint(contBB);
2802 return isPre ? value : input;
2803 }
2804
2805 // Store the updated result through the lvalue.
2806 if (LV.isBitField())
2807 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
2808 else
2809 CGF.EmitStoreThroughLValue(RValue::get(value), LV);
2810
2811 // If this is a postinc, return the value read from memory, otherwise use the
2812 // updated value.
2813 return isPre ? value : input;
2814}
2815
2816
2817
2818Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
2819 TestAndClearIgnoreResultAssign();
2820 Value *Op = Visit(E->getSubExpr());
2821
2822 // Generate a unary FNeg for FP ops.
2823 if (Op->getType()->isFPOrFPVectorTy())
2824 return Builder.CreateFNeg(Op, "fneg");
2825
2826 // Emit unary minus with EmitSub so we handle overflow cases etc.
2827 BinOpInfo BinOp;
2828 BinOp.RHS = Op;
2829 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
2830 BinOp.Ty = E->getType();
2831 BinOp.Opcode = BO_Sub;
2832 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
2833 BinOp.E = E;
2834 return EmitSub(BinOp);
2835}
2836
2837Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
2838 TestAndClearIgnoreResultAssign();
2839 Value *Op = Visit(E->getSubExpr());
2840 return Builder.CreateNot(Op, "neg");
2841}
2842
2843Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
2844 // Perform vector logical not on comparison with zero vector.
2845 if (E->getType()->isVectorType() &&
2846 E->getType()->castAs<VectorType>()->getVectorKind() ==
2847 VectorType::GenericVector) {
2848 Value *Oper = Visit(E->getSubExpr());
2849 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
2850 Value *Result;
2851 if (Oper->getType()->isFPOrFPVectorTy()) {
2852 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
2853 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
2854 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
2855 } else
2856 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
2857 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
2858 }
2859
2860 // Compare operand to zero.
2861 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
2862
2863 // Invert value.
2864 // TODO: Could dynamically modify easy computations here. For example, if
2865 // the operand is an icmp ne, turn into icmp eq.
2866 BoolVal = Builder.CreateNot(BoolVal, "lnot");
2867
2868 // ZExt result to the expr type.
2869 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
2870}
2871
2872Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
2873 // Try folding the offsetof to a constant.
2874 Expr::EvalResult EVResult;
2875 if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
2876 llvm::APSInt Value = EVResult.Val.getInt();
2877 return Builder.getInt(Value);
2878 }
2879
2880 // Loop over the components of the offsetof to compute the value.
2881 unsigned n = E->getNumComponents();
2882 llvm::Type* ResultType = ConvertType(E->getType());
2883 llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
2884 QualType CurrentType = E->getTypeSourceInfo()->getType();
2885 for (unsigned i = 0; i != n; ++i) {
2886 OffsetOfNode ON = E->getComponent(i);
2887 llvm::Value *Offset = nullptr;
2888 switch (ON.getKind()) {
2889 case OffsetOfNode::Array: {
2890 // Compute the index
2891 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
2892 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
2893 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
2894 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
2895
2896 // Save the element type
2897 CurrentType =
2898 CGF.getContext().getAsArrayType(CurrentType)->getElementType();
2899
2900 // Compute the element size
2901 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
2902 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
2903
2904 // Multiply out to compute the result
2905 Offset = Builder.CreateMul(Idx, ElemSize);
2906 break;
2907 }
2908
2909 case OffsetOfNode::Field: {
2910 FieldDecl *MemberDecl = ON.getField();
2911 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
2912 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2913
2914 // Compute the index of the field in its parent.
2915 unsigned i = 0;
2916 // FIXME: It would be nice if we didn't have to loop here!
2917 for (RecordDecl::field_iterator Field = RD->field_begin(),
2918 FieldEnd = RD->field_end();
2919 Field != FieldEnd; ++Field, ++i) {
2920 if (*Field == MemberDecl)
2921 break;
2922 }
2923 assert(i < RL.getFieldCount() && "offsetof field in wrong type")(static_cast <bool> (i < RL.getFieldCount() &&
"offsetof field in wrong type") ? void (0) : __assert_fail (
"i < RL.getFieldCount() && \"offsetof field in wrong type\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 2923, __extension__ __PRETTY_FUNCTION__
))
;
2924
2925 // Compute the offset to the field
2926 int64_t OffsetInt = RL.getFieldOffset(i) /
2927 CGF.getContext().getCharWidth();
2928 Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
2929
2930 // Save the element type.
2931 CurrentType = MemberDecl->getType();
2932 break;
2933 }
2934
2935 case OffsetOfNode::Identifier:
2936 llvm_unreachable("dependent __builtin_offsetof")::llvm::llvm_unreachable_internal("dependent __builtin_offsetof"
, "clang/lib/CodeGen/CGExprScalar.cpp", 2936)
;
2937
2938 case OffsetOfNode::Base: {
2939 if (ON.getBase()->isVirtual()) {
2940 CGF.ErrorUnsupported(E, "virtual base in offsetof");
2941 continue;
2942 }
2943
2944 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
2945 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2946
2947 // Save the element type.
2948 CurrentType = ON.getBase()->getType();
2949
2950 // Compute the offset to the base.
2951 const RecordType *BaseRT = CurrentType->getAs<RecordType>();
2952 CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
2953 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
2954 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
2955 break;
2956 }
2957 }
2958 Result = Builder.CreateAdd(Result, Offset);
2959 }
2960 return Result;
2961}
2962
2963/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
2964/// argument of the sizeof expression as an integer.
2965Value *
2966ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2967 const UnaryExprOrTypeTraitExpr *E) {
2968 QualType TypeToSize = E->getTypeOfArgument();
2969 if (E->getKind() == UETT_SizeOf) {
2970 if (const VariableArrayType *VAT =
2971 CGF.getContext().getAsVariableArrayType(TypeToSize)) {
2972 if (E->isArgumentType()) {
2973 // sizeof(type) - make sure to emit the VLA size.
2974 CGF.EmitVariablyModifiedType(TypeToSize);
2975 } else {
2976 // C99 6.5.3.4p2: If the argument is an expression of type
2977 // VLA, it is evaluated.
2978 CGF.EmitIgnoredExpr(E->getArgumentExpr());
2979 }
2980
2981 auto VlaSize = CGF.getVLASize(VAT);
2982 llvm::Value *size = VlaSize.NumElts;
2983
2984 // Scale the number of non-VLA elements by the non-VLA element size.
2985 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
2986 if (!eltSize.isOne())
2987 size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);
2988
2989 return size;
2990 }
2991 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
2992 auto Alignment =
2993 CGF.getContext()
2994 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
2995 E->getTypeOfArgument()->getPointeeType()))
2996 .getQuantity();
2997 return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
2998 }
2999
3000 // If this isn't sizeof(vla), the result must be constant; use the constant
3001 // folding logic so we don't have to duplicate it here.
3002 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
3003}
3004
3005Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
3006 Expr *Op = E->getSubExpr();
3007 if (Op->getType()->isAnyComplexType()) {
3008 // If it's an l-value, load through the appropriate subobject l-value.
3009 // Note that we have to ask E because Op might be an l-value that
3010 // this won't work for, e.g. an Obj-C property.
3011 if (E->isGLValue())
3012 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
3013 E->getExprLoc()).getScalarVal();
3014
3015 // Otherwise, calculate and project.
3016 return CGF.EmitComplexExpr(Op, false, true).first;
3017 }
3018
3019 return Visit(Op);
3020}
3021
3022Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
3023 Expr *Op = E->getSubExpr();
3024 if (Op->getType()->isAnyComplexType()) {
3025 // If it's an l-value, load through the appropriate subobject l-value.
3026 // Note that we have to ask E because Op might be an l-value that
3027 // this won't work for, e.g. an Obj-C property.
3028 if (Op->isGLValue())
3029 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
3030 E->getExprLoc()).getScalarVal();
3031
3032 // Otherwise, calculate and project.
3033 return CGF.EmitComplexExpr(Op, true, false).second;
3034 }
3035
3036 // __imag on a scalar returns zero. Emit the subexpr to ensure side
3037 // effects are evaluated, but not the actual value.
3038 if (Op->isGLValue())
3039 CGF.EmitLValue(Op);
3040 else
3041 CGF.EmitScalarExpr(Op, true);
3042 return llvm::Constant::getNullValue(ConvertType(E->getType()));
3043}
3044
3045//===----------------------------------------------------------------------===//
3046// Binary Operators
3047//===----------------------------------------------------------------------===//
3048
3049BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
3050 TestAndClearIgnoreResultAssign();
3051 BinOpInfo Result;
3052 Result.LHS = Visit(E->getLHS());
3053 Result.RHS = Visit(E->getRHS());
3054 Result.Ty = E->getType();
3055 Result.Opcode = E->getOpcode();
3056 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3057 Result.E = E;
3058 return Result;
3059}
3060
/// Emit a compound assignment ("a += b", "a <<= b", ...) and return the
/// lvalue of the LHS.
///
/// \param E      the compound assignment expression.
/// \param Func   pointer-to-member that emits the underlying binary
///               operation (EmitAdd, EmitShl, ...) from a BinOpInfo.
/// \param Result [out] the computed value, already converted back to the
///               LHS type; callers use it as the value of the expression.
LValue ScalarExprEmitter::EmitCompoundAssignLValue(
    const CompoundAssignOperator *E,
    Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
    Value *&Result) {
  QualType LHSTy = E->getLHS()->getType();
  BinOpInfo OpInfo;

  // Complex computation types are handled entirely by the complex emitter.
  if (E->getComputationResultType()->isAnyComplexType())
    return CGF.EmitScalarCompoundAssignWithComplex(E, Result);

  // Emit the RHS first. __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.
  OpInfo.RHS = Visit(E->getRHS());
  OpInfo.Ty = E->getComputationResultType();
  OpInfo.Opcode = E->getOpcode();
  OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
  OpInfo.E = E;
  // Load/convert the LHS.
  LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

  llvm::PHINode *atomicPHI = nullptr;
  if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
    QualType type = atomicTy->getValueType();
    // Fast path: an integer _Atomic compound assignment can often be lowered
    // to a single atomicrmw — but not when overflow checking/trapping is
    // requested, since atomicrmw cannot report overflow.
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
            LangOptions::SOB_Trapping) {
      llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
      llvm::Instruction::BinaryOps Op;
      switch (OpInfo.Opcode) {
      // We don't have atomicrmw operands for *, %, /, <<, >>
      case BO_MulAssign: case BO_DivAssign:
      case BO_RemAssign:
      case BO_ShlAssign:
      case BO_ShrAssign:
        break;
      case BO_AddAssign:
        AtomicOp = llvm::AtomicRMWInst::Add;
        Op = llvm::Instruction::Add;
        break;
      case BO_SubAssign:
        AtomicOp = llvm::AtomicRMWInst::Sub;
        Op = llvm::Instruction::Sub;
        break;
      case BO_AndAssign:
        AtomicOp = llvm::AtomicRMWInst::And;
        Op = llvm::Instruction::And;
        break;
      case BO_XorAssign:
        AtomicOp = llvm::AtomicRMWInst::Xor;
        Op = llvm::Instruction::Xor;
        break;
      case BO_OrAssign:
        AtomicOp = llvm::AtomicRMWInst::Or;
        Op = llvm::Instruction::Or;
        break;
      default:
        llvm_unreachable("Invalid compound assignment type");
      }
      if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
        // Convert the RHS to the LHS's in-memory form and issue the RMW.
        llvm::Value *Amt = CGF.EmitToMemory(
            EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
                                 E->getExprLoc()),
            LHSTy);
        Value *OldVal = Builder.CreateAtomicRMW(
            AtomicOp, LHSLV.getPointer(CGF), Amt,
            llvm::AtomicOrdering::SequentiallyConsistent);

        // Since operation is atomic, the result type is guaranteed to be the
        // same as the input in LLVM terms.
        Result = Builder.CreateBinOp(Op, OldVal, Amt);
        return LHSLV;
      }
    }
    // FIXME: For floating point types, we should be saving and restoring the
    // floating point environment in the loop.
    // Slow path: a compare-and-exchange loop.  Load the current value, fall
    // into the loop block, and set up a PHI that on each failed exchange
    // receives the freshly observed value.
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
    OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
    atomicPHI->addIncoming(OpInfo.LHS, startBB);
    OpInfo.LHS = atomicPHI;
  }
  else
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
  SourceLocation Loc = E->getExprLoc();
  // Promote the loaded LHS to the computation type before applying the op.
  OpInfo.LHS =
      EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);

  // Expand the binary operator.
  Result = (this->*Func)(OpInfo);

  // Convert the result back to the LHS type,
  // potentially with Implicit Conversion sanitizer check.
  Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy,
                                Loc, ScalarConversionOpts(CGF.SanOpts));

  if (atomicPHI) {
    // Close the cmpxchg loop: retry with the observed value until the
    // exchange succeeds, then continue in contBB.
    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(old, curBlock);
    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
    return LHSLV;
  }

  // Store the result value into the LHS lvalue. Bit-fields are handled
  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after the
  // assignment...'.
  if (LHSLV.isBitField())
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
  else
    CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);

  if (CGF.getLangOpts().OpenMP)
    CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
                                                                  E->getLHS());
  return LHSLV;
}
3191
3192Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3193 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3194 bool Ignore = TestAndClearIgnoreResultAssign();
3195 Value *RHS = nullptr;
3196 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
3197
3198 // If the result is clearly ignored, return now.
3199 if (Ignore)
3200 return nullptr;
3201
3202 // The result of an assignment in C is the assigned r-value.
3203 if (!CGF.getLangOpts().CPlusPlus)
3204 return RHS;
3205
3206 // If the lvalue is non-volatile, return the computed value of the assignment.
3207 if (!LHS.isVolatileQualified())
3208 return RHS;
3209
3210 // Otherwise, reload the value.
3211 return EmitLoadOfLValue(LHS, E->getExprLoc());
3212}
3213
3214void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
3215 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
3216 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
3217
3218 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
3219 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
3220 SanitizerKind::IntegerDivideByZero));
3221 }
3222
3223 const auto *BO = cast<BinaryOperator>(Ops.E);
3224 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
3225 Ops.Ty->hasSignedIntegerRepresentation() &&
3226 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
3227 Ops.mayHaveIntegerOverflow()) {
3228 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
3229
3230 llvm::Value *IntMin =
3231 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
3232 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
3233
3234 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
3235 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
3236 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
3237 Checks.push_back(
3238 std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
3239 }
3240
3241 if (Checks.size() > 0)
3242 EmitBinOpCheck(Checks, Ops);
3243}
3244
3245Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
3246 {
3247 CodeGenFunction::SanitizerScope SanScope(&CGF);
3248 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3249 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3250 Ops.Ty->isIntegerType() &&
3251 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3252 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3253 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
3254 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
3255 Ops.Ty->isRealFloatingType() &&
3256 Ops.mayHaveFloatDivisionByZero()) {
3257 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3258 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
3259 EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
3260 Ops);
3261 }
3262 }
3263
3264 if (Ops.Ty->isConstantMatrixType()) {
3265 llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
3266 // We need to check the types of the operands of the operator to get the
3267 // correct matrix dimensions.
3268 auto *BO = cast<BinaryOperator>(Ops.E);
3269 (void)BO;
3270 assert((static_cast <bool> (isa<ConstantMatrixType>(BO->
getLHS()->getType().getCanonicalType()) && "first operand must be a matrix"
) ? void (0) : __assert_fail ("isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) && \"first operand must be a matrix\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 3272, __extension__ __PRETTY_FUNCTION__
))
3271 isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&(static_cast <bool> (isa<ConstantMatrixType>(BO->
getLHS()->getType().getCanonicalType()) && "first operand must be a matrix"
) ? void (0) : __assert_fail ("isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) && \"first operand must be a matrix\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 3272, __extension__ __PRETTY_FUNCTION__
))
3272 "first operand must be a matrix")(static_cast <bool> (isa<ConstantMatrixType>(BO->
getLHS()->getType().getCanonicalType()) && "first operand must be a matrix"
) ? void (0) : __assert_fail ("isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) && \"first operand must be a matrix\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 3272, __extension__ __PRETTY_FUNCTION__
))
;
3273 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&(static_cast <bool> (BO->getRHS()->getType().getCanonicalType
()->isArithmeticType() && "second operand must be an arithmetic type"
) ? void (0) : __assert_fail ("BO->getRHS()->getType().getCanonicalType()->isArithmeticType() && \"second operand must be an arithmetic type\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 3274, __extension__ __PRETTY_FUNCTION__
))
3274 "second operand must be an arithmetic type")(static_cast <bool> (BO->getRHS()->getType().getCanonicalType
()->isArithmeticType() && "second operand must be an arithmetic type"
) ? void (0) : __assert_fail ("BO->getRHS()->getType().getCanonicalType()->isArithmeticType() && \"second operand must be an arithmetic type\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 3274, __extension__ __PRETTY_FUNCTION__
))
;
3275 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3276 return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
3277 Ops.Ty->hasUnsignedIntegerRepresentation());
3278 }
3279
3280 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
3281 llvm::Value *Val;
3282 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3283 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
3284 if ((CGF.getLangOpts().OpenCL &&
3285 !CGF.CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
3286 (CGF.getLangOpts().HIP && CGF.getLangOpts().CUDAIsDevice &&
3287 !CGF.CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
3288 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
3289 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
3290 // build option allows an application to specify that single precision
3291 // floating-point divide (x/y and 1/x) and sqrt used in the program
3292 // source are correctly rounded.
3293 llvm::Type *ValTy = Val->getType();
3294 if (ValTy->isFloatTy() ||
3295 (isa<llvm::VectorType>(ValTy) &&
3296 cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
3297 CGF.SetFPAccuracy(Val, 2.5);
3298 }
3299 return Val;
3300 }
3301 else if (Ops.isFixedPointOp())
3302 return EmitFixedPointBinOp(Ops);
3303 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
3304 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
3305 else
3306 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
3307}
3308
3309Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
3310 // Rem in C can't be a floating point type: C99 6.5.5p2.
3311 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3312 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3313 Ops.Ty->isIntegerType() &&
3314 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3315 CodeGenFunction::SanitizerScope SanScope(&CGF);
3316 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3317 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
3318 }
3319
3320 if (Ops.Ty->hasUnsignedIntegerRepresentation())
3321 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
3322 else
3323 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
3324}
3325
/// Emit an add/sub/mul via the llvm.*.with.overflow intrinsics and, on
/// overflow, either trap (-ftrapv), call the UBSan runtime, or call the
/// user-specified -ftrapv-handler function and use its result.
Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
  unsigned IID;
  // OpID encodes the operation for the overflow handler:
  // (add=1/sub=2/mul=3) << 1, with the low bit set for signed ops.
  unsigned OpID = 0;
  SanitizerHandler OverflowKind;

  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
  switch (Ops.Opcode) {
  case BO_Add:
  case BO_AddAssign:
    OpID = 1;
    IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
                     llvm::Intrinsic::uadd_with_overflow;
    OverflowKind = SanitizerHandler::AddOverflow;
    break;
  case BO_Sub:
  case BO_SubAssign:
    OpID = 2;
    IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
                     llvm::Intrinsic::usub_with_overflow;
    OverflowKind = SanitizerHandler::SubOverflow;
    break;
  case BO_Mul:
  case BO_MulAssign:
    OpID = 3;
    IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
                     llvm::Intrinsic::umul_with_overflow;
    OverflowKind = SanitizerHandler::MulOverflow;
    break;
  default:
    llvm_unreachable("Unsupported operation for overflow detection");
  }
  OpID <<= 1;
  if (isSigned)
    OpID |= 1;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);

  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);

  // The intrinsic returns {result, did-overflow}.
  Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);

  // Handle overflow with llvm.trap if no custom handler has been specified.
  const std::string *handlerName =
    &CGF.getLangOpts().OverflowHandler;
  if (handlerName->empty()) {
    // If the signed-integer-overflow sanitizer is enabled, emit a call to its
    // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
    if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
      llvm::Value *NotOverflow = Builder.CreateNot(overflow);
      SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
                              : SanitizerKind::UnsignedIntegerOverflow;
      EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
    } else
      CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
    return result;
  }

  // Branch in case of overflow.
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::BasicBlock *continueBB =
    CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);

  Builder.CreateCondBr(overflow, overflowBB, continueBB);

  // If an overflow handler is set, then we want to call it and then use its
  // result, if it returns.
  Builder.SetInsertPoint(overflowBB);

  // Get the overflow handler.  Signature: i64 handler(i64 lhs, i64 rhs,
  // i8 op, i8 width, ...) — variadic so one declaration covers all types.
  llvm::Type *Int8Ty = CGF.Int8Ty;
  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
  llvm::FunctionType *handlerTy =
      llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
  llvm::FunctionCallee handler =
      CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);

  // Sign extend the args to 64-bit, so that we can use the same handler for
  // all types of overflow.
  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);

  // Call the handler with the two arguments, the operation, and the size of
  // the result.
  llvm::Value *handlerArgs[] = {
    lhs,
    rhs,
    Builder.getInt8(OpID),
    Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
  };
  llvm::Value *handlerResult =
    CGF.EmitNounwindRuntimeCall(handler, handlerArgs);

  // Truncate the result back to the desired size.
  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
  Builder.CreateBr(continueBB);

  // Merge the non-overflow result with the handler's result.
  Builder.SetInsertPoint(continueBB);
  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
  phi->addIncoming(result, initialBB);
  phi->addIncoming(handlerResult, overflowBB);

  return phi;
}
3433
/// Emit pointer + index arithmetic.
///
/// \param op            the binary operation; exactly one of op.LHS/op.RHS is
///                      a pointer (for subtraction it is always the LHS).
/// \param isSubtraction true for 'ptr - idx'; negates the index before GEP.
/// \returns the resulting pointer value.
static Value *emitPointerArithmetic(CodeGenFunction &CGF,
                                    const BinOpInfo &op,
                                    bool isSubtraction) {
  // Must have binary (not unary) expr here. Unary pointer
  // increment/decrement doesn't use this path.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);

  Value *pointer = op.LHS;
  Expr *pointerOperand = expr->getLHS();
  Value *index = op.RHS;
  Expr *indexOperand = expr->getRHS();

  // In a subtraction, the LHS is always the pointer.
  if (!isSubtraction && !pointer->getType()->isPointerTy()) {
    std::swap(pointer, index);
    std::swap(pointerOperand, indexOperand);
  }

  bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();

  unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
  auto &DL = CGF.CGM.getDataLayout();
  auto PtrTy = cast<llvm::PointerType>(pointer->getType());

  // Some versions of glibc and gcc use idioms (particularly in their malloc
  // routines) that add a pointer-sized integer (known to be a pointer value)
  // to a null pointer in order to cast the value back to an integer or as
  // part of a pointer alignment algorithm.  This is undefined behavior, but
  // we'd like to be able to compile programs that use it.
  //
  // Normally, we'd generate a GEP with a null-pointer base here in response
  // to that code, but it's also UB to dereference a pointer created that
  // way.  Instead (as an acknowledged hack to tolerate the idiom) we will
  // generate a direct cast of the integer value to a pointer.
  //
  // The idiom (p = nullptr + N) is not met if any of the following are true:
  //
  //   The operation is subtraction.
  //   The index is not pointer-sized.
  //   The pointer type is not byte-sized.
  //
  if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
                                                       op.Opcode,
                                                       expr->getLHS(),
                                                       expr->getRHS()))
    return CGF.Builder.CreateIntToPtr(index, pointer->getType());

  if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
    // Zero-extend or sign-extend the pointer value according to
    // whether the index is signed or not.
    index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
                                      "idx.ext");
  }

  // If this is subtraction, negate the index.
  if (isSubtraction)
    index = CGF.Builder.CreateNeg(index, "idx.neg");

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
                        /*Accessed*/ false);

  const PointerType *pointerType
    = pointerOperand->getType()->getAs<PointerType>();
  if (!pointerType) {
    // Not a C pointer, so it must be an Obj-C object pointer: scale the
    // index by the object size manually and GEP over i8.
    QualType objectType = pointerOperand->getType()
                                        ->castAs<ObjCObjectPointerType>()
                                        ->getPointeeType();
    llvm::Value *objectSize
      = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));

    index = CGF.Builder.CreateMul(index, objectSize);

    Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
    result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr");
    return CGF.Builder.CreateBitCast(result, pointer->getType());
  }

  QualType elementType = pointerType->getPointeeType();
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = CGF.getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
    llvm::Type *elemTy = pointer->getType()->getPointerElementType();
    if (CGF.getLangOpts().isSignedOverflowDefined()) {
      index = CGF.Builder.CreateMul(index, numElements, "vla.index");
      pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
    } else {
      index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
      pointer = CGF.EmitCheckedInBoundsGEP(
          elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
          "add.ptr");
    }
    return pointer;
  }

  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
  // future proof.
  if (elementType->isVoidType() || elementType->isFunctionType()) {
    Value *result = CGF.EmitCastToVoidPtr(pointer);
    result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr");
    return CGF.Builder.CreateBitCast(result, pointer->getType());
  }

  llvm::Type *elemTy = CGF.ConvertTypeForMem(elementType);
  if (CGF.getLangOpts().isSignedOverflowDefined())
    return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");

  // Default: an inbounds GEP with an overflow check on the index arithmetic.
  return CGF.EmitCheckedInBoundsGEP(
      elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
      "add.ptr");
}
3553
3554// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
3555// Addend. Use negMul and negAdd to negate the first operand of the Mul or
3556// the add operand respectively. This allows fmuladd to represent a*b-c, or
3557// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
3558// efficient operations.
3559static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
3560 const CodeGenFunction &CGF, CGBuilderTy &Builder,
3561 bool negMul, bool negAdd) {
3562 assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.")(static_cast <bool> (!(negMul && negAdd) &&
"Only one of negMul and negAdd should be set.") ? void (0) :
__assert_fail ("!(negMul && negAdd) && \"Only one of negMul and negAdd should be set.\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 3562, __extension__ __PRETTY_FUNCTION__
))
;
3563
3564 Value *MulOp0 = MulOp->getOperand(0);
3565 Value *MulOp1 = MulOp->getOperand(1);
3566 if (negMul)
3567 MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
3568 if (negAdd)
3569 Addend = Builder.CreateFNeg(Addend, "neg");
3570
3571 Value *FMulAdd = nullptr;
3572 if (Builder.getIsFPConstrained()) {
3573 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&(static_cast <bool> (isa<llvm::ConstrainedFPIntrinsic
>(MulOp) && "Only constrained operation should be created when Builder is in FP "
"constrained mode") ? void (0) : __assert_fail ("isa<llvm::ConstrainedFPIntrinsic>(MulOp) && \"Only constrained operation should be created when Builder is in FP \" \"constrained mode\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 3575, __extension__ __PRETTY_FUNCTION__
))
3574 "Only constrained operation should be created when Builder is in FP "(static_cast <bool> (isa<llvm::ConstrainedFPIntrinsic
>(MulOp) && "Only constrained operation should be created when Builder is in FP "
"constrained mode") ? void (0) : __assert_fail ("isa<llvm::ConstrainedFPIntrinsic>(MulOp) && \"Only constrained operation should be created when Builder is in FP \" \"constrained mode\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 3575, __extension__ __PRETTY_FUNCTION__
))
3575 "constrained mode")(static_cast <bool> (isa<llvm::ConstrainedFPIntrinsic
>(MulOp) && "Only constrained operation should be created when Builder is in FP "
"constrained mode") ? void (0) : __assert_fail ("isa<llvm::ConstrainedFPIntrinsic>(MulOp) && \"Only constrained operation should be created when Builder is in FP \" \"constrained mode\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 3575, __extension__ __PRETTY_FUNCTION__
))
;
3576 FMulAdd = Builder.CreateConstrainedFPCall(
3577 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
3578 Addend->getType()),
3579 {MulOp0, MulOp1, Addend});
3580 } else {
3581 FMulAdd = Builder.CreateCall(
3582 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
3583 {MulOp0, MulOp1, Addend});
3584 }
3585 MulOp->eraseFromParent();
3586
3587 return FMulAdd;
3588}
3589
3590// Check whether it would be legal to emit an fmuladd intrinsic call to
3591// represent op and if so, build the fmuladd.
3592//
3593// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
3594// Does NOT check the type of the operation - it's assumed that this function
3595// will be called from contexts where it's known that the type is contractable.
3596static Value* tryEmitFMulAdd(const BinOpInfo &op,
3597 const CodeGenFunction &CGF, CGBuilderTy &Builder,
3598 bool isSub=false) {
3599
3600 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||(static_cast <bool> ((op.Opcode == BO_Add || op.Opcode ==
BO_AddAssign || op.Opcode == BO_Sub || op.Opcode == BO_SubAssign
) && "Only fadd/fsub can be the root of an fmuladd.")
? void (0) : __assert_fail ("(op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) && \"Only fadd/fsub can be the root of an fmuladd.\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 3602, __extension__ __PRETTY_FUNCTION__
))
3601 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&(static_cast <bool> ((op.Opcode == BO_Add || op.Opcode ==
BO_AddAssign || op.Opcode == BO_Sub || op.Opcode == BO_SubAssign
) && "Only fadd/fsub can be the root of an fmuladd.")
? void (0) : __assert_fail ("(op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) && \"Only fadd/fsub can be the root of an fmuladd.\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 3602, __extension__ __PRETTY_FUNCTION__
))
3602 "Only fadd/fsub can be the root of an fmuladd.")(static_cast <bool> ((op.Opcode == BO_Add || op.Opcode ==
BO_AddAssign || op.Opcode == BO_Sub || op.Opcode == BO_SubAssign
) && "Only fadd/fsub can be the root of an fmuladd.")
? void (0) : __assert_fail ("(op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) && \"Only fadd/fsub can be the root of an fmuladd.\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 3602, __extension__ __PRETTY_FUNCTION__
))
;
3603
3604 // Check whether this op is marked as fusable.
3605 if (!op.FPFeatures.allowFPContractWithinStatement())
3606 return nullptr;
3607
3608 // We have a potentially fusable op. Look for a mul on one of the operands.
3609 // Also, make sure that the mul result isn't used directly. In that case,
3610 // there's no point creating a muladd operation.
3611 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
3612 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3613 LHSBinOp->use_empty())
3614 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
3615 }
3616 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
3617 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3618 RHSBinOp->use_empty())
3619 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
3620 }
3621
3622 if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) {
3623 if (LHSBinOp->getIntrinsicID() ==
3624 llvm::Intrinsic::experimental_constrained_fmul &&
3625 LHSBinOp->use_empty())
3626 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
3627 }
3628 if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) {
3629 if (RHSBinOp->getIntrinsicID() ==
3630 llvm::Intrinsic::experimental_constrained_fmul &&
3631 RHSBinOp->use_empty())
3632 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
3633 }
3634
3635 return nullptr;
3636}
3637
/// Emit scalar '+'.  Dispatches, in order, to: pointer arithmetic,
/// signed-overflow-aware integer add, matrix add, unsigned checked add,
/// floating-point add (with fmuladd fusion), fixed-point add, and finally
/// a plain integer add.
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
  // Pointer + integer (either order) takes the pointer-arithmetic path.
  if (op.LHS->getType()->isPointerTy() ||
      op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);

  if (op.Ty->isSignedIntegerOrEnumerationType()) {
    switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
    case LangOptions::SOB_Defined:
      // -fwrapv: wrapping is well-defined, plain add.
      return Builder.CreateAdd(op.LHS, op.RHS, "add");
    case LangOptions::SOB_Undefined:
      // nsw add, unless the overflow sanitizer needs the checked form.
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      LLVM_FALLTHROUGH;
    case LangOptions::SOB_Trapping:
      if (CanElideOverflowCheck(CGF.getContext(), op))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      return EmitOverflowCheckedBinOp(op);
    }
  }

  if (op.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return MB.CreateAdd(op.LHS, op.RHS);
  }

  if (op.Ty->isUnsignedIntegerType() &&
      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
      !CanElideOverflowCheck(CGF.getContext(), op))
    return EmitOverflowCheckedBinOp(op);

  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    // Try to form an fmuladd.
    if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
      return FMulAdd;

    return Builder.CreateFAdd(op.LHS, op.RHS, "add");
  }

  if (op.isFixedPointOp())
    return EmitFixedPointBinOp(op);

  return Builder.CreateAdd(op.LHS, op.RHS, "add");
}
3683
3684/// The resulting value must be calculated with exact precision, so the operands
3685/// may not be the same type.
3686Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
3687 using llvm::APSInt;
3688 using llvm::ConstantInt;
3689
3690 // This is either a binary operation where at least one of the operands is
3691 // a fixed-point type, or a unary operation where the operand is a fixed-point
3692 // type. The result type of a binary operation is determined by
3693 // Sema::handleFixedPointConversions().
3694 QualType ResultTy = op.Ty;
3695 QualType LHSTy, RHSTy;
3696 if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
3697 RHSTy = BinOp->getRHS()->getType();
3698 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
3699 // For compound assignment, the effective type of the LHS at this point
3700 // is the computation LHS type, not the actual LHS type, and the final
3701 // result type is not the type of the expression but rather the
3702 // computation result type.
3703 LHSTy = CAO->getComputationLHSType();
3704 ResultTy = CAO->getComputationResultType();
3705 } else
3706 LHSTy = BinOp->getLHS()->getType();
3707 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
3708 LHSTy = UnOp->getSubExpr()->getType();
3709 RHSTy = UnOp->getSubExpr()->getType();
3710 }
3711 ASTContext &Ctx = CGF.getContext();
3712 Value *LHS = op.LHS;
3713 Value *RHS = op.RHS;
3714
3715 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
3716 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
3717 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
3718 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
3719
3720 // Perform the actual operation.
3721 Value *Result;
3722 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3723 switch (op.Opcode) {
3724 case BO_AddAssign:
3725 case BO_Add:
3726 Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
3727 break;
3728 case BO_SubAssign:
3729 case BO_Sub:
3730 Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
3731 break;
3732 case BO_MulAssign:
3733 case BO_Mul:
3734 Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
3735 break;
3736 case BO_DivAssign:
3737 case BO_Div:
3738 Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
3739 break;
3740 case BO_ShlAssign:
3741 case BO_Shl:
3742 Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
3743 break;
3744 case BO_ShrAssign:
3745 case BO_Shr:
3746 Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
3747 break;
3748 case BO_LT:
3749 return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
3750 case BO_GT:
3751 return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
3752 case BO_LE:
3753 return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
3754 case BO_GE:
3755 return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
3756 case BO_EQ:
3757 // For equality operations, we assume any padding bits on unsigned types are
3758 // zero'd out. They could be overwritten through non-saturating operations
3759 // that cause overflow, but this leads to undefined behavior.
3760 return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
3761 case BO_NE:
3762 return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
3763 case BO_Cmp:
3764 case BO_LAnd:
3765 case BO_LOr:
3766 llvm_unreachable("Found unimplemented fixed point binary operation")::llvm::llvm_unreachable_internal("Found unimplemented fixed point binary operation"
, "clang/lib/CodeGen/CGExprScalar.cpp", 3766)
;
3767 case BO_PtrMemD:
3768 case BO_PtrMemI:
3769 case BO_Rem:
3770 case BO_Xor:
3771 case BO_And:
3772 case BO_Or:
3773 case BO_Assign:
3774 case BO_RemAssign:
3775 case BO_AndAssign:
3776 case BO_XorAssign:
3777 case BO_OrAssign:
3778 case BO_Comma:
3779 llvm_unreachable("Found unsupported binary operation for fixed point types.")::llvm::llvm_unreachable_internal("Found unsupported binary operation for fixed point types."
, "clang/lib/CodeGen/CGExprScalar.cpp", 3779)
;
3780 }
3781
3782 bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
3783 BinaryOperator::isShiftAssignOp(op.Opcode);
3784 // Convert to the result type.
3785 return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
3786 : CommonFixedSema,
3787 ResultFixedSema);
3788}
3789
// Emit subtraction. Dispatches, in order, on: non-pointer LHS (signed
// integer with configurable overflow behavior, constant matrix, sanitized
// unsigned integer, floating point with fmuladd formation, fixed point,
// plain integer), then pointer - integer arithmetic, and finally
// pointer - pointer difference scaled by the pointee element size.
Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
  // The LHS is always a pointer if either side is.
  if (!op.LHS->getType()->isPointerTy()) {
    if (op.Ty->isSignedIntegerOrEnumerationType()) {
      // Signed overflow behavior is selectable: defined wrapping (-fwrapv),
      // undefined (optionally sanitized), or trapping.
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        return Builder.CreateSub(op.LHS, op.RHS, "sub");
      case LangOptions::SOB_Undefined:
        // Overflow is UB: mark no-signed-wrap unless the sanitizer wants to
        // instrument the operation instead.
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        LLVM_FALLTHROUGH;
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), op))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        return EmitOverflowCheckedBinOp(op);
      }
    }

    if (op.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      return MB.CreateSub(op.LHS, op.RHS);
    }

    if (op.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), op))
      return EmitOverflowCheckedBinOp(op);

    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      // Try to form an fmuladd.
      if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
        return FMulAdd;
      return Builder.CreateFSub(op.LHS, op.RHS, "sub");
    }

    if (op.isFixedPointOp())
      return EmitFixedPointBinOp(op);

    return Builder.CreateSub(op.LHS, op.RHS, "sub");
  }

  // If the RHS is not a pointer, then we have normal pointer
  // arithmetic.
  if (!op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);

  // Otherwise, this is a pointer subtraction.

  // Do the raw subtraction part (in units of bytes, ptrdiff_t width).
  llvm::Value *LHS
    = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
  llvm::Value *RHS
    = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
  Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");

  // Okay, figure out the element size.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
  QualType elementType = expr->getLHS()->getType()->getPointeeType();

  llvm::Value *divisor = nullptr;

  // For a variable-length array, this is going to be non-constant.
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    auto VlaSize = CGF.getVLASize(vla);
    elementType = VlaSize.Type;
    divisor = VlaSize.NumElts;

    // Scale the number of non-VLA elements by the non-VLA element size.
    CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
    if (!eltSize.isOne())
      divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);

  // For everything else, we can just compute it, safe in the
  // assumption that Sema won't let anything through that we can't
  // safely compute the size of.
  } else {
    CharUnits elementSize;
    // Handle GCC extension for pointer arithmetic on void* and
    // function pointer types.
    if (elementType->isVoidType() || elementType->isFunctionType())
      elementSize = CharUnits::One();
    else
      elementSize = CGF.getContext().getTypeSizeInChars(elementType);

    // Don't even emit the divide for element size of 1.
    if (elementSize.isOne())
      return diffInChars;

    divisor = CGF.CGM.getSize(elementSize);
  }

  // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
  // pointer difference in C is only defined in the case where both operands
  // are pointing to elements of an array.
  return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
}
3889
3890Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) {
3891 llvm::IntegerType *Ty;
3892 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
3893 Ty = cast<llvm::IntegerType>(VT->getElementType());
3894 else
3895 Ty = cast<llvm::IntegerType>(LHS->getType());
3896 return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
3897}
3898
3899Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
3900 const Twine &Name) {
3901 llvm::IntegerType *Ty;
3902 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
3903 Ty = cast<llvm::IntegerType>(VT->getElementType());
3904 else
3905 Ty = cast<llvm::IntegerType>(LHS->getType());
3906
3907 if (llvm::isPowerOf2_64(Ty->getBitWidth()))
3908 return Builder.CreateAnd(RHS, GetWidthMinusOneValue(LHS, RHS), Name);
3909
3910 return Builder.CreateURem(
3911 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
3912}
3913
// Emit a left shift, including the OpenCL amount masking and the UBSan
// shift-exponent / shift-base checks.
Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  // Shifting bits off the top of a signed value is UB in C modes before
  // C++20 (unless -fwrapv defines it); unsigned overflow is checked only
  // under its dedicated sanitizer.
  bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
                            Ops.Ty->hasSignedIntegerRepresentation() &&
                            !CGF.getLangOpts().isSignedOverflowDefined() &&
                            !CGF.getLangOpts().CPlusPlus20;
  bool SanitizeUnsignedBase =
      CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
      Ops.Ty->hasUnsignedIntegerRepresentation();
  bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
  bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
  else if ((SanitizeBase || SanitizeExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
    // The exponent is valid iff it is at most bitwidth-1; note this compares
    // the *unpromoted* RHS.
    llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
    llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);

    if (SanitizeExponent) {
      Checks.push_back(
          std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
    }

    if (SanitizeBase) {
      // Check whether we are shifting any non-zero bits off the top of the
      // integer. We only emit this check if exponent is valid - otherwise
      // instructions below will have undefined behavior themselves.
      llvm::BasicBlock *Orig = Builder.GetInsertBlock();
      llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
      llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
      Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
      // WidthMinusOne above was built against the unpromoted RHS; recompute
      // against the promoted amount when the two differ so the types match.
      llvm::Value *PromotedWidthMinusOne =
          (RHS == Ops.RHS) ? WidthMinusOne
                           : GetWidthMinusOneValue(Ops.LHS, RHS);
      CGF.EmitBlock(CheckShiftBase);
      // Shift right by (width-1 - amount): any bit that survives would have
      // been shifted out by the left shift.
      llvm::Value *BitsShiftedOff = Builder.CreateLShr(
          Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
                                     /*NUW*/ true, /*NSW*/ true),
          "shl.check");
      if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
        // In C99, we are not permitted to shift a 1 bit into the sign bit.
        // Under C++11's rules, shifting a 1 bit into the sign bit is
        // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
        // define signed left shifts, so we use the C99 and C++11 rules there).
        // Unsigned shifts can always shift into the top bit.
        llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
        BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
      }
      llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
      llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
      CGF.EmitBlock(Cont);
      // Merge: edges arriving straight from Orig skipped the base check
      // (invalid exponent) and are treated as passing so only one diagnostic
      // fires.
      llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
      BaseCheck->addIncoming(Builder.getTrue(), Orig);
      BaseCheck->addIncoming(ValidBase, CheckShiftBase);
      Checks.push_back(std::make_pair(
          BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase
                                        : SanitizerKind::UnsignedShiftBase));
    }

    assert(!Checks.empty());
    EmitBinOpCheck(Checks, Ops);
  }

  return Builder.CreateShl(Ops.LHS, RHS, "shl");
}
3991
3992Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
3993 // TODO: This misses out on the sanitizer check below.
3994 if (Ops.isFixedPointOp())
3995 return EmitFixedPointBinOp(Ops);
3996
3997 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3998 // RHS to the same size as the LHS.
3999 Value *RHS = Ops.RHS;
4000 if (Ops.LHS->getType() != RHS->getType())
4001 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4002
4003 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4004 if (CGF.getLangOpts().OpenCL)
4005 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
4006 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
4007 isa<llvm::IntegerType>(Ops.LHS->getType())) {
4008 CodeGenFunction::SanitizerScope SanScope(&CGF);
4009 llvm::Value *Valid =
4010 Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
4011 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
4012 }
4013
4014 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4015 return Builder.CreateLShr(Ops.LHS, RHS, "shr");
4016 return Builder.CreateAShr(Ops.LHS, RHS, "shr");
4017}
4018
4019enum IntrinsicType { VCMPEQ, VCMPGT };
4020// return corresponding comparison intrinsic for given vector type
4021static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
4022 BuiltinType::Kind ElemKind) {
4023 switch (ElemKind) {
4024 default: llvm_unreachable("unexpected element type")::llvm::llvm_unreachable_internal("unexpected element type", "clang/lib/CodeGen/CGExprScalar.cpp"
, 4024)
;
4025 case BuiltinType::Char_U:
4026 case BuiltinType::UChar:
4027 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4028 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
4029 case BuiltinType::Char_S:
4030 case BuiltinType::SChar:
4031 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4032 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
4033 case BuiltinType::UShort:
4034 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4035 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
4036 case BuiltinType::Short:
4037 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4038 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
4039 case BuiltinType::UInt:
4040 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4041 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
4042 case BuiltinType::Int:
4043 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4044 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
4045 case BuiltinType::ULong:
4046 case BuiltinType::ULongLong:
4047 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4048 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
4049 case BuiltinType::Long:
4050 case BuiltinType::LongLong:
4051 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4052 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
4053 case BuiltinType::Float:
4054 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
4055 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
4056 case BuiltinType::Double:
4057 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
4058 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
4059 case BuiltinType::UInt128:
4060 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4061 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
4062 case BuiltinType::Int128:
4063 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4064 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
4065 }
4066}
4067
// Shared implementation of all scalar comparison operators. The caller
// supplies the unsigned-integer, signed-integer, and floating-point
// predicates for the operator; IsSignaling selects a signaling FP compare
// (used for ordering comparisons, which raise on quiet NaNs).
Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
                                      llvm::CmpInst::Predicate UICmpOpc,
                                      llvm::CmpInst::Predicate SICmpOpc,
                                      llvm::CmpInst::Predicate FCmpOpc,
                                      bool IsSignaling) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();
  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
    // Member pointers only support equality comparisons; their layout is
    // ABI-specific, so delegate to the C++ ABI object.
    assert(E->getOpcode() == BO_EQ ||
           E->getOpcode() == BO_NE);
    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
    Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
    BinOpInfo BOInfo = EmitBinOps(E);
    Value *LHS = BOInfo.LHS;
    Value *RHS = BOInfo.RHS;

    // If AltiVec, the comparison results in a numeric type, so we use
    // intrinsics comparing vectors and giving 0 or 1 as a result
    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
      // constants for mapping CR6 register bits to predicate result
      enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;

      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;

      // in several cases vector arguments order will be reversed
      Value *FirstVecArg = LHS,
            *SecondVecArg = RHS;

      QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
      BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();

      switch(E->getOpcode()) {
      default: llvm_unreachable("is not a comparison operation");
      case BO_EQ:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_NE:
        CR6 = CR6_EQ;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_LT:
        // a < b is emitted as b > a with the operands swapped.
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        std::swap(FirstVecArg, SecondVecArg);
        break;
      case BO_GT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        break;
      case BO_LE:
        if (ElementKind == BuiltinType::Float) {
          // Floats have a >= predicate intrinsic; a <= b becomes b >= a.
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
          std::swap(FirstVecArg, SecondVecArg);
        }
        else {
          // Integers: a <= b is !(a > b), via the CR6_EQ bit.
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      case BO_GE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
        }
        else {
          // Integers: a >= b is !(b > a).
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
          std::swap(FirstVecArg, SecondVecArg);
        }
        break;
      }

      Value *CR6Param = Builder.getInt32(CR6);
      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});

      // The result type of intrinsic may not be same as E->getType().
      // If E->getType() is not BoolTy, EmitScalarConversion will do the
      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
      // do nothing, if ResultTy is not i1 at the same time, it will cause
      // crash later.
      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
      if (ResultTy->getBitWidth() > 1 &&
          E->getType() == CGF.getContext().BoolTy)
        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                                  E->getExprLoc());
    }

    // Non-AltiVec scalar/vector compare: pick the instruction by operand
    // category (fixed point, floating point, signed int, unsigned/pointer).
    if (BOInfo.isFixedPointOp()) {
      Result = EmitFixedPointBinOp(BOInfo);
    } else if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
      if (!IsSignaling)
        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
      else
        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
    } else if (LHSTy->hasSignedIntegerRepresentation()) {
      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.

      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
          !isa<llvm::ConstantPointerNull>(LHS) &&
          !isa<llvm::ConstantPointerNull>(RHS)) {

        // Dynamic information is required to be stripped for comparisons,
        // because it could leak the dynamic information. Based on comparisons
        // of pointers to dynamic objects, the optimizer can replace one pointer
        // with another, which might be incorrect in presence of invariant
        // groups. Comparison with null is safe because null does not carry any
        // dynamic information.
        if (LHSTy.mayBeDynamicClass())
          LHS = Builder.CreateStripInvariantGroup(LHS);
        if (RHSTy.mayBeDynamicClass())
          RHS = Builder.CreateStripInvariantGroup(RHS);
      }

      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the appropriate
    // vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex Comparison: can only be an equality comparison.
    CodeGenFunction::ComplexPairTy LHS, RHS;
    QualType CETy;
    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
      LHS = CGF.EmitComplexExpr(E->getLHS());
      CETy = CTy->getElementType();
    } else {
      // A real operand compared against a complex one is treated as a
      // complex value with a zero imaginary part.
      LHS.first = Visit(E->getLHS());
      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }
    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
      RHS = CGF.EmitComplexExpr(E->getRHS());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
                                                     CTy->getElementType()) &&
             "The element types must always match.");
      (void)CTy;
    } else {
      RHS.first = Visit(E->getRHS());
      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
             "The element types must always match.");
    }

    // Compare real and imaginary parts separately, then combine.
    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      // As complex comparisons can only be equality comparisons, they
      // are never signaling comparisons.
      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons. As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
    }

    if (E->getOpcode() == BO_EQ) {
      // Equal iff both the real and imaginary parts are equal.
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BO_NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                              E->getExprLoc());
}
4251
// Emit a simple assignment, dispatching on the ObjC ownership qualifier of
// the LHS type. Returns the expression's value — the stored r-value in C,
// or (for volatile lvalues in C++) a reload of the LHS — or null when the
// result is ignored.
Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  switch (E->getLHS()->getType().getObjCLifetime()) {
  case Qualifiers::OCL_Strong:
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

  case Qualifiers::OCL_Autoreleasing:
    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

  case Qualifiers::OCL_ExplicitNone:
    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;

  case Qualifiers::OCL_Weak:
    // Weak stores go through the ARC runtime entry point.
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore);
    break;

  case Qualifiers::OCL_None:
    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}
4310
// Emit logical AND. Vector operands are compared elementwise against zero
// and combined with a bitwise 'and' (no short-circuiting). Scalar operands
// use short-circuit control flow: a constant-folded LHS may elide the
// branch entirely; otherwise a PHI in the continuation block merges 'false'
// from every LHS-false edge with the evaluated RHS condition. Extra blocks
// are inserted for profile/coverage instrumentation of the RHS.
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.
      CGF.incrementProfileCounter(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
        Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.incrementProfileCounter(E->getRHS());
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      }

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::Constant::getNullValue(ResTy);
  }

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be false. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
    Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.incrementProfileCounter(E->getRHS());
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit line number for unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  // Artificial location to preserve the scope information
  {
    auto NL = ApplyDebugLocation::CreateArtificial(CGF);
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}
4426
// Emit a scalar '||'. Vector operands are evaluated eagerly (no
// short-circuit) as a bitwise OR of element-wise zero-comparisons; scalar
// operands get short-circuit control flow, with the LHS constant-folded
// away when possible. Also emits extra counter blocks when profile
// instrumentation of branch conditions is enabled.
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    // Both sides are evaluated unconditionally: vector '||' does not
    // short-circuit.
    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      // Honor any pragma-specified FP semantics while emitting the compares.
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.
      CGF.incrementProfileCounter(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter need to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
        Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.incrementProfileCounter(E->getRHS());
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      }

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    // (A label in the RHS would make it a required jump target, so it can
    // only be elided when none is present.)
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::ConstantInt::get(ResTy, 1);
  }

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
                           CGF.getCurrentProfileCount() -
                               CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be true. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  eval.end(CGF);

  // Reaquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
    Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.incrementProfileCounter(E->getRHS());
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}
4536
// Emit a comma expression: evaluate the LHS only for its side effects, then
// evaluate the RHS and yield it as the value of the whole expression.
Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  // Emitting the LHS may have left the builder without a valid insertion
  // point; re-establish one before emitting the RHS.
  CGF.EnsureInsertPoint();
  return Visit(E->getRHS());
}
4542
4543//===----------------------------------------------------------------------===//
4544// Other Operators
4545//===----------------------------------------------------------------------===//
4546
4547/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
4548/// expression is cheap enough and side-effect-free enough to evaluate
4549/// unconditionally instead of conditionally. This is used to convert control
4550/// flow into selects in some cases.
4551static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
4552 CodeGenFunction &CGF) {
4553 // Anything that is an integer or floating point constant is fine.
4554 return E->IgnoreParens()->isEvaluatable(CGF.getContext());
4555
4556 // Even non-volatile automatic variables can't be evaluated unconditionally.
4557 // Referencing a thread_local may cause non-trivial initialization work to
4558 // occur. If we're inside a lambda and one of the variables is from the scope
4559 // outside the lambda, that function may have returned already. Reading its
4560 // locals is a bad idea. Also, these reads may introduce races there didn't
4561 // exist in the source-level program.
4562}
4563
4564
// Emit '?:' (both ConditionalOperator and the GNU BinaryConditionalOperator,
// via AbstractConditionalOperator). Strategy, in order of preference:
//   1) constant-fold the condition and emit only the live arm;
//   2) vector conditions -> element-wise blend/select (no control flow);
//   3) cheap, side-effect-free arms -> an IR 'select';
//   4) otherwise real control flow with a PHI at the join point.
Value *ScalarExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  TestAndClearIgnoreResultAssign();

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  Expr *condExpr = E->getCond();
  Expr *lhsExpr = E->getTrueExpr();
  Expr *rhsExpr = E->getFalseExpr();

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    Expr *live = lhsExpr, *dead = rhsExpr;
    if (!CondExprBool) std::swap(live, dead);

    // If the dead side doesn't have labels we need, just emit the Live part.
    if (!CGF.ContainsLabel(dead)) {
      if (CondExprBool)
        CGF.incrementProfileCounter(E);
      Value *Result = Visit(live);

      // If the live part is a throw expression, it acts like it has a void
      // type, so evaluating it returns a null Value*. However, a conditional
      // with non-void type must return a non-null Value*.
      if (!Result && !E->getType()->isVoidType())
        Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));

      return Result;
    }
  }

  // OpenCL: If the condition is a vector, we can treat this condition like
  // the select function.
  if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
      condExpr->getType()->isExtVectorType()) {
    CGF.incrementProfileCounter(E);

    // No short-circuiting here: condition and both arms are all emitted.
    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *condType = ConvertType(condExpr->getType());
    auto *vecTy = cast<llvm::FixedVectorType>(condType);

    unsigned numElem = vecTy->getNumElements();
    llvm::Type *elemType = vecTy->getElementType();

    // A lane selects the true arm iff the condition lane is negative,
    // i.e. its most-significant bit is set (tested via signed-less-than 0).
    llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
    llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
    llvm::Value *tmp = Builder.CreateSExt(
        TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
    llvm::Value *tmp2 = Builder.CreateNot(tmp);

    // Cast float to int to perform ANDs if necessary.
    llvm::Value *RHSTmp = RHS;
    llvm::Value *LHSTmp = LHS;
    bool wasCast = false;
    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
    if (rhsVTy->getElementType()->isFloatingPointTy()) {
      RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
      LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
      wasCast = true;
    }

    // Blend the arms with the mask: (RHS & ~mask) | (LHS & mask).
    llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
    llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
    llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
    if (wasCast)
      tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());

    return tmp5;
  }

  // Non-OpenCL vector condition: compare against zero and use a plain select.
  if (condExpr->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *CondType = ConvertType(condExpr->getType());
    auto *VecTy = cast<llvm::VectorType>(CondType);
    llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);

    CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
    return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
  }

  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow. We can only do this if it is cheap and
  // safe to evaluate the LHS and RHS unconditionally.
  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
      isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
    llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
    // Step the profile counter by the condition value (0 or 1) rather than
    // unconditionally, since there is no true-branch block to count in.
    llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);

    CGF.incrementProfileCounter(E, StepV);

    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);
    if (!LHS) {
      // If the conditional has void type, make sure we return a null Value*.
      assert(!RHS && "LHS and RHS types must match");
      return nullptr;
    }
    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
  }

  // General case: branch to a block per arm and join with a PHI.
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
                           CGF.getProfileCount(lhsExpr));

  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  eval.begin(CGF);
  Value *LHS = Visit(lhsExpr);
  eval.end(CGF);

  // Reacquire the insert block: emitting the arm may have created subblocks.
  LHSBlock = Builder.GetInsertBlock();
  Builder.CreateBr(ContBlock);

  CGF.EmitBlock(RHSBlock);
  eval.begin(CGF);
  Value *RHS = Visit(rhsExpr);
  eval.end(CGF);

  RHSBlock = Builder.GetInsertBlock();
  CGF.EmitBlock(ContBlock);

  // If the LHS or RHS is a throw expression, it will be legitimately null.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;

  // Create a PHI node for the real part.
  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
  PN->addIncoming(LHS, LHSBlock);
  PN->addIncoming(RHS, RHSBlock);
  return PN;
}
4713
// __builtin_choose_expr: Sema has already picked the live operand, so simply
// emit it.
Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
  return Visit(E->getChosenSubExpr());
}
4717
// Emit a va_arg expression: obtain the address of the next variadic argument
// via the target-specific EmitVAArg, then load it as a scalar.
Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  QualType Ty = VE->getType();

  // Variably-modified types need their size expressions evaluated first.
  if (Ty->isVariablyModifiedType())
    CGF.EmitVariablyModifiedType(Ty);

  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  llvm::Type *ArgTy = ConvertType(VE->getType());

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "va_arg expression");
    return llvm::UndefValue::get(ArgTy);
  }

  // FIXME Volatility.
  llvm::Value *Val = Builder.CreateLoad(ArgPtr);

  // If EmitVAArg promoted the type, we must truncate it.
  if (ArgTy != Val->getType()) {
    // Pointer results promoted through an integer slot need inttoptr;
    // everything else is a plain truncation back to the expression type.
    if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
      Val = Builder.CreateIntToPtr(Val, ArgTy);
    else
      Val = Builder.CreateTrunc(Val, ArgTy);
  }

  return Val;
}
4748
// Emit an Objective-C/Blocks block literal; the result is the block pointer.
Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
  return CGF.EmitBlockLiteral(block);
}
4752
4753// Convert a vec3 to vec4, or vice versa.
4754static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
4755 Value *Src, unsigned NumElementsDst) {
4756 static constexpr int Mask[] = {0, 1, 2, -1};
4757 return Builder.CreateShuffleVector(Src,
4758 llvm::makeArrayRef(Mask, NumElementsDst));
4759}
4760
4761// Create cast instructions for converting LLVM value \p Src to LLVM type \p
4762// DstTy. \p Src has the same size as \p DstTy. Both are single value types
4763// but could be scalar or vectors of different lengths, and either can be
4764// pointer.
4765// There are 4 cases:
4766// 1. non-pointer -> non-pointer : needs 1 bitcast
4767// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
4768// 3. pointer -> non-pointer
4769// a) pointer -> intptr_t : needs 1 ptrtoint
4770// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
4771// 4. non-pointer -> pointer
4772// a) intptr_t -> pointer : needs 1 inttoptr
4773// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
4774// Note: for cases 3b and 4b two casts are required since LLVM casts do not
4775// allow casting directly between pointer types and non-integer non-pointer
4776// types.
4777static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
4778 const llvm::DataLayout &DL,
4779 Value *Src, llvm::Type *DstTy,
4780 StringRef Name = "") {
4781 auto SrcTy = Src->getType();
4782
4783 // Case 1.
4784 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
4785 return Builder.CreateBitCast(Src, DstTy, Name);
4786
4787 // Case 2.
4788 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
4789 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
4790
4791 // Case 3.
4792 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
4793 // Case 3b.
4794 if (!DstTy->isIntegerTy())
4795 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
4796 // Cases 3a and 3b.
4797 return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
4798 }
4799
4800 // Case 4b.
4801 if (!SrcTy->isIntegerTy())
4802 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
4803 // Cases 4a and 4b.
4804 return Builder.CreateIntToPtr(Src, DstTy, Name);
4805}
4806
// Emit __builtin_astype: reinterpret the source value as the (same-size)
// destination type. vec3 <-> non-vec3 conversions are routed through a vec4
// shuffle; everything else is a single same-size cast sequence.
Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
  llvm::Type *DstTy = ConvertType(E->getType());

  llvm::Type *SrcTy = Src->getType();
  // Element counts are 0 for non-vector types, which makes the vec3 special
  // cases below easy to test.
  unsigned NumElementsSrc =
      isa<llvm::VectorType>(SrcTy)
          ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
          : 0;
  unsigned NumElementsDst =
      isa<llvm::VectorType>(DstTy)
          ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
          : 0;

  // Going from vec3 to non-vec3 is a special case and requires a shuffle
  // vector to get a vec4, then a bitcast if the target type is different.
  if (NumElementsSrc == 3 && NumElementsDst != 3) {
    Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       DstTy);

    Src->setName("astype");
    return Src;
  }

  // Going from non-vec3 to vec3 is a special case and requires a bitcast
  // to vec4 if the original type is not vec4, then a shuffle vector to
  // get a vec3.
  if (NumElementsSrc != 3 && NumElementsDst == 3) {
    auto *Vec4Ty = llvm::FixedVectorType::get(
        cast<llvm::VectorType>(DstTy)->getElementType(), 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       Vec4Ty);

    Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
    Src->setName("astype");
    return Src;
  }

  // Common case: a direct same-size conversion.
  return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
                                      Src, DstTy, "astype");
}
4849
// Emit an atomic expression and extract its scalar result value.
Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
  return CGF.EmitAtomicExpr(E).getScalarVal();
}
4853
4854//===----------------------------------------------------------------------===//
4855// Entry Point into this File
4856//===----------------------------------------------------------------------===//
4857
/// Emit the computation of the specified expression of scalar type, ignoring
/// the result.
Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
  assert(E && hasScalarEvaluationKind(E->getType()) &&
         "Invalid scalar expression to emit");

  // All the real work happens in the ScalarExprEmitter visitor;
  // IgnoreResultAssign is threaded through to it.
  return ScalarExprEmitter(*this, IgnoreResultAssign)
      .Visit(const_cast<Expr *>(E));
}
4867
/// Emit a conversion from the specified type to the specified destination type,
/// both of which are LLVM scalar types.
Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
                                             QualType DstTy,
                                             SourceLocation Loc) {
  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
         "Invalid scalar expression to emit");
  // Delegate to the emitter's conversion logic.
  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
}
4877
/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
                                                      QualType SrcTy,
                                                      QualType DstTy,
                                                      SourceLocation Loc) {
  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
         "Invalid complex -> scalar conversion");
  // Delegate to the emitter's complex-to-scalar conversion logic.
  return ScalarExprEmitter(*this)
      .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
}
4889
4890
// Thin wrapper forwarding pre/post increment/decrement emission to the
// scalar expression emitter.
llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {
  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
}
4896
4897LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
4898 // object->isa or (*object).isa
4899 // Generate code as for: *(Class*)object
4900
4901 Expr *BaseExpr = E->getBase();
4902 Address Addr = Address::invalid();
4903 if (BaseExpr->isPRValue()) {
4904 Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
4905 } else {
4906 Addr = EmitLValue(BaseExpr).getAddress(*this);
4907 }
4908
4909 // Cast the address to Class*.
4910 Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
4911 return MakeAddrLValue(Addr, E->getType());
4912}
4913
4914
// Emit a compound assignment ('+=', '<<=', ...) as an lvalue. The macro
// expands one case per compound opcode, dispatching to
// EmitCompoundAssignLValue with the matching binary-op emitter; every
// non-compound opcode is invalid here and asserts.
LValue CodeGenFunction::EmitCompoundAssignmentLValue(
                                              const CompoundAssignOperator *E) {
  ScalarExprEmitter Scalar(*this);
  Value *Result = nullptr;  // discarded; EmitCompoundAssignLValue fills it in
  switch (E->getOpcode()) {
#define COMPOUND_OP(Op)                                                       \
  case BO_##Op##Assign:                                                       \
    return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op,   \
                                           Result)
  COMPOUND_OP(Mul);
  COMPOUND_OP(Div);
  COMPOUND_OP(Rem);
  COMPOUND_OP(Add);
  COMPOUND_OP(Sub);
  COMPOUND_OP(Shl);
  COMPOUND_OP(Shr);
  COMPOUND_OP(And);
  COMPOUND_OP(Xor);
  COMPOUND_OP(Or);
#undef COMPOUND_OP

  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    llvm_unreachable("Not valid compound assignment operators");
  }

  llvm_unreachable("Unhandled compound assignment operator");
}
4964
/// Result of evaluating a GEP's byte offset; produced by
/// EmitGEPOffsetInBytes for the pointer-overflow sanitizer checks.
struct GEPOffsetAndOverflow {
  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset;
  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows;
};
4971
/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
/// and compute the total offset it applies from it's base pointer BasePtr.
/// Returns offset in bytes and a boolean flag whether an overflow happened
/// during evaluation.
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
                                                 llvm::LLVMContext &VMContext,
                                                 CodeGenModule &CGM,
                                                 CGBuilderTy &Builder) {
  const auto &DL = CGM.getDataLayout();

  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset = nullptr;

  // Was the GEP already reduced to a constant?
  if (isa<llvm::Constant>(GEPVal)) {
    // Compute the offset by casting both pointers to integers and subtracting:
    // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
    Value *BasePtr_int =
        Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
    Value *GEPVal_int =
        Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
    TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
    // A constant-folded GEP cannot have overflowed.
    return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
  }

  auto *GEP = cast<llvm::GEPOperator>(GEPVal);
  assert(GEP->getPointerOperand() == BasePtr &&
         "BasePtr must be the base of the GEP.");
  assert(GEP->isInBounds() && "Expected inbounds GEP");

  auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());

  // Grab references to the signed add/mul overflow intrinsics for intptr_t.
  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
  auto *SAddIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
  auto *SMulIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);

  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows = Builder.getFalse();

  /// Return the result of the given binary operation.
  /// Folds constant operands at compile time (recording any overflow in
  /// OffsetOverflows); otherwise emits a checked-arithmetic intrinsic call
  /// and ORs its overflow bit into OffsetOverflows.
  auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
                  llvm::Value *RHS) -> llvm::Value * {
    assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");

    // If the operands are constants, return a constant result.
    if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
      if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
        llvm::APInt N;
        bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
                                                  /*Signed=*/true, N);
        if (HasOverflow)
          OffsetOverflows = Builder.getTrue();
        return llvm::ConstantInt::get(VMContext, N);
      }
    }

    // Otherwise, compute the result with checked arithmetic.
    auto *ResultAndOverflow = Builder.CreateCall(
        (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
    OffsetOverflows = Builder.CreateOr(
        Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
    return Builder.CreateExtractValue(ResultAndOverflow, 0);
  };

  // Determine the total byte offset by looking at each GEP operand.
  for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    llvm::Value *LocalOffset;
    auto *Index = GTI.getOperand();
    // Compute the local offset contributed by this indexing step:
    if (auto *STy = GTI.getStructTypeOrNull()) {
      // For struct indexing, the local offset is the byte position of the
      // specified field.
      unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
      LocalOffset = llvm::ConstantInt::get(
          IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
    } else {
      // Otherwise this is array-like indexing. The local offset is the index
      // multiplied by the element size.
      auto *ElementSize = llvm::ConstantInt::get(
          IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
      auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
      LocalOffset = eval(BO_Mul, ElementSize, IndexS);
    }

    // If this is the first offset, set it as the total offset. Otherwise, add
    // the local offset into the running total.
    if (!TotalOffset || TotalOffset == Zero)
      TotalOffset = LocalOffset;
    else
      TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
  }

  return {TotalOffset, OffsetOverflows};
}
5070
5071Value *
5072CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
5073 ArrayRef<Value *> IdxList,
5074 bool SignedIndices, bool IsSubtraction,
5075 SourceLocation Loc, const Twine &Name) {
5076 llvm::Type *PtrTy = Ptr->getType();
5077 Value *GEPVal = Builder.CreateInBoundsGEP(ElemTy, Ptr, IdxList, Name);
5078
5079 // If the pointer overflow sanitizer isn't enabled, do nothing.
5080 if (!SanOpts.has(SanitizerKind::PointerOverflow))
5081 return GEPVal;
5082
5083 // Perform nullptr-and-offset check unless the nullptr is defined.
5084 bool PerformNullCheck = !NullPointerIsDefined(
5085 Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
5086 // Check for overflows unless the GEP got constant-folded,
5087 // and only in the default address space
5088 bool PerformOverflowCheck =
5089 !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
5090
5091 if (!(PerformNullCheck || PerformOverflowCheck))
5092 return GEPVal;
5093
5094 const auto &DL = CGM.getDataLayout();
5095
5096 SanitizerScope SanScope(this);
5097 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
5098
5099 GEPOffsetAndOverflow EvaluatedGEP =
5100 EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
5101
5102 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||(static_cast <bool> ((!isa<llvm::Constant>(EvaluatedGEP
.TotalOffset) || EvaluatedGEP.OffsetOverflows == Builder.getFalse
()) && "If the offset got constant-folded, we don't expect that there was an "
"overflow.") ? void (0) : __assert_fail ("(!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP.OffsetOverflows == Builder.getFalse()) && \"If the offset got constant-folded, we don't expect that there was an \" \"overflow.\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 5105, __extension__ __PRETTY_FUNCTION__
))
5103 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&(static_cast <bool> ((!isa<llvm::Constant>(EvaluatedGEP
.TotalOffset) || EvaluatedGEP.OffsetOverflows == Builder.getFalse
()) && "If the offset got constant-folded, we don't expect that there was an "
"overflow.") ? void (0) : __assert_fail ("(!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP.OffsetOverflows == Builder.getFalse()) && \"If the offset got constant-folded, we don't expect that there was an \" \"overflow.\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 5105, __extension__ __PRETTY_FUNCTION__
))
5104 "If the offset got constant-folded, we don't expect that there was an "(static_cast <bool> ((!isa<llvm::Constant>(EvaluatedGEP
.TotalOffset) || EvaluatedGEP.OffsetOverflows == Builder.getFalse
()) && "If the offset got constant-folded, we don't expect that there was an "
"overflow.") ? void (0) : __assert_fail ("(!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP.OffsetOverflows == Builder.getFalse()) && \"If the offset got constant-folded, we don't expect that there was an \" \"overflow.\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 5105, __extension__ __PRETTY_FUNCTION__
))
5105 "overflow.")(static_cast <bool> ((!isa<llvm::Constant>(EvaluatedGEP
.TotalOffset) || EvaluatedGEP.OffsetOverflows == Builder.getFalse
()) && "If the offset got constant-folded, we don't expect that there was an "
"overflow.") ? void (0) : __assert_fail ("(!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) || EvaluatedGEP.OffsetOverflows == Builder.getFalse()) && \"If the offset got constant-folded, we don't expect that there was an \" \"overflow.\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 5105, __extension__ __PRETTY_FUNCTION__
))
;
5106
5107 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
5108
5109 // Common case: if the total offset is zero, and we are using C++ semantics,
5110 // where nullptr+0 is defined, don't emit a check.
5111 if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
5112 return GEPVal;
5113
5114 // Now that we've computed the total offset, add it to the base pointer (with
5115 // wrapping semantics).
5116 auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
5117 auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);
5118
5119 llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
5120
5121 if (PerformNullCheck) {
5122 // In C++, if the base pointer evaluates to a null pointer value,
5123 // the only valid pointer this inbounds GEP can produce is also
5124 // a null pointer, so the offset must also evaluate to zero.
5125 // Likewise, if we have non-zero base pointer, we can not get null pointer
5126 // as a result, so the offset can not be -intptr_t(BasePtr).
5127 // In other words, both pointers are either null, or both are non-null,
5128 // or the behaviour is undefined.
5129 //
5130 // C, however, is more strict in this regard, and gives more
5131 // optimization opportunities: in C, additionally, nullptr+0 is undefined.
5132 // So both the input to the 'gep inbounds' AND the output must not be null.
5133 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
5134 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
5135 auto *Valid =
5136 CGM.getLangOpts().CPlusPlus
5137 ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
5138 : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
5139 Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
5140 }
5141
5142 if (PerformOverflowCheck) {
5143 // The GEP is valid if:
5144 // 1) The total offset doesn't overflow, and
5145 // 2) The sign of the difference between the computed address and the base
5146 // pointer matches the sign of the total offset.
5147 llvm::Value *ValidGEP;
5148 auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
5149 if (SignedIndices) {
5150 // GEP is computed as `unsigned base + signed offset`, therefore:
5151 // * If offset was positive, then the computed pointer can not be
5152 // [unsigned] less than the base pointer, unless it overflowed.
5153 // * If offset was negative, then the computed pointer can not be
5154 // [unsigned] greater than the bas pointere, unless it overflowed.
5155 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5156 auto *PosOrZeroOffset =
5157 Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
5158 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
5159 ValidGEP =
5160 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
5161 } else if (!IsSubtraction) {
5162 // GEP is computed as `unsigned base + unsigned offset`, therefore the
5163 // computed pointer can not be [unsigned] less than base pointer,
5164 // unless there was an overflow.
5165 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
5166 ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5167 } else {
5168 // GEP is computed as `unsigned base - unsigned offset`, therefore the
5169 // computed pointer can not be [unsigned] greater than base pointer,
5170 // unless there was an overflow.
5171 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
5172 ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
5173 }
5174 ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
5175 Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
5176 }
5177
5178 assert(!Checks.empty() && "Should have produced some checks.")(static_cast <bool> (!Checks.empty() && "Should have produced some checks."
) ? void (0) : __assert_fail ("!Checks.empty() && \"Should have produced some checks.\""
, "clang/lib/CodeGen/CGExprScalar.cpp", 5178, __extension__ __PRETTY_FUNCTION__
))
;
5179
5180 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
5181 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
5182 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
5183 EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
5184
5185 return GEPVal;
5186}

/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/include/llvm/IR/Type.h

<
1//===- llvm/Type.h - Classes for handling data types ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the declaration of the Type class. For more "Type"
10// stuff, look in DerivedTypes.h.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_TYPE_H
15#define LLVM_IR_TYPE_H
16
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/SmallPtrSet.h"
19#include "llvm/Support/CBindingWrapping.h"
20#include "llvm/Support/Casting.h"
21#include "llvm/Support/Compiler.h"
22#include "llvm/Support/ErrorHandling.h"
23#include "llvm/Support/TypeSize.h"
24#include <cassert>
25#include <cstdint>
26#include <iterator>
27
28namespace llvm {
29
30class IntegerType;
31struct fltSemantics;
32class LLVMContext;
33class PointerType;
34class raw_ostream;
35class StringRef;
36
37/// The instances of the Type class are immutable: once they are created,
38/// they are never changed. Also note that only one instance of a particular
39/// type is ever created. Thus seeing if two types are equal is a matter of
40/// doing a trivial pointer comparison. To enforce that no two equal instances
41/// are created, Type instances can only be created via static factory methods
42/// in class Type and in derived classes. Once allocated, Types are never
43/// free'd.
44///
45class Type {
46public:
47 //===--------------------------------------------------------------------===//
48 /// Definitions of all of the base types for the Type system. Based on this
49 /// value, you can cast to a class defined in DerivedTypes.h.
50 /// Note: If you add an element to this, you need to add an element to the
51 /// Type::getPrimitiveType function, or else things will break!
52 /// Also update LLVMTypeKind and LLVMGetTypeKind () in the C binding.
53 ///
54 enum TypeID {
55 // PrimitiveTypes
56 HalfTyID = 0, ///< 16-bit floating point type
57 BFloatTyID, ///< 16-bit floating point type (7-bit significand)
58 FloatTyID, ///< 32-bit floating point type
59 DoubleTyID, ///< 64-bit floating point type
60 X86_FP80TyID, ///< 80-bit floating point type (X87)
61 FP128TyID, ///< 128-bit floating point type (112-bit significand)
62 PPC_FP128TyID, ///< 128-bit floating point type (two 64-bits, PowerPC)
63 VoidTyID, ///< type with no size
64 LabelTyID, ///< Labels
65 MetadataTyID, ///< Metadata
66 X86_MMXTyID, ///< MMX vectors (64 bits, X86 specific)
67 X86_AMXTyID, ///< AMX vectors (8192 bits, X86 specific)
68 TokenTyID, ///< Tokens
69
70 // Derived types... see DerivedTypes.h file.
71 IntegerTyID, ///< Arbitrary bit width integers
72 FunctionTyID, ///< Functions
73 PointerTyID, ///< Pointers
74 StructTyID, ///< Structures
75 ArrayTyID, ///< Arrays
76 FixedVectorTyID, ///< Fixed width SIMD vector type
77 ScalableVectorTyID ///< Scalable SIMD vector type
78 };
79
80private:
81 /// This refers to the LLVMContext in which this type was uniqued.
82 LLVMContext &Context;
83
84 TypeID ID : 8; // The current base type of this type.
85 unsigned SubclassData : 24; // Space for subclasses to store data.
86 // Note that this should be synchronized with
87 // MAX_INT_BITS value in IntegerType class.
88
89protected:
90 friend class LLVMContextImpl;
91
92 explicit Type(LLVMContext &C, TypeID tid)
93 : Context(C), ID(tid), SubclassData(0) {}
94 ~Type() = default;
95
96 unsigned getSubclassData() const { return SubclassData; }
97
98 void setSubclassData(unsigned val) {
99 SubclassData = val;
100 // Ensure we don't have any accidental truncation.
101 assert(getSubclassData() == val && "Subclass data too large for field")(static_cast <bool> (getSubclassData() == val &&
"Subclass data too large for field") ? void (0) : __assert_fail
("getSubclassData() == val && \"Subclass data too large for field\""
, "llvm/include/llvm/IR/Type.h", 101, __extension__ __PRETTY_FUNCTION__
))
;
102 }
103
104 /// Keeps track of how many Type*'s there are in the ContainedTys list.
105 unsigned NumContainedTys = 0;
106
107 /// A pointer to the array of Types contained by this Type. For example, this
108 /// includes the arguments of a function type, the elements of a structure,
109 /// the pointee of a pointer, the element type of an array, etc. This pointer
110 /// may be 0 for types that don't contain other types (Integer, Double,
111 /// Float).
112 Type * const *ContainedTys = nullptr;
113
114public:
115 /// Print the current type.
116 /// Omit the type details if \p NoDetails == true.
117 /// E.g., let %st = type { i32, i16 }
118 /// When \p NoDetails is true, we only print %st.
119 /// Put differently, \p NoDetails prints the type as if
120 /// inlined with the operands when printing an instruction.
121 void print(raw_ostream &O, bool IsForDebug = false,
122 bool NoDetails = false) const;
123
124 void dump() const;
125
126 /// Return the LLVMContext in which this type was uniqued.
127 LLVMContext &getContext() const { return Context; }
128
129 //===--------------------------------------------------------------------===//
130 // Accessors for working with types.
131 //
132
133 /// Return the type id for the type. This will return one of the TypeID enum
134 /// elements defined above.
135 TypeID getTypeID() const { return ID; }
136
137 /// Return true if this is 'void'.
138 bool isVoidTy() const { return getTypeID() == VoidTyID; }
139
140 /// Return true if this is 'half', a 16-bit IEEE fp type.
141 bool isHalfTy() const { return getTypeID() == HalfTyID; }
142
143 /// Return true if this is 'bfloat', a 16-bit bfloat type.
144 bool isBFloatTy() const { return getTypeID() == BFloatTyID; }
145
146 /// Return true if this is 'float', a 32-bit IEEE fp type.
147 bool isFloatTy() const { return getTypeID() == FloatTyID; }
148
149 /// Return true if this is 'double', a 64-bit IEEE fp type.
150 bool isDoubleTy() const { return getTypeID() == DoubleTyID; }
151
152 /// Return true if this is x86 long double.
153 bool isX86_FP80Ty() const { return getTypeID() == X86_FP80TyID; }
154
155 /// Return true if this is 'fp128'.
156 bool isFP128Ty() const { return getTypeID() == FP128TyID; }
157
158 /// Return true if this is powerpc long double.
159 bool isPPC_FP128Ty() const { return getTypeID() == PPC_FP128TyID; }
160
161 /// Return true if this is one of the six floating-point types
162 bool isFloatingPointTy() const {
163 return getTypeID() == HalfTyID || getTypeID() == BFloatTyID ||
164 getTypeID() == FloatTyID || getTypeID() == DoubleTyID ||
165 getTypeID() == X86_FP80TyID || getTypeID() == FP128TyID ||
166 getTypeID() == PPC_FP128TyID;
167 }
168
169 const fltSemantics &getFltSemantics() const;
170
171 /// Return true if this is X86 MMX.
172 bool isX86_MMXTy() const { return getTypeID() == X86_MMXTyID; }
173
174 /// Return true if this is X86 AMX.
175 bool isX86_AMXTy() const { return getTypeID() == X86_AMXTyID; }
176
177 /// Return true if this is a FP type or a vector of FP.
178 bool isFPOrFPVectorTy() const { return getScalarType()->isFloatingPointTy(); }
179
180 /// Return true if this is 'label'.
181 bool isLabelTy() const { return getTypeID() == LabelTyID; }
182
183 /// Return true if this is 'metadata'.
184 bool isMetadataTy() const { return getTypeID() == MetadataTyID; }
185
186 /// Return true if this is 'token'.
187 bool isTokenTy() const { return getTypeID() == TokenTyID; }
188
189 /// True if this is an instance of IntegerType.
190 bool isIntegerTy() const { return getTypeID() == IntegerTyID; }
191
192 /// Return true if this is an IntegerType of the given width.
193 bool isIntegerTy(unsigned Bitwidth) const;
194
195 /// Return true if this is an integer type or a vector of integer types.
196 bool isIntOrIntVectorTy() const { return getScalarType()->isIntegerTy(); }
197
198 /// Return true if this is an integer type or a vector of integer types of
199 /// the given width.
200 bool isIntOrIntVectorTy(unsigned BitWidth) const {
201 return getScalarType()->isIntegerTy(BitWidth);
202 }
203
204 /// Return true if this is an integer type or a pointer type.
205 bool isIntOrPtrTy() const { return isIntegerTy() || isPointerTy(); }
206
207 /// True if this is an instance of FunctionType.
208 bool isFunctionTy() const { return getTypeID() == FunctionTyID; }
209
210 /// True if this is an instance of StructType.
211 bool isStructTy() const { return getTypeID() == StructTyID; }
212
213 /// True if this is an instance of ArrayType.
214 bool isArrayTy() const { return getTypeID() == ArrayTyID; }
215
216 /// True if this is an instance of PointerType.
217 bool isPointerTy() const { return getTypeID() == PointerTyID; }
4
Assuming the condition is false
5
Returning zero, which participates in a condition later
218
219 /// True if this is an instance of an opaque PointerType.
220 bool isOpaquePointerTy() const;
221
222 /// Return true if this is a pointer type or a vector of pointer types.
223 bool isPtrOrPtrVectorTy() const { return getScalarType()->isPointerTy(); }
3
Calling 'Type::isPointerTy'
6
Returning from 'Type::isPointerTy'
7
Returning zero, which participates in a condition later
224
225 /// True if this is an instance of VectorType.
226 inline bool isVectorTy() const {
227 return getTypeID() == ScalableVectorTyID || getTypeID() == FixedVectorTyID;
228 }
229
230 /// Return true if this type could be converted with a lossless BitCast to
231 /// type 'Ty'. For example, i8* to i32*. BitCasts are valid for types of the
232 /// same size only where no re-interpretation of the bits is done.
233 /// Determine if this type could be losslessly bitcast to Ty
234 bool canLosslesslyBitCastTo(Type *Ty) const;
235
236 /// Return true if this type is empty, that is, it has no elements or all of
237 /// its elements are empty.
238 bool isEmptyTy() const;
239
240 /// Return true if the type is "first class", meaning it is a valid type for a
241 /// Value.
242 bool isFirstClassType() const {
243 return getTypeID() != FunctionTyID && getTypeID() != VoidTyID;
244 }
245
246 /// Return true if the type is a valid type for a register in codegen. This
247 /// includes all first-class types except struct and array types.
248 bool isSingleValueType() const {
249 return isFloatingPointTy() || isX86_MMXTy() || isIntegerTy() ||
250 isPointerTy() || isVectorTy() || isX86_AMXTy();
251 }
252
253 /// Return true if the type is an aggregate type. This means it is valid as
254 /// the first operand of an insertvalue or extractvalue instruction. This
255 /// includes struct and array types, but does not include vector types.
256 bool isAggregateType() const {
257 return getTypeID() == StructTyID || getTypeID() == ArrayTyID;
258 }
259
260 /// Return true if it makes sense to take the size of this type. To get the
261 /// actual size for a particular target, it is reasonable to use the
262 /// DataLayout subsystem to do this.
263 bool isSized(SmallPtrSetImpl<Type*> *Visited = nullptr) const {
264 // If it's a primitive, it is always sized.
265 if (getTypeID() == IntegerTyID || isFloatingPointTy() ||
266 getTypeID() == PointerTyID || getTypeID() == X86_MMXTyID ||
267 getTypeID() == X86_AMXTyID)
268 return true;
269 // If it is not something that can have a size (e.g. a function or label),
270 // it doesn't have a size.
271 if (getTypeID() != StructTyID && getTypeID() != ArrayTyID && !isVectorTy())
272 return false;
273 // Otherwise we have to try harder to decide.
274 return isSizedDerivedType(Visited);
275 }
276
277 /// Return the basic size of this type if it is a primitive type. These are
278 /// fixed by LLVM and are not target-dependent.
279 /// This will return zero if the type does not have a size or is not a
280 /// primitive type.
281 ///
282 /// If this is a scalable vector type, the scalable property will be set and
283 /// the runtime size will be a positive integer multiple of the base size.
284 ///
285 /// Note that this may not reflect the size of memory allocated for an
286 /// instance of the type or the number of bytes that are written when an
287 /// instance of the type is stored to memory. The DataLayout class provides
288 /// additional query functions to provide this information.
289 ///
290 TypeSize getPrimitiveSizeInBits() const LLVM_READONLY__attribute__((__pure__));
291
292 /// If this is a vector type, return the getPrimitiveSizeInBits value for the
293 /// element type. Otherwise return the getPrimitiveSizeInBits value for this
294 /// type.
295 unsigned getScalarSizeInBits() const LLVM_READONLY__attribute__((__pure__));
296
297 /// Return the width of the mantissa of this type. This is only valid on
298 /// floating-point types. If the FP type does not have a stable mantissa (e.g.
299 /// ppc long double), this method returns -1.
300 int getFPMantissaWidth() const;
301
302 /// Return whether the type is IEEE compatible, as defined by the eponymous
303 /// method in APFloat.
304 bool isIEEE() const;
305
306 /// If this is a vector type, return the element type, otherwise return
307 /// 'this'.
308 inline Type *getScalarType() const {
309 if (isVectorTy())
310 return getContainedType(0);
311 return const_cast<Type *>(this);
312 }
313
314 //===--------------------------------------------------------------------===//
315 // Type Iteration support.
316 //
317 using subtype_iterator = Type * const *;
318
319 subtype_iterator subtype_begin() const { return ContainedTys; }
320 subtype_iterator subtype_end() const { return &ContainedTys[NumContainedTys];}
321 ArrayRef<Type*> subtypes() const {
322 return makeArrayRef(subtype_begin(), subtype_end());
323 }
324
325 using subtype_reverse_iterator = std::reverse_iterator<subtype_iterator>;
326
327 subtype_reverse_iterator subtype_rbegin() const {
328 return subtype_reverse_iterator(subtype_end());
329 }
330 subtype_reverse_iterator subtype_rend() const {
331 return subtype_reverse_iterator(subtype_begin());
332 }
333
334 /// This method is used to implement the type iterator (defined at the end of
335 /// the file). For derived types, this returns the types 'contained' in the
336 /// derived type.
337 Type *getContainedType(unsigned i) const {
338 assert(i < NumContainedTys && "Index out of range!")(static_cast <bool> (i < NumContainedTys && "Index out of range!"
) ? void (0) : __assert_fail ("i < NumContainedTys && \"Index out of range!\""
, "llvm/include/llvm/IR/Type.h", 338, __extension__ __PRETTY_FUNCTION__
))
;
339 return ContainedTys[i];
340 }
341
342 /// Return the number of types in the derived type.
343 unsigned getNumContainedTypes() const { return NumContainedTys; }
344
345 //===--------------------------------------------------------------------===//
346 // Helper methods corresponding to subclass methods. This forces a cast to
347 // the specified subclass and calls its accessor. "getArrayNumElements" (for
348 // example) is shorthand for cast<ArrayType>(Ty)->getNumElements(). This is
349 // only intended to cover the core methods that are frequently used, helper
350 // methods should not be added here.
351
352 inline unsigned getIntegerBitWidth() const;
353
354 inline Type *getFunctionParamType(unsigned i) const;
355 inline unsigned getFunctionNumParams() const;
356 inline bool isFunctionVarArg() const;
357
358 inline StringRef getStructName() const;
359 inline unsigned getStructNumElements() const;
360 inline Type *getStructElementType(unsigned N) const;
361
362 inline uint64_t getArrayNumElements() const;
363
364 Type *getArrayElementType() const {
365 assert(getTypeID() == ArrayTyID)(static_cast <bool> (getTypeID() == ArrayTyID) ? void (
0) : __assert_fail ("getTypeID() == ArrayTyID", "llvm/include/llvm/IR/Type.h"
, 365, __extension__ __PRETTY_FUNCTION__))
;
366 return ContainedTys[0];
367 }
368
369 Type *getPointerElementType() const {
370 assert(getTypeID() == PointerTyID)(static_cast <bool> (getTypeID() == PointerTyID) ? void
(0) : __assert_fail ("getTypeID() == PointerTyID", "llvm/include/llvm/IR/Type.h"
, 370, __extension__ __PRETTY_FUNCTION__))
;
371 assert(NumContainedTys &&(static_cast <bool> (NumContainedTys && "Attempting to get element type of opaque pointer"
) ? void (0) : __assert_fail ("NumContainedTys && \"Attempting to get element type of opaque pointer\""
, "llvm/include/llvm/IR/Type.h", 372, __extension__ __PRETTY_FUNCTION__
))
372 "Attempting to get element type of opaque pointer")(static_cast <bool> (NumContainedTys && "Attempting to get element type of opaque pointer"
) ? void (0) : __assert_fail ("NumContainedTys && \"Attempting to get element type of opaque pointer\""
, "llvm/include/llvm/IR/Type.h", 372, __extension__ __PRETTY_FUNCTION__
))
;
373 return ContainedTys[0];
374 }
375
376 /// Given vector type, change the element type,
377 /// whilst keeping the old number of elements.
378 /// For non-vectors simply returns \p EltTy.
379 inline Type *getWithNewType(Type *EltTy) const;
380
381 /// Given an integer or vector type, change the lane bitwidth to NewBitwidth,
382 /// whilst keeping the old number of lanes.
383 inline Type *getWithNewBitWidth(unsigned NewBitWidth) const;
384
385 /// Given scalar/vector integer type, returns a type with elements twice as
386 /// wide as in the original type. For vectors, preserves element count.
387 inline Type *getExtendedType() const;
388
389 /// Get the address space of this pointer or pointer vector type.
390 inline unsigned getPointerAddressSpace() const;
391
392 //===--------------------------------------------------------------------===//
393 // Static members exported by the Type class itself. Useful for getting
394 // instances of Type.
395 //
396
397 /// Return a type based on an identifier.
398 static Type *getPrimitiveType(LLVMContext &C, TypeID IDNumber);
399
400 //===--------------------------------------------------------------------===//
401 // These are the builtin types that are always available.
402 //
403 static Type *getVoidTy(LLVMContext &C);
404 static Type *getLabelTy(LLVMContext &C);
405 static Type *getHalfTy(LLVMContext &C);
406 static Type *getBFloatTy(LLVMContext &C);
407 static Type *getFloatTy(LLVMContext &C);
408 static Type *getDoubleTy(LLVMContext &C);
409 static Type *getMetadataTy(LLVMContext &C);
410 static Type *getX86_FP80Ty(LLVMContext &C);
411 static Type *getFP128Ty(LLVMContext &C);
412 static Type *getPPC_FP128Ty(LLVMContext &C);
413 static Type *getX86_MMXTy(LLVMContext &C);
414 static Type *getX86_AMXTy(LLVMContext &C);
415 static Type *getTokenTy(LLVMContext &C);
416 static IntegerType *getIntNTy(LLVMContext &C, unsigned N);
417 static IntegerType *getInt1Ty(LLVMContext &C);
418 static IntegerType *getInt8Ty(LLVMContext &C);
419 static IntegerType *getInt16Ty(LLVMContext &C);
420 static IntegerType *getInt32Ty(LLVMContext &C);
421 static IntegerType *getInt64Ty(LLVMContext &C);
422 static IntegerType *getInt128Ty(LLVMContext &C);
423 template <typename ScalarTy> static Type *getScalarTy(LLVMContext &C) {
424 int noOfBits = sizeof(ScalarTy) * CHAR_BIT8;
425 if (std::is_integral<ScalarTy>::value) {
426 return (Type*) Type::getIntNTy(C, noOfBits);
427 } else if (std::is_floating_point<ScalarTy>::value) {
428 switch (noOfBits) {
429 case 32:
430 return Type::getFloatTy(C);
431 case 64:
432 return Type::getDoubleTy(C);
433 }
434 }
435 llvm_unreachable("Unsupported type in Type::getScalarTy")::llvm::llvm_unreachable_internal("Unsupported type in Type::getScalarTy"
, "llvm/include/llvm/IR/Type.h", 435)
;
436 }
437 static Type *getFloatingPointTy(LLVMContext &C, const fltSemantics &S);
438
439 //===--------------------------------------------------------------------===//
440 // Convenience methods for getting pointer types with one of the above builtin
441 // types as pointee.
442 //
443 static PointerType *getHalfPtrTy(LLVMContext &C, unsigned AS = 0);
444 static PointerType *getBFloatPtrTy(LLVMContext &C, unsigned AS = 0);
445 static PointerType *getFloatPtrTy(LLVMContext &C, unsigned AS = 0);
446 static PointerType *getDoublePtrTy(LLVMContext &C, unsigned AS = 0);
447 static PointerType *getX86_FP80PtrTy(LLVMContext &C, unsigned AS = 0);
448 static PointerType *getFP128PtrTy(LLVMContext &C, unsigned AS = 0);
449 static PointerType *getPPC_FP128PtrTy(LLVMContext &C, unsigned AS = 0);
450 static PointerType *getX86_MMXPtrTy(LLVMContext &C, unsigned AS = 0);
451 static PointerType *getX86_AMXPtrTy(LLVMContext &C, unsigned AS = 0);
452 static PointerType *getIntNPtrTy(LLVMContext &C, unsigned N, unsigned AS = 0);
453 static PointerType *getInt1PtrTy(LLVMContext &C, unsigned AS = 0);
454 static PointerType *getInt8PtrTy(LLVMContext &C, unsigned AS = 0);
455 static PointerType *getInt16PtrTy(LLVMContext &C, unsigned AS = 0);
456 static PointerType *getInt32PtrTy(LLVMContext &C, unsigned AS = 0);
457 static PointerType *getInt64PtrTy(LLVMContext &C, unsigned AS = 0);
458
459 /// Return a pointer to the current type. This is equivalent to
460 /// PointerType::get(Foo, AddrSpace).
461 /// TODO: Remove this after opaque pointer transition is complete.
462 PointerType *getPointerTo(unsigned AddrSpace = 0) const;