Bug Summary

File: llvm/include/llvm/IR/Instructions.h
Warning: line 1259, column 33
Called C++ object pointer is null

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGExprScalar.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -relaxed-aliasing -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/tools/clang/lib/CodeGen -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/clang/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/tools/clang/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/clang/lib/CodeGen/CGExprScalar.cpp

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/clang/lib/CodeGen/CGExprScalar.cpp

1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGObjCRuntime.h"
17#include "CGOpenMPRuntime.h"
18#include "CodeGenFunction.h"
19#include "CodeGenModule.h"
20#include "ConstantEmitter.h"
21#include "TargetInfo.h"
22#include "clang/AST/ASTContext.h"
23#include "clang/AST/Attr.h"
24#include "clang/AST/DeclObjC.h"
25#include "clang/AST/Expr.h"
26#include "clang/AST/RecordLayout.h"
27#include "clang/AST/StmtVisitor.h"
28#include "clang/Basic/CodeGenOptions.h"
29#include "clang/Basic/TargetInfo.h"
30#include "llvm/ADT/APFixedPoint.h"
31#include "llvm/ADT/Optional.h"
32#include "llvm/IR/CFG.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/FixedPointBuilder.h"
36#include "llvm/IR/Function.h"
37#include "llvm/IR/GetElementPtrTypeIterator.h"
38#include "llvm/IR/GlobalVariable.h"
39#include "llvm/IR/Intrinsics.h"
40#include "llvm/IR/IntrinsicsPowerPC.h"
41#include "llvm/IR/MatrixBuilder.h"
42#include "llvm/IR/Module.h"
43#include <cstdarg>
44
45using namespace clang;
46using namespace CodeGen;
47using llvm::Value;
48
49//===----------------------------------------------------------------------===//
50// Scalar Expression Emitter
51//===----------------------------------------------------------------------===//
52
53namespace {
54
55/// Determine whether the given binary operation may overflow.
56/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
57/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
58/// the returned overflow check is precise. The returned value is 'true' for
59/// all other opcodes, to be conservative.
60bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
61 BinaryOperator::Opcode Opcode, bool Signed,
62 llvm::APInt &Result) {
63 // Assume overflow is possible, unless we can prove otherwise.
64 bool Overflow = true;
65 const auto &LHSAP = LHS->getValue();
66 const auto &RHSAP = RHS->getValue();
67 if (Opcode == BO_Add) {
68 if (Signed)
69 Result = LHSAP.sadd_ov(RHSAP, Overflow);
70 else
71 Result = LHSAP.uadd_ov(RHSAP, Overflow);
72 } else if (Opcode == BO_Sub) {
73 if (Signed)
74 Result = LHSAP.ssub_ov(RHSAP, Overflow);
75 else
76 Result = LHSAP.usub_ov(RHSAP, Overflow);
77 } else if (Opcode == BO_Mul) {
78 if (Signed)
79 Result = LHSAP.smul_ov(RHSAP, Overflow);
80 else
81 Result = LHSAP.umul_ov(RHSAP, Overflow);
82 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
83 if (Signed && !RHS->isZero())
84 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
85 else
86 return false;
87 }
88 return Overflow;
89}
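
// An illustrative, self-contained sketch (not part of CGExprScalar.cpp) of
// the llvm::APInt checked-arithmetic API the helper above relies on: each
// *_ov call returns the wrapped result and sets its bool out-parameter when
// the exact result did not fit in the bit width. Requires "llvm/ADT/APInt.h".
static bool addsWithoutOverflow(const llvm::APInt &A, const llvm::APInt &B,
                                bool Signed) {
  bool Overflow = false;
  // Compute A + B; Overflow is set to true if the result wrapped.
  (void)(Signed ? A.sadd_ov(B, Overflow) : A.uadd_ov(B, Overflow));
  return !Overflow;
}
// e.g. addsWithoutOverflow(APInt(8, 200), APInt(8, 100), /*Signed=*/false)
// returns false: 300 wraps to 44 in 8 bits and Overflow is set.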
90
91struct BinOpInfo {
92 Value *LHS;
93 Value *RHS;
94 QualType Ty; // Computation Type.
95 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
96 FPOptions FPFeatures;
97 const Expr *E; // Entire expr, for error unsupported. May not be binop.
98
99 /// Check if the binop can result in integer overflow.
100 bool mayHaveIntegerOverflow() const {
101 // Without constant input, we can't rule out overflow.
102 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
103 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
104 if (!LHSCI || !RHSCI)
105 return true;
106
107 llvm::APInt Result;
108 return ::mayHaveIntegerOverflow(
109 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
110 }
111
112 /// Check if the binop computes a division or a remainder.
113 bool isDivremOp() const {
114 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
115 Opcode == BO_RemAssign;
116 }
117
118 /// Check if the binop can result in an integer division by zero.
119 bool mayHaveIntegerDivisionByZero() const {
120 if (isDivremOp())
121 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
122 return CI->isZero();
123 return true;
124 }
125
126 /// Check if the binop can result in a float division by zero.
127 bool mayHaveFloatDivisionByZero() const {
128 if (isDivremOp())
129 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
130 return CFP->isZero();
131 return true;
132 }
133
134 /// Check if at least one operand is a fixed point type. In such cases, this
135 /// operation did not follow usual arithmetic conversion and both operands
136 /// might not be of the same type.
137 bool isFixedPointOp() const {
138 // We cannot simply check the result type since comparison operations return
139 // an int.
140 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
141 QualType LHSType = BinOp->getLHS()->getType();
142 QualType RHSType = BinOp->getRHS()->getType();
143 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
144 }
145 if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
146 return UnOp->getSubExpr()->getType()->isFixedPointType();
147 return false;
148 }
149};
150
151static bool MustVisitNullValue(const Expr *E) {
152 // If a null pointer expression's type is the C++0x nullptr_t, then
153 // it's not necessarily a simple constant and it must be evaluated
154 // for its potential side effects.
155 return E->getType()->isNullPtrType();
156}
157
158/// If \p E is a widened promoted integer, get its base (unpromoted) type.
159static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
160 const Expr *E) {
161 const Expr *Base = E->IgnoreImpCasts();
162 if (E == Base)
163 return llvm::None;
164
165 QualType BaseTy = Base->getType();
166 if (!BaseTy->isPromotableIntegerType() ||
167 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
168 return llvm::None;
169
170 return BaseTy;
171}
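
// Illustrative example (not part of this file): operands that the helper
// above classifies as widened promoted integers.
static int addShorts(short a, short b) {
  // Each operand of '+' is implicitly promoted short -> int, so
  // getUnwidenedIntegerType() would return 'short' (16 bits, narrower than
  // the 32-bit 'int' operand expression); for operands that are already
  // 'int' it would return llvm::None.
  return a + b;
}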
172
173/// Check if \p E is a widened promoted integer.
174static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
175 return getUnwidenedIntegerType(Ctx, E).hasValue();
176}
177
178/// Check if we can skip the overflow check for \p Op.
179static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
180 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
181 "Expected a unary or binary operator");
182
183 // If the binop has constant inputs and we can prove there is no overflow,
184 // we can elide the overflow check.
185 if (!Op.mayHaveIntegerOverflow())
186 return true;
187
188 // If a unary op has a widened operand, the op cannot overflow.
189 if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
190 return !UO->canOverflow();
191
192 // We usually don't need overflow checks for binops with widened operands.
193 // Multiplication with promoted unsigned operands is a special case.
194 const auto *BO = cast<BinaryOperator>(Op.E);
195 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
196 if (!OptionalLHSTy)
197 return false;
198
199 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
200 if (!OptionalRHSTy)
201 return false;
202
203 QualType LHSTy = *OptionalLHSTy;
204 QualType RHSTy = *OptionalRHSTy;
205
206 // This is the simple case: binops without unsigned multiplication, and with
207 // widened operands. No overflow check is needed here.
208 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
209 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
210 return true;
211
212 // For unsigned multiplication the overflow check can be elided if either one
213 // of the unpromoted types is less than half the size of the promoted type.
214 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
215 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
216 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
217}
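
// A worked instance of the rule above (illustrative): for 'uint8_t * uint8_t'
// the operands are promoted to 32-bit 'int', so PromotedSize = 32 and
// 2 * 8 = 16 < 32; the product of two values below 2^8 stays below 2^16 and
// cannot overflow, so the check is elided. For 'uint16_t * uint16_t',
// 2 * 16 = 32 is not < 32, and e.g. 0xFFFF * 0xFFFF = 0xFFFE0001 does
// overflow a signed 32-bit 'int', so the check is kept.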
218
219class ScalarExprEmitter
220 : public StmtVisitor<ScalarExprEmitter, Value*> {
221 CodeGenFunction &CGF;
222 CGBuilderTy &Builder;
223 bool IgnoreResultAssign;
224 llvm::LLVMContext &VMContext;
225public:
226
227 ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
228 : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
229 VMContext(cgf.getLLVMContext()) {
230 }
231
232 //===--------------------------------------------------------------------===//
233 // Utilities
234 //===--------------------------------------------------------------------===//
235
236 bool TestAndClearIgnoreResultAssign() {
237 bool I = IgnoreResultAssign;
238 IgnoreResultAssign = false;
239 return I;
240 }
241
242 llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
243 LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
244 LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
245 return CGF.EmitCheckedLValue(E, TCK);
246 }
247
248 void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
249 const BinOpInfo &Info);
250
251 Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
252 return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
253 }
254
255 void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
256 const AlignValueAttr *AVAttr = nullptr;
257 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
258 const ValueDecl *VD = DRE->getDecl();
259
260 if (VD->getType()->isReferenceType()) {
261 if (const auto *TTy =
262 dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
263 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
264 } else {
265 // Assumptions for function parameters are emitted at the start of the
266 // function, so there is no need to repeat that here,
267 // unless the alignment-assumption sanitizer is enabled,
268 // then we prefer the assumption over alignment attribute
269 // on IR function param.
270 if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
271 return;
272
273 AVAttr = VD->getAttr<AlignValueAttr>();
274 }
275 }
276
277 if (!AVAttr)
278 if (const auto *TTy =
279 dyn_cast<TypedefType>(E->getType()))
280 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
281
282 if (!AVAttr)
283 return;
284
285 Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
286 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
287 CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
288 }
289
290 /// EmitLoadOfLValue - Given an expression with complex type that represents a
291 /// value l-value, this method emits the address of the l-value, then loads
292 /// and returns the result.
293 Value *EmitLoadOfLValue(const Expr *E) {
294 Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
295 E->getExprLoc());
296
297 EmitLValueAlignmentAssumption(E, V);
298 return V;
299 }
300
301 /// EmitConversionToBool - Convert the specified expression value to a
302 /// boolean (i1) truth value. This is equivalent to "Val != 0".
303 Value *EmitConversionToBool(Value *Src, QualType DstTy);
304
305 /// Emit a check that a conversion from a floating-point type does not
306 /// overflow.
307 void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
308 Value *Src, QualType SrcType, QualType DstType,
309 llvm::Type *DstTy, SourceLocation Loc);
310
311 /// Known implicit conversion check kinds.
312 /// Keep in sync with the enum of the same name in ubsan_handlers.h
313 enum ImplicitConversionCheckKind : unsigned char {
314 ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
315 ICCK_UnsignedIntegerTruncation = 1,
316 ICCK_SignedIntegerTruncation = 2,
317 ICCK_IntegerSignChange = 3,
318 ICCK_SignedIntegerTruncationOrSignChange = 4,
319 };
320
321 /// Emit a check that an [implicit] truncation of an integer does not
322 /// discard any bits. It is not UB, so we use the value after truncation.
323 void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
324 QualType DstType, SourceLocation Loc);
325
326 /// Emit a check that an [implicit] conversion of an integer does not change
327 /// the sign of the value. It is not UB, so we use the value after conversion.
328 /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
329 void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
330 QualType DstType, SourceLocation Loc);
331
332 /// Emit a conversion from the specified type to the specified destination
333 /// type, both of which are LLVM scalar types.
334 struct ScalarConversionOpts {
335 bool TreatBooleanAsSigned;
336 bool EmitImplicitIntegerTruncationChecks;
337 bool EmitImplicitIntegerSignChangeChecks;
338
339 ScalarConversionOpts()
340 : TreatBooleanAsSigned(false),
341 EmitImplicitIntegerTruncationChecks(false),
342 EmitImplicitIntegerSignChangeChecks(false) {}
343
344 ScalarConversionOpts(clang::SanitizerSet SanOpts)
345 : TreatBooleanAsSigned(false),
346 EmitImplicitIntegerTruncationChecks(
347 SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
348 EmitImplicitIntegerSignChangeChecks(
349 SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
350 };
351 Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
352 llvm::Type *SrcTy, llvm::Type *DstTy,
353 ScalarConversionOpts Opts);
354 Value *
355 EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
356 SourceLocation Loc,
357 ScalarConversionOpts Opts = ScalarConversionOpts());
358
359 /// Convert between either a fixed point and other fixed point or fixed point
360 /// and an integer.
361 Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
362 SourceLocation Loc);
363
364 /// Emit a conversion from the specified complex type to the specified
365 /// destination type, where the destination type is an LLVM scalar type.
366 Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
367 QualType SrcTy, QualType DstTy,
368 SourceLocation Loc);
369
370 /// EmitNullValue - Emit a value that corresponds to null for the given type.
371 Value *EmitNullValue(QualType Ty);
372
373 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
374 Value *EmitFloatToBoolConversion(Value *V) {
375 // Compare against 0.0 for fp scalars.
376 llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
377 return Builder.CreateFCmpUNE(V, Zero, "tobool");
378 }
379
380 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
381 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
382 Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
383
384 return Builder.CreateICmpNE(V, Zero, "tobool");
385 }
386
387 Value *EmitIntToBoolConversion(Value *V) {
388 // Because of the type rules of C, we often end up computing a
389 // logical value, then zero extending it to int, then wanting it
390 // as a logical value again. Optimize this common case.
391 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
392 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
393 Value *Result = ZI->getOperand(0);
394 // If there aren't any more uses, zap the instruction to save space.
395 // Note that there can be more uses, for example if this
396 // is the result of an assignment.
397 if (ZI->use_empty())
398 ZI->eraseFromParent();
399 return Result;
400 }
401 }
402
403 return Builder.CreateIsNotNull(V, "tobool");
404 }
405
406 //===--------------------------------------------------------------------===//
407 // Visitor Methods
408 //===--------------------------------------------------------------------===//
409
410 Value *Visit(Expr *E) {
411 ApplyDebugLocation DL(CGF, E);
412 return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
413 }
414
415 Value *VisitStmt(Stmt *S) {
416 S->dump(llvm::errs(), CGF.getContext());
417 llvm_unreachable("Stmt can't have complex result type!");
418 }
419 Value *VisitExpr(Expr *S);
420
421 Value *VisitConstantExpr(ConstantExpr *E) {
422 if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
423 if (E->isGLValue())
424 return CGF.Builder.CreateLoad(Address(
425 Result, CGF.getContext().getTypeAlignInChars(E->getType())));
426 return Result;
427 }
428 return Visit(E->getSubExpr());
429 }
430 Value *VisitParenExpr(ParenExpr *PE) {
431 return Visit(PE->getSubExpr());
432 }
433 Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
434 return Visit(E->getReplacement());
435 }
436 Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
437 return Visit(GE->getResultExpr());
438 }
439 Value *VisitCoawaitExpr(CoawaitExpr *S) {
440 return CGF.EmitCoawaitExpr(*S).getScalarVal();
441 }
442 Value *VisitCoyieldExpr(CoyieldExpr *S) {
443 return CGF.EmitCoyieldExpr(*S).getScalarVal();
444 }
445 Value *VisitUnaryCoawait(const UnaryOperator *E) {
446 return Visit(E->getSubExpr());
447 }
448
449 // Leaves.
450 Value *VisitIntegerLiteral(const IntegerLiteral *E) {
451 return Builder.getInt(E->getValue());
452 }
453 Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
454 return Builder.getInt(E->getValue());
455 }
456 Value *VisitFloatingLiteral(const FloatingLiteral *E) {
457 return llvm::ConstantFP::get(VMContext, E->getValue());
458 }
459 Value *VisitCharacterLiteral(const CharacterLiteral *E) {
460 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
461 }
462 Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
463 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
464 }
465 Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
466 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
467 }
468 Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
469 return EmitNullValue(E->getType());
470 }
471 Value *VisitGNUNullExpr(const GNUNullExpr *E) {
472 return EmitNullValue(E->getType());
473 }
474 Value *VisitOffsetOfExpr(OffsetOfExpr *E);
475 Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
476 Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
477 llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
478 return Builder.CreateBitCast(V, ConvertType(E->getType()));
479 }
480
481 Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
482 return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
483 }
484
485 Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
486 return CGF.EmitPseudoObjectRValue(E).getScalarVal();
487 }
488
489 Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
490
491 Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
492 if (E->isGLValue())
493 return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
494 E->getExprLoc());
495
496 // Otherwise, assume the mapping is the scalar directly.
497 return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
498 }
499
500 // l-values.
501 Value *VisitDeclRefExpr(DeclRefExpr *E) {
502 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
503 return CGF.emitScalarConstant(Constant, E);
504 return EmitLoadOfLValue(E);
505 }
506
507 Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
508 return CGF.EmitObjCSelectorExpr(E);
509 }
510 Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
511 return CGF.EmitObjCProtocolExpr(E);
512 }
513 Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
514 return EmitLoadOfLValue(E);
515 }
516 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
517 if (E->getMethodDecl() &&
518 E->getMethodDecl()->getReturnType()->isReferenceType())
519 return EmitLoadOfLValue(E);
520 return CGF.EmitObjCMessageExpr(E).getScalarVal();
521 }
522
523 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
524 LValue LV = CGF.EmitObjCIsaExpr(E);
525 Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
526 return V;
527 }
528
529 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
530 VersionTuple Version = E->getVersion();
531
532 // If we're checking for a platform older than our minimum deployment
533 // target, we can fold the check away.
534 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
535 return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
536
537 return CGF.EmitBuiltinAvailable(Version);
538 }
539
540 Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
541 Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
542 Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
543 Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
544 Value *VisitMemberExpr(MemberExpr *E);
545 Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
546 Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
547 // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
548 // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
549 // literals aren't l-values in C++. We do so simply because that's the
550 // cleanest way to handle compound literals in C++.
551 // See the discussion here: https://reviews.llvm.org/D64464
552 return EmitLoadOfLValue(E);
553 }
554
555 Value *VisitInitListExpr(InitListExpr *E);
556
557 Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
558 assert(CGF.getArrayInitIndex() &&
559 "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
560 return CGF.getArrayInitIndex();
561 }
562
563 Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
564 return EmitNullValue(E->getType());
565 }
566 Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
567 CGF.CGM.EmitExplicitCastExprType(E, &CGF);
568 return VisitCastExpr(E);
569 }
570 Value *VisitCastExpr(CastExpr *E);
571
572 Value *VisitCallExpr(const CallExpr *E) {
573 if (E->getCallReturnType(CGF.getContext())->isReferenceType())
574 return EmitLoadOfLValue(E);
575
576 Value *V = CGF.EmitCallExpr(E).getScalarVal();
577
578 EmitLValueAlignmentAssumption(E, V);
579 return V;
580 }
581
582 Value *VisitStmtExpr(const StmtExpr *E);
583
584 // Unary Operators.
585 Value *VisitUnaryPostDec(const UnaryOperator *E) {
586 LValue LV = EmitLValue(E->getSubExpr());
587 return EmitScalarPrePostIncDec(E, LV, false, false);
588 }
589 Value *VisitUnaryPostInc(const UnaryOperator *E) {
590 LValue LV = EmitLValue(E->getSubExpr());
591 return EmitScalarPrePostIncDec(E, LV, true, false);
592 }
593 Value *VisitUnaryPreDec(const UnaryOperator *E) {
594 LValue LV = EmitLValue(E->getSubExpr());
595 return EmitScalarPrePostIncDec(E, LV, false, true);
596 }
597 Value *VisitUnaryPreInc(const UnaryOperator *E) {
598 LValue LV = EmitLValue(E->getSubExpr());
599 return EmitScalarPrePostIncDec(E, LV, true, true);
600 }
601
602 llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
603 llvm::Value *InVal,
604 bool IsInc);
605
606 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
607 bool isInc, bool isPre);
608
609
610 Value *VisitUnaryAddrOf(const UnaryOperator *E) {
611 if (isa<MemberPointerType>(E->getType())) // never sugared
612 return CGF.CGM.getMemberPointerConstant(E);
613
614 return EmitLValue(E->getSubExpr()).getPointer(CGF);
615 }
616 Value *VisitUnaryDeref(const UnaryOperator *E) {
617 if (E->getType()->isVoidType())
618 return Visit(E->getSubExpr()); // the actual value should be unused
619 return EmitLoadOfLValue(E);
620 }
621 Value *VisitUnaryPlus(const UnaryOperator *E) {
622 // This differs from gcc, though, most likely due to a bug in gcc.
623 TestAndClearIgnoreResultAssign();
624 return Visit(E->getSubExpr());
625 }
626 Value *VisitUnaryMinus (const UnaryOperator *E);
627 Value *VisitUnaryNot (const UnaryOperator *E);
628 Value *VisitUnaryLNot (const UnaryOperator *E);
629 Value *VisitUnaryReal (const UnaryOperator *E);
630 Value *VisitUnaryImag (const UnaryOperator *E);
631 Value *VisitUnaryExtension(const UnaryOperator *E) {
632 return Visit(E->getSubExpr());
633 }
634
635 // C++
636 Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
637 return EmitLoadOfLValue(E);
638 }
639 Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
640 auto &Ctx = CGF.getContext();
641 APValue Evaluated =
642 SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
643 return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
644 SLE->getType());
645 }
646
647 Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
648 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
649 return Visit(DAE->getExpr());
650 }
651 Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
652 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
653 return Visit(DIE->getExpr());
654 }
655 Value *VisitCXXThisExpr(CXXThisExpr *TE) {
656 return CGF.LoadCXXThis();
657 }
658
659 Value *VisitExprWithCleanups(ExprWithCleanups *E);
660 Value *VisitCXXNewExpr(const CXXNewExpr *E) {
661 return CGF.EmitCXXNewExpr(E);
662 }
663 Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
664 CGF.EmitCXXDeleteExpr(E);
665 return nullptr;
666 }
667
668 Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
669 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
670 }
671
672 Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
673 return Builder.getInt1(E->isSatisfied());
674 }
675
676 Value *VisitRequiresExpr(const RequiresExpr *E) {
677 return Builder.getInt1(E->isSatisfied());
678 }
679
680 Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
681 return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
682 }
683
684 Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
685 return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
686 }
687
688 Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
689 // C++ [expr.pseudo]p1:
690 // The result shall only be used as the operand for the function call
691 // operator (), and the result of such a call has type void. The only
692 // effect is the evaluation of the postfix-expression before the dot or
693 // arrow.
694 CGF.EmitScalarExpr(E->getBase());
695 return nullptr;
696 }
697
698 Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
699 return EmitNullValue(E->getType());
700 }
701
702 Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
703 CGF.EmitCXXThrowExpr(E);
704 return nullptr;
705 }
706
707 Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
708 return Builder.getInt1(E->getValue());
709 }
710
711 // Binary Operators.
712 Value *EmitMul(const BinOpInfo &Ops) {
713 if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
714 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
715 case LangOptions::SOB_Defined:
716 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
717 case LangOptions::SOB_Undefined:
718 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
719 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
720 LLVM_FALLTHROUGH;
721 case LangOptions::SOB_Trapping:
722 if (CanElideOverflowCheck(CGF.getContext(), Ops))
723 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
724 return EmitOverflowCheckedBinOp(Ops);
725 }
726 }
727
728 if (Ops.Ty->isConstantMatrixType()) {
729 llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
730 // We need to check the types of the operands of the operator to get the
731 // correct matrix dimensions.
732 auto *BO = cast<BinaryOperator>(Ops.E);
733 auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
734 BO->getLHS()->getType().getCanonicalType());
735 auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
736 BO->getRHS()->getType().getCanonicalType());
737 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
738 if (LHSMatTy && RHSMatTy)
739 return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
740 LHSMatTy->getNumColumns(),
741 RHSMatTy->getNumColumns());
742 return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
743 }
744
745 if (Ops.Ty->isUnsignedIntegerType() &&
746 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
747 !CanElideOverflowCheck(CGF.getContext(), Ops))
748 return EmitOverflowCheckedBinOp(Ops);
749
750 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
751 // Preserve the old values
752 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
753 return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
754 }
755 if (Ops.isFixedPointOp())
756 return EmitFixedPointBinOp(Ops);
757 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
758 }
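
// How the signed-overflow modes above lower 'a * b' for 'int' operands
// (a sketch; the exact IR depends on flags and on whether the check can be
// elided):
//   SOB_Defined   (-fwrapv):            mul i32 %a, %b
//   SOB_Undefined (default, no ubsan):  mul nsw i32 %a, %b
//   SOB_Trapping / signed-overflow sanitizer, check not elidable:
//     call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %a, i32 %b)
//     followed by a branch on the i1 overflow bit to the failure handler.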
759 /// Create a binary op that checks for overflow.
760 /// Currently only supports +, - and *.
761 Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
762
763 // Check for undefined division and modulus behaviors.
764 void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
765 llvm::Value *Zero,bool isDiv);
766 // Common helper for getting how wide LHS of shift is.
767 static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS);
768
769 // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
770 // non powers of two.
771 Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);
772
773 Value *EmitDiv(const BinOpInfo &Ops);
774 Value *EmitRem(const BinOpInfo &Ops);
775 Value *EmitAdd(const BinOpInfo &Ops);
776 Value *EmitSub(const BinOpInfo &Ops);
777 Value *EmitShl(const BinOpInfo &Ops);
778 Value *EmitShr(const BinOpInfo &Ops);
779 Value *EmitAnd(const BinOpInfo &Ops) {
780 return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
781 }
782 Value *EmitXor(const BinOpInfo &Ops) {
783 return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
784 }
785 Value *EmitOr (const BinOpInfo &Ops) {
786 return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
787 }
788
789 // Helper functions for fixed point binary operations.
790 Value *EmitFixedPointBinOp(const BinOpInfo &Ops);
791
792 BinOpInfo EmitBinOps(const BinaryOperator *E);
793 LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
794 Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
795 Value *&Result);
796
797 Value *EmitCompoundAssign(const CompoundAssignOperator *E,
798 Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
799
800 // Binary operators and binary compound assignment operators.
801#define HANDLEBINOP(OP) \
802 Value *VisitBin ## OP(const BinaryOperator *E) { \
803 return Emit ## OP(EmitBinOps(E)); \
804 } \
805 Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
806 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
807 }
808 HANDLEBINOP(Mul)
809 HANDLEBINOP(Div)
810 HANDLEBINOP(Rem)
811 HANDLEBINOP(Add)
812 HANDLEBINOP(Sub)
813 HANDLEBINOP(Shl)
814 HANDLEBINOP(Shr)
815 HANDLEBINOP(And)
816 HANDLEBINOP(Xor)
817 HANDLEBINOP(Or)
818#undef HANDLEBINOP
819
820 // Comparisons.
821 Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
822 llvm::CmpInst::Predicate SICmpOpc,
823 llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
824#define VISITCOMP(CODE, UI, SI, FP, SIG) \
825 Value *VisitBin##CODE(const BinaryOperator *E) { \
826 return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
827 llvm::FCmpInst::FP, SIG); }
828 VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
829 VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
830 VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
831 VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
832 VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
833 VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
834#undef VISITCOMP
835
836 Value *VisitBinAssign (const BinaryOperator *E);
837
838 Value *VisitBinLAnd (const BinaryOperator *E);
839 Value *VisitBinLOr (const BinaryOperator *E);
840 Value *VisitBinComma (const BinaryOperator *E);
841
842 Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
843 Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
844
845 Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
846 return Visit(E->getSemanticForm());
847 }
848
849 // Other Operators.
850 Value *VisitBlockExpr(const BlockExpr *BE);
851 Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
852 Value *VisitChooseExpr(ChooseExpr *CE);
853 Value *VisitVAArgExpr(VAArgExpr *VE);
854 Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
855 return CGF.EmitObjCStringLiteral(E);
856 }
857 Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
858 return CGF.EmitObjCBoxedExpr(E);
859 }
860 Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
861 return CGF.EmitObjCArrayLiteral(E);
862 }
863 Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
864 return CGF.EmitObjCDictionaryLiteral(E);
865 }
866 Value *VisitAsTypeExpr(AsTypeExpr *CE);
867 Value *VisitAtomicExpr(AtomicExpr *AE);
868};
869} // end anonymous namespace.
870
871//===----------------------------------------------------------------------===//
872// Utilities
873//===----------------------------------------------------------------------===//
874
875/// EmitConversionToBool - Convert the specified expression value to a
876/// boolean (i1) truth value. This is equivalent to "Val != 0".
877Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
878 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
879
880 if (SrcType->isRealFloatingType())
881 return EmitFloatToBoolConversion(Src);
882
883 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
884 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
885
886 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
887 "Unknown scalar type to convert");
888
889 if (isa<llvm::IntegerType>(Src->getType()))
890 return EmitIntToBoolConversion(Src);
891
892 assert(isa<llvm::PointerType>(Src->getType()));
893 return EmitPointerToBoolConversion(Src, SrcType);
894}
895
896void ScalarExprEmitter::EmitFloatConversionCheck(
897 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
898 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
899 assert(SrcType->isFloatingType() && "not a conversion from floating point");
900 if (!isa<llvm::IntegerType>(DstTy))
901 return;
902
903 CodeGenFunction::SanitizerScope SanScope(&CGF);
904 using llvm::APFloat;
905 using llvm::APSInt;
906
907 llvm::Value *Check = nullptr;
908 const llvm::fltSemantics &SrcSema =
909 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
910
911 // Floating-point to integer. This has undefined behavior if the source is
912 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
913 // to an integer).
914 unsigned Width = CGF.getContext().getIntWidth(DstType);
915 bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
916
917 APSInt Min = APSInt::getMinValue(Width, Unsigned);
918 APFloat MinSrc(SrcSema, APFloat::uninitialized);
919 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
920 APFloat::opOverflow)
921 // Don't need an overflow check for lower bound. Just check for
922 // -Inf/NaN.
923 MinSrc = APFloat::getInf(SrcSema, true);
924 else
925 // Find the largest value which is too small to represent (before
926 // truncation toward zero).
927 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
928
929 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
930 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
931 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
932 APFloat::opOverflow)
933 // Don't need an overflow check for upper bound. Just check for
934 // +Inf/NaN.
935 MaxSrc = APFloat::getInf(SrcSema, false);
936 else
937 // Find the smallest value which is too large to represent (before
938 // truncation toward zero).
939 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
940
941 // If we're converting from __half, convert the range to float to match
942 // the type of src.
943 if (OrigSrcType->isHalfType()) {
944 const llvm::fltSemantics &Sema =
945 CGF.getContext().getFloatTypeSemantics(SrcType);
946 bool IsInexact;
947 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
948 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
949 }
950
951 llvm::Value *GE =
952 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
953 llvm::Value *LE =
954 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
955 Check = Builder.CreateAnd(GE, LE);
956
957 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
958 CGF.EmitCheckTypeDescriptor(OrigSrcType),
959 CGF.EmitCheckTypeDescriptor(DstType)};
960 CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
961 SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
962}
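
// Worked example of the bounds computed above (illustrative): for a
// 'float' -> 'signed char' conversion, Width = 8 gives Min = -128 and
// Max = 127, hence MinSrc = -129.0 and MaxSrc = 128.0, and the check is
//   %ge = fcmp ogt float %src, -1.290000e+02
//   %le = fcmp olt float %src, 1.280000e+02
//   %ok = and i1 %ge, %le
// Everything in the open interval (-129, 128) truncates into range; NaN
// fails both ordered compares and is diagnosed as well.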
963
964// Should be called within CodeGenFunction::SanitizerScope RAII scope.
965// Returns 'i1 false' when the truncation Src -> Dst was lossy.
966static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
967 std::pair<llvm::Value *, SanitizerMask>>
968EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
969 QualType DstType, CGBuilderTy &Builder) {
970 llvm::Type *SrcTy = Src->getType();
971 llvm::Type *DstTy = Dst->getType();
972 (void)DstTy; // Only used in assert()
973
974 // This should be truncation of integral types.
975 assert(Src != Dst);
976 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
977 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
978 "non-integer llvm type");
979
980 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
981 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
982
983 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
984 // Else, it is a signed truncation.
985 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
986 SanitizerMask Mask;
987 if (!SrcSigned && !DstSigned) {
988 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
989 Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
990 } else {
991 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
992 Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
993 }
994
995 llvm::Value *Check = nullptr;
996 // 1. Extend the truncated value back to the same width as the Src.
997 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
998 // 2. Equality-compare with the original source value
999 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1000 // If the comparison result is 'i1 false', then the truncation was lossy.
1001 return std::make_pair(Kind, std::make_pair(Check, Mask));
1002}
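
// The same round-trip trick in self-contained C++ (an illustration, not the
// IR this helper emits). Requires <cstdint>.
static bool truncationIsLossless(uint32_t Src) {
  uint8_t Dst = static_cast<uint8_t>(Src); // trunc i32 -> i8
  uint32_t AnyExt = Dst;                   // "anyext": extend back to i32
  return AnyExt == Src;                    // "truncheck": false when lossy
}
// truncationIsLossless(257) == false (257 truncates to 1), while
// truncationIsLossless(100) == true.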
1003
1004static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
1005 QualType SrcType, QualType DstType) {
1006 return SrcType->isIntegerType() && DstType->isIntegerType();
1007}
1008
1009void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1010 Value *Dst, QualType DstType,
1011 SourceLocation Loc) {
1012 if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
1013 return;
1014
1015 // We only care about int->int conversions here.
1016 // We ignore conversions to/from pointer and/or bool.
1017 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1018 DstType))
1019 return;
1020
1021 unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1022 unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1023 // This must be truncation. Else we do not care.
1024 if (SrcBits <= DstBits)
1025 return;
1026
1027 assert(!DstType->isBooleanType() && "we should not get here with booleans.");
1028
1029 // If the integer sign change sanitizer is enabled,
1030 // and we are truncating from larger unsigned type to smaller signed type,
1031 // let that next sanitizer deal with it.
1032 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1033 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1034 if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1035 (!SrcSigned && DstSigned))
1036 return;
1037
1038 CodeGenFunction::SanitizerScope SanScope(&CGF);
1039
1040 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1041 std::pair<llvm::Value *, SanitizerMask>>
1042 Check =
1043 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1044 // If the comparison result is 'i1 false', then the truncation was lossy.
1045
1046 // Do we care about this type of truncation?
1047 if (!CGF.SanOpts.has(Check.second.second))
1048 return;
1049
1050 llvm::Constant *StaticArgs[] = {
1051 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1052 CGF.EmitCheckTypeDescriptor(DstType),
1053 llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
1054 CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
1055 {Src, Dst});
1056}
1057
1058// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1059// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1060static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1061 std::pair<llvm::Value *, SanitizerMask>>
1062EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1063 QualType DstType, CGBuilderTy &Builder) {
1064 llvm::Type *SrcTy = Src->getType();
1065 llvm::Type *DstTy = Dst->getType();
1066
1067 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1068 "non-integer llvm type");
1069
1070 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1071 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1072 (void)SrcSigned; // Only used in assert()
1073 (void)DstSigned; // Only used in assert()
1074 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1075 unsigned DstBits = DstTy->getScalarSizeInBits();
1076 (void)SrcBits; // Only used in assert()
1077 (void)DstBits; // Only used in assert()
1078
1079 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1080 "either the widths should be different, or the signednesses.");
1081
1082 // NOTE: zero value is considered to be non-negative.
1083 auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
1084 const char *Name) -> Value * {
1085 // Is this value a signed type?
1086 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1087 llvm::Type *VTy = V->getType();
1088 if (!VSigned) {
1089 // If the value is unsigned, then it is never negative.
1090 // FIXME: can we encounter non-scalar VTy here?
1091 return llvm::ConstantInt::getFalse(VTy->getContext());
1092 }
1093 // Get the zero of the same type with which we will be comparing.
1094 llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1095 // %V.isnegative = icmp slt %V, 0
1096 // I.e is %V *strictly* less than zero, does it have negative value?
1097 return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1098 llvm::Twine(Name) + "." + V->getName() +
1099 ".negativitycheck");
1100 };
1101
1102 // 1. Was the old Value negative?
1103 llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
1104 // 2. Is the new Value negative?
1105 llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
1106 // 3. Now, was the 'negativity status' preserved during the conversion?
1107 // NOTE: conversion from negative to zero is considered to change the sign.
1108 // (We want to get 'false' when the conversion changed the sign)
1109 // So we should just equality-compare the negativity statuses.
1110 llvm::Value *Check = nullptr;
1111 Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1112 // If the comparison result is 'false', then the conversion changed the sign.
1113 return std::make_pair(
1114 ScalarExprEmitter::ICCK_IntegerSignChange,
1115 std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
1116}
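
// Worked example for the helper above (illustrative): converting 'int' -1
// to 'unsigned int'. The source-side test is
//   %src.negativitycheck = icmp slt i32 %v, 0   ; true for -1
// while the unsigned destination makes DstIsNegative the constant false, so
//   %signchangecheck = icmp eq i1 true, false   ; i1 false
// reports the sign change (-1 becomes 4294967295u). For a non-negative
// source such as 42, both negativity tests are false and the check passes.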
1117
1118void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1119 Value *Dst, QualType DstType,
1120 SourceLocation Loc) {
1121 if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
1122 return;
1123
1124 llvm::Type *SrcTy = Src->getType();
1125 llvm::Type *DstTy = Dst->getType();
1126
1127 // We only care about int->int conversions here.
1128 // We ignore conversions to/from pointer and/or bool.
1129 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1130 DstType))
1131 return;
1132
1133 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1134 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1135 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1136 unsigned DstBits = DstTy->getScalarSizeInBits();
1137
1138 // Now, we do not need to emit the check in *all* of the cases.
1139 // We can avoid emitting it in some obvious cases where it would have been
1140 // dropped by the opt passes (instcombine) always anyways.
1141 // If it's a cast between effectively the same type, no check.
1142 // NOTE: this is *not* equivalent to checking the canonical types.
1143 if (SrcSigned == DstSigned && SrcBits == DstBits)
1144 return;
1145 // At least one of the values needs to have signed type.
1146 // If both are unsigned, then obviously, neither of them can be negative.
1147 if (!SrcSigned && !DstSigned)
1148 return;
1149 // If the conversion is to *larger* *signed* type, then no check is needed.
1150 // Because either sign-extension happens (so the sign will remain),
1151 // or zero-extension will happen (the sign bit will be zero.)
1152 if ((DstBits > SrcBits) && DstSigned)
1153 return;
1154 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1155 (SrcBits > DstBits) && SrcSigned) {
1156 // If the signed integer truncation sanitizer is enabled,
1157 // and this is a truncation from signed type, then no check is needed.
1158 // Because here sign change check is interchangeable with truncation check.
1159 return;
1160 }
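
// Concrete instances of the early exits above (illustrative):
//   'short' -> 'int'              : widening to a signed type, no check;
//   'unsigned' -> 'unsigned long' : neither side is signed, no check;
//   'int' -> 'unsigned int'       : same width, signedness flips, so the
//                                   sign-change check is emitted.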
1161 // That's it. We can't rule out any more cases with the data we have.
1162
1163 CodeGenFunction::SanitizerScope SanScope(&CGF);
1164
1165 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1166 std::pair<llvm::Value *, SanitizerMask>>
1167 Check;
1168
1169 // Each of these checks needs to return 'false' when an issue was detected.
1170 ImplicitConversionCheckKind CheckKind;
1171 llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
1172 // So we can 'and' all the checks together, and still get 'false',
1173 // if at least one of the checks detected an issue.
1174
1175 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1176 CheckKind = Check.first;
1177 Checks.emplace_back(Check.second);
1178
1179 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1180 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1181 // If the signed integer truncation sanitizer was enabled,
1182 // and we are truncating from larger unsigned type to smaller signed type,
1183 // let's handle the case we skipped in that check.
1184 Check =
1185 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1186 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1187 Checks.emplace_back(Check.second);
1188 // If the comparison result is 'i1 false', then the truncation was lossy.
1189 }
1190
1191 llvm::Constant *StaticArgs[] = {
1192 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1193 CGF.EmitCheckTypeDescriptor(DstType),
1194 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
1195 // EmitCheck() will 'and' all the checks together.
1196 CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
1197 {Src, Dst});
1198}
1199
1200Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
1201 QualType DstType, llvm::Type *SrcTy,
1202 llvm::Type *DstTy,
1203 ScalarConversionOpts Opts) {
1204 // The Element types determine the type of cast to perform.
1205 llvm::Type *SrcElementTy;
1206 llvm::Type *DstElementTy;
1207 QualType SrcElementType;
1208 QualType DstElementType;
1209 if (SrcType->isMatrixType() && DstType->isMatrixType()) {
1210 SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1211 DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1212 SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
1213 DstElementType = DstType->castAs<MatrixType>()->getElementType();
1214 } else {
1215 assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
1216 "cannot cast between matrix and non-matrix types");
1217 SrcElementTy = SrcTy;
1218 DstElementTy = DstTy;
1219 SrcElementType = SrcType;
1220 DstElementType = DstType;
1221 }
1222
1223 if (isa<llvm::IntegerType>(SrcElementTy)) {
1224 bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
1225 if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1226 InputSigned = true;
1227 }
1228
1229 if (isa<llvm::IntegerType>(DstElementTy))
1230 return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1231 if (InputSigned)
1232 return Builder.CreateSIToFP(Src, DstTy, "conv");
1233 return Builder.CreateUIToFP(Src, DstTy, "conv");
1234 }
1235
1236 if (isa<llvm::IntegerType>(DstElementTy)) {
1237 assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
1238 if (DstElementType->isSignedIntegerOrEnumerationType())
1239 return Builder.CreateFPToSI(Src, DstTy, "conv");
1240 return Builder.CreateFPToUI(Src, DstTy, "conv");
1241 }
1242
1243 if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1244 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1245 return Builder.CreateFPExt(Src, DstTy, "conv");
1246}
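
// The dispatch above, summarized with illustrative C conversions and the IR
// instruction each branch selects (a sketch):
//   int      -> float    : sitofp  (integer source, signed input)
//   unsigned -> float    : uitofp  (integer source, unsigned input)
//   float    -> int      : fptosi  (integer destination, signed)
//   float    -> unsigned : fptoui  (integer destination, unsigned)
//   double   -> float    : fptrunc (destination FP type id is smaller)
//   float    -> double   : fpext
//   short    -> int      : CreateIntCast, sext/zext by source signedness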
1247
1248/// Emit a conversion from the specified type to the specified destination type,
1249/// both of which are LLVM scalar types.
1250Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1251 QualType DstType,
1252 SourceLocation Loc,
1253 ScalarConversionOpts Opts) {
1254 // All conversions involving fixed point types should be handled by the
1255 // EmitFixedPoint family functions. This is done to prevent bloating up this
1256 // function more, and although fixed point numbers are represented by
1257 // integers, we do not want to follow any logic that assumes they should be
1258 // treated as integers.
1259 // TODO(leonardchan): When necessary, add another if statement checking for
1260 // conversions to fixed point types from other types.
1261 if (SrcType->isFixedPointType()) {
1262 if (DstType->isBooleanType())
1263 // It is important that we check this before checking if the dest type is
1264 // an integer because booleans are technically integer types.
1265 // We do not need to check the padding bit on unsigned types if unsigned
1266 // padding is enabled because overflow into this bit is undefined
1267 // behavior.
1268 return Builder.CreateIsNotNull(Src, "tobool");
1269 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1270 DstType->isRealFloatingType())
1271 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1272
1273 llvm_unreachable(
1274 "Unhandled scalar conversion from a fixed point type to another type.");
1275 } else if (DstType->isFixedPointType()) {
1276 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1277 // This also includes converting booleans and enums to fixed point types.
1278 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1279
1280 llvm_unreachable(
1281 "Unhandled scalar conversion to a fixed point type from another type.");
1282 }
1283
1284 QualType NoncanonicalSrcType = SrcType;
1285 QualType NoncanonicalDstType = DstType;
1286
1287 SrcType = CGF.getContext().getCanonicalType(SrcType);
1288 DstType = CGF.getContext().getCanonicalType(DstType);
1289 if (SrcType == DstType) return Src;
1290
1291 if (DstType->isVoidType()) return nullptr;
1292
1293 llvm::Value *OrigSrc = Src;
1294 QualType OrigSrcType = SrcType;
1295 llvm::Type *SrcTy = Src->getType();
1296
1297 // Handle conversions to bool first, they are special: comparisons against 0.
1298 if (DstType->isBooleanType())
1299 return EmitConversionToBool(Src, SrcType);
1300
1301 llvm::Type *DstTy = ConvertType(DstType);
1302
1303 // Cast from half through float if half isn't a native type.
1304 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1305 // Cast to FP using the intrinsic if the half type itself isn't supported.
1306 if (DstTy->isFloatingPointTy()) {
1307 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1308 return Builder.CreateCall(
1309 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1310 Src);
1311 } else {
1312 // Cast to other types through float, using either the intrinsic or FPExt,
1313 // depending on whether the half type itself is supported
1314 // (as opposed to operations on half, available with NativeHalfType).
1315 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1316 Src = Builder.CreateCall(
1317 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1318 CGF.CGM.FloatTy),
1319 Src);
1320 } else {
1321 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1322 }
1323 SrcType = CGF.getContext().FloatTy;
1324 SrcTy = CGF.FloatTy;
1325 }
1326 }
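  // Illustrative sketch (assumed lowering): converting half -> double with
  // useFP16ConversionIntrinsics() emits roughly
  //   %conv = call double @llvm.convert.from.fp16.f64(i16 %h)
  // whereas a target with a usable half type simply emits
  //   %conv = fpext half %h to double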
1327
1328 // Ignore conversions like int -> uint.
1329 if (SrcTy == DstTy) {
1330 if (Opts.EmitImplicitIntegerSignChangeChecks)
1331 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1332 NoncanonicalDstType, Loc);
1333
1334 return Src;
1335 }
1336
1337 // Handle pointer conversions next: pointers can only be converted to/from
1338 // other pointers and integers. Check for pointer types in terms of LLVM, as
1339 // some native types (like Obj-C id) may map to a pointer type.
1340 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1341 // The source value may be an integer, or a pointer.
1342 if (isa<llvm::PointerType>(SrcTy))
1343 return Builder.CreateBitCast(Src, DstTy, "conv");
1344
1345    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1346 // First, convert to the correct width so that we control the kind of
1347 // extension.
1348 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1349 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1350 llvm::Value* IntResult =
1351 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1352 // Then, cast to pointer.
1353 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1354 }
1355
1356 if (isa<llvm::PointerType>(SrcTy)) {
1357    // Must be a ptr-to-int cast.
1358    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1359 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1360 }
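  // Illustrative sketch: on a 64-bit target, int -> char* becomes a width
  // adjustment followed by an inttoptr, and ptr -> int a single ptrtoint,
  // roughly:
  //   %conv  = sext i32 %x to i64
  //   %conv1 = inttoptr i64 %conv to i8*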
1361
1362 // A scalar can be splatted to an extended vector of the same element type
1363 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1364 // Sema should add casts to make sure that the source expression's type is
1365 // the same as the vector's element type (sans qualifiers)
1366    assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1367               SrcType.getTypePtr() &&
1368           "Splatted expr doesn't match with vector element type?");
1369
1370 // Splat the element across to all elements
1371 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1372 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1373 }
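  // Illustrative sketch: CreateVectorSplat above expands to an insertelement
  // plus a zero-mask shufflevector, roughly:
  //   %ins   = insertelement <4 x float> poison, float %x, i64 0
  //   %splat = shufflevector <4 x float> %ins, <4 x float> poison,
  //                          <4 x i32> zeroinitializer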
1374
1375 if (SrcType->isMatrixType() && DstType->isMatrixType())
1376 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1377
1378 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1379 // Allow bitcast from vector to integer/fp of the same size.
1380 unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
1381 unsigned DstSize = DstTy->getPrimitiveSizeInBits();
1382 if (SrcSize == DstSize)
1383 return Builder.CreateBitCast(Src, DstTy, "conv");
1384
1385 // Conversions between vectors of different sizes are not allowed except
1386 // when vectors of half are involved. Operations on storage-only half
1387 // vectors require promoting half vector operands to float vectors and
1388 // truncating the result, which is either an int or float vector, to a
1389 // short or half vector.
1390
1391 // Source and destination are both expected to be vectors.
1392 llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1393 llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1394 (void)DstElementTy;
1395
1396    assert(((SrcElementTy->isIntegerTy() &&
1397             DstElementTy->isIntegerTy()) ||
1398            (SrcElementTy->isFloatingPointTy() &&
1399             DstElementTy->isFloatingPointTy())) &&
1400           "unexpected conversion between a floating-point vector and an "
1401           "integer vector");
1402
1403 // Truncate an i32 vector to an i16 vector.
1404 if (SrcElementTy->isIntegerTy())
1405 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1406
1407 // Truncate a float vector to a half vector.
1408 if (SrcSize > DstSize)
1409 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1410
1411 // Promote a half vector to a float vector.
1412 return Builder.CreateFPExt(Src, DstTy, "conv");
1413 }
1414
1415 // Finally, we have the arithmetic types: real int/float.
1416 Value *Res = nullptr;
1417 llvm::Type *ResTy = DstTy;
1418
1419 // An overflowing conversion has undefined behavior if either the source type
1420 // or the destination type is a floating-point type. However, we consider the
1421 // range of representable values for all floating-point types to be
1422 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1423 // floating-point type.
1424 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1425 OrigSrcType->isFloatingType())
1426 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1427 Loc);
1428
1429 // Cast to half through float if half isn't a native type.
1430 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1431 // Make sure we cast in a single step if from another FP type.
1432 if (SrcTy->isFloatingPointTy()) {
1433 // Use the intrinsic if the half type itself isn't supported
1434 // (as opposed to operations on half, available with NativeHalfType).
1435 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1436 return Builder.CreateCall(
1437 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1438 // If the half type is supported, just use an fptrunc.
1439 return Builder.CreateFPTrunc(Src, DstTy);
1440 }
1441 DstTy = CGF.FloatTy;
1442 }
1443
1444 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1445
1446 if (DstTy != ResTy) {
1447 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1448      assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1449 Res = Builder.CreateCall(
1450 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1451 Res);
1452 } else {
1453 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1454 }
1455 }
1456
1457 if (Opts.EmitImplicitIntegerTruncationChecks)
1458 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1459 NoncanonicalDstType, Loc);
1460
1461 if (Opts.EmitImplicitIntegerSignChangeChecks)
1462 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1463 NoncanonicalDstType, Loc);
1464
1465 return Res;
1466}
1467
1468Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1469 QualType DstTy,
1470 SourceLocation Loc) {
1471 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1472 llvm::Value *Result;
1473 if (SrcTy->isRealFloatingType())
1474 Result = FPBuilder.CreateFloatingToFixed(Src,
1475 CGF.getContext().getFixedPointSemantics(DstTy));
1476 else if (DstTy->isRealFloatingType())
1477 Result = FPBuilder.CreateFixedToFloating(Src,
1478 CGF.getContext().getFixedPointSemantics(SrcTy),
1479 ConvertType(DstTy));
1480 else {
1481 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1482 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1483
1484 if (DstTy->isIntegerType())
1485 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1486 DstFPSema.getWidth(),
1487 DstFPSema.isSigned());
1488 else if (SrcTy->isIntegerType())
1489 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1490 DstFPSema);
1491 else
1492 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1493 }
1494 return Result;
1495}
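// Illustrative sketch (assuming a 16-bit _Fract with 15 fractional bits and
// an 8-bit short _Fract with 7 fractional bits): a fixed-to-fixed widening
// from short _Fract to _Fract is roughly a sign extension plus a left shift
// by the difference in scale:
//   %1 = sext i8 %x to i16
//   %2 = shl i16 %1, 8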
1496
1497/// Emit a conversion from the specified complex type to the specified
1498/// destination type, where the destination type is an LLVM scalar type.
1499Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1500 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1501 SourceLocation Loc) {
1502 // Get the source element type.
1503 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1504
1505 // Handle conversions to bool first, they are special: comparisons against 0.
1506 if (DstTy->isBooleanType()) {
1507 // Complex != 0 -> (Real != 0) | (Imag != 0)
1508 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1509 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1510 return Builder.CreateOr(Src.first, Src.second, "tobool");
1511 }
1512
1513 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1514 // the imaginary part of the complex value is discarded and the value of the
1515 // real part is converted according to the conversion rules for the
1516  // corresponding real type."
1517 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1518}
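// Illustrative sketch: for `bool b = c;` with `_Complex double c`, the
// routine above emits roughly
//   %cmp.r  = fcmp une double %c.real, 0.000000e+00
//   %cmp.i  = fcmp une double %c.imag, 0.000000e+00
//   %tobool = or i1 %cmp.r, %cmp.i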
1519
1520Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1521 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1522}
1523
1524/// Emit a sanitization check for the given "binary" operation (which
1525/// might actually be a unary increment which has been lowered to a binary
1526/// operation). The check passes if all values in \p Checks (which are \c i1),
1527/// are \c true.
1528void ScalarExprEmitter::EmitBinOpCheck(
1529 ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
1530  assert(CGF.IsSanitizerScope);
1531 SanitizerHandler Check;
1532 SmallVector<llvm::Constant *, 4> StaticData;
1533 SmallVector<llvm::Value *, 2> DynamicData;
1534
1535 BinaryOperatorKind Opcode = Info.Opcode;
1536 if (BinaryOperator::isCompoundAssignmentOp(Opcode))
1537 Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
1538
1539 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1540 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1541 if (UO && UO->getOpcode() == UO_Minus) {
1542 Check = SanitizerHandler::NegateOverflow;
1543 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1544 DynamicData.push_back(Info.RHS);
1545 } else {
1546 if (BinaryOperator::isShiftOp(Opcode)) {
1547 // Shift LHS negative or too large, or RHS out of bounds.
1548 Check = SanitizerHandler::ShiftOutOfBounds;
1549 const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1550 StaticData.push_back(
1551 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1552 StaticData.push_back(
1553 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1554 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1555 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1556 Check = SanitizerHandler::DivremOverflow;
1557 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1558 } else {
1559 // Arithmetic overflow (+, -, *).
1560 switch (Opcode) {
1561 case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1562 case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1563 case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1564      default: llvm_unreachable("unexpected opcode for bin op check");
1565 }
1566 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1567 }
1568 DynamicData.push_back(Info.LHS);
1569 DynamicData.push_back(Info.RHS);
1570 }
1571
1572 CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
1573}
1574
1575//===----------------------------------------------------------------------===//
1576// Visitor Methods
1577//===----------------------------------------------------------------------===//
1578
1579Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1580 CGF.ErrorUnsupported(E, "scalar expression");
1581 if (E->getType()->isVoidType())
1582 return nullptr;
1583 return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
1584}
1585
1586Value *
1587ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
1588 ASTContext &Context = CGF.getContext();
1589 llvm::Optional<LangAS> GlobalAS =
1590 Context.getTargetInfo().getConstantAddressSpace();
1591 llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(
1592 E->ComputeName(Context), "__usn_str",
1593 static_cast<unsigned>(GlobalAS.getValueOr(LangAS::Default)));
1594
1595 unsigned ExprAS = Context.getTargetAddressSpace(E->getType());
1596
1597 if (GlobalConstStr->getType()->getPointerAddressSpace() == ExprAS)
1598 return GlobalConstStr;
1599
1600 llvm::Type *EltTy = GlobalConstStr->getType()->getPointerElementType();
1601 llvm::PointerType *NewPtrTy = llvm::PointerType::get(EltTy, ExprAS);
1602 return Builder.CreateAddrSpaceCast(GlobalConstStr, NewPtrTy, "usn_addr_cast");
1603}
1604
1605Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1606 // Vector Mask Case
1607 if (E->getNumSubExprs() == 2) {
1608 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1609 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1610 Value *Mask;
1611
1612 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
1613 unsigned LHSElts = LTy->getNumElements();
1614
1615 Mask = RHS;
1616
1617 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
1618
1619 // Mask off the high bits of each shuffle index.
1620 Value *MaskBits =
1621 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1622 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1623
1624 // newv = undef
1625 // mask = mask & maskbits
1626 // for each elt
1627 // n = extract mask i
1628 // x = extract val n
1629 // newv = insert newv, x, i
1630 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
1631 MTy->getNumElements());
1632 Value* NewV = llvm::UndefValue::get(RTy);
1633 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1634 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1635 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1636
1637 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1638 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1639 }
1640 return NewV;
1641 }
1642
1643 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1644 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1645
1646 SmallVector<int, 32> Indices;
1647 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1648 llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
1649 // Check for -1 and output it as undef in the IR.
1650 if (Idx.isSigned() && Idx.isAllOnesValue())
1651 Indices.push_back(-1);
1652 else
1653 Indices.push_back(Idx.getZExtValue());
1654 }
1655
1656 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
1657}
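// Illustrative sketch: with a constant mask, the builtin maps onto a single
// IR shufflevector, e.g.
//   __builtin_shufflevector(a, b, 0, 4, 1, 5)
// becomes roughly
//   %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b,
//                            <4 x i32> <i32 0, i32 4, i32 1, i32 5>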
1658
1659Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1660 QualType SrcType = E->getSrcExpr()->getType(),
1661 DstType = E->getType();
1662
1663 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1664
1665 SrcType = CGF.getContext().getCanonicalType(SrcType);
1666 DstType = CGF.getContext().getCanonicalType(DstType);
1667 if (SrcType == DstType) return Src;
1668
1669  assert(SrcType->isVectorType() &&
1670         "ConvertVector source type must be a vector");
1671  assert(DstType->isVectorType() &&
1672         "ConvertVector destination type must be a vector");
1673
1674 llvm::Type *SrcTy = Src->getType();
1675 llvm::Type *DstTy = ConvertType(DstType);
1676
1677 // Ignore conversions like int -> uint.
1678 if (SrcTy == DstTy)
1679 return Src;
1680
1681 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1682 DstEltType = DstType->castAs<VectorType>()->getElementType();
1683
1684  assert(SrcTy->isVectorTy() &&
1685         "ConvertVector source IR type must be a vector");
1686  assert(DstTy->isVectorTy() &&
1687         "ConvertVector destination IR type must be a vector");
1688
1689 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
1690 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
1691
1692 if (DstEltType->isBooleanType()) {
1693    assert((SrcEltTy->isFloatingPointTy() ||
1694            isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1695
1696 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
1697 if (SrcEltTy->isFloatingPointTy()) {
1698 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
1699 } else {
1700 return Builder.CreateICmpNE(Src, Zero, "tobool");
1701 }
1702 }
1703
1704 // We have the arithmetic types: real int/float.
1705 Value *Res = nullptr;
1706
1707 if (isa<llvm::IntegerType>(SrcEltTy)) {
1708 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1709 if (isa<llvm::IntegerType>(DstEltTy))
1710 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1711 else if (InputSigned)
1712 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1713 else
1714 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1715 } else if (isa<llvm::IntegerType>(DstEltTy)) {
1716    assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
1717 if (DstEltType->isSignedIntegerOrEnumerationType())
1718 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1719 else
1720 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1721 } else {
1722    assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
1723           "Unknown real conversion");
1724 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1725 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1726 else
1727 Res = Builder.CreateFPExt(Src, DstTy, "conv");
1728 }
1729
1730 return Res;
1731}
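// Illustrative sketch: __builtin_convertvector picks the element-wise cast
// from the element types, e.g. a signed int vector to a float vector becomes
// roughly
//   %conv = sitofp <4 x i32> %v to <4 x float>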
1732
1733Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1734 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
1735 CGF.EmitIgnoredExpr(E->getBase());
1736 return CGF.emitScalarConstant(Constant, E);
1737 } else {
1738 Expr::EvalResult Result;
1739 if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1740 llvm::APSInt Value = Result.Val.getInt();
1741 CGF.EmitIgnoredExpr(E->getBase());
1742 return Builder.getInt(Value);
1743 }
1744 }
1745
1746 return EmitLoadOfLValue(E);
1747}
1748
1749Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1750 TestAndClearIgnoreResultAssign();
1751
1752  // Emit subscript expressions in rvalue contexts. For most cases, this just
1753 // loads the lvalue formed by the subscript expr. However, we have to be
1754 // careful, because the base of a vector subscript is occasionally an rvalue,
1755 // so we can't get it as an lvalue.
1756 if (!E->getBase()->getType()->isVectorType())
1757 return EmitLoadOfLValue(E);
1758
1759 // Handle the vector case. The base must be a vector, the index must be an
1760 // integer value.
1761 Value *Base = Visit(E->getBase());
1762 Value *Idx = Visit(E->getIdx());
1763 QualType IdxTy = E->getIdx()->getType();
1764
1765 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
1766 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1767
1768 return Builder.CreateExtractElement(Base, Idx, "vecext");
1769}
1770
1771Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
1772 TestAndClearIgnoreResultAssign();
1773
1774 // Handle the vector case. The base must be a vector, the index must be an
1775 // integer value.
1776 Value *RowIdx = Visit(E->getRowIdx());
1777 Value *ColumnIdx = Visit(E->getColumnIdx());
1778 Value *Matrix = Visit(E->getBase());
1779
1780 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
1781 llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
1782 return MB.CreateExtractElement(
1783 Matrix, RowIdx, ColumnIdx,
1784 E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
1785}
1786
1787static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1788 unsigned Off) {
1789 int MV = SVI->getMaskValue(Idx);
1790 if (MV == -1)
1791 return -1;
1792 return Off + MV;
1793}
1794
1795static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1796  assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
1797         "Index operand too large for shufflevector mask!");
1798 return C->getZExtValue();
1799}
1800
1801Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
1802 bool Ignore = TestAndClearIgnoreResultAssign();
1803 (void)Ignore;
1804  assert(Ignore == false && "init list ignored");
1805 unsigned NumInitElements = E->getNumInits();
1806
1807 if (E->hadArrayRangeDesignator())
1808 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1809
1810 llvm::VectorType *VType =
1811 dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
1812
1813 if (!VType) {
1814 if (NumInitElements == 0) {
1815 // C++11 value-initialization for the scalar.
1816 return EmitNullValue(E->getType());
1817 }
1818 // We have a scalar in braces. Just use the first element.
1819 return Visit(E->getInit(0));
1820 }
1821
1822 unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
1823
1824 // Loop over initializers collecting the Value for each, and remembering
1825  // whether the source was a swizzle (ExtVectorElementExpr). This will allow
1826 // us to fold the shuffle for the swizzle into the shuffle for the vector
1827 // initializer, since LLVM optimizers generally do not want to touch
1828 // shuffles.
1829 unsigned CurIdx = 0;
1830 bool VIsUndefShuffle = false;
1831 llvm::Value *V = llvm::UndefValue::get(VType);
1832 for (unsigned i = 0; i != NumInitElements; ++i) {
1833 Expr *IE = E->getInit(i);
1834 Value *Init = Visit(IE);
1835 SmallVector<int, 16> Args;
1836
1837 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
1838
1839 // Handle scalar elements. If the scalar initializer is actually one
1840 // element of a different vector of the same width, use shuffle instead of
1841 // extract+insert.
1842 if (!VVT) {
1843 if (isa<ExtVectorElementExpr>(IE)) {
1844 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
1845
1846 if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
1847 ->getNumElements() == ResElts) {
1848 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
1849 Value *LHS = nullptr, *RHS = nullptr;
1850 if (CurIdx == 0) {
1851 // insert into undef -> shuffle (src, undef)
1852 // shufflemask must use an i32
1853 Args.push_back(getAsInt32(C, CGF.Int32Ty));
1854 Args.resize(ResElts, -1);
1855
1856 LHS = EI->getVectorOperand();
1857 RHS = V;
1858 VIsUndefShuffle = true;
1859 } else if (VIsUndefShuffle) {
1860 // insert into undefshuffle && size match -> shuffle (v, src)
1861 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
1862 for (unsigned j = 0; j != CurIdx; ++j)
1863 Args.push_back(getMaskElt(SVV, j, 0));
1864 Args.push_back(ResElts + C->getZExtValue());
1865 Args.resize(ResElts, -1);
1866
1867 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1868 RHS = EI->getVectorOperand();
1869 VIsUndefShuffle = false;
1870 }
1871 if (!Args.empty()) {
1872 V = Builder.CreateShuffleVector(LHS, RHS, Args);
1873 ++CurIdx;
1874 continue;
1875 }
1876 }
1877 }
1878 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
1879 "vecinit");
1880 VIsUndefShuffle = false;
1881 ++CurIdx;
1882 continue;
1883 }
1884
1885 unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
1886
1887 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
1888 // input is the same width as the vector being constructed, generate an
1889 // optimized shuffle of the swizzle input into the result.
1890 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
1891 if (isa<ExtVectorElementExpr>(IE)) {
1892 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
1893 Value *SVOp = SVI->getOperand(0);
1894 auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
1895
1896 if (OpTy->getNumElements() == ResElts) {
1897 for (unsigned j = 0; j != CurIdx; ++j) {
1898 // If the current vector initializer is a shuffle with undef, merge
1899 // this shuffle directly into it.
1900 if (VIsUndefShuffle) {
1901 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
1902 } else {
1903 Args.push_back(j);
1904 }
1905 }
1906 for (unsigned j = 0, je = InitElts; j != je; ++j)
1907 Args.push_back(getMaskElt(SVI, j, Offset));
1908 Args.resize(ResElts, -1);
1909
1910 if (VIsUndefShuffle)
1911 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1912
1913 Init = SVOp;
1914 }
1915 }
1916
1917 // Extend init to result vector length, and then shuffle its contribution
1918 // to the vector initializer into V.
1919 if (Args.empty()) {
1920 for (unsigned j = 0; j != InitElts; ++j)
1921 Args.push_back(j);
1922 Args.resize(ResElts, -1);
1923 Init = Builder.CreateShuffleVector(Init, Args, "vext");
1924
1925 Args.clear();
1926 for (unsigned j = 0; j != CurIdx; ++j)
1927 Args.push_back(j);
1928 for (unsigned j = 0; j != InitElts; ++j)
1929 Args.push_back(j + Offset);
1930 Args.resize(ResElts, -1);
1931 }
1932
1933 // If V is undef, make sure it ends up on the RHS of the shuffle to aid
1934 // merging subsequent shuffles into this one.
1935 if (CurIdx == 0)
1936 std::swap(V, Init);
1937 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
1938 VIsUndefShuffle = isa<llvm::UndefValue>(Init);
1939 CurIdx += InitElts;
1940 }
1941
1942 // FIXME: evaluate codegen vs. shuffling against constant null vector.
1943 // Emit remaining default initializers.
1944 llvm::Type *EltTy = VType->getElementType();
1945
1946
1947 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
1948 Value *Idx = Builder.getInt32(CurIdx);
1949 llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
1950 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
1951 }
1952 return V;
1953}
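// Illustrative sketch: a vector initializer such as (float4){x, y} with two
// trailing default elements is built by chaining insertelement instructions
// (with same-width swizzle inputs folded into shufflevectors), roughly:
//   %v0 = insertelement <4 x float> undef, float %x, i32 0
//   %v1 = insertelement <4 x float> %v0, float %y, i32 1
//   %v2 = insertelement <4 x float> %v1, float 0.000000e+00, i32 2
//   %v3 = insertelement <4 x float> %v2, float 0.000000e+00, i32 3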
1954
1955bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
1956 const Expr *E = CE->getSubExpr();
1957
1958 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
1959 return false;
1960
1961 if (isa<CXXThisExpr>(E->IgnoreParens())) {
1962 // We always assume that 'this' is never null.
1963 return false;
1964 }
1965
1966 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
1967 // And that glvalue casts are never null.
1968 if (ICE->isGLValue())
1969 return false;
1970 }
1971
1972 return true;
1973}
1974
1975// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
1976// have to handle a broader range of conversions than explicit casts, as they
1977// handle things like function to pointer-to-function decay, etc.
1978Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
1979 Expr *E = CE->getSubExpr();
1980 QualType DestTy = CE->getType();
1981 CastKind Kind = CE->getCastKind();
1982
1983 // These cases are generally not written to ignore the result of
1984 // evaluating their sub-expressions, so we clear this now.
1985 bool Ignored = TestAndClearIgnoreResultAssign();
1986
1987 // Since almost all cast kinds apply to scalars, this switch doesn't have
1988 // a default case, so the compiler will warn on a missing case. The cases
1989 // are in the same order as in the CastKind enum.
1990 switch (Kind) {
1991  case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
1992 case CK_BuiltinFnToFnPtr:
1993    llvm_unreachable("builtin functions are handled elsewhere");
1994
1995 case CK_LValueBitCast:
1996 case CK_ObjCObjectLValueCast: {
1997 Address Addr = EmitLValue(E).getAddress(CGF);
1998 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
1999 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
2000 return EmitLoadOfLValue(LV, CE->getExprLoc());
2001 }
2002
2003 case CK_LValueToRValueBitCast: {
2004 LValue SourceLVal = CGF.EmitLValue(E);
2005 Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
2006 CGF.ConvertTypeForMem(DestTy));
2007 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2008 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2009 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2010 }
2011
2012 case CK_CPointerToObjCPointerCast:
2013 case CK_BlockPointerToObjCPointerCast:
2014 case CK_AnyPointerToBlockPointerCast:
2015 case CK_BitCast: {
2016 Value *Src = Visit(const_cast<Expr*>(E));
2017 llvm::Type *SrcTy = Src->getType();
2018 llvm::Type *DstTy = ConvertType(DestTy);
2019 if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
2020 SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
2021      llvm_unreachable("wrong cast for pointers in different address spaces"
2022                       "(must be an address space cast)!");
2023 }
2024
2025 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2026 if (auto PT = DestTy->getAs<PointerType>())
2027 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
2028 /*MayBeNull=*/true,
2029 CodeGenFunction::CFITCK_UnrelatedCast,
2030 CE->getBeginLoc());
2031 }
2032
2033 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2034 const QualType SrcType = E->getType();
2035
2036 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2037 // Casting to pointer that could carry dynamic information (provided by
2038 // invariant.group) requires launder.
2039 Src = Builder.CreateLaunderInvariantGroup(Src);
2040 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2041 // Casting to pointer that does not carry dynamic information (provided
2042 // by invariant.group) requires stripping it. Note that we don't do it
2043 // if the source could not be dynamic type and destination could be
2044 // dynamic because dynamic information is already laundered. It is
2045 // because launder(strip(src)) == launder(src), so there is no need to
2046 // add extra strip before launder.
2047 Src = Builder.CreateStripInvariantGroup(Src);
2048 }
2049 }
2050
2051 // Update heapallocsite metadata when there is an explicit pointer cast.
2052 if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
2053 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE)) {
2054 QualType PointeeType = DestTy->getPointeeType();
2055 if (!PointeeType.isNull())
2056 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
2057 CE->getExprLoc());
2058 }
2059 }
2060
2061 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2062 // same element type, use the llvm.experimental.vector.insert intrinsic to
2063 // perform the bitcast.
2064 if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2065 if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2066 // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
2067 // vector, use a vector insert and bitcast the result.
2068 bool NeedsBitCast = false;
2069 auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
2070 llvm::Type *OrigType = DstTy;
2071 if (ScalableDst == PredType &&
2072 FixedSrc->getElementType() == Builder.getInt8Ty()) {
2073 DstTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
2074 ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy);
2075 NeedsBitCast = true;
2076 }
2077 if (FixedSrc->getElementType() == ScalableDst->getElementType()) {
2078 llvm::Value *UndefVec = llvm::UndefValue::get(DstTy);
2079 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2080 llvm::Value *Result = Builder.CreateInsertVector(
2081 DstTy, UndefVec, Src, Zero, "castScalableSve");
2082 if (NeedsBitCast)
2083 Result = Builder.CreateBitCast(Result, OrigType);
2084 return Result;
2085 }
2086 }
2087 }
2088
2089 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2090 // same element type, use the llvm.experimental.vector.extract intrinsic to
2091 // perform the bitcast.
2092 if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
2093 if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2094 // If we are casting a scalable 16 x i1 predicate vector to a fixed i8
2095 // vector, bitcast the source and use a vector extract.
2096 auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
2097 if (ScalableSrc == PredType &&
2098 FixedDst->getElementType() == Builder.getInt8Ty()) {
2099 SrcTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
2100 ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy);
2101 Src = Builder.CreateBitCast(Src, SrcTy);
2102 }
2103 if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
2104 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2105 return Builder.CreateExtractVector(DstTy, Src, Zero, "castFixedSve");
2106 }
2107 }
2108 }
2109
2110 // Perform VLAT <-> VLST bitcast through memory.
2111 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
2112 // require the element types of the vectors to be the same, we
2113 // need to keep this around for bitcasts between VLAT <-> VLST where
2114 // the element types of the vectors are not the same, until we figure
2115 // out a better way of doing these casts.
2116 if ((isa<llvm::FixedVectorType>(SrcTy) &&
2117 isa<llvm::ScalableVectorType>(DstTy)) ||
2118 (isa<llvm::ScalableVectorType>(SrcTy) &&
2119 isa<llvm::FixedVectorType>(DstTy))) {
2120 Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
2121 LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
2122 CGF.EmitStoreOfScalar(Src, LV);
2123 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy),
2124 "castFixedSve");
2125 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2126 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2127 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2128 }
2129
2130 return Builder.CreateBitCast(Src, DstTy);
2131 }
2132 case CK_AddressSpaceConversion: {
2133 Expr::EvalResult Result;
2134 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2135 Result.Val.isNullPointer()) {
2136 // If E has side effect, it is emitted even if its final result is a
2137 // null pointer. In that case, a DCE pass should be able to
2138 // eliminate the useless instructions emitted during translating E.
2139 if (Result.HasSideEffects)
2140 Visit(E);
2141 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
2142 ConvertType(DestTy)), DestTy);
2143 }
2144 // Since target may map different address spaces in AST to the same address
2145 // space, an address space conversion may end up as a bitcast.
2146 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2147 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2148 DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
2149 }
2150 case CK_AtomicToNonAtomic:
2151 case CK_NonAtomicToAtomic:
2152 case CK_NoOp:
2153 case CK_UserDefinedConversion:
2154 return Visit(const_cast<Expr*>(E));
2155
2156 case CK_BaseToDerived: {
2157 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2158    assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2159
2160 Address Base = CGF.EmitPointerWithAlignment(E);
2161 Address Derived =
2162 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2163 CE->path_begin(), CE->path_end(),
2164 CGF.ShouldNullCheckClassCastValue(CE));
2165
2166 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2167 // performed and the object is not of the derived type.
2168 if (CGF.sanitizePerformTypeCheck())
2169 CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
2170 Derived.getPointer(), DestTy->getPointeeType());
2171
2172 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2173 CGF.EmitVTablePtrCheckForCast(
2174 DestTy->getPointeeType(), Derived.getPointer(),
2175 /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast,
2176 CE->getBeginLoc());
2177
2178 return Derived.getPointer();
2179 }
2180 case CK_UncheckedDerivedToBase:
2181 case CK_DerivedToBase: {
2182 // The EmitPointerWithAlignment path does this fine; just discard
2183 // the alignment.
2184 return CGF.EmitPointerWithAlignment(CE).getPointer();
2185 }
2186
2187 case CK_Dynamic: {
2188 Address V = CGF.EmitPointerWithAlignment(E);
2189 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2190 return CGF.EmitDynamicCast(V, DCE);
2191 }
2192
2193 case CK_ArrayToPointerDecay:
2194 return CGF.EmitArrayToPointerDecay(E).getPointer();
2195 case CK_FunctionToPointerDecay:
2196 return EmitLValue(E).getPointer(CGF);
2197
2198 case CK_NullToPointer:
2199 if (MustVisitNullValue(E))
2200 CGF.EmitIgnoredExpr(E);
2201
2202 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2203 DestTy);
2204
2205 case CK_NullToMemberPointer: {
2206 if (MustVisitNullValue(E))
2207 CGF.EmitIgnoredExpr(E);
2208
2209 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2210 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2211 }
2212
2213 case CK_ReinterpretMemberPointer:
2214 case CK_BaseToDerivedMemberPointer:
2215 case CK_DerivedToBaseMemberPointer: {
2216 Value *Src = Visit(E);
2217
2218 // Note that the AST doesn't distinguish between checked and
2219 // unchecked member pointer conversions, so we always have to
2220 // implement checked conversions here. This is inefficient when
2221 // actual control flow may be required in order to perform the
2222 // check, which it is for data member pointers (but not member
2223 // function pointers on Itanium and ARM).
2224 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2225 }
2226
2227 case CK_ARCProduceObject:
2228 return CGF.EmitARCRetainScalarExpr(E);
2229 case CK_ARCConsumeObject:
2230 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2231 case CK_ARCReclaimReturnedObject:
2232 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2233 case CK_ARCExtendBlockObject:
2234 return CGF.EmitARCExtendBlockObject(E);
2235
2236 case CK_CopyAndAutoreleaseBlockObject:
2237 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2238
2239 case CK_FloatingRealToComplex:
2240 case CK_FloatingComplexCast:
2241 case CK_IntegralRealToComplex:
2242 case CK_IntegralComplexCast:
2243 case CK_IntegralComplexToFloatingComplex:
2244 case CK_FloatingComplexToIntegralComplex:
2245 case CK_ConstructorConversion:
2246 case CK_ToUnion:
2247    llvm_unreachable("scalar cast to non-scalar value");
2248
2249 case CK_LValueToRValue:
2250    assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2251    assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2252 return Visit(const_cast<Expr*>(E));
2253
2254 case CK_IntegralToPointer: {
2255 Value *Src = Visit(const_cast<Expr*>(E));
2256
2257 // First, convert to the correct width so that we control the kind of
2258 // extension.
2259 auto DestLLVMTy = ConvertType(DestTy);
2260 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2261 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2262 llvm::Value* IntResult =
2263 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2264
2265 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2266
2267 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2268 // Going from integer to pointer that could be dynamic requires reloading
2269 // dynamic information from invariant.group.
2270 if (DestTy.mayBeDynamicClass())
2271 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2272 }
2273 return IntToPtr;
2274 }
2275 case CK_PointerToIntegral: {
2276    assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2277 auto *PtrExpr = Visit(E);
2278
2279 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2280 const QualType SrcType = E->getType();
2281
2282 // Casting to integer requires stripping dynamic information as it does
2283      // not carry it.
2284 if (SrcType.mayBeDynamicClass())
2285 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2286 }
2287
2288 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2289 }
2290 case CK_ToVoid: {
2291 CGF.EmitIgnoredExpr(E);
2292 return nullptr;
2293 }
2294 case CK_MatrixCast: {
2295 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2296 CE->getExprLoc());
2297 }
2298 case CK_VectorSplat: {
2299 llvm::Type *DstTy = ConvertType(DestTy);
2300 Value *Elt = Visit(const_cast<Expr*>(E));
2301 // Splat the element across to all elements
2302 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
2303 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2304 }
2305
2306 case CK_FixedPointCast:
2307 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2308 CE->getExprLoc());
2309
2310 case CK_FixedPointToBoolean:
2311    assert(E->getType()->isFixedPointType() &&
2312           "Expected src type to be fixed point type");
2313    assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2314 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2315 CE->getExprLoc());
2316
2317 case CK_FixedPointToIntegral:
2318    assert(E->getType()->isFixedPointType() &&
2319           "Expected src type to be fixed point type");
2320    assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2321 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2322 CE->getExprLoc());
2323
2324 case CK_IntegralToFixedPoint:
2325    assert(E->getType()->isIntegerType() &&
2326           "Expected src type to be an integer");
2327    assert(DestTy->isFixedPointType() &&
2328           "Expected dest type to be fixed point type");
2329 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2330 CE->getExprLoc());
2331
2332 case CK_IntegralCast: {
2333 ScalarConversionOpts Opts;
2334 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2335 if (!ICE->isPartOfExplicitCast())
2336 Opts = ScalarConversionOpts(CGF.SanOpts);
2337 }
2338 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2339 CE->getExprLoc(), Opts);
2340 }
2341 case CK_IntegralToFloating:
2342 case CK_FloatingToIntegral:
2343 case CK_FloatingCast:
2344 case CK_FixedPointToFloating:
2345 case CK_FloatingToFixedPoint: {
2346 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2347 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2348 CE->getExprLoc());
2349 }
2350 case CK_BooleanToSignedIntegral: {
2351 ScalarConversionOpts Opts;
2352 Opts.TreatBooleanAsSigned = true;
2353 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2354 CE->getExprLoc(), Opts);
2355 }
2356 case CK_IntegralToBoolean:
2357 return EmitIntToBoolConversion(Visit(E));
2358 case CK_PointerToBoolean:
2359 return EmitPointerToBoolConversion(Visit(E), E->getType());
2360 case CK_FloatingToBoolean: {
2361 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2362 return EmitFloatToBoolConversion(Visit(E));
2363 }
2364 case CK_MemberPointerToBoolean: {
2365 llvm::Value *MemPtr = Visit(E);
2366 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2367 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2368 }
2369
2370 case CK_FloatingComplexToReal:
2371 case CK_IntegralComplexToReal:
2372 return CGF.EmitComplexExpr(E, false, true).first;
2373
2374 case CK_FloatingComplexToBoolean:
2375 case CK_IntegralComplexToBoolean: {
2376 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2377
2378 // TODO: kill this function off, inline appropriate case here
2379 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2380 CE->getExprLoc());
2381 }
2382
2383 case CK_ZeroToOCLOpaqueType: {
2384    assert((DestTy->isEventT() || DestTy->isQueueT() ||
2385            DestTy->isOCLIntelSubgroupAVCType()) &&
2386           "CK_ZeroToOCLEvent cast on non-event type");
2387 return llvm::Constant::getNullValue(ConvertType(DestTy));
2388 }
2389
2390 case CK_IntToOCLSampler:
2391 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2392
2393 } // end of switch
2394
2395  llvm_unreachable("unknown scalar cast");
2396}
2397
2398Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2399 CodeGenFunction::StmtExprEvaluation eval(CGF);
2400 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2401 !E->getType()->isVoidType());
2402 if (!RetAlloca.isValid())
2403 return nullptr;
2404 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2405 E->getExprLoc());
2406}
2407
2408Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2409 CodeGenFunction::RunCleanupsScope Scope(CGF);
2410 Value *V = Visit(E->getSubExpr());
2411 // Defend against dominance problems caused by jumps out of expression
2412 // evaluation through the shared cleanup block.
2413 Scope.ForceCleanup({&V});
2414 return V;
2415}
2416
2417//===----------------------------------------------------------------------===//
2418// Unary Operators
2419//===----------------------------------------------------------------------===//
2420
2421static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2422 llvm::Value *InVal, bool IsInc,
2423 FPOptions FPFeatures) {
2424 BinOpInfo BinOp;
2425 BinOp.LHS = InVal;
2426 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2427 BinOp.Ty = E->getType();
2428 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2429 BinOp.FPFeatures = FPFeatures;
2430 BinOp.E = E;
2431 return BinOp;
2432}
2433
2434llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2435 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2436 llvm::Value *Amount =
2437 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
2438 StringRef Name = IsInc ? "inc" : "dec";
2439 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2440 case LangOptions::SOB_Defined:
2441 return Builder.CreateAdd(InVal, Amount, Name);
2442 case LangOptions::SOB_Undefined:
2443 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2444 return Builder.CreateNSWAdd(InVal, Amount, Name);
2445    LLVM_FALLTHROUGH;
2446 case LangOptions::SOB_Trapping:
2447 if (!E->canOverflow())
2448 return Builder.CreateNSWAdd(InVal, Amount, Name);
2449 return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2450 E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2451 }
2452  llvm_unreachable("Unknown SignedOverflowBehaviorTy");
2453}
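// Illustrative sketch: for ++x on a signed int, SOB_Defined (-fwrapv) yields
// a plain `add`, the default undefined-behavior mode yields `add nsw`, and
// the trapping / sanitized path goes through the overflow-checked helper,
// e.g. via @llvm.sadd.with.overflow.i32.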
2454
2455namespace {
2456/// Handles check and update for lastprivate conditional variables.
2457class OMPLastprivateConditionalUpdateRAII {
2458private:
2459 CodeGenFunction &CGF;
2460 const UnaryOperator *E;
2461
2462public:
2463 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
2464 const UnaryOperator *E)
2465 : CGF(CGF), E(E) {}
2466 ~OMPLastprivateConditionalUpdateRAII() {
2467 if (CGF.getLangOpts().OpenMP)
2468 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
2469 CGF, E->getSubExpr());
2470 }
2471};
2472} // namespace
2473
2474llvm::Value *
2475ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2476 bool isInc, bool isPre) {
2477 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
2478 QualType type = E->getSubExpr()->getType();
2479 llvm::PHINode *atomicPHI = nullptr;
2480 llvm::Value *value;
2481 llvm::Value *input;
2482
2483 int amount = (isInc ? 1 : -1);
2484 bool isSubtraction = !isInc;
2485
2486 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2487 type = atomicTy->getValueType();
2488 if (isInc && type->isBooleanType()) {
2489 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
2490 if (isPre) {
2491 Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified())
2492 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
2493 return Builder.getTrue();
2494 }
2495 // For atomic bool increment, we just store true and return it for
2496 // preincrement, do an atomic swap with true for postincrement
2497 return Builder.CreateAtomicRMW(
2498 llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True,
2499 llvm::AtomicOrdering::SequentiallyConsistent);
2500 }
2501 // Special case for atomic increment / decrement on integers, emit
2502 // atomicrmw instructions. We skip this if we want to be doing overflow
2503 // checking, and fall into the slow path with the atomic cmpxchg loop.
2504 if (!type->isBooleanType() && type->isIntegerType() &&
2505 !(type->isUnsignedIntegerType() &&
2506 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2507 CGF.getLangOpts().getSignedOverflowBehavior() !=
2508 LangOptions::SOB_Trapping) {
2509 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2510 llvm::AtomicRMWInst::Sub;
2511 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2512 llvm::Instruction::Sub;
2513 llvm::Value *amt = CGF.EmitToMemory(
2514 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
2515 llvm::Value *old =
2516 Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt,
2517 llvm::AtomicOrdering::SequentiallyConsistent);
2518 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
2519 }
2520 value = EmitLoadOfLValue(LV, E->getExprLoc());
2521 input = value;
2522 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2523 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2524 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2525 value = CGF.EmitToMemory(value, type);
2526 Builder.CreateBr(opBB);
2527 Builder.SetInsertPoint(opBB);
2528 atomicPHI = Builder.CreatePHI(value->getType(), 2);
2529 atomicPHI->addIncoming(value, startBB);
2530 value = atomicPHI;
2531 } else {
2532 value = EmitLoadOfLValue(LV, E->getExprLoc());
2533 input = value;
2534 }
2535
2536 // Special case of integer increment that we have to check first: bool++.
2537 // Due to promotion rules, we get:
2538 // bool++ -> bool = bool + 1
2539 // -> bool = (int)bool + 1
2540 // -> bool = ((int)bool + 1 != 0)
2541 // An interesting aspect of this is that increment is always true.
2542 // Decrement does not have this property.
2543 if (isInc && type->isBooleanType()) {
2544 value = Builder.getTrue();
2545
2546 // Most common case by far: integer increment.
2547 } else if (type->isIntegerType()) {
2548 QualType promotedType;
2549 bool canPerformLossyDemotionCheck = false;
2550 if (type->isPromotableIntegerType()) {
2551 promotedType = CGF.getContext().getPromotedIntegerType(type);
2552      assert(promotedType != type && "Shouldn't promote to the same type.");
2553 canPerformLossyDemotionCheck = true;
2554 canPerformLossyDemotionCheck &=
2555 CGF.getContext().getCanonicalType(type) !=
2556 CGF.getContext().getCanonicalType(promotedType);
2557 canPerformLossyDemotionCheck &=
2558 PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
2559 type, promotedType);
2560      assert((!canPerformLossyDemotionCheck ||
2561              type->isSignedIntegerOrEnumerationType() ||
2562              promotedType->isSignedIntegerOrEnumerationType() ||
2563              ConvertType(type)->getScalarSizeInBits() ==
2564                  ConvertType(promotedType)->getScalarSizeInBits()) &&
2565             "The following check expects that if we do promotion to different "
2566             "underlying canonical type, at least one of the types (either "
2567             "base or promoted) will be signed, or the bitwidths will match.");
2568 }
2569 if (CGF.SanOpts.hasOneOf(
2570 SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
2571 canPerformLossyDemotionCheck) {
2572      // While `x += 1` (for `x` with width less than int) is modeled as
2573      // promotion+arithmetic+demotion, where we can catch lossy demotion with
2574      // ease, inc/dec with width less than int can't overflow because of
2575      // promotion rules, so we omit promotion+demotion, which means that we
2576      // cannot catch lossy "demotion". Because we still want to catch these cases
2577 // when the sanitizer is enabled, we perform the promotion, then perform
2578 // the increment/decrement in the wider type, and finally
2579 // perform the demotion. This will catch lossy demotions.
2580
2581 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
2582 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2583 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2584 // Do pass non-default ScalarConversionOpts so that sanitizer check is
2585 // emitted.
2586 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
2587 ScalarConversionOpts(CGF.SanOpts));
2588
2589 // Note that signed integer inc/dec with width less than int can't
2590 // overflow because of promotion rules; we're just eliding a few steps
2591 // here.
2592 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2593 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
2594 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2595 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
2596 value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2597 E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2598 } else {
2599 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2600 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2601 }
2602
2603 // Next most common: pointer increment.
2604 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2605 QualType type = ptr->getPointeeType();
2606
2607 // VLA types don't have constant size.
2608 if (const VariableArrayType *vla
2609 = CGF.getContext().getAsVariableArrayType(type)) {
2610 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
2611 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
2612 if (CGF.getLangOpts().isSignedOverflowDefined())
2613 value = Builder.CreateGEP(value->getType()->getPointerElementType(),
2614 value, numElts, "vla.inc");
2615 else
2616 value = CGF.EmitCheckedInBoundsGEP(
2617 value, numElts, /*SignedIndices=*/false, isSubtraction,
2618 E->getExprLoc(), "vla.inc");
2619
2620 // Arithmetic on function pointers (!) is just +-1.
2621 } else if (type->isFunctionType()) {
2622 llvm::Value *amt = Builder.getInt32(amount);
2623
2624 value = CGF.EmitCastToVoidPtr(value);
2625 if (CGF.getLangOpts().isSignedOverflowDefined())
2626 value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
2627 else
2628 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2629 isSubtraction, E->getExprLoc(),
2630 "incdec.funcptr");
2631 value = Builder.CreateBitCast(value, input->getType());
2632
2633 // For everything else, we can just do a simple increment.
2634 } else {
2635 llvm::Value *amt = Builder.getInt32(amount);
2636 if (CGF.getLangOpts().isSignedOverflowDefined())
2637 value = Builder.CreateGEP(value->getType()->getPointerElementType(),
2638 value, amt, "incdec.ptr");
2639 else
2640 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2641 isSubtraction, E->getExprLoc(),
2642 "incdec.ptr");
2643 }
2644
2645 // Vector increment/decrement.
2646 } else if (type->isVectorType()) {
2647 if (type->hasIntegerRepresentation()) {
2648 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
2649
2650 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2651 } else {
2652 value = Builder.CreateFAdd(
2653 value,
2654 llvm::ConstantFP::get(value->getType(), amount),
2655 isInc ? "inc" : "dec");
2656 }
2657
2658 // Floating point.
2659 } else if (type->isRealFloatingType()) {
2660 // Add the inc/dec to the real part.
2661 llvm::Value *amt;
2662 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
2663
2664 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2665 // Another special case: half FP increment should be done via float
2666 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2667 value = Builder.CreateCall(
2668 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
2669 CGF.CGM.FloatTy),
2670 input, "incdec.conv");
2671 } else {
2672 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
2673 }
2674 }
2675
2676 if (value->getType()->isFloatTy())
2677 amt = llvm::ConstantFP::get(VMContext,
2678 llvm::APFloat(static_cast<float>(amount)));
2679 else if (value->getType()->isDoubleTy())
2680 amt = llvm::ConstantFP::get(VMContext,
2681 llvm::APFloat(static_cast<double>(amount)));
2682 else {
2683 // Remaining types are Half, LongDouble or __float128. Convert from float.
2684 llvm::APFloat F(static_cast<float>(amount));
2685 bool ignored;
2686 const llvm::fltSemantics *FS;
2687 // Don't use getFloatTypeSemantics because Half isn't
2688 // necessarily represented using the "half" LLVM type.
2689 if (value->getType()->isFP128Ty())
2690 FS = &CGF.getTarget().getFloat128Format();
2691 else if (value->getType()->isHalfTy())
2692 FS = &CGF.getTarget().getHalfFormat();
2693 else
2694 FS = &CGF.getTarget().getLongDoubleFormat();
2695 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
2696 amt = llvm::ConstantFP::get(VMContext, F);
2697 }
2698 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
2699
2700 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2701 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2702 value = Builder.CreateCall(
2703 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
2704 CGF.CGM.FloatTy),
2705 value, "incdec.conv");
2706 } else {
2707 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
2708 }
2709 }
2710
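Sketch of the non-native half path (hypothetical source): the increment is performed in float and converted back,

    void bump(__fp16 *h) {
      ++*h;   /* fpext (or convert_from_fp16) to float, fadd 1.0f,
                 then fptrunc/convert_to_fp16 back to half */
    }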
2711 // Fixed-point types.
2712 } else if (type->isFixedPointType()) {
2713 // Fixed-point types are tricky. In some cases, it isn't possible to
2714 // represent a 1 or a -1 in the type at all. Piggyback off of
2715 // EmitFixedPointBinOp to avoid having to reimplement saturation.
2716 BinOpInfo Info;
2717 Info.E = E;
2718 Info.Ty = E->getType();
2719 Info.Opcode = isInc ? BO_Add : BO_Sub;
2720 Info.LHS = value;
2721 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
2722 // If the type is signed, it's better to represent this as +(-1) or -(-1),
2723 // since -1 is guaranteed to be representable.
2724 if (type->isSignedFixedPointType()) {
2725 Info.Opcode = isInc ? BO_Sub : BO_Add;
2726 Info.RHS = Builder.CreateNeg(Info.RHS);
2727 }
2728 // Now, convert from our invented integer literal to the type of the unary
2729 // op. This will upscale and saturate if necessary. This value can become
2730 // undef in some cases.
2731 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
2732 auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
2733 Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
2734 value = EmitFixedPointBinOp(Info);
2735
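A concrete case of the representability problem (illustrative): a signed _Fract lies in [-1.0, 1.0), so +1 does not fit but -1 does,

    void bump(_Fract *x) {
      ++*x;   /* emitted as *x - (-1); EmitFixedPointBinOp upscales and saturates */
    }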
2736 // Objective-C pointer types.
2737 } else {
2738 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
2739 value = CGF.EmitCastToVoidPtr(value);
2740
2741 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
2742 if (!isInc) size = -size;
2743 llvm::Value *sizeValue =
2744 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
2745
2746 if (CGF.getLangOpts().isSignedOverflowDefined())
2747 value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
2748 else
2749 value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
2750 /*SignedIndices=*/false, isSubtraction,
2751 E->getExprLoc(), "incdec.objptr");
2752 value = Builder.CreateBitCast(value, input->getType());
2753 }
2754
2755 if (atomicPHI) {
2756 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
2757 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2758 auto Pair = CGF.EmitAtomicCompareExchange(
2759 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
2760 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
2761 llvm::Value *success = Pair.second;
2762 atomicPHI->addIncoming(old, curBlock);
2763 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
2764 Builder.SetInsertPoint(contBB);
2765 return isPre ? value : input;
2766 }
2767
2768 // Store the updated result through the lvalue.
2769 if (LV.isBitField())
2770 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
2771 else
2772 CGF.EmitStoreThroughLValue(RValue::get(value), LV);
2773
2774 // If this is a postinc, return the value read from memory, otherwise use the
2775 // updated value.
2776 return isPre ? value : input;
2777}
2778
2779
2780
2781Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
2782 TestAndClearIgnoreResultAssign();
2783 Value *Op = Visit(E->getSubExpr());
2784
2785 // Generate a unary FNeg for FP ops.
2786 if (Op->getType()->isFPOrFPVectorTy())
1
Taking false branch
2787 return Builder.CreateFNeg(Op, "fneg");
2788
2789 // Emit unary minus with EmitSub so we handle overflow cases etc.
2790 BinOpInfo BinOp;
2791 BinOp.RHS = Op;
2792 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
2793 BinOp.Ty = E->getType();
2794 BinOp.Opcode = BO_Sub;
2795 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
2796 BinOp.E = E;
2797 return EmitSub(BinOp);
2
Calling 'ScalarExprEmitter::EmitSub'
2798}
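Minimal example of why EmitSub is used here (assumed input, not the code under analysis): integer negation is lowered as a subtraction from zero,

    int neg(int x) { return -x; }   /* emitted as (0 - x), so -ftrapv or
                                       -fsanitize=signed-integer-overflow
                                       catches x == INT_MIN */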
2799
2800Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
2801 TestAndClearIgnoreResultAssign();
2802 Value *Op = Visit(E->getSubExpr());
2803 return Builder.CreateNot(Op, "neg");
2804}
2805
2806Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
2807 // Perform vector logical not on comparison with zero vector.
2808 if (E->getType()->isVectorType() &&
2809 E->getType()->castAs<VectorType>()->getVectorKind() ==
2810 VectorType::GenericVector) {
2811 Value *Oper = Visit(E->getSubExpr());
2812 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
2813 Value *Result;
2814 if (Oper->getType()->isFPOrFPVectorTy()) {
2815 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
2816 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
2817 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
2818 } else
2819 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
2820 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
2821 }
2822
2823 // Compare operand to zero.
2824 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
2825
2826 // Invert value.
2827 // TODO: Could dynamically modify easy computations here. For example, if
2828 // the operand is an icmp ne, turn into icmp eq.
2829 BoolVal = Builder.CreateNot(BoolVal, "lnot");
2830
2831 // ZExt result to the expr type.
2832 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
2833}
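For a GNU generic vector the result is a lane mask, not a bool (illustrative):

    typedef int v4 __attribute__((vector_size(16)));
    v4 f(v4 a) { return !a; }   /* each lane: -1 where a[i] == 0, else 0,
                                   via the compare-to-zero and sext above */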
2834
2835Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
2836 // Try folding the offsetof to a constant.
2837 Expr::EvalResult EVResult;
2838 if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
2839 llvm::APSInt Value = EVResult.Val.getInt();
2840 return Builder.getInt(Value);
2841 }
2842
2843 // Loop over the components of the offsetof to compute the value.
2844 unsigned n = E->getNumComponents();
2845 llvm::Type* ResultType = ConvertType(E->getType());
2846 llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
2847 QualType CurrentType = E->getTypeSourceInfo()->getType();
2848 for (unsigned i = 0; i != n; ++i) {
2849 OffsetOfNode ON = E->getComponent(i);
2850 llvm::Value *Offset = nullptr;
2851 switch (ON.getKind()) {
2852 case OffsetOfNode::Array: {
2853 // Compute the index
2854 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
2855 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
2856 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
2857 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
2858
2859 // Save the element type
2860 CurrentType =
2861 CGF.getContext().getAsArrayType(CurrentType)->getElementType();
2862
2863 // Compute the element size
2864 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
2865 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
2866
2867 // Multiply out to compute the result
2868 Offset = Builder.CreateMul(Idx, ElemSize);
2869 break;
2870 }
2871
2872 case OffsetOfNode::Field: {
2873 FieldDecl *MemberDecl = ON.getField();
2874 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
2875 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2876
2877 // Compute the index of the field in its parent.
2878 unsigned i = 0;
2879 // FIXME: It would be nice if we didn't have to loop here!
2880 for (RecordDecl::field_iterator Field = RD->field_begin(),
2881 FieldEnd = RD->field_end();
2882 Field != FieldEnd; ++Field, ++i) {
2883 if (*Field == MemberDecl)
2884 break;
2885 }
2886 assert(i < RL.getFieldCount() && "offsetof field in wrong type")(static_cast<void> (0));
2887
2888 // Compute the offset to the field
2889 int64_t OffsetInt = RL.getFieldOffset(i) /
2890 CGF.getContext().getCharWidth();
2891 Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
2892
2893 // Save the element type.
2894 CurrentType = MemberDecl->getType();
2895 break;
2896 }
2897
2898 case OffsetOfNode::Identifier:
2899 llvm_unreachable("dependent __builtin_offsetof")__builtin_unreachable();
2900
2901 case OffsetOfNode::Base: {
2902 if (ON.getBase()->isVirtual()) {
2903 CGF.ErrorUnsupported(E, "virtual base in offsetof");
2904 continue;
2905 }
2906
2907 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
2908 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2909
2910 // Save the element type.
2911 CurrentType = ON.getBase()->getType();
2912
2913 // Compute the offset to the base.
2914 const RecordType *BaseRT = CurrentType->getAs<RecordType>();
2915 CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
2916 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
2917 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
2918 break;
2919 }
2920 }
2921 Result = Builder.CreateAdd(Result, Offset);
2922 }
2923 return Result;
2924}
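Illustrative input for the component walk (hypothetical struct): a non-constant array index defeats the constant fold, so the offset is assembled exactly as above,

    struct S { int a; int b[8]; };
    size_t off(int i) { return __builtin_offsetof(struct S, b[i]); }
    /* => offsetof(S, b) + (size_t)i * sizeof(int), one CreateAdd per node */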
2925
2926/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type
2927/// of the argument of the sizeof expression as an integer.
2928Value *
2929ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2930 const UnaryExprOrTypeTraitExpr *E) {
2931 QualType TypeToSize = E->getTypeOfArgument();
2932 if (E->getKind() == UETT_SizeOf) {
2933 if (const VariableArrayType *VAT =
2934 CGF.getContext().getAsVariableArrayType(TypeToSize)) {
2935 if (E->isArgumentType()) {
2936 // sizeof(type) - make sure to emit the VLA size.
2937 CGF.EmitVariablyModifiedType(TypeToSize);
2938 } else {
2939 // C99 6.5.3.4p2: If the argument is an expression of type
2940 // VLA, it is evaluated.
2941 CGF.EmitIgnoredExpr(E->getArgumentExpr());
2942 }
2943
2944 auto VlaSize = CGF.getVLASize(VAT);
2945 llvm::Value *size = VlaSize.NumElts;
2946
2947 // Scale the number of non-VLA elements by the non-VLA element size.
2948 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
2949 if (!eltSize.isOne())
2950 size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);
2951
2952 return size;
2953 }
2954 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
2955 auto Alignment =
2956 CGF.getContext()
2957 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
2958 E->getTypeOfArgument()->getPointeeType()))
2959 .getQuantity();
2960 return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
2961 }
2962
2963 // If this isn't sizeof(vla), the result must be constant; use the constant
2964 // folding logic so we don't have to duplicate it here.
2965 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
2966}
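Worked VLA case (assumed source, not from this report): only sizeof applied to a VLA reaches the dynamic path,

    size_t s(int n) { int a[n]; return sizeof(a); }
    /* not folded: emitted as (size_t)n * sizeof(int), via the NUW multiply above */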
2967
2968Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
2969 Expr *Op = E->getSubExpr();
2970 if (Op->getType()->isAnyComplexType()) {
2971 // If it's an l-value, load through the appropriate subobject l-value.
2972 // Note that we have to ask E because Op might be an l-value that
2973 // this won't work for, e.g. an Obj-C property.
2974 if (E->isGLValue())
2975 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2976 E->getExprLoc()).getScalarVal();
2977
2978 // Otherwise, calculate and project.
2979 return CGF.EmitComplexExpr(Op, false, true).first;
2980 }
2981
2982 return Visit(Op);
2983}
2984
2985Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
2986 Expr *Op = E->getSubExpr();
2987 if (Op->getType()->isAnyComplexType()) {
2988 // If it's an l-value, load through the appropriate subobject l-value.
2989 // Note that we have to ask E because Op might be an l-value that
2990 // this won't work for, e.g. an Obj-C property.
2991 if (Op->isGLValue())
2992 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2993 E->getExprLoc()).getScalarVal();
2994
2995 // Otherwise, calculate and project.
2996 return CGF.EmitComplexExpr(Op, true, false).second;
2997 }
2998
2999 // __imag on a scalar returns zero. Emit the subexpr to ensure side
3000 // effects are evaluated, but not the actual value.
3001 if (Op->isGLValue())
3002 CGF.EmitLValue(Op);
3003 else
3004 CGF.EmitScalarExpr(Op, true);
3005 return llvm::Constant::getNullValue(ConvertType(E->getType()));
3006}
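Scalar example (illustrative): the operand is evaluated only for its side effects and the result is a constant zero,

    extern double g(void);
    double im(void) { return __imag g(); }   /* g() is called; result is 0.0 */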
3007
3008//===----------------------------------------------------------------------===//
3009// Binary Operators
3010//===----------------------------------------------------------------------===//
3011
3012BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
3013 TestAndClearIgnoreResultAssign();
3014 BinOpInfo Result;
3015 Result.LHS = Visit(E->getLHS());
3016 Result.RHS = Visit(E->getRHS());
3017 Result.Ty = E->getType();
3018 Result.Opcode = E->getOpcode();
3019 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3020 Result.E = E;
3021 return Result;
3022}
3023
3024LValue ScalarExprEmitter::EmitCompoundAssignLValue(
3025 const CompoundAssignOperator *E,
3026 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
3027 Value *&Result) {
3028 QualType LHSTy = E->getLHS()->getType();
3029 BinOpInfo OpInfo;
3030
3031 if (E->getComputationResultType()->isAnyComplexType())
3032 return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
3033
3034 // Emit the RHS first. __block variables need to have the rhs evaluated
3035 // first, plus this should improve codegen a little.
3036 OpInfo.RHS = Visit(E->getRHS());
3037 OpInfo.Ty = E->getComputationResultType();
3038 OpInfo.Opcode = E->getOpcode();
3039 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3040 OpInfo.E = E;
3041 // Load/convert the LHS.
3042 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3043
3044 llvm::PHINode *atomicPHI = nullptr;
3045 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
3046 QualType type = atomicTy->getValueType();
3047 if (!type->isBooleanType() && type->isIntegerType() &&
3048 !(type->isUnsignedIntegerType() &&
3049 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3050 CGF.getLangOpts().getSignedOverflowBehavior() !=
3051 LangOptions::SOB_Trapping) {
3052 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3053 llvm::Instruction::BinaryOps Op;
3054 switch (OpInfo.Opcode) {
3055 // We don't have atomicrmw operands for *, %, /, <<, >>
3056 case BO_MulAssign: case BO_DivAssign:
3057 case BO_RemAssign:
3058 case BO_ShlAssign:
3059 case BO_ShrAssign:
3060 break;
3061 case BO_AddAssign:
3062 AtomicOp = llvm::AtomicRMWInst::Add;
3063 Op = llvm::Instruction::Add;
3064 break;
3065 case BO_SubAssign:
3066 AtomicOp = llvm::AtomicRMWInst::Sub;
3067 Op = llvm::Instruction::Sub;
3068 break;
3069 case BO_AndAssign:
3070 AtomicOp = llvm::AtomicRMWInst::And;
3071 Op = llvm::Instruction::And;
3072 break;
3073 case BO_XorAssign:
3074 AtomicOp = llvm::AtomicRMWInst::Xor;
3075 Op = llvm::Instruction::Xor;
3076 break;
3077 case BO_OrAssign:
3078 AtomicOp = llvm::AtomicRMWInst::Or;
3079 Op = llvm::Instruction::Or;
3080 break;
3081 default:
3082 llvm_unreachable("Invalid compound assignment type")__builtin_unreachable();
3083 }
3084 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
3085 llvm::Value *Amt = CGF.EmitToMemory(
3086 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
3087 E->getExprLoc()),
3088 LHSTy);
3089 Value *OldVal = Builder.CreateAtomicRMW(
3090 AtomicOp, LHSLV.getPointer(CGF), Amt,
3091 llvm::AtomicOrdering::SequentiallyConsistent);
3092
3093 // Since operation is atomic, the result type is guaranteed to be the
3094 // same as the input in LLVM terms.
3095 Result = Builder.CreateBinOp(Op, OldVal, Amt);
3096 return LHSLV;
3097 }
3098 }
3099 // FIXME: For floating point types, we should be saving and restoring the
3100 // floating point environment in the loop.
3101 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3102 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
3103 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3104 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
3105 Builder.CreateBr(opBB);
3106 Builder.SetInsertPoint(opBB);
3107 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
3108 atomicPHI->addIncoming(OpInfo.LHS, startBB);
3109 OpInfo.LHS = atomicPHI;
3110 }
3111 else
3112 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3113
3114 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
3115 SourceLocation Loc = E->getExprLoc();
3116 OpInfo.LHS =
3117 EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);
3118
3119 // Expand the binary operator.
3120 Result = (this->*Func)(OpInfo);
3121
3122 // Convert the result back to the LHS type,
3123 // potentially with Implicit Conversion sanitizer check.
3124 Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy,
3125 Loc, ScalarConversionOpts(CGF.SanOpts));
3126
3127 if (atomicPHI) {
3128 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3129 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3130 auto Pair = CGF.EmitAtomicCompareExchange(
3131 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
3132 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
3133 llvm::Value *success = Pair.second;
3134 atomicPHI->addIncoming(old, curBlock);
3135 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3136 Builder.SetInsertPoint(contBB);
3137 return LHSLV;
3138 }
3139
3140 // Store the result value into the LHS lvalue. Bit-fields are handled
3141 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
3142 // 'An assignment expression has the value of the left operand after the
3143 // assignment...'.
3144 if (LHSLV.isBitField())
3145 CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
3146 else
3147 CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
3148
3149 if (CGF.getLangOpts().OpenMP)
3150 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
3151 E->getLHS());
3152 return LHSLV;
3153}
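How the two atomic strategies above divide the work, in source terms (assumed C11 code):

    extern _Atomic int a;
    void f(void) {
      a += 5;   /* has a matching atomicrmw: emitted as atomicrmw add, seq_cst */
      a *= 2;   /* no atomicrmw mul: load, "atomic_op" loop around atomicPHI,
                   cmpxchg, branch to "atomic_cont" on success */
    }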
3154
3155Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3156 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3157 bool Ignore = TestAndClearIgnoreResultAssign();
3158 Value *RHS = nullptr;
3159 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
3160
3161 // If the result is clearly ignored, return now.
3162 if (Ignore)
3163 return nullptr;
3164
3165 // The result of an assignment in C is the assigned r-value.
3166 if (!CGF.getLangOpts().CPlusPlus)
3167 return RHS;
3168
3169 // If the lvalue is non-volatile, return the computed value of the assignment.
3170 if (!LHS.isVolatileQualified())
3171 return RHS;
3172
3173 // Otherwise, reload the value.
3174 return EmitLoadOfLValue(LHS, E->getExprLoc());
3175}
3176
3177void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
3178 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
3179 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
3180
3181 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
3182 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
3183 SanitizerKind::IntegerDivideByZero));
3184 }
3185
3186 const auto *BO = cast<BinaryOperator>(Ops.E);
3187 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
3188 Ops.Ty->hasSignedIntegerRepresentation() &&
3189 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
3190 Ops.mayHaveIntegerOverflow()) {
3191 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
3192
3193 llvm::Value *IntMin =
3194 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
3195 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
3196
3197 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
3198 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
3199 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
3200 Checks.push_back(
3201 std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
3202 }
3203
3204 if (Checks.size() > 0)
3205 EmitBinOpCheck(Checks, Ops);
3206}
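The two guarded UB conditions, spelled out on a minimal example (illustrative):

    int q(int x, int y) { return x / y; }
    /* check 1: y != 0                   (integer-divide-by-zero)  */
    /* check 2: x != INT_MIN || y != -1  (signed-integer-overflow) */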
3207
3208Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
3209 {
3210 CodeGenFunction::SanitizerScope SanScope(&CGF);
3211 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3212 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3213 Ops.Ty->isIntegerType() &&
3214 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3215 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3216 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
3217 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
3218 Ops.Ty->isRealFloatingType() &&
3219 Ops.mayHaveFloatDivisionByZero()) {
3220 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3221 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
3222 EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
3223 Ops);
3224 }
3225 }
3226
3227 if (Ops.Ty->isConstantMatrixType()) {
3228 llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
3229 // We need to check the types of the operands of the operator to get the
3230 // correct matrix dimensions.
3231 auto *BO = cast<BinaryOperator>(Ops.E);
3232 (void)BO;
3233 assert((static_cast<void> (0))
3234 isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&(static_cast<void> (0))
3235 "first operand must be a matrix")(static_cast<void> (0));
3236 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&(static_cast<void> (0))
3237 "second operand must be an arithmetic type")(static_cast<void> (0));
3238 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3239 return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
3240 Ops.Ty->hasUnsignedIntegerRepresentation());
3241 }
3242
3243 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
3244 llvm::Value *Val;
3245 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3246 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
3247 if ((CGF.getLangOpts().OpenCL &&
3248 !CGF.CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
3249 (CGF.getLangOpts().HIP && CGF.getLangOpts().CUDAIsDevice &&
3250 !CGF.CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
3251 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
3252 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
3253 // build option allows an application to specify that single precision
3254 // floating-point divide (x/y and 1/x) and sqrt used in the program
3255 // source are correctly rounded.
3256 llvm::Type *ValTy = Val->getType();
3257 if (ValTy->isFloatTy() ||
3258 (isa<llvm::VectorType>(ValTy) &&
3259 cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
3260 CGF.SetFPAccuracy(Val, 2.5);
3261 }
3262 return Val;
3263 }
3264 else if (Ops.isFixedPointOp())
3265 return EmitFixedPointBinOp(Ops);
3266 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
3267 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
3268 else
3269 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
3270}
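Observable effect of SetFPAccuracy above (illustrative OpenCL kernel, assuming -cl-fp32-correctly-rounded-divide-sqrt is not given): the float divide is annotated as allowed 2.5 ulp of error,

    float f(float x, float y) { return x / y; }
    /* => %div = fdiv float %x, %y, !fpmath !{float 2.500000e+00} */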
3271
3272Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
3273 // Rem in C can't be a floating point type: C99 6.5.5p2.
3274 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3275 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3276 Ops.Ty->isIntegerType() &&
3277 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3278 CodeGenFunction::SanitizerScope SanScope(&CGF);
3279 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3280 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
3281 }
3282
3283 if (Ops.Ty->hasUnsignedIntegerRepresentation())
3284 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
3285 else
3286 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
3287}
3288
3289Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
3290 unsigned IID;
3291 unsigned OpID = 0;
3292 SanitizerHandler OverflowKind;
3293
3294 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
3295 switch (Ops.Opcode) {
3296 case BO_Add:
3297 case BO_AddAssign:
3298 OpID = 1;
3299 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
3300 llvm::Intrinsic::uadd_with_overflow;
3301 OverflowKind = SanitizerHandler::AddOverflow;
3302 break;
3303 case BO_Sub:
3304 case BO_SubAssign:
3305 OpID = 2;
3306 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
3307 llvm::Intrinsic::usub_with_overflow;
3308 OverflowKind = SanitizerHandler::SubOverflow;
3309 break;
3310 case BO_Mul:
3311 case BO_MulAssign:
3312 OpID = 3;
3313 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
3314 llvm::Intrinsic::umul_with_overflow;
3315 OverflowKind = SanitizerHandler::MulOverflow;
3316 break;
3317 default:
3318 llvm_unreachable("Unsupported operation for overflow detection")__builtin_unreachable();
3319 }
3320 OpID <<= 1;
3321 if (isSigned)
3322 OpID |= 1;
3323
3324 CodeGenFunction::SanitizerScope SanScope(&CGF);
3325 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
3326
3327 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
3328
3329 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
3330 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
3331 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
3332
3333 // Handle overflow with llvm.trap if no custom handler has been specified.
3334 const std::string *handlerName =
3335 &CGF.getLangOpts().OverflowHandler;
3336 if (handlerName->empty()) {
3337 // If the signed-integer-overflow sanitizer is enabled, emit a call to its
3338 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
3339 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
3340 llvm::Value *NotOverflow = Builder.CreateNot(overflow);
3341 SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
3342 : SanitizerKind::UnsignedIntegerOverflow;
3343 EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
3344 } else
3345 CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
3346 return result;
3347 }
3348
3349 // Branch in case of overflow.
3350 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
3351 llvm::BasicBlock *continueBB =
3352 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
3353 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
3354
3355 Builder.CreateCondBr(overflow, overflowBB, continueBB);
3356
3357 // If an overflow handler is set, then we want to call it and then use its
3358 // result, if it returns.
3359 Builder.SetInsertPoint(overflowBB);
3360
3361 // Get the overflow handler.
3362 llvm::Type *Int8Ty = CGF.Int8Ty;
3363 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
3364 llvm::FunctionType *handlerTy =
3365 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
3366 llvm::FunctionCallee handler =
3367 CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
3368
3369 // Sign extend the args to 64-bit, so that we can use the same handler for
3370 // all types of overflow.
3371 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
3372 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
3373
3374 // Call the handler with the two arguments, the operation, and the size of
3375 // the result.
3376 llvm::Value *handlerArgs[] = {
3377 lhs,
3378 rhs,
3379 Builder.getInt8(OpID),
3380 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
3381 };
3382 llvm::Value *handlerResult =
3383 CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
3384
3385 // Truncate the result back to the desired size.
3386 handlerResult = Builder.CreateTrunc(handlerResult, opTy);
3387 Builder.CreateBr(continueBB);
3388
3389 Builder.SetInsertPoint(continueBB);
3390 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
3391 phi->addIncoming(result, initialBB);
3392 phi->addIncoming(handlerResult, overflowBB);
3393
3394 return phi;
3395}
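Shape of an overflow handler matching the call emitted above (hypothetical name; the IR function type is i64 (i64, i64, i8, i8, ...)):

    long long my_overflow_handler(long long lhs, long long rhs,
                                  char opID, char bitWidth, ...);
    /* opID encodes (op << 1) | isSigned; the i64 result is truncated back
       to the operand width at the phi in "nooverflow" */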
3396
3397/// Emit pointer + index arithmetic.
3398static Value *emitPointerArithmetic(CodeGenFunction &CGF,
3399 const BinOpInfo &op,
3400 bool isSubtraction) {
3401 // Must have binary (not unary) expr here. Unary pointer
3402 // increment/decrement doesn't use this path.
3403 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
6
Field 'E' is a 'BinaryOperator'
3404
3405 Value *pointer = op.LHS;
3406 Expr *pointerOperand = expr->getLHS();
3407 Value *index = op.RHS;
3408 Expr *indexOperand = expr->getRHS();
3409
3410 // In a subtraction, the LHS is always the pointer.
3411 if (!isSubtraction
6.1
'isSubtraction' is true
&& !pointer->getType()->isPointerTy()) {
3412 std::swap(pointer, index);
3413 std::swap(pointerOperand, indexOperand);
3414 }
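Example of the canonicalization (illustrative): for an addition written integer-first, the operands arrive reversed relative to what the GEP needs,

    int *at(int n, int *p) { return n + p; }   /* op.LHS = n, op.RHS = p;
                                                  swapped here so that `pointer`
                                                  is p and `index` is n */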
3415
3416 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
3417
3418 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
7
The object is a 'IntegerType'
3419 auto &DL = CGF.CGM.getDataLayout();
3420 auto PtrTy = cast<llvm::PointerType>(pointer->getType());
8
The object is a 'PointerType'
3421
3422 // Some versions of glibc and gcc use idioms (particularly in their malloc
3423 // routines) that add a pointer-sized integer (known to be a pointer value)
3424 // to a null pointer in order to cast the value back to an integer or as
3425 // part of a pointer alignment algorithm. This is undefined behavior, but
3426 // we'd like to be able to compile programs that use it.
3427 //
3428 // Normally, we'd generate a GEP with a null-pointer base here in response
3429 // to that code, but it's also UB to dereference a pointer created that
3430 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
3431 // generate a direct cast of the integer value to a pointer.
3432 //
3433 // The idiom (p = nullptr + N) is not met if any of the following are true:
3434 //
3435 // The operation is subtraction.
3436 // The index is not pointer-sized.
3437 // The pointer type is not byte-sized.
3438 //
3439 if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
9
Assuming the condition is false
10
Taking false branch
3440 op.Opcode,
3441 expr->getLHS(),
3442 expr->getRHS()))
3443 return CGF.Builder.CreateIntToPtr(index, pointer->getType());
3444
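The tolerated idiom, roughly as it appears in old allocator code (assumed example): an integer-to-pointer "cast" spelled as null-pointer arithmetic,

    char *as_ptr(unsigned long n) { return (char *)0 + n; }
    /* isNullPointerArithmeticExtension matches this addition, so the emitter
       produces inttoptr(n) instead of a null-based GEP */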
3445 if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
11
Assuming the condition is false
12
Taking false branch
3446 // Zero-extend or sign-extend the pointer value according to
3447 // whether the index is signed or not.
3448 index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
3449 "idx.ext");
3450 }
3451
3452 // If this is subtraction, negate the index.
3453 if (isSubtraction
12.1
'isSubtraction' is true
)
13
Taking true branch
3454 index = CGF.Builder.CreateNeg(index, "idx.neg");
3455
3456 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
14
Assuming the condition is false
15
Taking false branch
3457 CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
3458 /*Accessed*/ false);
3459
3460 const PointerType *pointerType
3461 = pointerOperand->getType()->getAs<PointerType>();
16
Assuming the object is a 'PointerType'
3462 if (!pointerType) {
17
Assuming 'pointerType' is non-null
18
Taking false branch
3463 QualType objectType = pointerOperand->getType()
3464 ->castAs<ObjCObjectPointerType>()
3465 ->getPointeeType();
3466 llvm::Value *objectSize
3467 = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
3468
3469 index = CGF.Builder.CreateMul(index, objectSize);
3470
3471 Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
3472 result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr");
3473 return CGF.Builder.CreateBitCast(result, pointer->getType());
3474 }
3475
3476 QualType elementType = pointerType->getPointeeType();
3477 if (const VariableArrayType *vla
22.1
'vla' is null
3478 = CGF.getContext().getAsVariableArrayType(elementType)) {
19
Calling 'ASTContext::getAsVariableArrayType'
22
Returning from 'ASTContext::getAsVariableArrayType'
3479    // The element count here is the total number of non-VLA elements.
3480    llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
3481
3482    // Effectively, the multiply by the VLA size is part of the GEP.
3483    // GEP indexes are signed, and scaling an index isn't permitted to
3484    // signed-overflow, so we use the same semantics for our explicit
3485    // multiply.  We suppress this if overflow is not undefined behavior.
3486    if (CGF.getLangOpts().isSignedOverflowDefined()) {
3487      index = CGF.Builder.CreateMul(index, numElements, "vla.index");
3488      pointer = CGF.Builder.CreateGEP(
3489          pointer->getType()->getPointerElementType(), pointer, index,
3490          "add.ptr");
3491    } else {
3492      index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
3493      pointer =
3494          CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
3495                                     op.E->getExprLoc(), "add.ptr");
3496    }
3497    return pointer;
3498  }
3499
3500  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
3501  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
3502  // future proof.
3503  if (elementType->isVoidType() || elementType->isFunctionType()) {
23
Calling 'Type::isVoidType'
30
Returning from 'Type::isVoidType'
31
Calling 'Type::isFunctionType'
34
Returning from 'Type::isFunctionType'
35
Taking false branch
3504    Value *result = CGF.EmitCastToVoidPtr(pointer);
3505    result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr");
3506    return CGF.Builder.CreateBitCast(result, pointer->getType());
3507  }
3508
3509  if (CGF.getLangOpts().isSignedOverflowDefined())
36
Calling 'LangOptions::isSignedOverflowDefined'
39
Returning from 'LangOptions::isSignedOverflowDefined'
40
Taking false branch
3510    return CGF.Builder.CreateGEP(
3511        pointer->getType()->getPointerElementType(), pointer, index, "add.ptr");
3512
3513  return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
41
Calling 'CodeGenFunction::EmitCheckedInBoundsGEP'
3514                                    op.E->getExprLoc(), "add.ptr");
3515}
3516
3517// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
3518// Addend. Use negMul and negAdd to negate the first operand of the Mul or
3519// the add operand respectively. This allows fmuladd to represent a*b-c, or
3520// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
3521// efficient operations.
3522static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
3523                           const CodeGenFunction &CGF, CGBuilderTy &Builder,
3524                           bool negMul, bool negAdd) {
3525  assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.")(static_cast<void> (0));
3526
3527  Value *MulOp0 = MulOp->getOperand(0);
3528  Value *MulOp1 = MulOp->getOperand(1);
3529  if (negMul)
3530    MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
3531  if (negAdd)
3532    Addend = Builder.CreateFNeg(Addend, "neg");
3533
3534  Value *FMulAdd = nullptr;
3535  if (Builder.getIsFPConstrained()) {
3536    assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&(static_cast<void> (0))
3537           "Only constrained operation should be created when Builder is in FP "(static_cast<void> (0))
3538           "constrained mode")(static_cast<void> (0));
3539    FMulAdd = Builder.CreateConstrainedFPCall(
3540        CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
3541                             Addend->getType()),
3542        {MulOp0, MulOp1, Addend});
3543  } else {
3544    FMulAdd = Builder.CreateCall(
3545        CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
3546        {MulOp0, MulOp1, Addend});
3547  }
3548  MulOp->eraseFromParent();
3549
3550  return FMulAdd;
3551}
3552
3553// Check whether it would be legal to emit an fmuladd intrinsic call to
3554// represent op and if so, build the fmuladd.
3555//
3556// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
3557// Does NOT check the type of the operation - it's assumed that this function
3558// will be called from contexts where it's known that the type is contractable.
3559static Value* tryEmitFMulAdd(const BinOpInfo &op,
3560                             const CodeGenFunction &CGF, CGBuilderTy &Builder,
3561                             bool isSub=false) {
3562
3563  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||(static_cast<void> (0))
3564          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&(static_cast<void> (0))
3565         "Only fadd/fsub can be the root of an fmuladd.")(static_cast<void> (0));
3566
3567  // Check whether this op is marked as fusable.
3568  if (!op.FPFeatures.allowFPContractWithinStatement())
3569    return nullptr;
3570
3571  // We have a potentially fusable op. Look for a mul on one of the operands.
3572  // Also, make sure that the mul result isn't used directly. In that case,
3573  // there's no point creating a muladd operation.
3574  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
3575    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3576        LHSBinOp->use_empty())
3577      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
3578  }
3579  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
3580    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3581        RHSBinOp->use_empty())
3582      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
3583  }
3584
3585  if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) {
3586    if (LHSBinOp->getIntrinsicID() ==
3587            llvm::Intrinsic::experimental_constrained_fmul &&
3588        LHSBinOp->use_empty())
3589      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
3590  }
3591  if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) {
3592    if (RHSBinOp->getIntrinsicID() ==
3593            llvm::Intrinsic::experimental_constrained_fmul &&
3594        RHSBinOp->use_empty())
3595      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
3596  }
3597
3598  return nullptr;
3599}
3600
3601Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
3602  if (op.LHS->getType()->isPointerTy() ||
3603      op.RHS->getType()->isPointerTy())
3604    return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);
3605
3606  if (op.Ty->isSignedIntegerOrEnumerationType()) {
3607    switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3608    case LangOptions::SOB_Defined:
3609      return Builder.CreateAdd(op.LHS, op.RHS, "add");
3610    case LangOptions::SOB_Undefined:
3611      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3612        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3613      LLVM_FALLTHROUGH[[gnu::fallthrough]];
3614    case LangOptions::SOB_Trapping:
3615      if (CanElideOverflowCheck(CGF.getContext(), op))
3616        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3617      return EmitOverflowCheckedBinOp(op);
3618    }
3619  }
3620
3621  if (op.Ty->isConstantMatrixType()) {
3622    llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
3623    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
3624    return MB.CreateAdd(op.LHS, op.RHS);
3625  }
3626
3627  if (op.Ty->isUnsignedIntegerType() &&
3628      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3629      !CanElideOverflowCheck(CGF.getContext(), op))
3630    return EmitOverflowCheckedBinOp(op);
3631
3632  if (op.LHS->getType()->isFPOrFPVectorTy()) {
3633    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
3634    // Try to form an fmuladd.
3635    if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
3636      return FMulAdd;
3637
3638    return Builder.CreateFAdd(op.LHS, op.RHS, "add");
3639  }
3640
3641  if (op.isFixedPointOp())
3642    return EmitFixedPointBinOp(op);
3643
3644  return Builder.CreateAdd(op.LHS, op.RHS, "add");
3645}
3646
3647/// The resulting value must be calculated with exact precision, so the operands
3648/// may not be the same type.
3649Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
3650  using llvm::APSInt;
3651  using llvm::ConstantInt;
3652
3653  // This is either a binary operation where at least one of the operands is
3654  // a fixed-point type, or a unary operation where the operand is a fixed-point
3655  // type. The result type of a binary operation is determined by
3656  // Sema::handleFixedPointConversions().
3657  QualType ResultTy = op.Ty;
3658  QualType LHSTy, RHSTy;
3659  if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
3660    RHSTy = BinOp->getRHS()->getType();
3661    if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
3662      // For compound assignment, the effective type of the LHS at this point
3663      // is the computation LHS type, not the actual LHS type, and the final
3664      // result type is not the type of the expression but rather the
3665      // computation result type.
3666      LHSTy = CAO->getComputationLHSType();
3667      ResultTy = CAO->getComputationResultType();
3668    } else
3669      LHSTy = BinOp->getLHS()->getType();
3670  } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
3671    LHSTy = UnOp->getSubExpr()->getType();
3672    RHSTy = UnOp->getSubExpr()->getType();
3673  }
3674  ASTContext &Ctx = CGF.getContext();
3675  Value *LHS = op.LHS;
3676  Value *RHS = op.RHS;
3677
3678  auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
3679  auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
3680  auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
3681  auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
3682
3683  // Perform the actual operation.
3684  Value *Result;
3685  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3686  switch (op.Opcode) {
3687  case BO_AddAssign:
3688  case BO_Add:
3689    Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
3690    break;
3691  case BO_SubAssign:
3692  case BO_Sub:
3693    Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
3694    break;
3695  case BO_MulAssign:
3696  case BO_Mul:
3697    Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
3698    break;
3699  case BO_DivAssign:
3700  case BO_Div:
3701    Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
3702    break;
3703  case BO_ShlAssign:
3704  case BO_Shl:
3705    Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
3706    break;
3707  case BO_ShrAssign:
3708  case BO_Shr:
3709    Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
3710    break;
3711  case BO_LT:
3712    return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
3713  case BO_GT:
3714    return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
3715  case BO_LE:
3716    return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
3717  case BO_GE:
3718    return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
3719  case BO_EQ:
3720    // For equality operations, we assume any padding bits on unsigned types are
3721    // zero'd out. They could be overwritten through non-saturating operations
3722    // that cause overflow, but this leads to undefined behavior.
3723    return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
3724  case BO_NE:
3725    return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
3726  case BO_Cmp:
3727  case BO_LAnd:
3728  case BO_LOr:
3729    llvm_unreachable("Found unimplemented fixed point binary operation")__builtin_unreachable();
3730  case BO_PtrMemD:
3731  case BO_PtrMemI:
3732  case BO_Rem:
3733  case BO_Xor:
3734  case BO_And:
3735  case BO_Or:
3736  case BO_Assign:
3737  case BO_RemAssign:
3738  case BO_AndAssign:
3739  case BO_XorAssign:
3740  case BO_OrAssign:
3741  case BO_Comma:
3742    llvm_unreachable("Found unsupported binary operation for fixed point types.")__builtin_unreachable();
3743  }
3744
3745  bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
3746                 BinaryOperator::isShiftAssignOp(op.Opcode);
3747  // Convert to the result type.
3748  return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
3749                                                      : CommonFixedSema,
3750                                      ResultFixedSema);
3751}
3752
3753Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
3754  // The LHS is always a pointer if either side is.
3755  if (!op.LHS->getType()->isPointerTy()) {
3
Taking false branch
3756    if (op.Ty->isSignedIntegerOrEnumerationType()) {
3757      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3758      case LangOptions::SOB_Defined:
3759        return Builder.CreateSub(op.LHS, op.RHS, "sub");
3760      case LangOptions::SOB_Undefined:
3761        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3762          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3763        LLVM_FALLTHROUGH[[gnu::fallthrough]];
3764      case LangOptions::SOB_Trapping:
3765        if (CanElideOverflowCheck(CGF.getContext(), op))
3766          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3767        return EmitOverflowCheckedBinOp(op);
3768      }
3769    }
3770
3771    if (op.Ty->isConstantMatrixType()) {
3772      llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
3773      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
3774      return MB.CreateSub(op.LHS, op.RHS);
3775    }
3776
3777    if (op.Ty->isUnsignedIntegerType() &&
3778        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3779        !CanElideOverflowCheck(CGF.getContext(), op))
3780      return EmitOverflowCheckedBinOp(op);
3781
3782    if (op.LHS->getType()->isFPOrFPVectorTy()) {
3783      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
3784      // Try to form an fmuladd.
3785      if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
3786        return FMulAdd;
3787      return Builder.CreateFSub(op.LHS, op.RHS, "sub");
3788    }
3789
3790    if (op.isFixedPointOp())
3791      return EmitFixedPointBinOp(op);
3792
3793    return Builder.CreateSub(op.LHS, op.RHS, "sub");
3794  }
3795
3796  // If the RHS is not a pointer, then we have normal pointer
3797  // arithmetic.
3798  if (!op.RHS->getType()->isPointerTy())
4
Taking true branch
3799 return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);
5
Calling 'emitPointerArithmetic'
3800 3801 // Otherwise, this is a pointer subtraction. 3802 3803 // Do the raw subtraction part. 3804 llvm::Value *LHS 3805 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast"); 3806 llvm::Value *RHS 3807 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast"); 3808 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub"); 3809 3810 // Okay, figure out the element size. 3811 const BinaryOperator *expr = cast<BinaryOperator>(op.E); 3812 QualType elementType = expr->getLHS()->getType()->getPointeeType(); 3813 3814 llvm::Value *divisor = nullptr; 3815 3816 // For a variable-length array, this is going to be non-constant. 3817 if (const VariableArrayType *vla 3818 = CGF.getContext().getAsVariableArrayType(elementType)) { 3819 auto VlaSize = CGF.getVLASize(vla); 3820 elementType = VlaSize.Type; 3821 divisor = VlaSize.NumElts; 3822 3823 // Scale the number of non-VLA elements by the non-VLA element size. 3824 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType); 3825 if (!eltSize.isOne()) 3826 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor); 3827 3828 // For everything elese, we can just compute it, safe in the 3829 // assumption that Sema won't let anything through that we can't 3830 // safely compute the size of. 3831 } else { 3832 CharUnits elementSize; 3833 // Handle GCC extension for pointer arithmetic on void* and 3834 // function pointer types. 3835 if (elementType->isVoidType() || elementType->isFunctionType()) 3836 elementSize = CharUnits::One(); 3837 else 3838 elementSize = CGF.getContext().getTypeSizeInChars(elementType); 3839 3840 // Don't even emit the divide for element size of 1. 3841 if (elementSize.isOne()) 3842 return diffInChars; 3843 3844 divisor = CGF.CGM.getSize(elementSize); 3845 } 3846 3847 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since 3848 // pointer difference in C is only defined in the case where both operands 3849 // are pointing to elements of an array. 3850 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div"); 3851} 3852 3853Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) { 3854 llvm::IntegerType *Ty; 3855 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType())) 3856 Ty = cast<llvm::IntegerType>(VT->getElementType()); 3857 else 3858 Ty = cast<llvm::IntegerType>(LHS->getType()); 3859 return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1); 3860} 3861 3862Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS, 3863 const Twine &Name) { 3864 llvm::IntegerType *Ty; 3865 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType())) 3866 Ty = cast<llvm::IntegerType>(VT->getElementType()); 3867 else 3868 Ty = cast<llvm::IntegerType>(LHS->getType()); 3869 3870 if (llvm::isPowerOf2_64(Ty->getBitWidth())) 3871 return Builder.CreateAnd(RHS, GetWidthMinusOneValue(LHS, RHS), Name); 3872 3873 return Builder.CreateURem( 3874 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name); 3875} 3876 3877Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) { 3878 // TODO: This misses out on the sanitizer check below. 3879 if (Ops.isFixedPointOp()) 3880 return EmitFixedPointBinOp(Ops); 3881 3882 // LLVM requires the LHS and RHS to be the same type: promote or truncate the 3883 // RHS to the same size as the LHS. 
3884 Value *RHS = Ops.RHS; 3885 if (Ops.LHS->getType() != RHS->getType()) 3886 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); 3887 3888 bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) && 3889 Ops.Ty->hasSignedIntegerRepresentation() && 3890 !CGF.getLangOpts().isSignedOverflowDefined() && 3891 !CGF.getLangOpts().CPlusPlus20; 3892 bool SanitizeUnsignedBase = 3893 CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) && 3894 Ops.Ty->hasUnsignedIntegerRepresentation(); 3895 bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase; 3896 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent); 3897 // OpenCL 6.3j: shift values are effectively % word size of LHS. 3898 if (CGF.getLangOpts().OpenCL) 3899 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask"); 3900 else if ((SanitizeBase || SanitizeExponent) && 3901 isa<llvm::IntegerType>(Ops.LHS->getType())) { 3902 CodeGenFunction::SanitizerScope SanScope(&CGF); 3903 SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks; 3904 llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS); 3905 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne); 3906 3907 if (SanitizeExponent) { 3908 Checks.push_back( 3909 std::make_pair(ValidExponent, SanitizerKind::ShiftExponent)); 3910 } 3911 3912 if (SanitizeBase) { 3913 // Check whether we are shifting any non-zero bits off the top of the 3914 // integer. We only emit this check if exponent is valid - otherwise 3915 // instructions below will have undefined behavior themselves. 3916 llvm::BasicBlock *Orig = Builder.GetInsertBlock(); 3917 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); 3918 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check"); 3919 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont); 3920 llvm::Value *PromotedWidthMinusOne = 3921 (RHS == Ops.RHS) ? WidthMinusOne 3922 : GetWidthMinusOneValue(Ops.LHS, RHS); 3923 CGF.EmitBlock(CheckShiftBase); 3924 llvm::Value *BitsShiftedOff = Builder.CreateLShr( 3925 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros", 3926 /*NUW*/ true, /*NSW*/ true), 3927 "shl.check"); 3928 if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) { 3929 // In C99, we are not permitted to shift a 1 bit into the sign bit. 3930 // Under C++11's rules, shifting a 1 bit into the sign bit is 3931 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't 3932 // define signed left shifts, so we use the C99 and C++11 rules there). 3933 // Unsigned shifts can always shift into the top bit. 3934 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1); 3935 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One); 3936 } 3937 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0); 3938 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero); 3939 CGF.EmitBlock(Cont); 3940 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2); 3941 BaseCheck->addIncoming(Builder.getTrue(), Orig); 3942 BaseCheck->addIncoming(ValidBase, CheckShiftBase); 3943 Checks.push_back(std::make_pair( 3944 BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase 3945 : SanitizerKind::UnsignedShiftBase)); 3946 } 3947 3948 assert(!Checks.empty())(static_cast<void> (0)); 3949 EmitBinOpCheck(Checks, Ops); 3950 } 3951 3952 return Builder.CreateShl(Ops.LHS, RHS, "shl"); 3953} 3954 3955Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) { 3956 // TODO: This misses out on the sanitizer check below. 
3957 if (Ops.isFixedPointOp()) 3958 return EmitFixedPointBinOp(Ops); 3959 3960 // LLVM requires the LHS and RHS to be the same type: promote or truncate the 3961 // RHS to the same size as the LHS. 3962 Value *RHS = Ops.RHS; 3963 if (Ops.LHS->getType() != RHS->getType()) 3964 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); 3965 3966 // OpenCL 6.3j: shift values are effectively % word size of LHS. 3967 if (CGF.getLangOpts().OpenCL) 3968 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask"); 3969 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) && 3970 isa<llvm::IntegerType>(Ops.LHS->getType())) { 3971 CodeGenFunction::SanitizerScope SanScope(&CGF); 3972 llvm::Value *Valid = 3973 Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS)); 3974 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops); 3975 } 3976 3977 if (Ops.Ty->hasUnsignedIntegerRepresentation()) 3978 return Builder.CreateLShr(Ops.LHS, RHS, "shr"); 3979 return Builder.CreateAShr(Ops.LHS, RHS, "shr"); 3980} 3981 3982enum IntrinsicType { VCMPEQ, VCMPGT }; 3983// return corresponding comparison intrinsic for given vector type 3984static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, 3985 BuiltinType::Kind ElemKind) { 3986 switch (ElemKind) { 3987 default: llvm_unreachable("unexpected element type")__builtin_unreachable(); 3988 case BuiltinType::Char_U: 3989 case BuiltinType::UChar: 3990 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p : 3991 llvm::Intrinsic::ppc_altivec_vcmpgtub_p; 3992 case BuiltinType::Char_S: 3993 case BuiltinType::SChar: 3994 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p : 3995 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p; 3996 case BuiltinType::UShort: 3997 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p : 3998 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p; 3999 case BuiltinType::Short: 4000 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p : 4001 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p; 4002 case BuiltinType::UInt: 4003 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p : 4004 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p; 4005 case BuiltinType::Int: 4006 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p : 4007 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p; 4008 case BuiltinType::ULong: 4009 case BuiltinType::ULongLong: 4010 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p : 4011 llvm::Intrinsic::ppc_altivec_vcmpgtud_p; 4012 case BuiltinType::Long: 4013 case BuiltinType::LongLong: 4014 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p : 4015 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p; 4016 case BuiltinType::Float: 4017 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p : 4018 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p; 4019 case BuiltinType::Double: 4020 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p : 4021 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p; 4022 case BuiltinType::UInt128: 4023 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p 4024 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p; 4025 case BuiltinType::Int128: 4026 return (IT == VCMPEQ) ? 
llvm::Intrinsic::ppc_altivec_vcmpequq_p 4027 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p; 4028 } 4029} 4030 4031Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E, 4032 llvm::CmpInst::Predicate UICmpOpc, 4033 llvm::CmpInst::Predicate SICmpOpc, 4034 llvm::CmpInst::Predicate FCmpOpc, 4035 bool IsSignaling) { 4036 TestAndClearIgnoreResultAssign(); 4037 Value *Result; 4038 QualType LHSTy = E->getLHS()->getType(); 4039 QualType RHSTy = E->getRHS()->getType(); 4040 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) { 4041 assert(E->getOpcode() == BO_EQ ||(static_cast<void> (0)) 4042 E->getOpcode() == BO_NE)(static_cast<void> (0)); 4043 Value *LHS = CGF.EmitScalarExpr(E->getLHS()); 4044 Value *RHS = CGF.EmitScalarExpr(E->getRHS()); 4045 Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison( 4046 CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE); 4047 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) { 4048 BinOpInfo BOInfo = EmitBinOps(E); 4049 Value *LHS = BOInfo.LHS; 4050 Value *RHS = BOInfo.RHS; 4051 4052 // If AltiVec, the comparison results in a numeric type, so we use 4053 // intrinsics comparing vectors and giving 0 or 1 as a result 4054 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) { 4055 // constants for mapping CR6 register bits to predicate result 4056 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6; 4057 4058 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic; 4059 4060 // in several cases vector arguments order will be reversed 4061 Value *FirstVecArg = LHS, 4062 *SecondVecArg = RHS; 4063 4064 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType(); 4065 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind(); 4066 4067 switch(E->getOpcode()) { 4068 default: llvm_unreachable("is not a comparison operation")__builtin_unreachable(); 4069 case BO_EQ: 4070 CR6 = CR6_LT; 4071 ID = GetIntrinsic(VCMPEQ, ElementKind); 4072 break; 4073 case BO_NE: 4074 CR6 = CR6_EQ; 4075 ID = GetIntrinsic(VCMPEQ, ElementKind); 4076 break; 4077 case BO_LT: 4078 CR6 = CR6_LT; 4079 ID = GetIntrinsic(VCMPGT, ElementKind); 4080 std::swap(FirstVecArg, SecondVecArg); 4081 break; 4082 case BO_GT: 4083 CR6 = CR6_LT; 4084 ID = GetIntrinsic(VCMPGT, ElementKind); 4085 break; 4086 case BO_LE: 4087 if (ElementKind == BuiltinType::Float) { 4088 CR6 = CR6_LT; 4089 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p; 4090 std::swap(FirstVecArg, SecondVecArg); 4091 } 4092 else { 4093 CR6 = CR6_EQ; 4094 ID = GetIntrinsic(VCMPGT, ElementKind); 4095 } 4096 break; 4097 case BO_GE: 4098 if (ElementKind == BuiltinType::Float) { 4099 CR6 = CR6_LT; 4100 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p; 4101 } 4102 else { 4103 CR6 = CR6_EQ; 4104 ID = GetIntrinsic(VCMPGT, ElementKind); 4105 std::swap(FirstVecArg, SecondVecArg); 4106 } 4107 break; 4108 } 4109 4110 Value *CR6Param = Builder.getInt32(CR6); 4111 llvm::Function *F = CGF.CGM.getIntrinsic(ID); 4112 Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg}); 4113 4114 // The result type of intrinsic may not be same as E->getType(). 4115 // If E->getType() is not BoolTy, EmitScalarConversion will do the 4116 // conversion work. If E->getType() is BoolTy, EmitScalarConversion will 4117 // do nothing, if ResultTy is not i1 at the same time, it will cause 4118 // crash later. 
4119 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
4120 if (ResultTy->getBitWidth() > 1 &&
4121 E->getType() == CGF.getContext().BoolTy)
4122 Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
4123 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
4124 E->getExprLoc());
4125 }
4126
4127 if (BOInfo.isFixedPointOp()) {
4128 Result = EmitFixedPointBinOp(BOInfo);
4129 } else if (LHS->getType()->isFPOrFPVectorTy()) {
4130 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
4131 if (!IsSignaling)
4132 Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
4133 else
4134 Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
4135 } else if (LHSTy->hasSignedIntegerRepresentation()) {
4136 Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
4137 } else {
4138 // Unsigned integers and pointers.
4139
4140 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
4141 !isa<llvm::ConstantPointerNull>(LHS) &&
4142 !isa<llvm::ConstantPointerNull>(RHS)) {
4143
4144 // Dynamic information is required to be stripped for comparisons,
4145 // because it could leak the dynamic information. Based on comparisons
4146 // of pointers to dynamic objects, the optimizer can replace one pointer
4147 // with another, which might be incorrect in presence of invariant
4148 // groups. Comparison with null is safe because null does not carry any
4149 // dynamic information.
4150 if (LHSTy.mayBeDynamicClass())
4151 LHS = Builder.CreateStripInvariantGroup(LHS);
4152 if (RHSTy.mayBeDynamicClass())
4153 RHS = Builder.CreateStripInvariantGroup(RHS);
4154 }
4155
4156 Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
4157 }
4158
4159 // If this is a vector comparison, sign extend the result to the appropriate
4160 // vector integer type and return it (don't convert to bool).
4161 if (LHSTy->isVectorType())
4162 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
4163
4164 } else {
4165 // Complex Comparison: can only be an equality comparison.
4166 CodeGenFunction::ComplexPairTy LHS, RHS;
4167 QualType CETy;
4168 if (auto *CTy = LHSTy->getAs<ComplexType>()) {
4169 LHS = CGF.EmitComplexExpr(E->getLHS());
4170 CETy = CTy->getElementType();
4171 } else {
4172 LHS.first = Visit(E->getLHS());
4173 LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
4174 CETy = LHSTy;
4175 }
4176 if (auto *CTy = RHSTy->getAs<ComplexType>()) {
4177 RHS = CGF.EmitComplexExpr(E->getRHS());
4178 assert(CGF.getContext().hasSameUnqualifiedType(CETy,
4179 CTy->getElementType()) &&
4180 "The element types must always match.");
4181 (void)CTy;
4182 } else {
4183 RHS.first = Visit(E->getRHS());
4184 RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
4185 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
4186 "The element types must always match.");
4187 }
4188
4189 Value *ResultR, *ResultI;
4190 if (CETy->isRealFloatingType()) {
4191 // As complex comparisons can only be equality comparisons, they
4192 // are never signaling comparisons.
4193 ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
4194 ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
4195 } else {
4196 // Complex comparisons can only be equality comparisons. As such, signed
4197 // and unsigned opcodes are the same.
4198 ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
4199 ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
4200 }
4201
4202 if (E->getOpcode() == BO_EQ) {
4203 Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
4204 } else {
4205 assert(E->getOpcode() == BO_NE &&
4206 "Complex comparison other than == or != ?");
4207 Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
4208 }
4209 }
4210
4211 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
4212 E->getExprLoc());
4213}
4214
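For the complex arm just above, equality is decided per component ("cmp.r" / "cmp.i") and then recombined. A plain C++ model of that value flow (a sketch of the semantics, not the emitter itself):

#include <complex>
// BO_EQ: both components must match, combined with AND ("and.ri").
// BO_NE would compare with != per component and combine with OR ("or.ri").
static bool complexEq(std::complex<double> l, std::complex<double> r) {
  bool cmpR = l.real() == r.real(); // "cmp.r"
  bool cmpI = l.imag() == r.imag(); // "cmp.i"
  return cmpR && cmpI;              // "and.ri"
}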
4215Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
4216 bool Ignore = TestAndClearIgnoreResultAssign();
4217
4218 Value *RHS;
4219 LValue LHS;
4220
4221 switch (E->getLHS()->getType().getObjCLifetime()) {
4222 case Qualifiers::OCL_Strong:
4223 std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
4224 break;
4225
4226 case Qualifiers::OCL_Autoreleasing:
4227 std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
4228 break;
4229
4230 case Qualifiers::OCL_ExplicitNone:
4231 std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
4232 break;
4233
4234 case Qualifiers::OCL_Weak:
4235 RHS = Visit(E->getRHS());
4236 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
4237 RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore);
4238 break;
4239
4240 case Qualifiers::OCL_None:
4241 // __block variables need to have the rhs evaluated first, plus
4242 // this should improve codegen just a little.
4243 RHS = Visit(E->getRHS());
4244 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
4245
4246 // Store the value into the LHS. Bit-fields are handled specially
4247 // because the result is altered by the store, i.e., [C99 6.5.16p1]
4248 // 'An assignment expression has the value of the left operand after
4249 // the assignment...'.
4250 if (LHS.isBitField()) {
4251 CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
4252 } else {
4253 CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
4254 CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
4255 }
4256 }
4257
4258 // If the result is clearly ignored, return now.
4259 if (Ignore)
4260 return nullptr;
4261
4262 // The result of an assignment in C is the assigned r-value.
4263 if (!CGF.getLangOpts().CPlusPlus)
4264 return RHS;
4265
4266 // If the lvalue is non-volatile, return the computed value of the assignment.
4267 if (!LHS.isVolatileQualified())
4268 return RHS;
4269
4270 // Otherwise, reload the value.
4271 return EmitLoadOfLValue(LHS, E->getExprLoc());
4272}
4273
4274Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
4275 // Perform vector logical and on comparisons with zero vectors.
4276 if (E->getType()->isVectorType()) {
4277 CGF.incrementProfileCounter(E);
4278
4279 Value *LHS = Visit(E->getLHS());
4280 Value *RHS = Visit(E->getRHS());
4281 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4282 if (LHS->getType()->isFPOrFPVectorTy()) {
4283 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4284 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4285 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4286 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4287 } else {
4288 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4289 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4290 }
4291 Value *And = Builder.CreateAnd(LHS, RHS);
4292 return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
4293 }
4294
4295 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4296 llvm::Type *ResTy = ConvertType(E->getType());
4297
4298 // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
4299 // If we have 1 && X, just emit X without inserting the control flow.
4300 bool LHSCondVal;
4301 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4302 if (LHSCondVal) { // If we have 1 && X, just emit X.
4303 CGF.incrementProfileCounter(E);
4304
4305 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4306
4307 // If we're generating for profiling or coverage, generate a branch to a
4308 // block that increments the RHS counter needed to track branch condition
4309 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4310 // "FalseBlock" after the increment is done.
4311 if (InstrumentRegions &&
4312 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4313 llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
4314 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4315 Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
4316 CGF.EmitBlock(RHSBlockCnt);
4317 CGF.incrementProfileCounter(E->getRHS());
4318 CGF.EmitBranch(FBlock);
4319 CGF.EmitBlock(FBlock);
4320 }
4321
4322 // ZExt result to int or bool.
4323 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
4324 }
4325
4326 // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
4327 if (!CGF.ContainsLabel(E->getRHS()))
4328 return llvm::Constant::getNullValue(ResTy);
4329 }
4330
4331 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
4332 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
4333
4334 CodeGenFunction::ConditionalEvaluation eval(CGF);
4335
4336 // Branch on the LHS first. If it is false, go to the failure (cont) block.
4337 CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
4338 CGF.getProfileCount(E->getRHS()));
4339
4340 // Any edges into the ContBlock are now from an (indeterminate number of)
4341 // edges from this first condition. All of these values will be false. Start
4342 // setting up the PHI node in the Cont Block for this.
4343 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4344 "", ContBlock);
4345 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4346 PI != PE; ++PI)
4347 PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
4348
4349 eval.begin(CGF);
4350 CGF.EmitBlock(RHSBlock);
4351 CGF.incrementProfileCounter(E);
4352 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4353 eval.end(CGF);
4354
4355 // Reacquire the RHS block, as there may be subblocks inserted.
4356 RHSBlock = Builder.GetInsertBlock();
4357
4358 // If we're generating for profiling or coverage, generate a branch on the
4359 // RHS to a block that increments the RHS true counter needed to track branch
4360 // condition coverage.
4361 if (InstrumentRegions &&
4362 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4363 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4364 Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
4365 CGF.EmitBlock(RHSBlockCnt);
4366 CGF.incrementProfileCounter(E->getRHS());
4367 CGF.EmitBranch(ContBlock);
4368 PN->addIncoming(RHSCond, RHSBlockCnt);
4369 }
4370
4371 // Emit an unconditional branch from this block to ContBlock.
4372 {
4373 // There is no need to emit line number for unconditional branch.
4374 auto NL = ApplyDebugLocation::CreateEmpty(CGF);
4375 CGF.EmitBlock(ContBlock);
4376 }
4377 // Insert an entry into the phi node for the edge with the value of RHSCond.
4378 PN->addIncoming(RHSCond, RHSBlock);
4379
4380 // Artificial location to preserve the scope information
4381 {
4382 auto NL = ApplyDebugLocation::CreateArtificial(CGF);
4383 PN->setDebugLoc(Builder.getCurrentDebugLocation());
4384 }
4385
4386 // ZExt result to int.
4387 return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
4388}
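VisitBinLAnd above builds the classic short-circuit CFG: branch on the LHS, evaluate the RHS only in land.rhs, and merge through an i1 phi in land.end that is false on every edge that skipped the RHS. A plain C++ model of the value flow (a sketch of the semantics, not the emitted IR):

static int logicalAnd(int a, int b) {
  bool result = false;      // the phi's value on edges that bypass the RHS
  if (a != 0)               // EmitBranchOnBoolExpr(LHS, land.rhs, land.end)
    result = (b != 0);      // land.rhs: EvaluateExprAsBool(RHS)
  return result ? 1 : 0;    // land.end: phi, then zext i1 -> i32 "land.ext"
}

VisitBinLOr below is the mirror image: the phi collects true from every edge that skipped the RHS.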
4389
4390Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
4391 // Perform vector logical or on comparisons with zero vectors.
4392 if (E->getType()->isVectorType()) {
4393 CGF.incrementProfileCounter(E);
4394
4395 Value *LHS = Visit(E->getLHS());
4396 Value *RHS = Visit(E->getRHS());
4397 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4398 if (LHS->getType()->isFPOrFPVectorTy()) {
4399 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4400 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4401 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4402 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4403 } else {
4404 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4405 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4406 }
4407 Value *Or = Builder.CreateOr(LHS, RHS);
4408 return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
4409 }
4410
4411 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4412 llvm::Type *ResTy = ConvertType(E->getType());
4413
4414 // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
4415 // If we have 0 || X, just emit X without inserting the control flow.
4416 bool LHSCondVal;
4417 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4418 if (!LHSCondVal) { // If we have 0 || X, just emit X.
4419 CGF.incrementProfileCounter(E);
4420
4421 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4422
4423 // If we're generating for profiling or coverage, generate a branch to a
4424 // block that increments the RHS counter needed to track branch condition
4425 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4426 // "FalseBlock" after the increment is done.
4427 if (InstrumentRegions &&
4428 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4429 llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
4430 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
4431 Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
4432 CGF.EmitBlock(RHSBlockCnt);
4433 CGF.incrementProfileCounter(E->getRHS());
4434 CGF.EmitBranch(FBlock);
4435 CGF.EmitBlock(FBlock);
4436 }
4437
4438 // ZExt result to int or bool.
4439 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
4440 }
4441
4442 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
4443 if (!CGF.ContainsLabel(E->getRHS()))
4444 return llvm::ConstantInt::get(ResTy, 1);
4445 }
4446
4447 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
4448 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
4449
4450 CodeGenFunction::ConditionalEvaluation eval(CGF);
4451
4452 // Branch on the LHS first. If it is true, go to the success (cont) block.
4453 CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
4454 CGF.getCurrentProfileCount() -
4455 CGF.getProfileCount(E->getRHS()));
4456
4457 // Any edges into the ContBlock are now from an (indeterminate number of)
4458 // edges from this first condition. All of these values will be true. Start
4459 // setting up the PHI node in the Cont Block for this.
4460 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4461 "", ContBlock);
4462 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4463 PI != PE; ++PI)
4464 PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
4465
4466 eval.begin(CGF);
4467
4468 // Emit the RHS condition as a bool value.
4469 CGF.EmitBlock(RHSBlock);
4470 CGF.incrementProfileCounter(E);
4471 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4472
4473 eval.end(CGF);
4474
4475 // Reacquire the RHS block, as there may be subblocks inserted.
4476 RHSBlock = Builder.GetInsertBlock();
4477
4478 // If we're generating for profiling or coverage, generate a branch on the
4479 // RHS to a block that increments the RHS true counter needed to track branch
4480 // condition coverage.
4481 if (InstrumentRegions &&
4482 CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4483 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
4484 Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
4485 CGF.EmitBlock(RHSBlockCnt);
4486 CGF.incrementProfileCounter(E->getRHS());
4487 CGF.EmitBranch(ContBlock);
4488 PN->addIncoming(RHSCond, RHSBlockCnt);
4489 }
4490
4491 // Emit an unconditional branch from this block to ContBlock. Insert an entry
4492 // into the phi node for the edge with the value of RHSCond.
4493 CGF.EmitBlock(ContBlock);
4494 PN->addIncoming(RHSCond, RHSBlock);
4495
4496 // ZExt result to int.
4497 return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
4498}
4499
4500Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
4501 CGF.EmitIgnoredExpr(E->getLHS());
4502 CGF.EnsureInsertPoint();
4503 return Visit(E->getRHS());
4504}
4505
4506//===----------------------------------------------------------------------===//
4507// Other Operators
4508//===----------------------------------------------------------------------===//
4509
4510/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
4511/// expression is cheap enough and side-effect-free enough to evaluate
4512/// unconditionally instead of conditionally. This is used to convert control
4513/// flow into selects in some cases.
4514static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
4515 CodeGenFunction &CGF) {
4516 // Anything that is an integer or floating point constant is fine.
4517 return E->IgnoreParens()->isEvaluatable(CGF.getContext());
4518
4519 // Even non-volatile automatic variables can't be evaluated unconditionally.
4520 // Referencing a thread_local may cause non-trivial initialization work to
4521 // occur. If we're inside a lambda and one of the variables is from the scope
4522 // outside the lambda, that function may have returned already. Reading its
4523 // locals is a bad idea. Also, these reads may introduce races that didn't
4524 // exist in the source-level program.
4525}
4526
4527
4528Value *ScalarExprEmitter::
4529VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
4530 TestAndClearIgnoreResultAssign();
4531
4532 // Bind the common expression if necessary.
4533 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
4534
4535 Expr *condExpr = E->getCond();
4536 Expr *lhsExpr = E->getTrueExpr();
4537 Expr *rhsExpr = E->getFalseExpr();
4538
4539 // If the condition constant folds and can be elided, try to avoid emitting
4540 // the condition and the dead arm.
4541 bool CondExprBool;
4542 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
4543 Expr *live = lhsExpr, *dead = rhsExpr;
4544 if (!CondExprBool) std::swap(live, dead);
4545
4546 // If the dead side doesn't have labels we need, just emit the Live part.
4547 if (!CGF.ContainsLabel(dead)) {
4548 if (CondExprBool)
4549 CGF.incrementProfileCounter(E);
4550 Value *Result = Visit(live);
4551
4552 // If the live part is a throw expression, it acts like it has a void
4553 // type, so evaluating it returns a null Value*. However, a conditional
4554 // with non-void type must return a non-null Value*.
4555 if (!Result && !E->getType()->isVoidType())
4556 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
4557
4558 return Result;
4559 }
4560 }
4561
4562 // OpenCL: If the condition is a vector, we can treat this condition like
4563 // the select function.
4564 if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
4565 condExpr->getType()->isExtVectorType()) {
4566 CGF.incrementProfileCounter(E);
4567
4568 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4569 llvm::Value *LHS = Visit(lhsExpr);
4570 llvm::Value *RHS = Visit(rhsExpr);
4571
4572 llvm::Type *condType = ConvertType(condExpr->getType());
4573 auto *vecTy = cast<llvm::FixedVectorType>(condType);
4574
4575 unsigned numElem = vecTy->getNumElements();
4576 llvm::Type *elemType = vecTy->getElementType();
4577
4578 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
4579 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
4580 llvm::Value *tmp = Builder.CreateSExt(
4581 TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
4582 llvm::Value *tmp2 = Builder.CreateNot(tmp);
4583
4584 // Cast float to int to perform ANDs if necessary.
4585 llvm::Value *RHSTmp = RHS;
4586 llvm::Value *LHSTmp = LHS;
4587 bool wasCast = false;
4588 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
4589 if (rhsVTy->getElementType()->isFloatingPointTy()) {
4590 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
4591 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
4592 wasCast = true;
4593 }
4594
4595 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
4596 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
4597 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
4598 if (wasCast)
4599 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
4600
4601 return tmp5;
4602 }
4603
4604 if (condExpr->getType()->isVectorType()) {
4605 CGF.incrementProfileCounter(E);
4606
4607 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4608 llvm::Value *LHS = Visit(lhsExpr);
4609 llvm::Value *RHS = Visit(rhsExpr);
4610
4611 llvm::Type *CondType = ConvertType(condExpr->getType());
4612 auto *VecTy = cast<llvm::VectorType>(CondType);
4613 llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
4614
4615 CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
4616 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
4617 }
4618
4619 // If this is a really simple expression (like x ? 4 : 5), emit this as a
4620 // select instead of as control flow. We can only do this if it is cheap and
4621 // safe to evaluate the LHS and RHS unconditionally.
4622 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
4623 isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
4624 llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
4625 llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
4626
4627 CGF.incrementProfileCounter(E, StepV);
4628
4629 llvm::Value *LHS = Visit(lhsExpr);
4630 llvm::Value *RHS = Visit(rhsExpr);
4631 if (!LHS) {
4632 // If the conditional has void type, make sure we return a null Value*.
4633 assert(!RHS && "LHS and RHS types must match");
4634 return nullptr;
4635 }
4636 return Builder.CreateSelect(CondV, LHS, RHS, "cond");
4637 }
4638
4639 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
4640 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
4641 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
4642
4643 CodeGenFunction::ConditionalEvaluation eval(CGF);
4644 CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
4645 CGF.getProfileCount(lhsExpr));
4646
4647 CGF.EmitBlock(LHSBlock);
4648 CGF.incrementProfileCounter(E);
4649 eval.begin(CGF);
4650 Value *LHS = Visit(lhsExpr);
4651 eval.end(CGF);
4652
4653 LHSBlock = Builder.GetInsertBlock();
4654 Builder.CreateBr(ContBlock);
4655
4656 CGF.EmitBlock(RHSBlock);
4657 eval.begin(CGF);
4658 Value *RHS = Visit(rhsExpr);
4659 eval.end(CGF);
4660
4661 RHSBlock = Builder.GetInsertBlock();
4662 CGF.EmitBlock(ContBlock);
4663
4664 // If the LHS or RHS is a throw expression, it will be legitimately null.
4665 if (!LHS)
4666 return RHS;
4667 if (!RHS)
4668 return LHS;
4669
4670 // Create a PHI node for the result.
4671 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
4672 PN->addIncoming(LHS, LHSBlock);
4673 PN->addIncoming(RHS, RHSBlock);
4674 return PN;
4675}
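The conditional-operator emitter above picks between two shapes: a branchless select when both arms pass isCheapEnoughToEvaluateUnconditionally, and the cond.true / cond.false / cond.end block structure with a phi otherwise. Two hypothetical inputs showing which shape each side would trigger (f and g stand in for arbitrary non-constant calls; a sketch, not emitter output):

static int condSelect(bool c) { return c ? 4 : 5; }
// -> both arms are constant-evaluatable, so they are emitted unconditionally
//    and combined with CreateSelect(CondV, 4, 5, "cond")

extern int f(), g(); // hypothetical arms with side effects
static int condBranch(bool c) { return c ? f() : g(); }
// -> not cheap/safe to evaluate both arms, so the emitter branches to
//    cond.true / cond.false and merges the values with CreatePHI at cond.end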
4676
4677Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
4678 return Visit(E->getChosenSubExpr());
4679}
4680
4681Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
4682 QualType Ty = VE->getType();
4683
4684 if (Ty->isVariablyModifiedType())
4685 CGF.EmitVariablyModifiedType(Ty);
4686
4687 Address ArgValue = Address::invalid();
4688 Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
4689
4690 llvm::Type *ArgTy = ConvertType(VE->getType());
4691
4692 // If EmitVAArg fails, emit an error.
4693 if (!ArgPtr.isValid()) {
4694 CGF.ErrorUnsupported(VE, "va_arg expression");
4695 return llvm::UndefValue::get(ArgTy);
4696 }
4697
4698 // FIXME Volatility.
4699 llvm::Value *Val = Builder.CreateLoad(ArgPtr);
4700
4701 // If EmitVAArg promoted the type, we must truncate it.
4702 if (ArgTy != Val->getType()) {
4703 if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
4704 Val = Builder.CreateIntToPtr(Val, ArgTy);
4705 else
4706 Val = Builder.CreateTrunc(Val, ArgTy);
4707 }
4708
4709 return Val;
4710}
4711
4712Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
4713 return CGF.EmitBlockLiteral(block);
4714}
4715
4716// Convert a vec3 to vec4, or vice versa.
4717static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
4718 Value *Src, unsigned NumElementsDst) {
4719 static constexpr int Mask[] = {0, 1, 2, -1};
4720 return Builder.CreateShuffleVector(Src,
4721 llvm::makeArrayRef(Mask, NumElementsDst));
4722}
4723
4724// Create cast instructions for converting LLVM value \p Src to LLVM type \p
4725// DstTy. \p Src has the same size as \p DstTy. Both are single value types
4726// but could be scalar or vectors of different lengths, and either can be a
4727// pointer.
4728// There are 4 cases:
4729// 1. non-pointer -> non-pointer : needs 1 bitcast
4730// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
4731// 3. pointer -> non-pointer
4732// a) pointer -> intptr_t : needs 1 ptrtoint
4733// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
4734// 4. non-pointer -> pointer
4735// a) intptr_t -> pointer : needs 1 inttoptr
4736// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
4737// Note: for cases 3b and 4b two casts are required since LLVM casts do not
4738// allow casting directly between pointer types and non-integer non-pointer
4739// types.
4740static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
4741 const llvm::DataLayout &DL,
4742 Value *Src, llvm::Type *DstTy,
4743 StringRef Name = "") {
4744 auto SrcTy = Src->getType();
4745
4746 // Case 1.
4747 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
4748 return Builder.CreateBitCast(Src, DstTy, Name);
4749
4750 // Case 2.
4751 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
4752 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
4753
4754 // Case 3.
4755 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
4756 // Case 3b.
4757 if (!DstTy->isIntegerTy())
4758 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
4759 // Cases 3a and 3b.
4760 return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
4761 }
4762
4763 // Case 4b.
4764 if (!SrcTy->isIntegerTy())
4765 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
4766 // Cases 4a and 4b.
4767 return Builder.CreateIntToPtr(Src, DstTy, Name);
4768}
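Case 3b of the table above is the subtle one: LLVM cannot bitcast a pointer straight to a non-integer type, so the helper goes through intptr_t first. A standalone sketch of that two-step path using the same IRBuilder calls (assumes the usual LLVM headers and libraries; the helper name is made up for illustration and is not part of CGExprScalar.cpp):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
// i8* -> double: ptrtoint to the target's intptr_t, then bitcast to double,
// mirroring the two casts the helper above emits for case 3b.
static llvm::Value *ptrToDouble(llvm::IRBuilder<> &B,
                                const llvm::DataLayout &DL, llvm::Value *Src) {
  llvm::Value *AsInt =
      B.CreatePtrToInt(Src, DL.getIntPtrType(Src->getType()));
  return B.CreateBitCast(AsInt, B.getDoubleTy());
}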
4769
4770Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
4771 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
4772 llvm::Type *DstTy = ConvertType(E->getType());
4773
4774 llvm::Type *SrcTy = Src->getType();
4775 unsigned NumElementsSrc =
4776 isa<llvm::VectorType>(SrcTy)
4777 ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
4778 : 0;
4779 unsigned NumElementsDst =
4780 isa<llvm::VectorType>(DstTy)
4781 ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
4782 : 0;
4783
4784 // Going from vec3 to non-vec3 is a special case and requires a shuffle
4785 // vector to get a vec4, then a bitcast if the target type is different.
4786 if (NumElementsSrc == 3 && NumElementsDst != 3) {
4787 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
4788 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
4789 DstTy);
4790
4791 Src->setName("astype");
4792 return Src;
4793 }
4794
4795 // Going from non-vec3 to vec3 is a special case and requires a bitcast
4796 // to vec4 if the original type is not vec4, then a shuffle vector to
4797 // get a vec3.
4798 if (NumElementsSrc != 3 && NumElementsDst == 3) {
4799 if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4800 auto *Vec4Ty = llvm::FixedVectorType::get(
4801 cast<llvm::VectorType>(DstTy)->getElementType(), 4);
4802 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
4803 Vec4Ty);
4804 }
4805
4806 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
4807 Src->setName("astype");
4808 return Src;
4809 }
4810
4811 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
4812 Src, DstTy, "astype");
4813}
4814
4815Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
4816 return CGF.EmitAtomicExpr(E).getScalarVal();
4817}
4818
4819//===----------------------------------------------------------------------===//
4820// Entry Point into this File
4821//===----------------------------------------------------------------------===//
4822
4823/// Emit the computation of the specified expression of scalar type, ignoring
4824/// the result.
4825Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
4826 assert(E && hasScalarEvaluationKind(E->getType()) &&
4827 "Invalid scalar expression to emit");
4828
4829 return ScalarExprEmitter(*this, IgnoreResultAssign)
4830 .Visit(const_cast<Expr *>(E));
4831}
4832
4833/// Emit a conversion from the specified type to the specified destination type,
4834/// both of which are LLVM scalar types.
4835Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
4836 QualType DstTy,
4837 SourceLocation Loc) {
4838 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
4839 "Invalid scalar expression to emit");
4840 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
4841}
4842
4843/// Emit a conversion from the specified complex type to the specified
4844/// destination type, where the destination type is an LLVM scalar type.
4845Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
4846 QualType SrcTy,
4847 QualType DstTy,
4848 SourceLocation Loc) {
4849 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
4850 "Invalid complex -> scalar conversion");
4851 return ScalarExprEmitter(*this)
4852 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
4853}
4854
4855
4856llvm::Value *CodeGenFunction::
4857EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
4858 bool isInc, bool isPre) {
4859 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
4860}
4861
4862LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
4863 // object->isa or (*object).isa
4864 // Generate code as for: *(Class*)object
4865
4866 Expr *BaseExpr = E->getBase();
4867 Address Addr = Address::invalid();
4868 if (BaseExpr->isPRValue()) {
4869 Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
4870 } else {
4871 Addr = EmitLValue(BaseExpr).getAddress(*this);
4872 }
4873
4874 // Cast the address to Class*.
4875 Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
4876 return MakeAddrLValue(Addr, E->getType());
4877}
4878
4879
4880LValue CodeGenFunction::EmitCompoundAssignmentLValue(
4881 const CompoundAssignOperator *E) {
4882 ScalarExprEmitter Scalar(*this);
4883 Value *Result = nullptr;
4884 switch (E->getOpcode()) {
4885#define COMPOUND_OP(Op) \
4886 case BO_##Op##Assign: \
4887 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
4888 Result)
4889 COMPOUND_OP(Mul);
4890 COMPOUND_OP(Div);
4891 COMPOUND_OP(Rem);
4892 COMPOUND_OP(Add);
4893 COMPOUND_OP(Sub);
4894 COMPOUND_OP(Shl);
4895 COMPOUND_OP(Shr);
4896 COMPOUND_OP(And);
4897 COMPOUND_OP(Xor);
4898 COMPOUND_OP(Or);
4899#undef COMPOUND_OP
4900
4901 case BO_PtrMemD:
4902 case BO_PtrMemI:
4903 case BO_Mul:
4904 case BO_Div:
4905 case BO_Rem:
4906 case BO_Add:
4907 case BO_Sub:
4908 case BO_Shl:
4909 case BO_Shr:
4910 case BO_LT:
4911 case BO_GT:
4912 case BO_LE:
4913 case BO_GE:
4914 case BO_EQ:
4915 case BO_NE:
4916 case BO_Cmp:
4917 case BO_And:
4918 case BO_Xor:
4919 case BO_Or:
4920 case BO_LAnd:
4921 case BO_LOr:
4922 case BO_Assign:
4923 case BO_Comma:
4924 llvm_unreachable("Not valid compound assignment operators");
4925 }
4926
4927 llvm_unreachable("Unhandled compound assignment operator");
4928}
4929
4930struct GEPOffsetAndOverflow {
4931 // The total (signed) byte offset for the GEP.
4932 llvm::Value *TotalOffset;
4933 // The offset overflow flag - true if the total offset overflows.
4934 llvm::Value *OffsetOverflows;
4935};
4936
4937/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
4938/// and compute the total offset it applies from its base pointer BasePtr.
4939/// Returns offset in bytes and a boolean flag whether an overflow happened
4940/// during evaluation.
4941static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
4942 llvm::LLVMContext &VMContext,
4943 CodeGenModule &CGM,
4944 CGBuilderTy &Builder) {
4945 const auto &DL = CGM.getDataLayout();
4946
4947 // The total (signed) byte offset for the GEP.
4948 llvm::Value *TotalOffset = nullptr;
4949
4950 // Was the GEP already reduced to a constant?
4951 if (isa<llvm::Constant>(GEPVal)) {
4952 // Compute the offset by casting both pointers to integers and subtracting:
4953 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
4954 Value *BasePtr_int =
4955 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
4956 Value *GEPVal_int =
4957 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
4958 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
4959 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
4960 }
4961
4962 auto *GEP = cast<llvm::GEPOperator>(GEPVal);
4963 assert(GEP->getPointerOperand() == BasePtr &&
4964 "BasePtr must be the base of the GEP.");
4965 assert(GEP->isInBounds() && "Expected inbounds GEP");
4966
4967 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
4968
4969 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
4970 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
4971 auto *SAddIntrinsic =
4972 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
4973 auto *SMulIntrinsic =
4974 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
4975
4976 // The offset overflow flag - true if the total offset overflows.
4977 llvm::Value *OffsetOverflows = Builder.getFalse();
4978
4979 /// Return the result of the given binary operation.
4980 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
4981 llvm::Value *RHS) -> llvm::Value * {
4982 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
4983
4984 // If the operands are constants, return a constant result.
4985 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
4986 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
4987 llvm::APInt N;
4988 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
4989 /*Signed=*/true, N);
4990 if (HasOverflow)
4991 OffsetOverflows = Builder.getTrue();
4992 return llvm::ConstantInt::get(VMContext, N);
4993 }
4994 }
4995
4996 // Otherwise, compute the result with checked arithmetic.
4997 auto *ResultAndOverflow = Builder.CreateCall(
4998 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
4999 OffsetOverflows = Builder.CreateOr(
5000 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
5001 return Builder.CreateExtractValue(ResultAndOverflow, 0);
5002 };
5003
5004 // Determine the total byte offset by looking at each GEP operand.
5005 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
5006 GTI != GTE; ++GTI) {
5007 llvm::Value *LocalOffset;
5008 auto *Index = GTI.getOperand();
5009 // Compute the local offset contributed by this indexing step:
5010 if (auto *STy = GTI.getStructTypeOrNull()) {
5011 // For struct indexing, the local offset is the byte position of the
5012 // specified field.
5013 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
5014 LocalOffset = llvm::ConstantInt::get(
5015 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
5016 } else {
5017 // Otherwise this is array-like indexing. The local offset is the index
5018 // multiplied by the element size.
5019 auto *ElementSize = llvm::ConstantInt::get(
5020 IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
5021 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
5022 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
5023 }
5024
5025 // If this is the first offset, set it as the total offset. Otherwise, add
5026 // the local offset into the running total.
5027 if (!TotalOffset || TotalOffset == Zero)
5028 TotalOffset = LocalOffset;
5029 else
5030 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
5031 }
5032
5033 return {TotalOffset, OffsetOverflows};
5034}
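Note the initialization at line 4948 and the loop at line 5005: on the non-constant path, TotalOffset is only ever assigned inside the per-operand loop, so if the analyzer considers an iteration count of zero, the function returns a null TotalOffset. That is exactly what step 48 below records; in practice an inbounds GEPOperator carries at least one index, which suggests this is a path the analyzer merely cannot rule out rather than a bug that fires at runtime. A minimal plain C++ model of that shape (hypothetical names, not LLVM code):

#include <cstdio>
#include <vector>
// 'total' mirrors TotalOffset: assigned only inside the loop, so an empty
// operand list yields null -- the analyzer's step 48.
static const long *sumOffsets(const std::vector<long> &locals, long &storage) {
  const long *total = nullptr;
  for (long local : locals) {
    storage = (total ? *total : 0) + local;
    total = &storage;
  }
  return total; // null when 'locals' is empty
}
int main() {
  long s = 0;
  std::printf("%p\n", (const void *)sumOffsets({}, s)); // prints a null pointer
}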
Otherwise, add 5026 // the local offset into the running total. 5027 if (!TotalOffset || TotalOffset == Zero) 5028 TotalOffset = LocalOffset; 5029 else 5030 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset); 5031 } 5032 5033 return {TotalOffset, OffsetOverflows}; 5034} 5035 5036Value * 5037CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList, 5038 bool SignedIndices, bool IsSubtraction, 5039 SourceLocation Loc, const Twine &Name) { 5040 llvm::Type *PtrTy = Ptr->getType(); 5041 Value *GEPVal = Builder.CreateInBoundsGEP( 5042 PtrTy->getPointerElementType(), Ptr, IdxList, Name); 5043 5044 // If the pointer overflow sanitizer isn't enabled, do nothing. 5045 if (!SanOpts.has(SanitizerKind::PointerOverflow))
42
Assuming the condition is false
43
Taking false branch
5046 return GEPVal;
5047
5048 // Perform nullptr-and-offset check unless the nullptr is defined.
5049 bool PerformNullCheck = !NullPointerIsDefined(
44
Assuming the condition is false
5050 Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
5051 // Check for overflows unless the GEP got constant-folded,
5052 // and only in the default address space
5053 bool PerformOverflowCheck =
5054 !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
45
Assuming 'GEPVal' is not a 'Constant'
46
Assuming the condition is true
5055
5056 if (!(PerformNullCheck
46.1
'PerformNullCheck' is false
|| PerformOverflowCheck))
47
Taking false branch
5057 return GEPVal;
5058
5059 const auto &DL = CGM.getDataLayout();
5060
5061 SanitizerScope SanScope(this);
5062 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
5063
5064 GEPOffsetAndOverflow EvaluatedGEP =
5065 EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
48
Null pointer value stored to 'EvaluatedGEP.TotalOffset'
5066
5067 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
5068 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
5069 "If the offset got constant-folded, we don't expect that there was an "
5070 "overflow.");
5071
5072 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
5073
5074 // Common case: if the total offset is zero, and we are using C++ semantics,
5075 // where nullptr+0 is defined, don't emit a check.
5076 if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
49
Assuming 'Zero' is not equal to field 'TotalOffset'
5077 return GEPVal;
5078
5079 // Now that we've computed the total offset, add it to the base pointer (with
5080 // wrapping semantics).
5081 auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
5082 auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);
5083
5084 llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
5085
5086 if (PerformNullCheck
49.1
'PerformNullCheck' is false
) {
50
Taking false branch
5087 // In C++, if the base pointer evaluates to a null pointer value,
5088 // the only valid pointer this inbounds GEP can produce is also
5089 // a null pointer, so the offset must also evaluate to zero.
5090 // Likewise, if we have a non-zero base pointer, we cannot get a null pointer
5091 // as a result, so the offset cannot be -intptr_t(BasePtr).
5092 // In other words, both pointers are either null, or both are non-null,
5093 // or the behaviour is undefined.
5094 //
5095 // C, however, is more strict in this regard, and gives more
5096 // optimization opportunities: in C, additionally, nullptr+0 is undefined.
5097 // So both the input to the 'gep inbounds' AND the output must not be null.
5098 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
5099 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
5100 auto *Valid =
5101 CGM.getLangOpts().CPlusPlus
5102 ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
5103 : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
5104 Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
5105 }
5106
5107 if (PerformOverflowCheck
50.1
'PerformOverflowCheck' is true
) {
51
Taking true branch
5108 // The GEP is valid if:
5109 // 1) The total offset doesn't overflow, and
5110 // 2) The sign of the difference between the computed address and the base
5111 // pointer matches the sign of the total offset.
5112 llvm::Value *ValidGEP;
5113 auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
5114 if (SignedIndices) {
52
Assuming 'SignedIndices' is true
53
Taking true branch
5115 // GEP is computed as `unsigned base + signed offset`, therefore:
5116 // * If offset was positive, then the computed pointer cannot be
5117 // [unsigned] less than the base pointer, unless it overflowed.
5118 // * If offset was negative, then the computed pointer cannot be
5119 // [unsigned] greater than the base pointer, unless it overflowed.
5120 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5121 auto *PosOrZeroOffset =
5122 Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
54
Passing null pointer value via 1st parameter 'LHS'
55
Calling 'IRBuilderBase::CreateICmpSGE'
5123 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
5124 ValidGEP =
5125 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
5126 } else if (!IsSubtraction) {
5127 // GEP is computed as `unsigned base + unsigned offset`, therefore the
5128 // computed pointer cannot be [unsigned] less than the base pointer,
5129 // unless there was an overflow.
5130 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
5131 ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5132 } else {
5133 // GEP is computed as `unsigned base - unsigned offset`, therefore the
5134 // computed pointer cannot be [unsigned] greater than the base pointer,
5135 // unless there was an overflow.
5136 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
5137 ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
5138 }
5139 ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
5140 Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
5141 }
5142
5143 assert(!Checks.empty() && "Should have produced some checks.");
5144
5145 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
5146 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
5147 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
5148 EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
5149
5150 return GEPVal;
5151}
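Taken together, steps 42 through 55 describe the reported path: the sanitizer checks are enabled (42-43), PerformNullCheck is false while PerformOverflowCheck is true (44-47), EmitGEPOffsetInBytes leaves TotalOffset null (48), the zero-offset early return at line 5076 is assumed not to trigger (49), and the null value is then passed as LHS into Builder.CreateICmpSGE at line 5122 (54-55), where IRBuilderBase ends up calling getType() on the null operand. A hedged sketch of one defensive guard, written against the code above (a hypothetical local workaround, not the actual upstream fix):

// If the helper ever failed to produce an offset, substitute a zero
// constant so CreateICmpSGE never sees a null Value*.
GEPOffsetAndOverflow EvaluatedGEP =
    EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
if (!EvaluatedGEP.TotalOffset) {
  EvaluatedGEP.TotalOffset = llvm::ConstantInt::getNullValue(IntPtrTy);
  EvaluatedGEP.OffsetOverflows = Builder.getFalse();
}

An assert that the GEP has at least one index operand would document the same invariant without changing the emitted IR.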

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/clang/include/clang/AST/ASTContext.h

1//===- ASTContext.h - Context to hold long-lived AST nodes ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// Defines the clang::ASTContext interface.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_CLANG_AST_ASTCONTEXT_H
15#define LLVM_CLANG_AST_ASTCONTEXT_H
16
17#include "clang/AST/ASTContextAllocate.h"
18#include "clang/AST/ASTFwd.h"
19#include "clang/AST/CanonicalType.h"
20#include "clang/AST/CommentCommandTraits.h"
21#include "clang/AST/ComparisonCategories.h"
22#include "clang/AST/Decl.h"
23#include "clang/AST/DeclBase.h"
24#include "clang/AST/DeclarationName.h"
25#include "clang/AST/ExternalASTSource.h"
26#include "clang/AST/NestedNameSpecifier.h"
27#include "clang/AST/PrettyPrinter.h"
28#include "clang/AST/RawCommentList.h"
29#include "clang/AST/TemplateName.h"
30#include "clang/AST/Type.h"
31#include "clang/Basic/AddressSpaces.h"
32#include "clang/Basic/AttrKinds.h"
33#include "clang/Basic/IdentifierTable.h"
34#include "clang/Basic/LLVM.h"
35#include "clang/Basic/LangOptions.h"
36#include "clang/Basic/Linkage.h"
37#include "clang/Basic/NoSanitizeList.h"
38#include "clang/Basic/OperatorKinds.h"
39#include "clang/Basic/PartialDiagnostic.h"
40#include "clang/Basic/ProfileList.h"
41#include "clang/Basic/SourceLocation.h"
42#include "clang/Basic/Specifiers.h"
43#include "clang/Basic/TargetCXXABI.h"
44#include "clang/Basic/XRayLists.h"
45#include "llvm/ADT/APSInt.h"
46#include "llvm/ADT/ArrayRef.h"
47#include "llvm/ADT/DenseMap.h"
48#include "llvm/ADT/DenseSet.h"
49#include "llvm/ADT/FoldingSet.h"
50#include "llvm/ADT/IntrusiveRefCntPtr.h"
51#include "llvm/ADT/MapVector.h"
52#include "llvm/ADT/None.h"
53#include "llvm/ADT/Optional.h"
54#include "llvm/ADT/PointerIntPair.h"
55#include "llvm/ADT/PointerUnion.h"
56#include "llvm/ADT/SmallVector.h"
57#include "llvm/ADT/StringMap.h"
58#include "llvm/ADT/StringRef.h"
59#include "llvm/ADT/TinyPtrVector.h"
60#include "llvm/ADT/Triple.h"
61#include "llvm/ADT/iterator_range.h"
62#include "llvm/Support/AlignOf.h"
63#include "llvm/Support/Allocator.h"
64#include "llvm/Support/Casting.h"
65#include "llvm/Support/Compiler.h"
66#include "llvm/Support/TypeSize.h"
67#include <cassert>
68#include <cstddef>
69#include <cstdint>
70#include <iterator>
71#include <memory>
72#include <string>
73#include <type_traits>
74#include <utility>
75#include <vector>
76
77namespace llvm {
78
79class APFixedPoint;
80class FixedPointSemantics;
81struct fltSemantics;
82template <typename T, unsigned N> class SmallPtrSet;
83
84} // namespace llvm
85
86namespace clang {
87
88class APValue;
89class ASTMutationListener;
90class ASTRecordLayout;
91class AtomicExpr;
92class BlockExpr;
93class BuiltinTemplateDecl;
94class CharUnits;
95class ConceptDecl;
96class CXXABI;
97class CXXConstructorDecl;
98class CXXMethodDecl;
99class CXXRecordDecl;
100class DiagnosticsEngine;
101class ParentMapContext;
102class DynTypedNode;
103class DynTypedNodeList;
104class Expr;
105class GlobalDecl;
106class ItaniumMangleContext;
107class MangleContext;
108class MangleNumberingContext;
109class MaterializeTemporaryExpr;
110class MemberSpecializationInfo;
111class Module;
112struct MSGuidDeclParts;
113class ObjCCategoryDecl;
114class ObjCCategoryImplDecl;
115class ObjCContainerDecl;
116class ObjCImplDecl;
117class ObjCImplementationDecl;
118class ObjCInterfaceDecl;
119class ObjCIvarDecl;
120class ObjCMethodDecl;
121class ObjCPropertyDecl;
122class ObjCPropertyImplDecl;
123class ObjCProtocolDecl;
124class ObjCTypeParamDecl;
125class OMPTraitInfo;
126struct ParsedTargetAttr;
127class Preprocessor;
128class Stmt;
129class StoredDeclsMap;
130class TargetAttr;
131class TargetInfo;
132class TemplateDecl;
133class TemplateParameterList;
134class TemplateTemplateParmDecl;
135class TemplateTypeParmDecl;
136class UnresolvedSetIterator;
137class UsingShadowDecl;
138class VarTemplateDecl;
139class VTableContextBase;
140struct BlockVarCopyInit;
141
142namespace Builtin {
143
144class Context;
145
146} // namespace Builtin
147
148enum BuiltinTemplateKind : int;
149enum OpenCLTypeKind : uint8_t;
150
151namespace comments {
152
153class FullComment;
154
155} // namespace comments
156
157namespace interp {
158
159class Context;
160
161} // namespace interp
162
163namespace serialization {
164template <class> class AbstractTypeReader;
165} // namespace serialization
166
167enum class AlignRequirementKind {
168 /// The alignment was not explicit in code.
169 None,
170
171 /// The alignment comes from an alignment attribute on a typedef.
172 RequiredByTypedef,
173
174 /// The alignment comes from an alignment attribute on a record type.
175 RequiredByRecord,
176
177 /// The alignment comes from an alignment attribute on an enum type.
178 RequiredByEnum,
179};
180
181struct TypeInfo {
182 uint64_t Width = 0;
183 unsigned Align = 0;
184 AlignRequirementKind AlignRequirement;
185
186 TypeInfo() : AlignRequirement(AlignRequirementKind::None) {}
187 TypeInfo(uint64_t Width, unsigned Align,
188 AlignRequirementKind AlignRequirement)
189 : Width(Width), Align(Align), AlignRequirement(AlignRequirement) {}
190 bool isAlignRequired() {
191 return AlignRequirement != AlignRequirementKind::None;
192 }
193};
194
195struct TypeInfoChars {
196 CharUnits Width;
197 CharUnits Align;
198 AlignRequirementKind AlignRequirement;
199
200 TypeInfoChars() : AlignRequirement(AlignRequirementKind::None) {}
201 TypeInfoChars(CharUnits Width, CharUnits Align,
202 AlignRequirementKind AlignRequirement)
203 : Width(Width), Align(Align), AlignRequirement(AlignRequirement) {}
204 bool isAlignRequired() {
205 return AlignRequirement != AlignRequirementKind::None;
206 }
207};
208
209/// Holds long-lived AST nodes (such as types and decls) that can be
210/// referred to throughout the semantic analysis of a file.
211class ASTContext : public RefCountedBase<ASTContext> {
212 friend class NestedNameSpecifier;
213
214 mutable SmallVector<Type *, 0> Types;
215 mutable llvm::FoldingSet<ExtQuals> ExtQualNodes;
216 mutable llvm::FoldingSet<ComplexType> ComplexTypes;
217 mutable llvm::FoldingSet<PointerType> PointerTypes;
218 mutable llvm::FoldingSet<AdjustedType> AdjustedTypes;
219 mutable llvm::FoldingSet<BlockPointerType> BlockPointerTypes;
220 mutable llvm::FoldingSet<LValueReferenceType> LValueReferenceTypes;
221 mutable llvm::FoldingSet<RValueReferenceType> RValueReferenceTypes;
222 mutable llvm::FoldingSet<MemberPointerType> MemberPointerTypes;
223 mutable llvm::ContextualFoldingSet<ConstantArrayType, ASTContext &>
224 ConstantArrayTypes;
225 mutable llvm::FoldingSet<IncompleteArrayType> IncompleteArrayTypes;
226 mutable std::vector<VariableArrayType*> VariableArrayTypes;
227 mutable llvm::FoldingSet<DependentSizedArrayType> DependentSizedArrayTypes;
228 mutable llvm::FoldingSet<DependentSizedExtVectorType>
229 DependentSizedExtVectorTypes;
230 mutable llvm::FoldingSet<DependentAddressSpaceType>
231 DependentAddressSpaceTypes;
232 mutable llvm::FoldingSet<VectorType> VectorTypes;
233 mutable llvm::FoldingSet<DependentVectorType> DependentVectorTypes;
234 mutable llvm::FoldingSet<ConstantMatrixType> MatrixTypes;
235 mutable llvm::FoldingSet<DependentSizedMatrixType> DependentSizedMatrixTypes;
236 mutable llvm::FoldingSet<FunctionNoProtoType> FunctionNoProtoTypes;
237 mutable llvm::ContextualFoldingSet<FunctionProtoType, ASTContext&>
238 FunctionProtoTypes;
239 mutable llvm::FoldingSet<DependentTypeOfExprType> DependentTypeOfExprTypes;
240 mutable llvm::FoldingSet<DependentDecltypeType> DependentDecltypeTypes;
241 mutable llvm::FoldingSet<TemplateTypeParmType> TemplateTypeParmTypes;
242 mutable llvm::FoldingSet<ObjCTypeParamType> ObjCTypeParamTypes;
243 mutable llvm::FoldingSet<SubstTemplateTypeParmType>
244 SubstTemplateTypeParmTypes;
245 mutable llvm::FoldingSet<SubstTemplateTypeParmPackType>
246 SubstTemplateTypeParmPackTypes;
247 mutable llvm::ContextualFoldingSet<TemplateSpecializationType, ASTContext&>
248 TemplateSpecializationTypes;
249 mutable llvm::FoldingSet<ParenType> ParenTypes;
250 mutable llvm::FoldingSet<ElaboratedType> ElaboratedTypes;
251 mutable llvm::FoldingSet<DependentNameType> DependentNameTypes;
252 mutable llvm::ContextualFoldingSet<DependentTemplateSpecializationType,
253 ASTContext&>
254 DependentTemplateSpecializationTypes;
255 llvm::FoldingSet<PackExpansionType> PackExpansionTypes;
256 mutable llvm::FoldingSet<ObjCObjectTypeImpl> ObjCObjectTypes;
257 mutable llvm::FoldingSet<ObjCObjectPointerType> ObjCObjectPointerTypes;
258 mutable llvm::FoldingSet<DependentUnaryTransformType>
259 DependentUnaryTransformTypes;
260 mutable llvm::ContextualFoldingSet<AutoType, ASTContext&> AutoTypes;
261 mutable llvm::FoldingSet<DeducedTemplateSpecializationType>
262 DeducedTemplateSpecializationTypes;
263 mutable llvm::FoldingSet<AtomicType> AtomicTypes;
264 llvm::FoldingSet<AttributedType> AttributedTypes;
265 mutable llvm::FoldingSet<PipeType> PipeTypes;
266 mutable llvm::FoldingSet<ExtIntType> ExtIntTypes;
267 mutable llvm::FoldingSet<DependentExtIntType> DependentExtIntTypes;
268
269 mutable llvm::FoldingSet<QualifiedTemplateName> QualifiedTemplateNames;
270 mutable llvm::FoldingSet<DependentTemplateName> DependentTemplateNames;
271 mutable llvm::FoldingSet<SubstTemplateTemplateParmStorage>
272 SubstTemplateTemplateParms;
273 mutable llvm::ContextualFoldingSet<SubstTemplateTemplateParmPackStorage,
274 ASTContext&>
275 SubstTemplateTemplateParmPacks;
276
277 /// The set of nested name specifiers.
278 ///
279 /// This set is managed by the NestedNameSpecifier class.
280 mutable llvm::FoldingSet<NestedNameSpecifier> NestedNameSpecifiers;
281 mutable NestedNameSpecifier *GlobalNestedNameSpecifier = nullptr;
282
283 /// A cache mapping from RecordDecls to ASTRecordLayouts.
284 ///
285 /// This is lazily created. This is intentionally not serialized.
286 mutable llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>
287 ASTRecordLayouts;
288 mutable llvm::DenseMap<const ObjCContainerDecl*, const ASTRecordLayout*>
289 ObjCLayouts;
290
291 /// A cache from types to size and alignment information.
292 using TypeInfoMap = llvm::DenseMap<const Type *, struct TypeInfo>;
293 mutable TypeInfoMap MemoizedTypeInfo;
294
295 /// A cache from types to unadjusted alignment information. Only ARM and
296 /// AArch64 targets need this information; keeping it separate prevents
297 /// imposing overhead on TypeInfo size.
298 using UnadjustedAlignMap = llvm::DenseMap<const Type *, unsigned>;
299 mutable UnadjustedAlignMap MemoizedUnadjustedAlign;
300
301 /// A cache mapping from CXXRecordDecls to key functions.
302 llvm::DenseMap<const CXXRecordDecl*, LazyDeclPtr> KeyFunctions;
303
304 /// Mapping from ObjCContainers to their ObjCImplementations.
305 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*> ObjCImpls;
306
307 /// Mapping from ObjCMethod to its duplicate declaration in the same
308 /// interface.
309 llvm::DenseMap<const ObjCMethodDecl*,const ObjCMethodDecl*> ObjCMethodRedecls;
310
311 /// Mapping from __block VarDecls to BlockVarCopyInit.
312 llvm::DenseMap<const VarDecl *, BlockVarCopyInit> BlockVarCopyInits;
313
314 /// Mapping from GUIDs to the corresponding MSGuidDecl.
315 mutable llvm::FoldingSet<MSGuidDecl> MSGuidDecls;
316
317 /// Mapping from APValues to the corresponding TemplateParamObjects.
318 mutable llvm::FoldingSet<TemplateParamObjectDecl> TemplateParamObjectDecls;
319
320 /// A cache mapping a string value to a StringLiteral object with the same
321 /// value.
322 ///
323 /// This is lazily created. This is intentionally not serialized.
324 mutable llvm::StringMap<StringLiteral *> StringLiteralCache;
325
326 /// MD5 hash of CUID. It is calculated when first used and cached by this
327 /// data member.
328 mutable std::string CUIDHash;
329
330 /// Representation of a "canonical" template template parameter that
331 /// is used in canonical template names.
332 class CanonicalTemplateTemplateParm : public llvm::FoldingSetNode {
333 TemplateTemplateParmDecl *Parm;
334
335 public:
336 CanonicalTemplateTemplateParm(TemplateTemplateParmDecl *Parm)
337 : Parm(Parm) {}
338
339 TemplateTemplateParmDecl *getParam() const { return Parm; }
340
341 void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &C) {
342 Profile(ID, C, Parm);
343 }
344
345 static void Profile(llvm::FoldingSetNodeID &ID,
346 const ASTContext &C,
347 TemplateTemplateParmDecl *Parm);
348 };
349 mutable llvm::ContextualFoldingSet<CanonicalTemplateTemplateParm,
350 const ASTContext&>
351 CanonTemplateTemplateParms;
352
353 TemplateTemplateParmDecl *
354 getCanonicalTemplateTemplateParmDecl(TemplateTemplateParmDecl *TTP) const;
355
356 /// The typedef for the __int128_t type.
357 mutable TypedefDecl *Int128Decl = nullptr;
358
359 /// The typedef for the __uint128_t type.
360 mutable TypedefDecl *UInt128Decl = nullptr;
361
362 /// The typedef for the target specific predefined
363 /// __builtin_va_list type.
364 mutable TypedefDecl *BuiltinVaListDecl = nullptr;
365
366 /// The typedef for the predefined \c __builtin_ms_va_list type.
367 mutable TypedefDecl *BuiltinMSVaListDecl = nullptr;
368
369 /// The typedef for the predefined \c id type.
370 mutable TypedefDecl *ObjCIdDecl = nullptr;
371
372 /// The typedef for the predefined \c SEL type.
373 mutable TypedefDecl *ObjCSelDecl = nullptr;
374
375 /// The typedef for the predefined \c Class type.
376 mutable TypedefDecl *ObjCClassDecl = nullptr;
377
378 /// The typedef for the predefined \c Protocol class in Objective-C.
379 mutable ObjCInterfaceDecl *ObjCProtocolClassDecl = nullptr;
380
381 /// The typedef for the predefined 'BOOL' type.
382 mutable TypedefDecl *BOOLDecl = nullptr;
383
384 // Typedefs which may be provided to define the structure of Objective-C
385 // pseudo-builtins.
386 QualType ObjCIdRedefinitionType;
387 QualType ObjCClassRedefinitionType;
388 QualType ObjCSelRedefinitionType;
389
390 /// The identifier 'bool'.
391 mutable IdentifierInfo *BoolName = nullptr;
392
393 /// The identifier 'NSObject'.
394 mutable IdentifierInfo *NSObjectName = nullptr;
395
396 /// The identifier 'NSCopying'.
397 IdentifierInfo *NSCopyingName = nullptr;
398
399 /// The identifier '__make_integer_seq'.
400 mutable IdentifierInfo *MakeIntegerSeqName = nullptr;
401
402 /// The identifier '__type_pack_element'.
403 mutable IdentifierInfo *TypePackElementName = nullptr;
404
405 QualType ObjCConstantStringType;
406 mutable RecordDecl *CFConstantStringTagDecl = nullptr;
407 mutable TypedefDecl *CFConstantStringTypeDecl = nullptr;
408
409 mutable QualType ObjCSuperType;
410
411 QualType ObjCNSStringType;
412
413 /// The typedef declaration for the Objective-C "instancetype" type.
414 TypedefDecl *ObjCInstanceTypeDecl = nullptr;
415
416 /// The type for the C FILE type.
417 TypeDecl *FILEDecl = nullptr;
418
419 /// The type for the C jmp_buf type.
420 TypeDecl *jmp_bufDecl = nullptr;
421
422 /// The type for the C sigjmp_buf type.
423 TypeDecl *sigjmp_bufDecl = nullptr;
424
425 /// The type for the C ucontext_t type.
426 TypeDecl *ucontext_tDecl = nullptr;
427
428 /// Type for the Block descriptor for Blocks CodeGen.
429 ///
430 /// Since this is only used for generation of debug info, it is not
431 /// serialized.
432 mutable RecordDecl *BlockDescriptorType = nullptr;
433
434 /// Type for the extended Block descriptor for Blocks CodeGen.
435 ///
436 /// Since this is only used for generation of debug info, it is not
437 /// serialized.
438 mutable RecordDecl *BlockDescriptorExtendedType = nullptr;
439
440 /// Declaration for the CUDA cudaConfigureCall function.
441 FunctionDecl *cudaConfigureCallDecl = nullptr;
442
443 /// Keeps track of all declaration attributes.
444 ///
445 /// Since so few decls have attrs, we keep them in a hash map instead of
446 /// wasting space in the Decl class.
447 llvm::DenseMap<const Decl*, AttrVec*> DeclAttrs;
448
449 /// A mapping from non-redeclarable declarations in modules that were
450 /// merged with other declarations to the canonical declaration that they were
451 /// merged into.
452 llvm::DenseMap<Decl*, Decl*> MergedDecls;
453
454 /// A mapping from a defining declaration to a list of modules (other
455 /// than the owning module of the declaration) that contain merged
456 /// definitions of that entity.
457 llvm::DenseMap<NamedDecl*, llvm::TinyPtrVector<Module*>> MergedDefModules;
458
459 /// Initializers for a module, in order. Each Decl will be either
460 /// something that has a semantic effect on startup (such as a variable with
461 /// a non-constant initializer), or an ImportDecl (which recursively triggers
462 /// initialization of another module).
463 struct PerModuleInitializers {
464 llvm::SmallVector<Decl*, 4> Initializers;
465 llvm::SmallVector<uint32_t, 4> LazyInitializers;
466
467 void resolve(ASTContext &Ctx);
468 };
469 llvm::DenseMap<Module*, PerModuleInitializers*> ModuleInitializers;
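/// For example (an illustrative sketch; the module names are placeholders),
/// both declarations below would be recorded as initializers of module M:
///
/// \code
/// export module M;
/// import N;         // ImportDecl: recursively runs N's initializers
/// int f();
/// int Global = f(); // non-constant initializer: runs at startup
/// \endcode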
470
471 ASTContext &this_() { return *this; }
472
473public:
474 /// A type synonym for the TemplateOrInstantiation mapping.
475 using TemplateOrSpecializationInfo =
476 llvm::PointerUnion<VarTemplateDecl *, MemberSpecializationInfo *>;
477
478private:
479 friend class ASTDeclReader;
480 friend class ASTReader;
481 friend class ASTWriter;
482 template <class> friend class serialization::AbstractTypeReader;
483 friend class CXXRecordDecl;
484 friend class IncrementalParser;
485
486 /// A mapping from a variable declaration to the template or declaration
487 /// that it describes or was instantiated from,
488 /// respectively.
489 ///
490 /// For non-templates, this value will be NULL. For variable
491 /// declarations that describe a variable template, this will be a
492 /// pointer to a VarTemplateDecl. For static data members
493 /// of class template specializations, this will be the
494 /// MemberSpecializationInfo referring to the member variable that was
495 /// instantiated or specialized. Thus, the mapping will keep track of
496 /// the static data member templates from which static data members of
497 /// class template specializations were instantiated.
498 ///
499 /// Given the following example:
500 ///
501 /// \code
502 /// template<typename T>
503 /// struct X {
504 /// static T value;
505 /// };
506 ///
507 /// template<typename T>
508 /// T X<T>::value = T(17);
509 ///
510 /// int *x = &X<int>::value;
511 /// \endcode
512 ///
513 /// This mapping will contain an entry that maps from the VarDecl for
514 /// X<int>::value to the corresponding VarDecl for X<T>::value (within the
515 /// class template X) and will be marked TSK_ImplicitInstantiation.
516 llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>
517 TemplateOrInstantiation;
518
519 /// Keeps track of the declaration from which a using declaration was
520 /// created during instantiation.
521 ///
522 /// The source and target declarations are always a UsingDecl, an
523 /// UnresolvedUsingValueDecl, or an UnresolvedUsingTypenameDecl.
524 ///
525 /// For example:
526 /// \code
527 /// template<typename T>
528 /// struct A {
529 /// void f();
530 /// };
531 ///
532 /// template<typename T>
533 /// struct B : A<T> {
534 /// using A<T>::f;
535 /// };
536 ///
537 /// template struct B<int>;
538 /// \endcode
539 ///
540 /// This mapping will contain an entry that maps from the UsingDecl in
541 /// B<int> to the UnresolvedUsingDecl in B<T>.
542 llvm::DenseMap<NamedDecl *, NamedDecl *> InstantiatedFromUsingDecl;
543
544 /// Like InstantiatedFromUsingDecl, but for using-enum-declarations. Maps
545 /// from the instantiated using-enum to the templated decl from which it
546 /// came.
547 /// Note that using-enum-declarations cannot be dependent and thus will
548 /// never be instantiated from an "unresolved" version thereof (as with
549 /// using-declarations), so each mapping is from a (resolved) UsingEnumDecl
550 /// to a (resolved) UsingEnumDecl.
551 llvm::DenseMap<UsingEnumDecl *, UsingEnumDecl *>
552 InstantiatedFromUsingEnumDecl;
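/// For example (an illustrative sketch):
///
/// \code
/// enum class E { e1 };
///
/// template<typename T>
/// struct C {
///   using enum E;
/// };
///
/// template struct C<int>;
/// \endcode
///
/// This mapping will contain an entry from the UsingEnumDecl in C<int> to
/// the UsingEnumDecl in C<T>.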
553
554 /// Similarly maps instantiated UsingShadowDecls to their origin.
555 llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>
556 InstantiatedFromUsingShadowDecl;
557
558 llvm::DenseMap<FieldDecl *, FieldDecl *> InstantiatedFromUnnamedFieldDecl;
559
560 /// Mapping that stores the methods overridden by a given C++
561 /// member function.
562 ///
563 /// Since most C++ member functions aren't virtual and therefore
564 /// don't override anything, we store the overridden functions in
565 /// this map on the side rather than within the CXXMethodDecl structure.
566 using CXXMethodVector = llvm::TinyPtrVector<const CXXMethodDecl *>;
567 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector> OverriddenMethods;
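/// For example (an illustrative sketch):
///
/// \code
/// struct Base { virtual void f(); };
/// struct Derived : Base { void f() override; };
/// \endcode
///
/// Here the entry for Derived::f holds the single overridden method
/// Base::f; a non-virtual method gets no entry at all.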
568
569 /// Mapping from each declaration context to its corresponding
570 /// mangling numbering context (used for constructs like lambdas which
571 /// need to be consistently numbered for the mangler).
572 llvm::DenseMap<const DeclContext *, std::unique_ptr<MangleNumberingContext>>
573 MangleNumberingContexts;
574 llvm::DenseMap<const Decl *, std::unique_ptr<MangleNumberingContext>>
575 ExtraMangleNumberingContexts;
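/// For example (an illustrative sketch), two lambdas in one inline function
/// must receive stable, distinct numbers so that every translation unit
/// mangles them identically:
///
/// \code
/// inline void g() {
///   auto L1 = [] {}; // lambda #1 in g's numbering context
///   auto L2 = [] {}; // lambda #2
/// }
/// \endcode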
576
577 /// Side-table of mangling numbers for declarations which rarely
578 /// need them (like static local vars).
579 llvm::MapVector<const NamedDecl *, unsigned> MangleNumbers;
580 llvm::MapVector<const VarDecl *, unsigned> StaticLocalNumbers;
581 /// Mapping from lambda classes to their device-side mangling numbers, if present.
582 mutable llvm::DenseMap<const CXXRecordDecl *, unsigned>
583 DeviceLambdaManglingNumbers;
584
585 /// Mapping that stores parameterIndex values for ParmVarDecls when
586 /// that value exceeds the bitfield size of ParmVarDeclBits.ParameterIndex.
587 using ParameterIndexTable = llvm::DenseMap<const VarDecl *, unsigned>;
588 ParameterIndexTable ParamIndices;
589
590 ImportDecl *FirstLocalImport = nullptr;
591 ImportDecl *LastLocalImport = nullptr;
592
593 TranslationUnitDecl *TUDecl = nullptr;
594 mutable ExternCContextDecl *ExternCContext = nullptr;
595 mutable BuiltinTemplateDecl *MakeIntegerSeqDecl = nullptr;
596 mutable BuiltinTemplateDecl *TypePackElementDecl = nullptr;
597
598 /// The associated SourceManager object.
599 SourceManager &SourceMgr;
600
601 /// The language options used to create the AST associated with
602 /// this ASTContext object.
603 LangOptions &LangOpts;
604
605 /// NoSanitizeList object that is used by sanitizers to decide which
606 /// entities should not be instrumented.
607 std::unique_ptr<NoSanitizeList> NoSanitizeL;
608
609 /// Function filtering mechanism to determine whether a given function
610 /// should be imbued with the XRay "always" or "never" attributes.
611 std::unique_ptr<XRayFunctionFilter> XRayFilter;
612
613 /// ProfileList object that is used by the profile instrumentation
614 /// to decide which entities should be instrumented.
615 std::unique_ptr<ProfileList> ProfList;
616
617 /// The allocator used to create AST objects.
618 ///
619 /// AST objects are never destructed; rather, all memory associated with the
620 /// AST objects will be released when the ASTContext itself is destroyed.
621 mutable llvm::BumpPtrAllocator BumpAlloc;
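/// AST nodes are therefore created with the arena placement operator new
/// rather than plain new (a usage sketch; the node and arguments here are
/// placeholders):
///
/// \code
/// Expr *E = new (Ctx) ParenExpr(LParenLoc, RParenLoc, SubExpr);
/// \endcode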
622
623 /// Allocator for partial diagnostics.
624 PartialDiagnostic::DiagStorageAllocator DiagAllocator;
625
626 /// The current C++ ABI.
627 std::unique_ptr<CXXABI> ABI;
628 CXXABI *createCXXABI(const TargetInfo &T);
629
630 /// The logical -> physical address space map.
631 const LangASMap *AddrSpaceMap = nullptr;
632
633 /// Whether address space map mangling must be used with language-specific
634 /// address spaces (e.g. OpenCL/CUDA).
635 bool AddrSpaceMapMangling;
636
637 const TargetInfo *Target = nullptr;
638 const TargetInfo *AuxTarget = nullptr;
639 clang::PrintingPolicy PrintingPolicy;
640 std::unique_ptr<interp::Context> InterpContext;
641 std::unique_ptr<ParentMapContext> ParentMapCtx;
642
643 /// Keeps track of the deallocated DeclListNodes for future reuse.
644 DeclListNode *ListNodeFreeList = nullptr;
645
646public:
647 IdentifierTable &Idents;
648 SelectorTable &Selectors;
649 Builtin::Context &BuiltinInfo;
650 const TranslationUnitKind TUKind;
651 mutable DeclarationNameTable DeclarationNames;
652 IntrusiveRefCntPtr<ExternalASTSource> ExternalSource;
653 ASTMutationListener *Listener = nullptr;
654
655 /// Returns the clang bytecode interpreter context.
656 interp::Context &getInterpContext();
657
658 /// Returns the dynamic AST node parent map context.
659 ParentMapContext &getParentMapContext();
660
661 // A traversal scope limits the parts of the AST visible to certain analyses.
662 // RecursiveASTVisitor only visits specified children of TranslationUnitDecl.
663 // getParents() will only observe reachable parent edges.
664 //
665 // The scope is defined by a set of "top-level" declarations which will be
666 // visible under the TranslationUnitDecl.
667 // Initially, it is the entire TU, represented by {getTranslationUnitDecl()}.
668 //
669 // After setTraversalScope({foo, bar}), the exposed AST looks like:
670 // TranslationUnitDecl
671 // - foo
672 // - ...
673 // - bar
674 // - ...
675 // All other siblings of foo and bar are pruned from the tree.
676 // (However, they are still accessible via TranslationUnitDecl->decls().)
677 //
678 // Changing the scope clears the parent cache, which is expensive to rebuild.
679 std::vector<Decl *> getTraversalScope() const { return TraversalScope; }
680 void setTraversalScope(const std::vector<Decl *> &);
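// A usage sketch (the declarations named here are placeholders):
//   Ctx.setTraversalScope({Foo, Bar}); // analyses now see only foo and bar
//   /* ... run visitors / getParents() queries ... */
//   Ctx.setTraversalScope({Ctx.getTranslationUnitDecl()}); // restore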
681
682 /// Forwards to the ParentMapContext to get the parents of a node. New
683 /// callers should use ParentMapContext::getParents() directly.
684 template <typename NodeT> DynTypedNodeList getParents(const NodeT &Node);
685
686 const clang::PrintingPolicy &getPrintingPolicy() const {
687 return PrintingPolicy;
688 }