Bug Summary

File: llvm/lib/Analysis/ConstantFolding.cpp
Warning: line 2686, column 11
Called C++ object pointer is null

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name ConstantFolding.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Analysis -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Analysis -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Analysis -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Analysis -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Analysis/ConstantFolding.cpp

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Analysis/ConstantFolding.cpp

1//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines routines for folding instructions into constants.
10//
11// Also, to supplement the basic IR ConstantExpr simplifications,
12// this file defines some additional folding routines that can make use of
13// DataLayout information. These functions cannot go in IR due to library
14// dependency issues.
15//
16//===----------------------------------------------------------------------===//
17
18#include "llvm/Analysis/ConstantFolding.h"
19#include "llvm/ADT/APFloat.h"
20#include "llvm/ADT/APInt.h"
21#include "llvm/ADT/APSInt.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/DenseMap.h"
24#include "llvm/ADT/STLExtras.h"
25#include "llvm/ADT/SmallVector.h"
26#include "llvm/ADT/StringRef.h"
27#include "llvm/Analysis/TargetFolder.h"
28#include "llvm/Analysis/TargetLibraryInfo.h"
29#include "llvm/Analysis/ValueTracking.h"
30#include "llvm/Analysis/VectorUtils.h"
31#include "llvm/Config/config.h"
32#include "llvm/IR/Constant.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/DerivedTypes.h"
36#include "llvm/IR/Function.h"
37#include "llvm/IR/GlobalValue.h"
38#include "llvm/IR/GlobalVariable.h"
39#include "llvm/IR/InstrTypes.h"
40#include "llvm/IR/Instruction.h"
41#include "llvm/IR/Instructions.h"
42#include "llvm/IR/IntrinsicInst.h"
43#include "llvm/IR/Intrinsics.h"
44#include "llvm/IR/IntrinsicsAArch64.h"
45#include "llvm/IR/IntrinsicsAMDGPU.h"
46#include "llvm/IR/IntrinsicsARM.h"
47#include "llvm/IR/IntrinsicsWebAssembly.h"
48#include "llvm/IR/IntrinsicsX86.h"
49#include "llvm/IR/Operator.h"
50#include "llvm/IR/Type.h"
51#include "llvm/IR/Value.h"
52#include "llvm/Support/Casting.h"
53#include "llvm/Support/ErrorHandling.h"
54#include "llvm/Support/KnownBits.h"
55#include "llvm/Support/MathExtras.h"
56#include <cassert>
57#include <cerrno>
58#include <cfenv>
59#include <cmath>
60#include <cstddef>
61#include <cstdint>
62
63using namespace llvm;
64
65namespace {
66Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
67 ArrayRef<Constant *> Ops,
68 const DataLayout &DL,
69 const TargetLibraryInfo *TLI,
70 bool ForLoadOperand);
71
72//===----------------------------------------------------------------------===//
73// Constant Folding internal helper functions
74//===----------------------------------------------------------------------===//
75
76static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
77 Constant *C, Type *SrcEltTy,
78 unsigned NumSrcElts,
79 const DataLayout &DL) {
80 // Now that we know that the input value is a vector of integers, just shift
81 // and insert them into our result.
82 unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
83 for (unsigned i = 0; i != NumSrcElts; ++i) {
84 Constant *Element;
85 if (DL.isLittleEndian())
86 Element = C->getAggregateElement(NumSrcElts - i - 1);
87 else
88 Element = C->getAggregateElement(i);
89
90 if (Element && isa<UndefValue>(Element)) {
91 Result <<= BitShift;
92 continue;
93 }
94
95 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
96 if (!ElementCI)
97 return ConstantExpr::getBitCast(C, DestTy);
98
99 Result <<= BitShift;
100 Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
101 }
102
103 return nullptr;
104}
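
// A minimal standalone sketch of the shift-and-insert packing above, written
// directly against APInt (assumes a little-endian layout; the helper name is
// hypothetical):
static APInt packElementsLE(ArrayRef<APInt> Elts, unsigned DstBits) {
  APInt Result(DstBits, 0);
  // Visit the highest-addressed element first so it lands in the high bits.
  for (const APInt &E : reverse(Elts)) {
    Result <<= E.getBitWidth();
    Result |= E.zextOrSelf(DstBits);
  }
  return Result;
}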
105
106/// Constant fold bitcast, symbolically evaluating it with DataLayout.
107/// This always returns a non-null constant, but it may be a
108/// ConstantExpr if unfoldable.
109Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
110 assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
111 "Invalid constantexpr bitcast!");
112
113 // Catch the obvious splat cases.
114 if (C->isNullValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy())
115 return Constant::getNullValue(DestTy);
116 if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy() &&
117 !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
118 return Constant::getAllOnesValue(DestTy);
119
120 if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
121 // Handle a vector->scalar integer/fp cast.
122 if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
123 unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
124 Type *SrcEltTy = VTy->getElementType();
125
126 // If the vector is a vector of floating-point values, convert it to a
127 // vector of integers to simplify things.
128 if (SrcEltTy->isFloatingPointTy()) {
129 unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
130 auto *SrcIVTy = FixedVectorType::get(
131 IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
132 // Ask IR to do the conversion now that #elts line up.
133 C = ConstantExpr::getBitCast(C, SrcIVTy);
134 }
135
136 APInt Result(DL.getTypeSizeInBits(DestTy), 0);
137 if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
138 SrcEltTy, NumSrcElts, DL))
139 return CE;
140
141 if (isa<IntegerType>(DestTy))
142 return ConstantInt::get(DestTy, Result);
143
144 APFloat FP(DestTy->getFltSemantics(), Result);
145 return ConstantFP::get(DestTy->getContext(), FP);
146 }
147 }
148
149 // The code below only handles casts to vectors currently.
150 auto *DestVTy = dyn_cast<VectorType>(DestTy);
151 if (!DestVTy)
152 return ConstantExpr::getBitCast(C, DestTy);
153
154 // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
155 // vector so the code below can handle it uniformly.
156 if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
157 Constant *Ops = C; // don't take the address of C!
158 return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
159 }
160
161 // If this is a bitcast from constant vector -> vector, fold it.
162 if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
163 return ConstantExpr::getBitCast(C, DestTy);
164
165 // If the element types match, IR can fold it.
166 unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
167 unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
168 if (NumDstElt == NumSrcElt)
169 return ConstantExpr::getBitCast(C, DestTy);
170
171 Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
172 Type *DstEltTy = DestVTy->getElementType();
173
174 // Otherwise, we're changing the number of elements in a vector, which
175 // requires endianness information to do the right thing. For example,
176 // bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
177 // folds to (little endian):
178 // <4 x i32> <i32 0, i32 0, i32 1, i32 0>
179 // and to (big endian):
180 // <4 x i32> <i32 0, i32 0, i32 0, i32 1>
181
182 // First things first: we only want to think about integers here, so if
183 // we have something in FP form, recast it as an integer.
184 if (DstEltTy->isFloatingPointTy()) {
185 // Fold to a vector of integers with the same size as our FP type.
186 unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
187 auto *DestIVTy = FixedVectorType::get(
188 IntegerType::get(C->getContext(), FPWidth), NumDstElt);
189 // Recursively handle this integer conversion, if possible.
190 C = FoldBitCast(C, DestIVTy, DL);
191
192 // Finally, IR can handle this now that #elts line up.
193 return ConstantExpr::getBitCast(C, DestTy);
194 }
195
196 // Okay, we know the destination is integer, if the input is FP, convert
197 // it to integer first.
198 if (SrcEltTy->isFloatingPointTy()) {
199 unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
200 auto *SrcIVTy = FixedVectorType::get(
201 IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
202 // Ask IR to do the conversion now that #elts line up.
203 C = ConstantExpr::getBitCast(C, SrcIVTy);
204 // If IR wasn't able to fold it, bail out.
205 if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
206 !isa<ConstantDataVector>(C))
207 return C;
208 }
209
210 // Now we know that the input and output vectors are both integer vectors
211 // of the same size, and that their #elements is not the same. Do the
212 // conversion here, which depends on whether the input or output has
213 // more elements.
214 bool isLittleEndian = DL.isLittleEndian();
215
216 SmallVector<Constant*, 32> Result;
217 if (NumDstElt < NumSrcElt) {
218 // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
219 Constant *Zero = Constant::getNullValue(DstEltTy);
220 unsigned Ratio = NumSrcElt/NumDstElt;
221 unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
222 unsigned SrcElt = 0;
223 for (unsigned i = 0; i != NumDstElt; ++i) {
224 // Build each element of the result.
225 Constant *Elt = Zero;
226 unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
227 for (unsigned j = 0; j != Ratio; ++j) {
228 Constant *Src = C->getAggregateElement(SrcElt++);
229 if (Src && isa<UndefValue>(Src))
230 Src = Constant::getNullValue(
231 cast<VectorType>(C->getType())->getElementType());
232 else
233 Src = dyn_cast_or_null<ConstantInt>(Src);
234 if (!Src) // Reject constantexpr elements.
235 return ConstantExpr::getBitCast(C, DestTy);
236
237 // Zero extend the element to the right size.
238 Src = ConstantExpr::getZExt(Src, Elt->getType());
239
240 // Shift it to the right place, depending on endianness.
241 Src = ConstantExpr::getShl(Src,
242 ConstantInt::get(Src->getType(), ShiftAmt));
243 ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
244
245 // Mix it in.
246 Elt = ConstantExpr::getOr(Elt, Src);
247 }
248 Result.push_back(Elt);
249 }
250 return ConstantVector::get(Result);
251 }
252
253 // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
254 unsigned Ratio = NumDstElt/NumSrcElt;
255 unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);
256
257 // Loop over each source value, expanding into multiple results.
258 for (unsigned i = 0; i != NumSrcElt; ++i) {
259 auto *Element = C->getAggregateElement(i);
260
261 if (!Element) // Reject constantexpr elements.
262 return ConstantExpr::getBitCast(C, DestTy);
263
264 if (isa<UndefValue>(Element)) {
265 // Correctly propagate undef values.
266 Result.append(Ratio, UndefValue::get(DstEltTy));
267 continue;
268 }
269
270 auto *Src = dyn_cast<ConstantInt>(Element);
271 if (!Src)
272 return ConstantExpr::getBitCast(C, DestTy);
273
274 unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
275 for (unsigned j = 0; j != Ratio; ++j) {
276 // Shift the piece of the value into the right place, depending on
277 // endianness.
278 Constant *Elt = ConstantExpr::getLShr(Src,
279 ConstantInt::get(Src->getType(), ShiftAmt));
280 ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
281
282 // Truncate the element to an integer with the same pointer size and
283 // convert the element back to a pointer using an inttoptr.
284 if (DstEltTy->isPointerTy()) {
285 IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
286 Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
287 Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
288 continue;
289 }
290
291 // Truncate and remember this piece.
292 Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
293 }
294 }
295
296 return ConstantVector::get(Result);
297}
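
// A minimal sketch of the splat fast path above (the function name is
// hypothetical): an all-ones <4 x i8> bitcast to i32 takes the early exit
// and folds straight to i32 -1, with no per-element work.
static Constant *foldAllOnesSplatExample(LLVMContext &Ctx,
                                         const DataLayout &DL) {
  auto *V4i8 = FixedVectorType::get(Type::getInt8Ty(Ctx), 4);
  Constant *AllOnes = Constant::getAllOnesValue(V4i8);
  return FoldBitCast(AllOnes, Type::getInt32Ty(Ctx), DL); // i32 -1
}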
298
299} // end anonymous namespace
300
301/// If this constant is a constant offset from a global, return the global and
302/// the constant. Because of constantexprs, this function is recursive.
303bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
304 APInt &Offset, const DataLayout &DL,
305 DSOLocalEquivalent **DSOEquiv) {
306 if (DSOEquiv)
307 *DSOEquiv = nullptr;
308
309 // Trivial case, constant is the global.
310 if ((GV = dyn_cast<GlobalValue>(C))) {
311 unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
312 Offset = APInt(BitWidth, 0);
313 return true;
314 }
315
316 if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
317 if (DSOEquiv)
318 *DSOEquiv = FoundDSOEquiv;
319 GV = FoundDSOEquiv->getGlobalValue();
320 unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
321 Offset = APInt(BitWidth, 0);
322 return true;
323 }
324
325 // Otherwise, if this isn't a constant expr, bail out.
326 auto *CE = dyn_cast<ConstantExpr>(C);
327 if (!CE) return false;
328
329 // Look through ptr->int and ptr->ptr casts.
330 if (CE->getOpcode() == Instruction::PtrToInt ||
331 CE->getOpcode() == Instruction::BitCast)
332 return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL,
333 DSOEquiv);
334
335 // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
336 auto *GEP = dyn_cast<GEPOperator>(CE);
337 if (!GEP)
338 return false;
339
340 unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
341 APInt TmpOffset(BitWidth, 0);
342
343 // If the base isn't a global+constant, we aren't either.
344 if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL,
345 DSOEquiv))
346 return false;
347
348 // Otherwise, add any offset that our operands provide.
349 if (!GEP->accumulateConstantOffset(DL, TmpOffset))
350 return false;
351
352 Offset = TmpOffset;
353 return true;
354}
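
// A hedged caller-side sketch (the helper name is hypothetical; `Addr` would
// typically be a constant expression such as
// `getelementptr ([5 x i32], [5 x i32]* @a, i32 0, i32 2)`):
static bool offsetFromGlobalExample(Constant *Addr, const DataLayout &DL) {
  GlobalValue *GV = nullptr;
  APInt Offset;
  // On success GV is @a and Offset holds the accumulated byte offset (8 here).
  return IsConstantOffsetFromGlobal(Addr, GV, Offset, DL);
}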
355
356Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
357 const DataLayout &DL) {
358 do {
359 Type *SrcTy = C->getType();
360 uint64_t DestSize = DL.getTypeSizeInBits(DestTy);
361 uint64_t SrcSize = DL.getTypeSizeInBits(SrcTy);
362 if (SrcSize < DestSize)
363 return nullptr;
364
365 // Catch the obvious splat cases (since all-zeros can coerce non-integral
366 // pointers legally).
367 if (C->isNullValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy())
368 return Constant::getNullValue(DestTy);
369 if (C->isAllOnesValue() &&
370 (DestTy->isIntegerTy() || DestTy->isFloatingPointTy() ||
371 DestTy->isVectorTy()) &&
372 !DestTy->isX86_AMXTy() && !DestTy->isX86_MMXTy() &&
373 !DestTy->isPtrOrPtrVectorTy())
374 // Get ones when the input is trivial, but
375 // only for supported types inside getAllOnesValue.
376 return Constant::getAllOnesValue(DestTy);
377
378 // If the type sizes are the same and a cast is legal, just directly
379 // cast the constant.
380 // But be careful not to coerce non-integral pointers illegally.
381 if (SrcSize == DestSize &&
382 DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==
383 DL.isNonIntegralPointerType(DestTy->getScalarType())) {
384 Instruction::CastOps Cast = Instruction::BitCast;
385 // If we are going from a pointer to int or vice versa, we spell the cast
386 // differently.
387 if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
388 Cast = Instruction::IntToPtr;
389 else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
390 Cast = Instruction::PtrToInt;
391
392 if (CastInst::castIsValid(Cast, C, DestTy))
393 return ConstantExpr::getCast(Cast, C, DestTy);
394 }
395
396 // If this isn't an aggregate type, there is nothing we can do to drill down
397 // and find a bitcastable constant.
398 if (!SrcTy->isAggregateType() && !SrcTy->isVectorTy())
399 return nullptr;
400
401 // We're simulating a load through a pointer that was bitcast to point to
402 // a different type, so we can try to walk down through the initial
403 // elements of an aggregate to see if some part of the aggregate is
404 // castable to implement the "load" semantic model.
405 if (SrcTy->isStructTy()) {
406 // Struct types might have leading zero-length elements like [0 x i32],
407 // which are certainly not what we are looking for, so skip them.
408 unsigned Elem = 0;
409 Constant *ElemC;
410 do {
411 ElemC = C->getAggregateElement(Elem++);
412 } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
413 C = ElemC;
414 } else {
415 C = C->getAggregateElement(0u);
416 }
417 } while (C);
418
419 return nullptr;
420}
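
// A minimal sketch of the struct drill-down above (assumes the first
// non-zero-size field of `StructInit` can be coerced to i64; the helper name
// is hypothetical):
static Constant *loadFirstFieldAsI64(Constant *StructInit,
                                     const DataLayout &DL) {
  Type *I64 = Type::getInt64Ty(StructInit->getContext());
  // Leading zero-size members such as [0 x i32] are skipped before coercion.
  return ConstantFoldLoadThroughBitcast(StructInit, I64, DL);
}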
421
422namespace {
423
424/// Recursive helper to read bits out of a global. C is the constant being copied
425/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
426/// results into and BytesLeft is the number of bytes left in
427/// the CurPtr buffer. DL is the DataLayout.
428bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
429 unsigned BytesLeft, const DataLayout &DL) {
430 assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
431 "Out of range access");
432
433 // If this element is zero or undefined, we can just return since *CurPtr is
434 // zero initialized.
435 if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
436 return true;
437
438 if (auto *CI = dyn_cast<ConstantInt>(C)) {
439 if (CI->getBitWidth() > 64 ||
440 (CI->getBitWidth() & 7) != 0)
441 return false;
442
443 uint64_t Val = CI->getZExtValue();
444 unsigned IntBytes = unsigned(CI->getBitWidth()/8);
445
446 for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
447 int n = ByteOffset;
448 if (!DL.isLittleEndian())
449 n = IntBytes - n - 1;
450 CurPtr[i] = (unsigned char)(Val >> (n * 8));
451 ++ByteOffset;
452 }
453 return true;
454 }
455
456 if (auto *CFP = dyn_cast<ConstantFP>(C)) {
457 if (CFP->getType()->isDoubleTy()) {
458 C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
459 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
460 }
461 if (CFP->getType()->isFloatTy()){
462 C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
463 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
464 }
465 if (CFP->getType()->isHalfTy()){
466 C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
467 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
468 }
469 return false;
470 }
471
472 if (auto *CS = dyn_cast<ConstantStruct>(C)) {
473 const StructLayout *SL = DL.getStructLayout(CS->getType());
474 unsigned Index = SL->getElementContainingOffset(ByteOffset);
475 uint64_t CurEltOffset = SL->getElementOffset(Index);
476 ByteOffset -= CurEltOffset;
477
478 while (true) {
479 // If the element access is to the element itself and not to tail padding,
480 // read the bytes from the element.
481 uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());
482
483 if (ByteOffset < EltSize &&
484 !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
485 BytesLeft, DL))
486 return false;
487
488 ++Index;
489
490 // Check to see if we read from the last struct element; if so, we're done.
491 if (Index == CS->getType()->getNumElements())
492 return true;
493
494 // If we read all of the bytes we needed from this element we're done.
495 uint64_t NextEltOffset = SL->getElementOffset(Index);
496
497 if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
498 return true;
499
500 // Move to the next element of the struct.
501 CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
502 BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
503 ByteOffset = 0;
504 CurEltOffset = NextEltOffset;
505 }
506 // not reached.
507 }
508
509 if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
510 isa<ConstantDataSequential>(C)) {
511 uint64_t NumElts;
512 Type *EltTy;
513 if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
514 NumElts = AT->getNumElements();
515 EltTy = AT->getElementType();
516 } else {
517 NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
518 EltTy = cast<FixedVectorType>(C->getType())->getElementType();
519 }
520 uint64_t EltSize = DL.getTypeAllocSize(EltTy);
521 uint64_t Index = ByteOffset / EltSize;
522 uint64_t Offset = ByteOffset - Index * EltSize;
523
524 for (; Index != NumElts; ++Index) {
525 if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
526 BytesLeft, DL))
527 return false;
528
529 uint64_t BytesWritten = EltSize - Offset;
530 assert(BytesWritten <= EltSize && "Not indexing into this element?");
531 if (BytesWritten >= BytesLeft)
532 return true;
533
534 Offset = 0;
535 BytesLeft -= BytesWritten;
536 CurPtr += BytesWritten;
537 }
538 return true;
539 }
540
541 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
542 if (CE->getOpcode() == Instruction::IntToPtr &&
543 CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
544 return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
545 BytesLeft, DL);
546 }
547 }
548
549 // Otherwise, unknown initializer type.
550 return false;
551}
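
// A minimal sketch of the per-byte extraction rule above: byte `ByteIndex`
// of a little-endian buffer receives bits [8*i, 8*i+8) of the integer, while
// a big-endian target mirrors the byte index first (helper name hypothetical).
static unsigned char extractByte(uint64_t Val, unsigned ByteIndex,
                                 unsigned IntBytes, bool LittleEndian) {
  unsigned N = LittleEndian ? ByteIndex : IntBytes - ByteIndex - 1;
  return (unsigned char)(Val >> (N * 8));
}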
552
553Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
554 const DataLayout &DL) {
555 // Bail out early; we do not expect to load from a scalable global variable.
556 if (isa<ScalableVectorType>(LoadTy))
557 return nullptr;
558
559 auto *PTy = cast<PointerType>(C->getType());
560 auto *IntType = dyn_cast<IntegerType>(LoadTy);
561
562 // If this isn't an integer load we can't fold it directly.
563 if (!IntType) {
564 unsigned AS = PTy->getAddressSpace();
565
566 // If this is a float/double load, we can try folding it as an int32/64 load
567 // and then bitcast the result. This can be useful for union cases. Note
568 // that address spaces don't matter here since we're not going to produce
569 // an actual new load.
570 Type *MapTy;
571 if (LoadTy->isHalfTy())
572 MapTy = Type::getInt16Ty(C->getContext());
573 else if (LoadTy->isFloatTy())
574 MapTy = Type::getInt32Ty(C->getContext());
575 else if (LoadTy->isDoubleTy())
576 MapTy = Type::getInt64Ty(C->getContext());
577 else if (LoadTy->isVectorTy()) {
578 MapTy = PointerType::getIntNTy(
579 C->getContext(), DL.getTypeSizeInBits(LoadTy).getFixedSize());
580 } else
581 return nullptr;
582
583 C = FoldBitCast(C, MapTy->getPointerTo(AS), DL);
584 if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL)) {
585 if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
586 !LoadTy->isX86_AMXTy())
587 // Materializing a zero can be done trivially without a bitcast
588 return Constant::getNullValue(LoadTy);
589 Type *CastTy = LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;
590 Res = FoldBitCast(Res, CastTy, DL);
591 if (LoadTy->isPtrOrPtrVectorTy()) {
592 // For a vector of pointers, first convert to a vector of integers, then do the vector inttoptr.
593 if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
594 !LoadTy->isX86_AMXTy())
595 return Constant::getNullValue(LoadTy);
596 if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
597 // Be careful not to replace a load of an addrspace value with an inttoptr here
598 return nullptr;
599 Res = ConstantExpr::getCast(Instruction::IntToPtr, Res, LoadTy);
600 }
601 return Res;
602 }
603 return nullptr;
604 }
605
606 unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
607 if (BytesLoaded > 32 || BytesLoaded == 0)
608 return nullptr;
609
610 GlobalValue *GVal;
611 APInt OffsetAI;
612 if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL))
613 return nullptr;
614
615 auto *GV = dyn_cast<GlobalVariable>(GVal);
616 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
617 !GV->getInitializer()->getType()->isSized())
618 return nullptr;
619
620 int64_t Offset = OffsetAI.getSExtValue();
621 int64_t InitializerSize =
622 DL.getTypeAllocSize(GV->getInitializer()->getType()).getFixedSize();
623
624 // If we're not accessing anything in this constant, the result is undefined.
625 if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
626 return UndefValue::get(IntType);
627
628 // Likewise if we start reading at or past the end of the initializer.
629 if (Offset >= InitializerSize)
630 return UndefValue::get(IntType);
631
632 unsigned char RawBytes[32] = {0};
633 unsigned char *CurPtr = RawBytes;
634 unsigned BytesLeft = BytesLoaded;
635
636 // If the load starts before the beginning of the global, some bytes may be valid.
637 if (Offset < 0) {
638 CurPtr += -Offset;
639 BytesLeft += Offset;
640 Offset = 0;
641 }
642
643 if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL))
644 return nullptr;
645
646 APInt ResultVal = APInt(IntType->getBitWidth(), 0);
647 if (DL.isLittleEndian()) {
648 ResultVal = RawBytes[BytesLoaded - 1];
649 for (unsigned i = 1; i != BytesLoaded; ++i) {
650 ResultVal <<= 8;
651 ResultVal |= RawBytes[BytesLoaded - 1 - i];
652 }
653 } else {
654 ResultVal = RawBytes[0];
655 for (unsigned i = 1; i != BytesLoaded; ++i) {
656 ResultVal <<= 8;
657 ResultVal |= RawBytes[i];
658 }
659 }
660
661 return ConstantInt::get(IntType->getContext(), ResultVal);
662}
663
664Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy,
665 const DataLayout &DL) {
666 auto *SrcPtr = CE->getOperand(0);
667 if (!SrcPtr->getType()->isPointerTy())
668 return nullptr;
669
670 return ConstantFoldLoadFromConstPtr(SrcPtr, DestTy, DL);
671}
672
673} // end anonymous namespace
674
675Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
676 const DataLayout &DL) {
677 // First, try the easy cases:
678 if (auto *GV = dyn_cast<GlobalVariable>(C))
679 if (GV->isConstant() && GV->hasDefinitiveInitializer())
680 return ConstantFoldLoadThroughBitcast(GV->getInitializer(), Ty, DL);
681
682 if (auto *GA = dyn_cast<GlobalAlias>(C))
683 if (GA->getAliasee() && !GA->isInterposable())
684 return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL);
685
686 // If the loaded value isn't a constant expr, we can't handle it.
687 auto *CE = dyn_cast<ConstantExpr>(C);
688 if (!CE)
689 return nullptr;
690
691 if (CE->getOpcode() == Instruction::GetElementPtr) {
692 if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
693 if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
694 if (Constant *V = ConstantFoldLoadThroughGEPConstantExpr(
695 GV->getInitializer(), CE, Ty, DL))
696 return V;
697 }
698 } else {
699 // Try to simplify the GEP if the pointer operand wasn't a GlobalVariable.
700 // SymbolicallyEvaluateGEP() with `ForLoadOperand = true` can potentially
701 // simplify the GEP more than it normally would be, but this should only
702 // be used when constant folding loads.
703 SmallVector<Constant *> Ops;
704 for (unsigned I = 0, E = CE->getNumOperands(); I != E; ++I)
705 Ops.push_back(cast<Constant>(CE->getOperand(I)));
706 if (auto *Simplified = dyn_cast_or_null<ConstantExpr>(
707 SymbolicallyEvaluateGEP(cast<GEPOperator>(CE), Ops, DL, nullptr,
708 /*ForLoadOperand*/ true))) {
709 // If the symbolically evaluated GEP is another GEP, we can only const
710 // fold it if the resulting pointer operand is a GlobalValue. Otherwise
711 // there is nothing else to simplify since the GEP is already in the
712 // most simplified form.
713 if (isa<GEPOperator>(Simplified)) {
714 if (auto *GV = dyn_cast<GlobalVariable>(Simplified->getOperand(0))) {
715 if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
716 if (Constant *V = ConstantFoldLoadThroughGEPConstantExpr(
717 GV->getInitializer(), Simplified, Ty, DL))
718 return V;
719 }
720 }
721 } else {
722 return ConstantFoldLoadFromConstPtr(Simplified, Ty, DL);
723 }
724 }
725 }
726 }
727
728 if (CE->getOpcode() == Instruction::BitCast)
729 if (Constant *LoadedC = ConstantFoldLoadThroughBitcastExpr(CE, Ty, DL))
730 return LoadedC;
731
732 // Instead of loading a constant C string, use the corresponding integer value
733 // directly if the string length is small enough.
734 StringRef Str;
735 if (getConstantStringInfo(CE, Str) && !Str.empty()) {
736 size_t StrLen = Str.size();
737 unsigned NumBits = Ty->getPrimitiveSizeInBits();
738 // Replace load with immediate integer if the result is an integer or fp
739 // value.
740 if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
741 (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
742 APInt StrVal(NumBits, 0);
743 APInt SingleChar(NumBits, 0);
744 if (DL.isLittleEndian()) {
745 for (unsigned char C : reverse(Str.bytes())) {
746 SingleChar = static_cast<uint64_t>(C);
747 StrVal = (StrVal << 8) | SingleChar;
748 }
749 } else {
750 for (unsigned char C : Str.bytes()) {
751 SingleChar = static_cast<uint64_t>(C);
752 StrVal = (StrVal << 8) | SingleChar;
753 }
754 // Append the NUL terminator at the end.
755 SingleChar = 0;
756 StrVal = (StrVal << 8) | SingleChar;
757 }
758
759 Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
760 if (Ty->isFloatingPointTy())
761 Res = ConstantExpr::getBitCast(Res, Ty);
762 return Res;
763 }
764 }
765
766 // If this load comes from anywhere in a constant global, and if the global
767 // is all undef or zero, we know what it loads.
768 if (auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(CE))) {
769 if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
770 if (GV->getInitializer()->isNullValue())
771 return Constant::getNullValue(Ty);
772 if (isa<UndefValue>(GV->getInitializer()))
773 return UndefValue::get(Ty);
774 }
775 }
776
777 // Try hard to fold loads from bitcasted strange and non-type-safe things.
778 return FoldReinterpretLoadFromConstPtr(CE, Ty, DL);
779}
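
// A worked sketch of the C-string fast path above: for the constant string
// "abc" (bytes 61 62 63 00), an i32 load folds to 0x00636261 on a
// little-endian target and to 0x61626300 on a big-endian one. The helper
// below (name hypothetical) reproduces the little-endian packing.
static uint32_t packCStringLE(StringRef Str) {
  uint32_t V = 0; // the implicit NUL terminator supplies the zero high byte
  for (unsigned char C : reverse(Str.bytes()))
    V = (V << 8) | C;
  return V; // packCStringLE("abc") == 0x00636261
}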
780
781namespace {
782
783/// One of Op0/Op1 is a constant expression.
784/// Attempt to symbolically evaluate the result of a binary operator merging
785/// these together. DL supplies the DataLayout information used for the
786/// evaluation.
787Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
788 const DataLayout &DL) {
789 // SROA
790
791 // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
792 // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
793 // bits.
794
795 if (Opc == Instruction::And) {
796 KnownBits Known0 = computeKnownBits(Op0, DL);
797 KnownBits Known1 = computeKnownBits(Op1, DL);
798 if ((Known1.One | Known0.Zero).isAllOnesValue()) {
799 // All the bits of Op0 that the 'and' could be masking are already zero.
800 return Op0;
801 }
802 if ((Known0.One | Known1.Zero).isAllOnesValue()) {
803 // All the bits of Op1 that the 'and' could be masking are already zero.
804 return Op1;
805 }
806
807 Known0 &= Known1;
808 if (Known0.isConstant())
809 return ConstantInt::get(Op0->getType(), Known0.getConstant());
810 }
811
812 // If the constant expr is something like &A[123] - &A[4].f, fold this into a
813 // constant. This happens frequently when iterating over a global array.
814 if (Opc == Instruction::Sub) {
815 GlobalValue *GV1, *GV2;
816 APInt Offs1, Offs2;
817
818 if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
819 if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
820 unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());
821
822 // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
823 // PtrToInt may change the bitwidth, so we have to convert to the right size
824 // first.
825 return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
826 Offs2.zextOrTrunc(OpSize));
827 }
828 }
829
830 return nullptr;
831}
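
// A minimal sketch of the KnownBits test above, specialized to a constant
// mask (helper name hypothetical): (and X, C) is a no-op exactly when every
// bit the mask could clear is already known zero in X.
static bool andIsNoOp(const KnownBits &KnownX, const APInt &MaskC) {
  // Same condition as (Known1.One | Known0.Zero).isAllOnesValue() above.
  return (MaskC | KnownX.Zero).isAllOnesValue();
}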
832
833/// If array indices are not pointer-sized integers, explicitly cast them so
834/// that they aren't implicitly casted by the getelementptr.
835Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
836 Type *ResultTy, Optional<unsigned> InRangeIndex,
837 const DataLayout &DL, const TargetLibraryInfo *TLI) {
838 Type *IntIdxTy = DL.getIndexType(ResultTy);
839 Type *IntIdxScalarTy = IntIdxTy->getScalarType();
840
841 bool Any = false;
842 SmallVector<Constant*, 32> NewIdxs;
843 for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
844 if ((i == 1 ||
845 !isa<StructType>(GetElementPtrInst::getIndexedType(
846 SrcElemTy, Ops.slice(1, i - 1)))) &&
847 Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
848 Any = true;
849 Type *NewType = Ops[i]->getType()->isVectorTy()
850 ? IntIdxTy
851 : IntIdxScalarTy;
852 NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
853 true,
854 NewType,
855 true),
856 Ops[i], NewType));
857 } else
858 NewIdxs.push_back(Ops[i]);
859 }
860
861 if (!Any)
862 return nullptr;
863
864 Constant *C = ConstantExpr::getGetElementPtr(
865 SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
866 return ConstantFoldConstant(C, DL, TLI);
867}
868
869/// Strip the pointer casts, but preserve the address space information.
870Constant *StripPtrCastKeepAS(Constant *Ptr, bool ForLoadOperand) {
871 assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
872 auto *OldPtrTy = cast<PointerType>(Ptr->getType());
873 Ptr = cast<Constant>(Ptr->stripPointerCasts());
874 if (ForLoadOperand) {
875 while (isa<GlobalAlias>(Ptr) && !cast<GlobalAlias>(Ptr)->isInterposable() &&
876 !cast<GlobalAlias>(Ptr)->getBaseObject()->isInterposable()) {
877 Ptr = cast<GlobalAlias>(Ptr)->getAliasee();
878 }
879 }
880
881 auto *NewPtrTy = cast<PointerType>(Ptr->getType());
882
883 // Preserve the address space number of the pointer.
884 if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
885 Ptr = ConstantExpr::getPointerCast(
886 Ptr, PointerType::getWithSamePointeeType(NewPtrTy,
887 OldPtrTy->getAddressSpace()));
888 }
889 return Ptr;
890}
891
892/// If we can symbolically evaluate the GEP constant expression, do so.
893Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
894 ArrayRef<Constant *> Ops,
895 const DataLayout &DL,
896 const TargetLibraryInfo *TLI,
897 bool ForLoadOperand) {
898 const GEPOperator *InnermostGEP = GEP;
899 bool InBounds = GEP->isInBounds();
900
901 Type *SrcElemTy = GEP->getSourceElementType();
902 Type *ResElemTy = GEP->getResultElementType();
903 Type *ResTy = GEP->getType();
904 if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
905 return nullptr;
906
907 if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
908 GEP->getInRangeIndex(), DL, TLI))
909 return C;
910
911 Constant *Ptr = Ops[0];
912 if (!Ptr->getType()->isPointerTy())
913 return nullptr;
914
915 Type *IntIdxTy = DL.getIndexType(Ptr->getType());
916
917 // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
918 // "inttoptr (sub (ptrtoint Ptr), V)"
919 if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
920 auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
921 assert((!CE || CE->getType() == IntIdxTy) &&
922 "CastGEPIndices didn't canonicalize index types!");
923 if (CE && CE->getOpcode() == Instruction::Sub &&
924 CE->getOperand(0)->isNullValue()) {
925 Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
926 Res = ConstantExpr::getSub(Res, CE->getOperand(1));
927 Res = ConstantExpr::getIntToPtr(Res, ResTy);
928 return ConstantFoldConstant(Res, DL, TLI);
929 }
930 }
931
932 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
933 if (!isa<ConstantInt>(Ops[i]))
934 return nullptr;
935
936 unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
937 APInt Offset =
938 APInt(BitWidth,
939 DL.getIndexedOffsetInType(
940 SrcElemTy,
941 makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
942 Ptr = StripPtrCastKeepAS(Ptr, ForLoadOperand);
943
944 // If this is a GEP of a GEP, fold it all into a single GEP.
945 while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
946 InnermostGEP = GEP;
947 InBounds &= GEP->isInBounds();
948
949 SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());
950
951 // Do not try to incorporate the sub-GEP if some index is not a constant integer.
952 bool AllConstantInt = true;
953 for (Value *NestedOp : NestedOps)
954 if (!isa<ConstantInt>(NestedOp)) {
955 AllConstantInt = false;
956 break;
957 }
958 if (!AllConstantInt)
959 break;
960
961 Ptr = cast<Constant>(GEP->getOperand(0));
962 SrcElemTy = GEP->getSourceElementType();
963 Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
964 Ptr = StripPtrCastKeepAS(Ptr, ForLoadOperand);
965 }
966
967 // If the base value for this address is a literal integer value, fold the
968 // getelementptr to the resulting integer value cast to the pointer type.
969 APInt BasePtr(BitWidth, 0);
970 if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
971 if (CE->getOpcode() == Instruction::IntToPtr) {
972 if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
973 BasePtr = Base->getValue().zextOrTrunc(BitWidth);
974 }
975 }
976
977 auto *PTy = cast<PointerType>(Ptr->getType());
978 if ((Ptr->isNullValue() || BasePtr != 0) &&
979 !DL.isNonIntegralPointerType(PTy)) {
980 Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
981 return ConstantExpr::getIntToPtr(C, ResTy);
982 }
983
984 // Otherwise form a regular getelementptr. Recompute the indices so that
985 // we eliminate over-indexing of the notional static type array bounds.
986 // This makes it easy to determine if the getelementptr is "inbounds".
987 // Also, this helps GlobalOpt do SROA on GlobalVariables.
988 SmallVector<Constant *, 32> NewIdxs;
989 Type *Ty = PTy;
990 SrcElemTy = PTy->getElementType();
991
992 do {
993 if (!Ty->isStructTy()) {
994 if (Ty->isPointerTy()) {
995 // The only pointer indexing we'll do is on the first index of the GEP.
996 if (!NewIdxs.empty())
997 break;
998
999 Ty = SrcElemTy;
1000
1001 // Only handle pointers to sized types, not pointers to functions.
1002 if (!Ty->isSized())
1003 return nullptr;
1004 } else {
1005 Type *NextTy = GetElementPtrInst::getTypeAtIndex(Ty, (uint64_t)0);
1006 if (!NextTy)
1007 break;
1008 Ty = NextTy;
1009 }
1010
1011 // Determine which element of the array the offset points into.
1012 APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty));
1013 if (ElemSize == 0) {
1014 // The element size is 0. This may be [0 x Ty]*, so just use a zero
1015 // index for this level and proceed to the next level to see if it can
1016 // accommodate the offset.
1017 NewIdxs.push_back(ConstantInt::get(IntIdxTy, 0));
1018 } else {
1019 // The element size is non-zero; divide the offset by the element
1020 // size (rounding down) to compute the index at this level.
1021 bool Overflow;
1022 APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow);
1023 if (Overflow)
1024 break;
1025 Offset -= NewIdx * ElemSize;
1026 NewIdxs.push_back(ConstantInt::get(IntIdxTy, NewIdx));
1027 }
1028 } else {
1029 auto *STy = cast<StructType>(Ty);
1030 // If we end up with an offset that isn't valid for this struct type, we
1031 // can't re-form this GEP in a regular form, so bail out. The pointer
1032 // operand likely went through casts that are necessary to make the GEP
1033 // sensible.
1034 const StructLayout &SL = *DL.getStructLayout(STy);
1035 if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes()))
1036 break;
1037
1038 // Determine which field of the struct the offset points into. The
1039 // getZExtValue is fine as we've already ensured that the offset is
1040 // within the range representable by the StructLayout API.
1041 unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
1042 NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
1043 ElIdx));
1044 Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
1045 Ty = STy->getTypeAtIndex(ElIdx);
1046 }
1047 } while (Ty != ResElemTy);
1048
1049 // If we haven't used up the entire offset by descending the static
1050 // type, then the offset is pointing into the middle of an indivisible
1051 // member, so we can't simplify it.
1052 if (Offset != 0)
1053 return nullptr;
1054
1055 // Preserve the inrange index from the innermost GEP if possible. We must
1056 // have calculated the same indices up to and including the inrange index.
1057 Optional<unsigned> InRangeIndex;
1058 if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
1059 if (SrcElemTy == InnermostGEP->getSourceElementType() &&
1060 NewIdxs.size() > *LastIRIndex) {
1061 InRangeIndex = LastIRIndex;
1062 for (unsigned I = 0; I <= *LastIRIndex; ++I)
1063 if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
1064 return nullptr;
1065 }
1066
1067 // Create a GEP.
1068 Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
1069 InBounds, InRangeIndex);
1070 assert(C->getType()->getPointerElementType() == Ty &&
1071 "Computed GetElementPtr has unexpected type!");
1072
1073 // If we ended up indexing a member with a type that doesn't match
1074 // the type of what the original indices indexed, add a cast.
1075 if (C->getType() != ResTy)
1076 C = FoldBitCast(C, ResTy, DL);
1077
1078 return C;
1079}
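
// A minimal sketch of the index-recovery step above (helper name
// hypothetical): a byte offset is divided by the element size, checking for
// signed-division overflow, to rebuild one array index per type level.
static Optional<APInt> indexForOffset(const APInt &Offset,
                                      const APInt &ElemSize) {
  bool Overflow = false;
  APInt Idx = Offset.sdiv_ov(ElemSize, Overflow);
  if (Overflow)
    return None;
  return Idx;
}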
1080
1081/// Attempt to constant fold an instruction with the
1082/// specified opcode and operands. If successful, the constant result is
1083/// returned, if not, null is returned. Note that this function can fail when
1084/// attempting to fold instructions like loads and stores, which have no
1085/// constant expression form.
1086Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
1087 ArrayRef<Constant *> Ops,
1088 const DataLayout &DL,
1089 const TargetLibraryInfo *TLI) {
1090 Type *DestTy = InstOrCE->getType();
1091
1092 if (Instruction::isUnaryOp(Opcode))
1093 return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);
1094
1095 if (Instruction::isBinaryOp(Opcode))
1096 return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);
1097
1098 if (Instruction::isCast(Opcode))
1099 return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);
1100
1101 if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
1102 if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI,
1103 /*ForLoadOperand*/ false))
1104 return C;
1105
1106 return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
1107 Ops.slice(1), GEP->isInBounds(),
1108 GEP->getInRangeIndex());
1109 }
1110
1111 if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
1112 return CE->getWithOperands(Ops);
1113
1114 switch (Opcode) {
1115 default: return nullptr;
1116 case Instruction::ICmp:
1117 case Instruction::FCmp: llvm_unreachable("Invalid for compares");
1118 case Instruction::Freeze:
1119 return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr;
1120 case Instruction::Call:
1121 if (auto *F = dyn_cast<Function>(Ops.back())) {
1122 const auto *Call = cast<CallBase>(InstOrCE);
1123 if (canConstantFoldCallTo(Call, F))
1124 return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
1125 }
1126 return nullptr;
1127 case Instruction::Select:
1128 return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
1129 case Instruction::ExtractElement:
1130 return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
1131 case Instruction::ExtractValue:
1132 return ConstantExpr::getExtractValue(
1133 Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
1134 case Instruction::InsertElement:
1135 return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
1136 case Instruction::ShuffleVector:
1137 return ConstantExpr::getShuffleVector(
1138 Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
1139 }
1140}
1141
1142} // end anonymous namespace
1143
1144//===----------------------------------------------------------------------===//
1145// Constant Folding public APIs
1146//===----------------------------------------------------------------------===//
1147
1148namespace {
1149
1150Constant *
1151ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
1152 const TargetLibraryInfo *TLI,
1153 SmallDenseMap<Constant *, Constant *> &FoldedOps) {
1154 if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
1155 return const_cast<Constant *>(C);
1156
1157 SmallVector<Constant *, 8> Ops;
1158 for (const Use &OldU : C->operands()) {
1159 Constant *OldC = cast<Constant>(&OldU);
1160 Constant *NewC = OldC;
1161 // Recursively fold the ConstantExpr's operands. If we have already folded
1162 // a ConstantExpr, we don't have to process it again.
1163 if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
1164 auto It = FoldedOps.find(OldC);
1165 if (It == FoldedOps.end()) {
1166 NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
1167 FoldedOps.insert({OldC, NewC});
1168 } else {
1169 NewC = It->second;
1170 }
1171 }
1172 Ops.push_back(NewC);
1173 }
1174
1175 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1176 if (CE->isCompare())
1177 return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
1178 DL, TLI);
1179
1180 return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
1181 }
1182
1183 assert(isa<ConstantVector>(C));
1184 return ConstantVector::get(Ops);
1185}
1186
1187} // end anonymous namespace
1188
1189Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
1190 const TargetLibraryInfo *TLI) {
1191 // Handle PHI nodes quickly here...
1192 if (auto *PN = dyn_cast<PHINode>(I)) {
1193 Constant *CommonValue = nullptr;
1194
1195 SmallDenseMap<Constant *, Constant *> FoldedOps;
1196 for (Value *Incoming : PN->incoming_values()) {
1197 // If the incoming value is undef then skip it. Note that while we could
1198 // skip the value if it is equal to the phi node itself, we choose not to
1199 // because that would break the rule that constant folding only applies if
1200 // all operands are constants.
1201 if (isa<UndefValue>(Incoming))
1202 continue;
1203 // If the incoming value is not a constant, then give up.
1204 auto *C = dyn_cast<Constant>(Incoming);
1205 if (!C)
1206 return nullptr;
1207 // Fold the PHI's operands.
1208 C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
1209 // If the incoming value is a different constant to
1210 // the one we saw previously, then give up.
1211 if (CommonValue && C != CommonValue)
1212 return nullptr;
1213 CommonValue = C;
1214 }
1215
1216 // If we reach here, all incoming values are the same constant or undef.
1217 return CommonValue ? CommonValue : UndefValue::get(PN->getType());
1218 }
1219
1220 // Scan the operand list, checking to see if they are all constants; if so,
1221 // hand off to ConstantFoldInstOperandsImpl.
1222 if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
1223 return nullptr;
1224
1225 SmallDenseMap<Constant *, Constant *> FoldedOps;
1226 SmallVector<Constant *, 8> Ops;
1227 for (const Use &OpU : I->operands()) {
1228 auto *Op = cast<Constant>(&OpU);
1229 // Fold the Instruction's operands.
1230 Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
1231 Ops.push_back(Op);
1232 }
1233
1234 if (const auto *CI = dyn_cast<CmpInst>(I))
1235 return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
1236 DL, TLI);
1237
1238 if (const auto *LI = dyn_cast<LoadInst>(I)) {
1239 if (LI->isVolatile())
1240 return nullptr;
1241 return ConstantFoldLoadFromConstPtr(Ops[0], LI->getType(), DL);
1242 }
1243
1244 if (auto *IVI = dyn_cast<InsertValueInst>(I))
1245 return ConstantExpr::getInsertValue(Ops[0], Ops[1], IVI->getIndices());
1246
1247 if (auto *EVI = dyn_cast<ExtractValueInst>(I))
1248 return ConstantExpr::getExtractValue(Ops[0], EVI->getIndices());
1249
1250 return ConstantFoldInstOperands(I, Ops, DL, TLI);
1251}
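
// A hedged usage sketch (hypothetical driver): fold every instruction in a
// block whose operands are all constants. Real passes would also erase the
// now-dead instruction after replacing its uses.
static void foldAllInBlock(BasicBlock &BB, const DataLayout &DL,
                           const TargetLibraryInfo *TLI) {
  for (Instruction &I : BB)
    if (Constant *C = ConstantFoldInstruction(&I, DL, TLI))
      I.replaceAllUsesWith(C);
}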
1252
1253Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
1254 const TargetLibraryInfo *TLI) {
1255 SmallDenseMap<Constant *, Constant *> FoldedOps;
1256 return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
1257}
1258
1259Constant *llvm::ConstantFoldInstOperands(Instruction *I,
1260 ArrayRef<Constant *> Ops,
1261 const DataLayout &DL,
1262 const TargetLibraryInfo *TLI) {
1263 return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
1264}
1265
1266Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
1267 Constant *Ops0, Constant *Ops1,
1268 const DataLayout &DL,
1269 const TargetLibraryInfo *TLI) {
1270 // fold: icmp (inttoptr x), null -> icmp x, 0
1271 // fold: icmp null, (inttoptr x) -> icmp 0, x
1272 // fold: icmp (ptrtoint x), 0 -> icmp x, null
1273 // fold: icmp 0, (ptrtoint x) -> icmp null, x
1274 // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
1275 // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
1276 //
1277 // FIXME: The following comment is out of date and the DataLayout is here now.
1278 // ConstantExpr::getCompare cannot do this, because it doesn't have DL
1279 // around to know if bit truncation is happening.
1280 if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
1281 if (Ops1->isNullValue()) {
1282 if (CE0->getOpcode() == Instruction::IntToPtr) {
1283 Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
1284 // Convert the integer value to the right size to ensure we get the
1285 // proper extension or truncation.
1286 Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
1287 IntPtrTy, false);
1288 Constant *Null = Constant::getNullValue(C->getType());
1289 return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
1290 }
1291
1292 // Only do this transformation if the int is intptr_t in size, otherwise
1293 // there is a truncation or extension that we aren't modeling.
1294 if (CE0->getOpcode() == Instruction::PtrToInt) {
1295 Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
1296 if (CE0->getType() == IntPtrTy) {
1297 Constant *C = CE0->getOperand(0);
1298 Constant *Null = Constant::getNullValue(C->getType());
1299 return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
1300 }
1301 }
1302 }
1303
1304 if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
1305 if (CE0->getOpcode() == CE1->getOpcode()) {
1306 if (CE0->getOpcode() == Instruction::IntToPtr) {
1307 Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
1308
1309 // Convert the integer value to the right size to ensure we get the
1310 // proper extension or truncation.
1311 Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
1312 IntPtrTy, false);
1313 Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
1314 IntPtrTy, false);
1315 return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
1316 }
1317
1318 // Only do this transformation if the int is intptr_t in size, otherwise
1319 // there is a truncation or extension that we aren't modeling.
1320 if (CE0->getOpcode() == Instruction::PtrToInt) {
1321 Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
1322 if (CE0->getType() == IntPtrTy &&
1323 CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
1324 return ConstantFoldCompareInstOperands(
1325 Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
1326 }
1327 }
1328 }
1329 }
1330
1331 // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
1332 // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
1333 if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
1334 CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
1335 Constant *LHS = ConstantFoldCompareInstOperands(
1336 Predicate, CE0->getOperand(0), Ops1, DL, TLI);
1337 Constant *RHS = ConstantFoldCompareInstOperands(
1338 Predicate, CE0->getOperand(1), Ops1, DL, TLI);
1339 unsigned OpC =
1340 Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
1341 return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
1342 }
1343 } else if (isa<ConstantExpr>(Ops1)) {
1344 // If RHS is a constant expression, but the left side isn't, swap the
1345 // operands and try again.
1346 Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate);
1347 return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
1348 }
1349
1350 return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
1351}
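
// A worked sketch of the null-comparison fold above (helper name
// hypothetical): an inttoptr operand is re-cast to the pointer-sized integer
// type and compared against integer zero instead.
static Constant *foldNullCompareExample(Constant *Ptr, const DataLayout &DL) {
  Constant *Null = Constant::getNullValue(Ptr->getType());
  // For Ptr = inttoptr (i64 0 to i8*) this folds all the way to i1 true.
  return ConstantFoldCompareInstOperands(CmpInst::ICMP_EQ, Ptr, Null, DL,
                                         /*TLI=*/nullptr);
}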
1352
1353Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
1354 const DataLayout &DL) {
1355 assert(Instruction::isUnaryOp(Opcode));
1356
1357 return ConstantExpr::get(Opcode, Op);
1358}
1359
1360Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
1361 Constant *RHS,
1362 const DataLayout &DL) {
1363 assert(Instruction::isBinaryOp(Opcode));
1364 if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
1365 if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
1366 return C;
1367
1368 return ConstantExpr::get(Opcode, LHS, RHS);
1369}
1370
1371Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
1372 Type *DestTy, const DataLayout &DL) {
1373 assert(Instruction::isCast(Opcode));
1374 switch (Opcode) {
1375 default:
1376 llvm_unreachable("Missing case");
1377 case Instruction::PtrToInt:
1378 // If the input is an inttoptr, eliminate the pair. This requires knowing
1379 // the width of a pointer, so it can't be done in ConstantExpr::getCast.
1380 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1381 if (CE->getOpcode() == Instruction::IntToPtr) {
1382 Constant *Input = CE->getOperand(0);
1383 unsigned InWidth = Input->getType()->getScalarSizeInBits();
1384 unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
1385 if (PtrWidth < InWidth) {
1386 Constant *Mask =
1387 ConstantInt::get(CE->getContext(),
1388 APInt::getLowBitsSet(InWidth, PtrWidth));
1389 Input = ConstantExpr::getAnd(Input, Mask);
1390 }
1391 // Do a zext or trunc to get to the dest size.
1392 return ConstantExpr::getIntegerCast(Input, DestTy, false);
1393 }
1394 }
1395 return ConstantExpr::getCast(Opcode, C, DestTy);
1396 case Instruction::IntToPtr:
1397 // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
1398 // the int size is >= the ptr size and the address spaces are the same.
1399 // This requires knowing the width of a pointer, so it can't be done in
1400 // ConstantExpr::getCast.
1401 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1402 if (CE->getOpcode() == Instruction::PtrToInt) {
1403 Constant *SrcPtr = CE->getOperand(0);
1404 unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
1405 unsigned MidIntSize = CE->getType()->getScalarSizeInBits();
1406
1407 if (MidIntSize >= SrcPtrSize) {
1408 unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
1409 if (SrcAS == DestTy->getPointerAddressSpace())
1410 return FoldBitCast(CE->getOperand(0), DestTy, DL);
1411 }
1412 }
1413 }
1414
1415 return ConstantExpr::getCast(Opcode, C, DestTy);
1416 case Instruction::Trunc:
1417 case Instruction::ZExt:
1418 case Instruction::SExt:
1419 case Instruction::FPTrunc:
1420 case Instruction::FPExt:
1421 case Instruction::UIToFP:
1422 case Instruction::SIToFP:
1423 case Instruction::FPToUI:
1424 case Instruction::FPToSI:
1425 case Instruction::AddrSpaceCast:
1426 return ConstantExpr::getCast(Opcode, C, DestTy);
1427 case Instruction::BitCast:
1428 return FoldBitCast(C, DestTy, DL);
1429 }
1430}
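The PtrToInt/IntToPtr pair elimination above is plain mask-and-truncate arithmetic; a standalone sketch with a hypothetical 32-bit pointer width (values arbitrary):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Model ptrtoint(inttoptr(i64 X)) with 32-bit pointers:
      // only the low PtrWidth bits of X survive the round trip.
      uint64_t X = 0xDEADBEEFCAFEF00Dull;
      unsigned InWidth = 64, PtrWidth = 32;
      uint64_t Mask = PtrWidth < InWidth ? (1ull << PtrWidth) - 1 : ~0ull;
      assert((X & Mask) == 0xCAFEF00Dull);
      return 0;
    }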
1431
1432Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
1433 ConstantExpr *CE,
1434 Type *Ty,
1435 const DataLayout &DL) {
1436 if (!CE->getOperand(1)->isNullValue())
1437 return nullptr; // Do not allow stepping over the value!
1438
1439 // Loop over all of the operands, tracking down which value we are
1440 // addressing.
1441 for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
1442 C = C->getAggregateElement(CE->getOperand(i));
1443 if (!C)
1444 return nullptr;
1445 }
1446 return ConstantFoldLoadThroughBitcast(C, Ty, DL);
1447}
1448
1449Constant *
1450llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
1451 ArrayRef<Constant *> Indices) {
1452 // Loop over all of the operands, tracking down which value we are
1453 // addressing.
1454 for (Constant *Index : Indices) {
1455 C = C->getAggregateElement(Index);
1456 if (!C)
1457 return nullptr;
1458 }
1459 return C;
1460}
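The index walk above is ordinary nested-aggregate indexing in the constant domain; a standalone model (array shape and values illustrative):

    #include <cassert>

    int main() {
      // Model a [2 x [3 x i32]] constant walked with GEP indices 1, 2.
      int G[2][3] = {{10, 11, 12}, {20, 21, 22}};
      int Val = G[1][2]; // getAggregateElement(1), then getAggregateElement(2)
      assert(Val == 22);
      return 0;
    }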
1461
1462//===----------------------------------------------------------------------===//
1463// Constant Folding for Calls
1464//
1465
1466bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
1467 if (Call->isNoBuiltin())
1468 return false;
1469 switch (F->getIntrinsicID()) {
1470 // Operations that do not operate on floating-point numbers and do not
1471 // depend on the FP environment can be folded even in strictfp functions.
1472 case Intrinsic::bswap:
1473 case Intrinsic::ctpop:
1474 case Intrinsic::ctlz:
1475 case Intrinsic::cttz:
1476 case Intrinsic::fshl:
1477 case Intrinsic::fshr:
1478 case Intrinsic::launder_invariant_group:
1479 case Intrinsic::strip_invariant_group:
1480 case Intrinsic::masked_load:
1481 case Intrinsic::get_active_lane_mask:
1482 case Intrinsic::abs:
1483 case Intrinsic::smax:
1484 case Intrinsic::smin:
1485 case Intrinsic::umax:
1486 case Intrinsic::umin:
1487 case Intrinsic::sadd_with_overflow:
1488 case Intrinsic::uadd_with_overflow:
1489 case Intrinsic::ssub_with_overflow:
1490 case Intrinsic::usub_with_overflow:
1491 case Intrinsic::smul_with_overflow:
1492 case Intrinsic::umul_with_overflow:
1493 case Intrinsic::sadd_sat:
1494 case Intrinsic::uadd_sat:
1495 case Intrinsic::ssub_sat:
1496 case Intrinsic::usub_sat:
1497 case Intrinsic::smul_fix:
1498 case Intrinsic::smul_fix_sat:
1499 case Intrinsic::bitreverse:
1500 case Intrinsic::is_constant:
1501 case Intrinsic::vector_reduce_add:
1502 case Intrinsic::vector_reduce_mul:
1503 case Intrinsic::vector_reduce_and:
1504 case Intrinsic::vector_reduce_or:
1505 case Intrinsic::vector_reduce_xor:
1506 case Intrinsic::vector_reduce_smin:
1507 case Intrinsic::vector_reduce_smax:
1508 case Intrinsic::vector_reduce_umin:
1509 case Intrinsic::vector_reduce_umax:
1510 // Target intrinsics
1511 case Intrinsic::amdgcn_perm:
1512 case Intrinsic::arm_mve_vctp8:
1513 case Intrinsic::arm_mve_vctp16:
1514 case Intrinsic::arm_mve_vctp32:
1515 case Intrinsic::arm_mve_vctp64:
1516 case Intrinsic::aarch64_sve_convert_from_svbool:
1517 // WebAssembly float semantics are always known
1518 case Intrinsic::wasm_trunc_signed:
1519 case Intrinsic::wasm_trunc_unsigned:
1520 return true;
1521
1522 // Floating-point operations cannot be folded in strictfp functions in the
1523 // general case. They can be folded if the FP environment is known to the compiler.
1524 case Intrinsic::minnum:
1525 case Intrinsic::maxnum:
1526 case Intrinsic::minimum:
1527 case Intrinsic::maximum:
1528 case Intrinsic::log:
1529 case Intrinsic::log2:
1530 case Intrinsic::log10:
1531 case Intrinsic::exp:
1532 case Intrinsic::exp2:
1533 case Intrinsic::sqrt:
1534 case Intrinsic::sin:
1535 case Intrinsic::cos:
1536 case Intrinsic::pow:
1537 case Intrinsic::powi:
1538 case Intrinsic::fma:
1539 case Intrinsic::fmuladd:
1540 case Intrinsic::fptoui_sat:
1541 case Intrinsic::fptosi_sat:
1542 case Intrinsic::convert_from_fp16:
1543 case Intrinsic::convert_to_fp16:
1544 case Intrinsic::amdgcn_cos:
1545 case Intrinsic::amdgcn_cubeid:
1546 case Intrinsic::amdgcn_cubema:
1547 case Intrinsic::amdgcn_cubesc:
1548 case Intrinsic::amdgcn_cubetc:
1549 case Intrinsic::amdgcn_fmul_legacy:
1550 case Intrinsic::amdgcn_fma_legacy:
1551 case Intrinsic::amdgcn_fract:
1552 case Intrinsic::amdgcn_ldexp:
1553 case Intrinsic::amdgcn_sin:
1554 // The intrinsics below depend on rounding mode in MXCSR.
1555 case Intrinsic::x86_sse_cvtss2si:
1556 case Intrinsic::x86_sse_cvtss2si64:
1557 case Intrinsic::x86_sse_cvttss2si:
1558 case Intrinsic::x86_sse_cvttss2si64:
1559 case Intrinsic::x86_sse2_cvtsd2si:
1560 case Intrinsic::x86_sse2_cvtsd2si64:
1561 case Intrinsic::x86_sse2_cvttsd2si:
1562 case Intrinsic::x86_sse2_cvttsd2si64:
1563 case Intrinsic::x86_avx512_vcvtss2si32:
1564 case Intrinsic::x86_avx512_vcvtss2si64:
1565 case Intrinsic::x86_avx512_cvttss2si:
1566 case Intrinsic::x86_avx512_cvttss2si64:
1567 case Intrinsic::x86_avx512_vcvtsd2si32:
1568 case Intrinsic::x86_avx512_vcvtsd2si64:
1569 case Intrinsic::x86_avx512_cvttsd2si:
1570 case Intrinsic::x86_avx512_cvttsd2si64:
1571 case Intrinsic::x86_avx512_vcvtss2usi32:
1572 case Intrinsic::x86_avx512_vcvtss2usi64:
1573 case Intrinsic::x86_avx512_cvttss2usi:
1574 case Intrinsic::x86_avx512_cvttss2usi64:
1575 case Intrinsic::x86_avx512_vcvtsd2usi32:
1576 case Intrinsic::x86_avx512_vcvtsd2usi64:
1577 case Intrinsic::x86_avx512_cvttsd2usi:
1578 case Intrinsic::x86_avx512_cvttsd2usi64:
1579 return !Call->isStrictFP();
1580
1581 // Sign operations are actually bitwise operations; they do not raise
1582 // exceptions even for SNaNs.
1583 case Intrinsic::fabs:
1584 case Intrinsic::copysign:
1585 // Non-constrained variants of rounding operations assume the default FP
1586 // environment; they can be folded in any case.
1587 case Intrinsic::ceil:
1588 case Intrinsic::floor:
1589 case Intrinsic::round:
1590 case Intrinsic::roundeven:
1591 case Intrinsic::trunc:
1592 case Intrinsic::nearbyint:
1593 case Intrinsic::rint:
1594 // Constrained intrinsics can be folded if the FP environment is known
1595 // to the compiler.
1596 case Intrinsic::experimental_constrained_fma:
1597 case Intrinsic::experimental_constrained_fmuladd:
1598 case Intrinsic::experimental_constrained_fadd:
1599 case Intrinsic::experimental_constrained_fsub:
1600 case Intrinsic::experimental_constrained_fmul:
1601 case Intrinsic::experimental_constrained_fdiv:
1602 case Intrinsic::experimental_constrained_frem:
1603 case Intrinsic::experimental_constrained_ceil:
1604 case Intrinsic::experimental_constrained_floor:
1605 case Intrinsic::experimental_constrained_round:
1606 case Intrinsic::experimental_constrained_roundeven:
1607 case Intrinsic::experimental_constrained_trunc:
1608 case Intrinsic::experimental_constrained_nearbyint:
1609 case Intrinsic::experimental_constrained_rint:
1610 return true;
1611 default:
1612 return false;
1613 case Intrinsic::not_intrinsic: break;
1614 }
1615
1616 if (!F->hasName() || Call->isStrictFP())
1617 return false;
1618
1619 // In these cases, a length check is required. We don't want to return
1620 // true for a name like "cos\0blah", which strcmp would consider equal to
1621 // "cos" but which has length 8.
1622 StringRef Name = F->getName();
1623 switch (Name[0]) {
1624 default:
1625 return false;
1626 case 'a':
1627 return Name == "acos" || Name == "acosf" ||
1628 Name == "asin" || Name == "asinf" ||
1629 Name == "atan" || Name == "atanf" ||
1630 Name == "atan2" || Name == "atan2f";
1631 case 'c':
1632 return Name == "ceil" || Name == "ceilf" ||
1633 Name == "cos" || Name == "cosf" ||
1634 Name == "cosh" || Name == "coshf";
1635 case 'e':
1636 return Name == "exp" || Name == "expf" ||
1637 Name == "exp2" || Name == "exp2f";
1638 case 'f':
1639 return Name == "fabs" || Name == "fabsf" ||
1640 Name == "floor" || Name == "floorf" ||
1641 Name == "fmod" || Name == "fmodf";
1642 case 'l':
1643 return Name == "log" || Name == "logf" ||
1644 Name == "log2" || Name == "log2f" ||
1645 Name == "log10" || Name == "log10f";
1646 case 'n':
1647 return Name == "nearbyint" || Name == "nearbyintf";
1648 case 'p':
1649 return Name == "pow" || Name == "powf";
1650 case 'r':
1651 return Name == "remainder" || Name == "remainderf" ||
1652 Name == "rint" || Name == "rintf" ||
1653 Name == "round" || Name == "roundf";
1654 case 's':
1655 return Name == "sin" || Name == "sinf" ||
1656 Name == "sinh" || Name == "sinhf" ||
1657 Name == "sqrt" || Name == "sqrtf";
1658 case 't':
1659 return Name == "tan" || Name == "tanf" ||
1660 Name == "tanh" || Name == "tanhf" ||
1661 Name == "trunc" || Name == "truncf";
1662 case '_':
1663 // Check for various function names that get used for the math functions
1664 // when the header files are preprocessed with the macro
1665 // __FINITE_MATH_ONLY__ enabled.
1666 // The '12' here is the length of the shortest name that can match.
1667 // We need to check the size before looking at Name[1] and Name[2]
1668 // so we may as well check a limit that will eliminate mismatches.
1669 if (Name.size() < 12 || Name[1] != '_')
1670 return false;
1671 switch (Name[2]) {
1672 default:
1673 return false;
1674 case 'a':
1675 return Name == "__acos_finite" || Name == "__acosf_finite" ||
1676 Name == "__asin_finite" || Name == "__asinf_finite" ||
1677 Name == "__atan2_finite" || Name == "__atan2f_finite";
1678 case 'c':
1679 return Name == "__cosh_finite" || Name == "__coshf_finite";
1680 case 'e':
1681 return Name == "__exp_finite" || Name == "__expf_finite" ||
1682 Name == "__exp2_finite" || Name == "__exp2f_finite";
1683 case 'l':
1684 return Name == "__log_finite" || Name == "__logf_finite" ||
1685 Name == "__log10_finite" || Name == "__log10f_finite";
1686 case 'p':
1687 return Name == "__pow_finite" || Name == "__powf_finite";
1688 case 's':
1689 return Name == "__sinh_finite" || Name == "__sinhf_finite";
1690 }
1691 }
1692}
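The "cos\0blah" caveat above is about length-aware comparison; a standalone illustration, with std::string standing in for StringRef:

    #include <cassert>
    #include <cstring>
    #include <string>

    int main() {
      std::string Name("cos\0blah", 8);              // length 8, embedded NUL
      assert(std::strcmp(Name.c_str(), "cos") == 0); // strcmp stops at the NUL
      assert(Name != "cos");                         // length-aware compare does not
      return 0;
    }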
1693
1694namespace {
1695
1696Constant *GetConstantFoldFPValue(double V, Type *Ty) {
1697 if (Ty->isHalfTy() || Ty->isFloatTy()) {
1698 APFloat APF(V);
1699 bool unused;
1700 APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
1701 return ConstantFP::get(Ty->getContext(), APF);
1702 }
1703 if (Ty->isDoubleTy())
1704 return ConstantFP::get(Ty->getContext(), APFloat(V));
1705 llvm_unreachable("Can only constant fold half/float/double");
1706}
1707
1708/// Clear the floating-point exception state.
1709inline void llvm_fenv_clearexcept() {
1710#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
1711 feclearexcept(FE_ALL_EXCEPT);
1712#endif
1713 errno = 0;
1714}
1715
1716/// Test if a floating-point exception was raised.
1717inline bool llvm_fenv_testexcept() {
1718 int errno_val = errno;
1719 if (errno_val == ERANGE || errno_val == EDOM)
1720 return true;
1721#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
1722 if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
1723 return true;
1724#endif
1725 return false;
1726}
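A standalone version of the clear/evaluate/test bracket these two helpers implement (used by ConstantFoldFP below), written against the standard <cfenv> and <cerrno> interfaces; which FE_* macros exist varies by host, hence the feature guards above:

    #include <cerrno>
    #include <cfenv>
    #include <cmath>
    #include <cstdio>

    // True if calling Fn(X) raised a math error on this host.
    static bool raisesError(double (*Fn)(double), double X) {
      errno = 0;
      std::feclearexcept(FE_ALL_EXCEPT);
      (void)Fn(X);
      if (errno == ERANGE || errno == EDOM)
        return true;
      return std::fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT) != 0;
    }

    int main() {
      std::printf("log(-1) raises: %d\n", raisesError(std::log, -1.0)); // 1 (domain error)
      std::printf("log(2) raises:  %d\n", raisesError(std::log, 2.0));  // 0 (clean)
      return 0;
    }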
1727
1728Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V,
1729 Type *Ty) {
1730 llvm_fenv_clearexcept();
1731 double Result = NativeFP(V.convertToDouble());
1732 if (llvm_fenv_testexcept()) {
1733 llvm_fenv_clearexcept();
1734 return nullptr;
1735 }
1736
1737 return GetConstantFoldFPValue(Result, Ty);
1738}
1739
1740Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
1741 const APFloat &V, const APFloat &W, Type *Ty) {
1742 llvm_fenv_clearexcept();
1743 double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
1744 if (llvm_fenv_testexcept()) {
1745 llvm_fenv_clearexcept();
1746 return nullptr;
1747 }
1748
1749 return GetConstantFoldFPValue(Result, Ty);
1750}
1751
1752Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
1753 FixedVectorType *VT = dyn_cast<FixedVectorType>(Op->getType());
1754 if (!VT)
1755 return nullptr;
1756
1757 // This isn't strictly necessary, but handle the special/common case of zero:
1758 // all integer reductions of a zero input produce zero.
1759 if (isa<ConstantAggregateZero>(Op))
1760 return ConstantInt::get(VT->getElementType(), 0);
1761
1762 // This is the same as the underlying binops - poison propagates.
1763 if (isa<PoisonValue>(Op) || Op->containsPoisonElement())
1764 return PoisonValue::get(VT->getElementType());
1765
1766 // TODO: Handle undef.
1767 if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))
1768 return nullptr;
1769
1770 auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
1771 if (!EltC)
1772 return nullptr;
1773
1774 APInt Acc = EltC->getValue();
1775 for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) {
1776 if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
1777 return nullptr;
1778 const APInt &X = EltC->getValue();
1779 switch (IID) {
1780 case Intrinsic::vector_reduce_add:
1781 Acc = Acc + X;
1782 break;
1783 case Intrinsic::vector_reduce_mul:
1784 Acc = Acc * X;
1785 break;
1786 case Intrinsic::vector_reduce_and:
1787 Acc = Acc & X;
1788 break;
1789 case Intrinsic::vector_reduce_or:
1790 Acc = Acc | X;
1791 break;
1792 case Intrinsic::vector_reduce_xor:
1793 Acc = Acc ^ X;
1794 break;
1795 case Intrinsic::vector_reduce_smin:
1796 Acc = APIntOps::smin(Acc, X);
1797 break;
1798 case Intrinsic::vector_reduce_smax:
1799 Acc = APIntOps::smax(Acc, X);
1800 break;
1801 case Intrinsic::vector_reduce_umin:
1802 Acc = APIntOps::umin(Acc, X);
1803 break;
1804 case Intrinsic::vector_reduce_umax:
1805 Acc = APIntOps::umax(Acc, X);
1806 break;
1807 }
1808 }
1809
1810 return ConstantInt::get(Op->getContext(), Acc);
1811}
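As a concrete instance of the accumulation loop above, vector_reduce_add on <4 x i32> <1, 2, 3, 4> folds to 10; a standalone model with plain integers standing in for APInt:

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t Elts[4] = {1, 2, 3, 4}; // models <4 x i32> <1, 2, 3, 4>
      int32_t Acc = Elts[0];
      for (unsigned I = 1; I != 4; ++I)
        Acc = Acc + Elts[I];          // the vector_reduce_add case
      assert(Acc == 10);
      return 0;
    }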
1812
1813/// Attempt to fold an SSE floating point to integer conversion of a constant
1814/// floating point. If roundTowardZero is false, the default IEEE rounding is
1815/// used (toward nearest, ties to even). This matches the behavior of the
1816/// non-truncating SSE instructions in the default rounding mode. The desired
1817/// integer type Ty is used to select how many bits are available for the
1818/// result. Returns null if the conversion cannot be performed, otherwise
1819/// returns the Constant value resulting from the conversion.
1820Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
1821 Type *Ty, bool IsSigned) {
1822 // All of these conversion intrinsics form an integer of at most 64 bits.
1823 unsigned ResultWidth = Ty->getIntegerBitWidth();
1824 assert(ResultWidth <= 64 &&
1825        "Can only constant fold conversions to 64 and 32 bit ints");
1826
1827 uint64_t UIntVal;
1828 bool isExact = false;
1829 APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero
1830 : APFloat::rmNearestTiesToEven;
1831 APFloat::opStatus status =
1832 Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
1833 IsSigned, mode, &isExact);
1834 if (status != APFloat::opOK &&
1835 (!roundTowardZero || status != APFloat::opInexact))
1836 return nullptr;
1837 return ConstantInt::get(Ty, UIntVal, IsSigned);
1838}
1839
1840double getValueAsDouble(ConstantFP *Op) {
1841 Type *Ty = Op->getType();
1842
1843 if (Ty->isBFloatTy() || Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())
1844 return Op->getValueAPF().convertToDouble();
1845
1846 bool unused;
1847 APFloat APF = Op->getValueAPF();
1848 APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
1849 return APF.convertToDouble();
1850}
1851
1852static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
1853 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
   [Steps 23, 30: Assuming 'Op' is not a 'ConstantInt'; 'CI' is null]
   [Steps 24, 31: Taking false branch]
1854 C = &CI->getValue();
1855 return true;
1856 }
1857 if (isa<UndefValue>(Op)) {
   [Steps 25, 32: Assuming 'Op' is a 'UndefValue']
   [Steps 26, 33: Taking true branch]
1858 C = nullptr;
   [Step 34: Null pointer value stored to 'C1']
1859 return true;
   [Steps 27, 35: Returning the value 1, which participates in a condition later]
1860 }
1861 return false;
1862}
1863
1864/// Checks if the given intrinsic call, which evaluates to constant, is allowed
1865/// to be folded.
1866///
1867/// \param CI Constrained intrinsic call.
1868/// \param St Exception flags raised during constant evaluation.
1869static bool mayFoldConstrained(ConstrainedFPIntrinsic *CI,
1870 APFloat::opStatus St) {
1871 Optional<RoundingMode> ORM = CI->getRoundingMode();
1872 Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
1873
1874 // If the operation does not change exception status flags, it is safe
1875 // to fold.
1876 if (St == APFloat::opStatus::opOK) {
1877 // When FP exceptions are not ignored, the intrinsic call will not be
1878 // eliminated, because it is considered to have side effects. But we
1879 // know that its evaluation does not raise exceptions, so the side effect
1880 // is absent. To allow removing the call, mark it as not accessing memory.
1881 if (EB && *EB != fp::ExceptionBehavior::ebIgnore)
1882 CI->addFnAttr(Attribute::ReadNone);
1883 return true;
1884 }
1885
1886 // If evaluation raised FP exception, the result can depend on rounding
1887 // mode. If the latter is unknown, folding is not possible.
1888 if (!ORM || *ORM == RoundingMode::Dynamic)
1889 return false;
1890
1891 // If FP exceptions are ignored, fold the call, even if such exception is
1892 // raised.
1893 if (!EB || *EB != fp::ExceptionBehavior::ebStrict)
1894 return true;
1895
1896 // Leave the calculation for runtime so that the exception flags are set
1897 // correctly in hardware.
1898 return false;
1899}
1900
1901/// Returns the rounding mode that should be used for constant evaluation.
1902static RoundingMode
1903getEvaluationRoundingMode(const ConstrainedFPIntrinsic *CI) {
1904 Optional<RoundingMode> ORM = CI->getRoundingMode();
1905 if (!ORM || *ORM == RoundingMode::Dynamic)
1906 // Even if the rounding mode is unknown, try evaluating the operation.
1907 // If it does not raise an inexact exception, rounding was not applied,
1908 // so the result is exact and does not depend on the rounding mode. Whether
1909 // other FP exceptions are raised does not depend on the rounding mode either.
1910 return RoundingMode::NearestTiesToEven;
1911 return *ORM;
1912}
1913
1914static Constant *ConstantFoldScalarCall1(StringRef Name,
1915 Intrinsic::ID IntrinsicID,
1916 Type *Ty,
1917 ArrayRef<Constant *> Operands,
1918 const TargetLibraryInfo *TLI,
1919 const CallBase *Call) {
1920 assert(Operands.size() == 1 && "Wrong number of operands.");
1921
1922 if (IntrinsicID == Intrinsic::is_constant) {
1923 // We know we have a "Constant" argument. But we want to only
1924 // return true for manifest constants, not those that depend on
1925 // constants with unknowable values, e.g. GlobalValue or BlockAddress.
1926 if (Operands[0]->isManifestConstant())
1927 return ConstantInt::getTrue(Ty->getContext());
1928 return nullptr;
1929 }
1930 if (isa<UndefValue>(Operands[0])) {
1931 // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
1932 // ctpop() is between 0 and bitwidth, pick 0 for undef.
1933 // fptoui.sat and fptosi.sat can always fold to zero (for a zero input).
1934 if (IntrinsicID == Intrinsic::cos ||
1935 IntrinsicID == Intrinsic::ctpop ||
1936 IntrinsicID == Intrinsic::fptoui_sat ||
1937 IntrinsicID == Intrinsic::fptosi_sat)
1938 return Constant::getNullValue(Ty);
1939 if (IntrinsicID == Intrinsic::bswap ||
1940 IntrinsicID == Intrinsic::bitreverse ||
1941 IntrinsicID == Intrinsic::launder_invariant_group ||
1942 IntrinsicID == Intrinsic::strip_invariant_group)
1943 return Operands[0];
1944 }
1945
1946 if (isa<ConstantPointerNull>(Operands[0])) {
1947 // launder(null) == null == strip(null) iff in addrspace 0
1948 if (IntrinsicID == Intrinsic::launder_invariant_group ||
1949 IntrinsicID == Intrinsic::strip_invariant_group) {
1950 // If the instruction is not yet placed in a basic block (e.g. when cloning
1951 // a function during inlining), Call's caller may not be available.
1952 // So check Call's BB first before querying Call->getCaller.
1953 const Function *Caller =
1954 Call->getParent() ? Call->getCaller() : nullptr;
1955 if (Caller &&
1956 !NullPointerIsDefined(
1957 Caller, Operands[0]->getType()->getPointerAddressSpace())) {
1958 return Operands[0];
1959 }
1960 return nullptr;
1961 }
1962 }
1963
1964 if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
1965 if (IntrinsicID == Intrinsic::convert_to_fp16) {
1966 APFloat Val(Op->getValueAPF());
1967
1968 bool lost = false;
1969 Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
1970
1971 return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
1972 }
1973
1974 APFloat U = Op->getValueAPF();
1975
1976 if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
1977 IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
1978 bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;
1979
1980 if (U.isNaN())
1981 return nullptr;
1982
1983 unsigned Width = Ty->getIntegerBitWidth();
1984 APSInt Int(Width, !Signed);
1985 bool IsExact = false;
1986 APFloat::opStatus Status =
1987 U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
1988
1989 if (Status == APFloat::opOK || Status == APFloat::opInexact)
1990 return ConstantInt::get(Ty, Int);
1991
1992 return nullptr;
1993 }
1994
1995 if (IntrinsicID == Intrinsic::fptoui_sat ||
1996 IntrinsicID == Intrinsic::fptosi_sat) {
1997 // convertToInteger() already has the desired saturation semantics.
1998 APSInt Int(Ty->getIntegerBitWidth(),
1999 IntrinsicID == Intrinsic::fptoui_sat);
2000 bool IsExact;
2001 U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
2002 return ConstantInt::get(Ty, Int);
2003 }
2004
2005 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
2006 return nullptr;
2007
2008 // Use internal versions of these intrinsics.
2009
2010 if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
2011 U.roundToIntegral(APFloat::rmNearestTiesToEven);
2012 return ConstantFP::get(Ty->getContext(), U);
2013 }
2014
2015 if (IntrinsicID == Intrinsic::round) {
2016 U.roundToIntegral(APFloat::rmNearestTiesToAway);
2017 return ConstantFP::get(Ty->getContext(), U);
2018 }
2019
2020 if (IntrinsicID == Intrinsic::roundeven) {
2021 U.roundToIntegral(APFloat::rmNearestTiesToEven);
2022 return ConstantFP::get(Ty->getContext(), U);
2023 }
2024
2025 if (IntrinsicID == Intrinsic::ceil) {
2026 U.roundToIntegral(APFloat::rmTowardPositive);
2027 return ConstantFP::get(Ty->getContext(), U);
2028 }
2029
2030 if (IntrinsicID == Intrinsic::floor) {
2031 U.roundToIntegral(APFloat::rmTowardNegative);
2032 return ConstantFP::get(Ty->getContext(), U);
2033 }
2034
2035 if (IntrinsicID == Intrinsic::trunc) {
2036 U.roundToIntegral(APFloat::rmTowardZero);
2037 return ConstantFP::get(Ty->getContext(), U);
2038 }
2039
2040 if (IntrinsicID == Intrinsic::fabs) {
2041 U.clearSign();
2042 return ConstantFP::get(Ty->getContext(), U);
2043 }
2044
2045 if (IntrinsicID == Intrinsic::amdgcn_fract) {
2046 // The v_fract instruction behaves like the OpenCL spec, which defines
2047 // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator is
2048 // there to prevent fract(-small) from returning 1.0. It returns the
2049 // largest positive floating-point number less than 1.0."
2050 APFloat FloorU(U);
2051 FloorU.roundToIntegral(APFloat::rmTowardNegative);
2052 APFloat FractU(U - FloorU);
2053 APFloat AlmostOne(U.getSemantics(), 1);
2054 AlmostOne.next(/*nextDown*/ true);
2055 return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne));
2056 }
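A standalone model of that clamp, showing why the min against the largest float below 1.0 matters: for a tiny negative input, x - floor(x) rounds up to exactly 1.0 (input value illustrative):

    #include <cassert>
    #include <cmath>

    int main() {
      float X = -1.0e-8f;
      float Fract = X - std::floor(X);              // rounds to exactly 1.0f
      float AlmostOne = std::nextafter(1.0f, 0.0f); // 0x1.fffffep-1f
      assert(Fract == 1.0f && std::fmin(Fract, AlmostOne) < 1.0f);
      return 0;
    }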
2057
2058 // Rounding operations (floor, trunc, ceil, round and nearbyint) do not
2059 // raise FP exceptions, unless the argument is a signaling NaN.
2060
2061 Optional<APFloat::roundingMode> RM;
2062 switch (IntrinsicID) {
2063 default:
2064 break;
2065 case Intrinsic::experimental_constrained_nearbyint:
2066 case Intrinsic::experimental_constrained_rint: {
2067 auto CI = cast<ConstrainedFPIntrinsic>(Call);
2068 RM = CI->getRoundingMode();
2069 if (!RM || RM.getValue() == RoundingMode::Dynamic)
2070 return nullptr;
2071 break;
2072 }
2073 case Intrinsic::experimental_constrained_round:
2074 RM = APFloat::rmNearestTiesToAway;
2075 break;
2076 case Intrinsic::experimental_constrained_ceil:
2077 RM = APFloat::rmTowardPositive;
2078 break;
2079 case Intrinsic::experimental_constrained_floor:
2080 RM = APFloat::rmTowardNegative;
2081 break;
2082 case Intrinsic::experimental_constrained_trunc:
2083 RM = APFloat::rmTowardZero;
2084 break;
2085 }
2086 if (RM) {
2087 auto CI = cast<ConstrainedFPIntrinsic>(Call);
2088 if (U.isFinite()) {
2089 APFloat::opStatus St = U.roundToIntegral(*RM);
2090 if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
2091 St == APFloat::opInexact) {
2092 Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2093 if (EB && *EB == fp::ebStrict)
2094 return nullptr;
2095 }
2096 } else if (U.isSignaling()) {
2097 Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2098 if (EB && *EB != fp::ebIgnore)
2099 return nullptr;
2100 U = APFloat::getQNaN(U.getSemantics());
2101 }
2102 return ConstantFP::get(Ty->getContext(), U);
2103 }
2104
2105 /// We only fold functions with finite arguments. Folding NaN and inf is
2106 /// likely to be aborted with an exception anyway, and some host libms
2107 /// have known errors raising exceptions.
2108 if (!U.isFinite())
2109 return nullptr;
2110
2111 /// Currently APFloat versions of these functions do not exist, so we use
2112 /// the host native double versions. Float versions are not called
2113 /// directly, but for all of these it holds that (float)(f((double)arg)) ==
2114 /// f(arg). Long double is not supported yet.
2115 APFloat APF = Op->getValueAPF();
2116
2117 switch (IntrinsicID) {
2118 default: break;
2119 case Intrinsic::log:
2120 return ConstantFoldFP(log, APF, Ty);
2121 case Intrinsic::log2:
2122 // TODO: What about hosts that lack a C99 library?
2123 return ConstantFoldFP(Log2, APF, Ty);
2124 case Intrinsic::log10:
2125 // TODO: What about hosts that lack a C99 library?
2126 return ConstantFoldFP(log10, APF, Ty);
2127 case Intrinsic::exp:
2128 return ConstantFoldFP(exp, APF, Ty);
2129 case Intrinsic::exp2:
2130 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2131 return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
2132 case Intrinsic::sin:
2133 return ConstantFoldFP(sin, APF, Ty);
2134 case Intrinsic::cos:
2135 return ConstantFoldFP(cos, APF, Ty);
2136 case Intrinsic::sqrt:
2137 return ConstantFoldFP(sqrt, APF, Ty);
2138 case Intrinsic::amdgcn_cos:
2139 case Intrinsic::amdgcn_sin: {
2140 double V = getValueAsDouble(Op);
2141 if (V < -256.0 || V > 256.0)
2142 // The gfx8 and gfx9 architectures handle arguments outside the range
2143 // [-256, 256] differently. This should be a rare case so bail out
2144 // rather than trying to handle the difference.
2145 return nullptr;
2146 bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
2147 double V4 = V * 4.0;
2148 if (V4 == floor(V4)) {
2149 // Force exact results for quarter-integer inputs.
2150 const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
2151 V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
2152 } else {
2153 if (IsCos)
2154 V = cos(V * 2.0 * numbers::pi);
2155 else
2156 V = sin(V * 2.0 * numbers::pi);
2157 }
2158 return GetConstantFoldFPValue(V, Ty);
2159 }
2160 }
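A standalone check of the quarter-integer table in the amdgcn_sin/amdgcn_cos case above: for V = Q/4 turns, sin(2*pi*V) is exactly 0 or +/-1, and the table indexed by (int)V4 & 3 reproduces it (the +1 offset rotates the same table into cos):

    #include <cassert>
    #include <cmath>

    int main() {
      const double SinVals[4] = {0.0, 1.0, 0.0, -1.0};
      const double TwoPi = 6.283185307179586;
      for (int Q = -8; Q <= 8; ++Q) {       // V = Q / 4.0, so V4 == Q
        double Expected = std::round(std::sin((Q / 4.0) * TwoPi));
        assert(SinVals[Q & 3] == Expected);
      }
      return 0;
    }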
2161
2162 if (!TLI)
2163 return nullptr;
2164
2165 LibFunc Func = NotLibFunc;
2166 TLI->getLibFunc(Name, Func);
2167 switch (Func) {
2168 default:
2169 break;
2170 case LibFunc_acos:
2171 case LibFunc_acosf:
2172 case LibFunc_acos_finite:
2173 case LibFunc_acosf_finite:
2174 if (TLI->has(Func))
2175 return ConstantFoldFP(acos, APF, Ty);
2176 break;
2177 case LibFunc_asin:
2178 case LibFunc_asinf:
2179 case LibFunc_asin_finite:
2180 case LibFunc_asinf_finite:
2181 if (TLI->has(Func))
2182 return ConstantFoldFP(asin, APF, Ty);
2183 break;
2184 case LibFunc_atan:
2185 case LibFunc_atanf:
2186 if (TLI->has(Func))
2187 return ConstantFoldFP(atan, APF, Ty);
2188 break;
2189 case LibFunc_ceil:
2190 case LibFunc_ceilf:
2191 if (TLI->has(Func)) {
2192 U.roundToIntegral(APFloat::rmTowardPositive);
2193 return ConstantFP::get(Ty->getContext(), U);
2194 }
2195 break;
2196 case LibFunc_cos:
2197 case LibFunc_cosf:
2198 if (TLI->has(Func))
2199 return ConstantFoldFP(cos, APF, Ty);
2200 break;
2201 case LibFunc_cosh:
2202 case LibFunc_coshf:
2203 case LibFunc_cosh_finite:
2204 case LibFunc_coshf_finite:
2205 if (TLI->has(Func))
2206 return ConstantFoldFP(cosh, APF, Ty);
2207 break;
2208 case LibFunc_exp:
2209 case LibFunc_expf:
2210 case LibFunc_exp_finite:
2211 case LibFunc_expf_finite:
2212 if (TLI->has(Func))
2213 return ConstantFoldFP(exp, APF, Ty);
2214 break;
2215 case LibFunc_exp2:
2216 case LibFunc_exp2f:
2217 case LibFunc_exp2_finite:
2218 case LibFunc_exp2f_finite:
2219 if (TLI->has(Func))
2220 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2221 return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
2222 break;
2223 case LibFunc_fabs:
2224 case LibFunc_fabsf:
2225 if (TLI->has(Func)) {
2226 U.clearSign();
2227 return ConstantFP::get(Ty->getContext(), U);
2228 }
2229 break;
2230 case LibFunc_floor:
2231 case LibFunc_floorf:
2232 if (TLI->has(Func)) {
2233 U.roundToIntegral(APFloat::rmTowardNegative);
2234 return ConstantFP::get(Ty->getContext(), U);
2235 }
2236 break;
2237 case LibFunc_log:
2238 case LibFunc_logf:
2239 case LibFunc_log_finite:
2240 case LibFunc_logf_finite:
2241 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2242 return ConstantFoldFP(log, APF, Ty);
2243 break;
2244 case LibFunc_log2:
2245 case LibFunc_log2f:
2246 case LibFunc_log2_finite:
2247 case LibFunc_log2f_finite:
2248 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2249 // TODO: What about hosts that lack a C99 library?
2250 return ConstantFoldFP(Log2, APF, Ty);
2251 break;
2252 case LibFunc_log10:
2253 case LibFunc_log10f:
2254 case LibFunc_log10_finite:
2255 case LibFunc_log10f_finite:
2256 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2257 // TODO: What about hosts that lack a C99 library?
2258 return ConstantFoldFP(log10, APF, Ty);
2259 break;
2260 case LibFunc_nearbyint:
2261 case LibFunc_nearbyintf:
2262 case LibFunc_rint:
2263 case LibFunc_rintf:
2264 if (TLI->has(Func)) {
2265 U.roundToIntegral(APFloat::rmNearestTiesToEven);
2266 return ConstantFP::get(Ty->getContext(), U);
2267 }
2268 break;
2269 case LibFunc_round:
2270 case LibFunc_roundf:
2271 if (TLI->has(Func)) {
2272 U.roundToIntegral(APFloat::rmNearestTiesToAway);
2273 return ConstantFP::get(Ty->getContext(), U);
2274 }
2275 break;
2276 case LibFunc_sin:
2277 case LibFunc_sinf:
2278 if (TLI->has(Func))
2279 return ConstantFoldFP(sin, APF, Ty);
2280 break;
2281 case LibFunc_sinh:
2282 case LibFunc_sinhf:
2283 case LibFunc_sinh_finite:
2284 case LibFunc_sinhf_finite:
2285 if (TLI->has(Func))
2286 return ConstantFoldFP(sinh, APF, Ty);
2287 break;
2288 case LibFunc_sqrt:
2289 case LibFunc_sqrtf:
2290 if (!APF.isNegative() && TLI->has(Func))
2291 return ConstantFoldFP(sqrt, APF, Ty);
2292 break;
2293 case LibFunc_tan:
2294 case LibFunc_tanf:
2295 if (TLI->has(Func))
2296 return ConstantFoldFP(tan, APF, Ty);
2297 break;
2298 case LibFunc_tanh:
2299 case LibFunc_tanhf:
2300 if (TLI->has(Func))
2301 return ConstantFoldFP(tanh, APF, Ty);
2302 break;
2303 case LibFunc_trunc:
2304 case LibFunc_truncf:
2305 if (TLI->has(Func)) {
2306 U.roundToIntegral(APFloat::rmTowardZero);
2307 return ConstantFP::get(Ty->getContext(), U);
2308 }
2309 break;
2310 }
2311 return nullptr;
2312 }
2313
2314 if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
2315 switch (IntrinsicID) {
2316 case Intrinsic::bswap:
2317 return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
2318 case Intrinsic::ctpop:
2319 return ConstantInt::get(Ty, Op->getValue().countPopulation());
2320 case Intrinsic::bitreverse:
2321 return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
2322 case Intrinsic::convert_from_fp16: {
2323 APFloat Val(APFloat::IEEEhalf(), Op->getValue());
2324
2325 bool lost = false;
2326 APFloat::opStatus status = Val.convert(
2327 Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);
2328
2329 // Conversion is always precise.
2330 (void)status;
2331 assert(status == APFloat::opOK && !lost &&
2332        "Precision lost during fp16 constfolding");
2333
2334 return ConstantFP::get(Ty->getContext(), Val);
2335 }
2336 default:
2337 return nullptr;
2338 }
2339 }
2340
2341 switch (IntrinsicID) {
2342 default: break;
2343 case Intrinsic::vector_reduce_add:
2344 case Intrinsic::vector_reduce_mul:
2345 case Intrinsic::vector_reduce_and:
2346 case Intrinsic::vector_reduce_or:
2347 case Intrinsic::vector_reduce_xor:
2348 case Intrinsic::vector_reduce_smin:
2349 case Intrinsic::vector_reduce_smax:
2350 case Intrinsic::vector_reduce_umin:
2351 case Intrinsic::vector_reduce_umax:
2352 if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))
2353 return C;
2354 break;
2355 }
2356
2357 // Support ConstantVector in case the vector has an Undef element.
2358 if (isa<ConstantVector>(Operands[0]) ||
2359 isa<ConstantDataVector>(Operands[0])) {
2360 auto *Op = cast<Constant>(Operands[0]);
2361 switch (IntrinsicID) {
2362 default: break;
2363 case Intrinsic::x86_sse_cvtss2si:
2364 case Intrinsic::x86_sse_cvtss2si64:
2365 case Intrinsic::x86_sse2_cvtsd2si:
2366 case Intrinsic::x86_sse2_cvtsd2si64:
2367 if (ConstantFP *FPOp =
2368 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2369 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2370 /*roundTowardZero=*/false, Ty,
2371 /*IsSigned*/true);
2372 break;
2373 case Intrinsic::x86_sse_cvttss2si:
2374 case Intrinsic::x86_sse_cvttss2si64:
2375 case Intrinsic::x86_sse2_cvttsd2si:
2376 case Intrinsic::x86_sse2_cvttsd2si64:
2377 if (ConstantFP *FPOp =
2378 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2379 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2380 /*roundTowardZero=*/true, Ty,
2381 /*IsSigned*/true);
2382 break;
2383 }
2384 }
2385
2386 return nullptr;
2387}
2388
2389static Constant *ConstantFoldScalarCall2(StringRef Name,
2390 Intrinsic::ID IntrinsicID,
2391 Type *Ty,
2392 ArrayRef<Constant *> Operands,
2393 const TargetLibraryInfo *TLI,
2394 const CallBase *Call) {
2395 assert(Operands.size() == 2 && "Wrong number of operands.");
2396
2397 if (Ty->isFloatingPointTy()) {
   [Step 1: Calling 'Type::isFloatingPointTy'; Step 10: Returning from 'Type::isFloatingPointTy']
   [Step 11: Taking false branch]
2398 // TODO: We should have undef handling for all of the FP intrinsics that
2399 // are attempted to be folded in this function.
2400 bool IsOp0Undef = isa<UndefValue>(Operands[0]);
2401 bool IsOp1Undef = isa<UndefValue>(Operands[1]);
2402 switch (IntrinsicID) {
2403 case Intrinsic::maxnum:
2404 case Intrinsic::minnum:
2405 case Intrinsic::maximum:
2406 case Intrinsic::minimum:
2407 // If one argument is undef, return the other argument.
2408 if (IsOp0Undef)
2409 return Operands[1];
2410 if (IsOp1Undef)
2411 return Operands[0];
2412 break;
2413 }
2414 }
2415
2416 if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
   [Step 12: Assuming the object is not a 'ConstantFP'; 'Op1' is null]
2417 if (!Ty->isFloatingPointTy())
2418 return nullptr;
2419 APFloat Op1V = Op1->getValueAPF();
2420
2421 if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
2422 if (Op2->getType() != Op1->getType())
2423 return nullptr;
2424 APFloat Op2V = Op2->getValueAPF();
2425
2426 if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
2427 RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
2428 APFloat Res = Op1V;
2429 APFloat::opStatus St;
2430 switch (IntrinsicID) {
2431 default:
2432 return nullptr;
2433 case Intrinsic::experimental_constrained_fadd:
2434 St = Res.add(Op2V, RM);
2435 break;
2436 case Intrinsic::experimental_constrained_fsub:
2437 St = Res.subtract(Op2V, RM);
2438 break;
2439 case Intrinsic::experimental_constrained_fmul:
2440 St = Res.multiply(Op2V, RM);
2441 break;
2442 case Intrinsic::experimental_constrained_fdiv:
2443 St = Res.divide(Op2V, RM);
2444 break;
2445 case Intrinsic::experimental_constrained_frem:
2446 St = Res.mod(Op2V);
2447 break;
2448 }
2449 if (mayFoldConstrained(const_cast<ConstrainedFPIntrinsic *>(ConstrIntr),
2450 St))
2451 return ConstantFP::get(Ty->getContext(), Res);
2452 return nullptr;
2453 }
2454
2455 switch (IntrinsicID) {
2456 default:
2457 break;
2458 case Intrinsic::copysign:
2459 return ConstantFP::get(Ty->getContext(), APFloat::copySign(Op1V, Op2V));
2460 case Intrinsic::minnum:
2461 return ConstantFP::get(Ty->getContext(), minnum(Op1V, Op2V));
2462 case Intrinsic::maxnum:
2463 return ConstantFP::get(Ty->getContext(), maxnum(Op1V, Op2V));
2464 case Intrinsic::minimum:
2465 return ConstantFP::get(Ty->getContext(), minimum(Op1V, Op2V));
2466 case Intrinsic::maximum:
2467 return ConstantFP::get(Ty->getContext(), maximum(Op1V, Op2V));
2468 }
2469
2470 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
2471 return nullptr;
2472
2473 switch (IntrinsicID) {
2474 default:
2475 break;
2476 case Intrinsic::pow:
2477 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2478 case Intrinsic::amdgcn_fmul_legacy:
2479 // The legacy behaviour is that multiplying +/- 0.0 by anything, even
2480 // NaN or infinity, gives +0.0.
2481 if (Op1V.isZero() || Op2V.isZero())
2482 return ConstantFP::getNullValue(Ty);
2483 return ConstantFP::get(Ty->getContext(), Op1V * Op2V);
2484 }
2485
2486 if (!TLI)
2487 return nullptr;
2488
2489 LibFunc Func = NotLibFunc;
2490 TLI->getLibFunc(Name, Func);
2491 switch (Func) {
2492 default:
2493 break;
2494 case LibFunc_pow:
2495 case LibFunc_powf:
2496 case LibFunc_pow_finite:
2497 case LibFunc_powf_finite:
2498 if (TLI->has(Func))
2499 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2500 break;
2501 case LibFunc_fmod:
2502 case LibFunc_fmodf:
2503 if (TLI->has(Func)) {
2504 APFloat V = Op1->getValueAPF();
2505 if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
2506 return ConstantFP::get(Ty->getContext(), V);
2507 }
2508 break;
2509 case LibFunc_remainder:
2510 case LibFunc_remainderf:
2511 if (TLI->has(Func)) {
2512 APFloat V = Op1->getValueAPF();
2513 if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
2514 return ConstantFP::get(Ty->getContext(), V);
2515 }
2516 break;
2517 case LibFunc_atan2:
2518 case LibFunc_atan2f:
2519 case LibFunc_atan2_finite:
2520 case LibFunc_atan2f_finite:
2521 if (TLI->has(Func))
2522 return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
2523 break;
2524 }
2525 } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
2526 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
2527 return nullptr;
2528 if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
2529 return ConstantFP::get(
2530 Ty->getContext(),
2531 APFloat((float)std::pow((float)Op1V.convertToDouble(),
2532 (int)Op2C->getZExtValue())));
2533 if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
2534 return ConstantFP::get(
2535 Ty->getContext(),
2536 APFloat((float)std::pow((float)Op1V.convertToDouble(),
2537 (int)Op2C->getZExtValue())));
2538 if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
2539 return ConstantFP::get(
2540 Ty->getContext(),
2541 APFloat((double)std::pow(Op1V.convertToDouble(),
2542 (int)Op2C->getZExtValue())));
2543
2544 if (IntrinsicID == Intrinsic::amdgcn_ldexp) {
2545 // FIXME: Should flush denorms depending on FP mode, but that's ignored
2546 // everywhere else.
2547
2548 // scalbn is equivalent to ldexp with float radix 2
2549 APFloat Result = scalbn(Op1->getValueAPF(), Op2C->getSExtValue(),
2550 APFloat::rmNearestTiesToEven);
2551 return ConstantFP::get(Ty->getContext(), Result);
2552 }
2553 }
2554 return nullptr;
2555 }
2556
2557 if (Operands[0]->getType()->isIntegerTy() &&
   [Step 13: Calling 'Type::isIntegerTy'; Step 16: Returning from 'Type::isIntegerTy']
   [Step 21: Taking true branch]
2558     Operands[1]->getType()->isIntegerTy()) {
   [Step 17: Calling 'Type::isIntegerTy'; Step 20: Returning from 'Type::isIntegerTy']
2559 const APInt *C0, *C1;
2560 if (!getConstIntOrUndef(Operands[0], C0) ||
   [Step 22: Calling 'getConstIntOrUndef'; Step 28: Returning from 'getConstIntOrUndef']
   [Step 37: Taking false branch]
2561     !getConstIntOrUndef(Operands[1], C1))
   [Step 29: Calling 'getConstIntOrUndef'; Step 36: Returning from 'getConstIntOrUndef']
2562 return nullptr;
2563
2564 unsigned BitWidth = Ty->getScalarSizeInBits();
2565 switch (IntrinsicID) {
   [Step 38: Control jumps to 'case abs:' at line 2683]
2566 default: break;
2567 case Intrinsic::smax:
2568 if (!C0 && !C1)
2569 return UndefValue::get(Ty);
2570 if (!C0 || !C1)
2571 return ConstantInt::get(Ty, APInt::getSignedMaxValue(BitWidth));
2572 return ConstantInt::get(Ty, C0->sgt(*C1) ? *C0 : *C1);
2573
2574 case Intrinsic::smin:
2575 if (!C0 && !C1)
2576 return UndefValue::get(Ty);
2577 if (!C0 || !C1)
2578 return ConstantInt::get(Ty, APInt::getSignedMinValue(BitWidth));
2579 return ConstantInt::get(Ty, C0->slt(*C1) ? *C0 : *C1);
2580
2581 case Intrinsic::umax:
2582 if (!C0 && !C1)
2583 return UndefValue::get(Ty);
2584 if (!C0 || !C1)
2585 return ConstantInt::get(Ty, APInt::getMaxValue(BitWidth));
2586 return ConstantInt::get(Ty, C0->ugt(*C1) ? *C0 : *C1);
2587
2588 case Intrinsic::umin:
2589 if (!C0 && !C1)
2590 return UndefValue::get(Ty);
2591 if (!C0 || !C1)
2592 return ConstantInt::get(Ty, APInt::getMinValue(BitWidth));
2593 return ConstantInt::get(Ty, C0->ult(*C1) ? *C0 : *C1);
2594
2595 case Intrinsic::usub_with_overflow:
2596 case Intrinsic::ssub_with_overflow:
2597 // X - undef -> { 0, false }
2598 // undef - X -> { 0, false }
2599 if (!C0 || !C1)
2600 return Constant::getNullValue(Ty);
2601 LLVM_FALLTHROUGH;
2602 case Intrinsic::uadd_with_overflow:
2603 case Intrinsic::sadd_with_overflow:
2604 // X + undef -> { -1, false }
2605 // undef + x -> { -1, false }
2606 if (!C0 || !C1) {
2607 return ConstantStruct::get(
2608 cast<StructType>(Ty),
2609 {Constant::getAllOnesValue(Ty->getStructElementType(0)),
2610 Constant::getNullValue(Ty->getStructElementType(1))});
2611 }
2612 LLVM_FALLTHROUGH;
2613 case Intrinsic::smul_with_overflow:
2614 case Intrinsic::umul_with_overflow: {
2615 // undef * X -> { 0, false }
2616 // X * undef -> { 0, false }
2617 if (!C0 || !C1)
2618 return Constant::getNullValue(Ty);
2619
2620 APInt Res;
2621 bool Overflow;
2622 switch (IntrinsicID) {
2623 default: llvm_unreachable("Invalid case");
2624 case Intrinsic::sadd_with_overflow:
2625 Res = C0->sadd_ov(*C1, Overflow);
2626 break;
2627 case Intrinsic::uadd_with_overflow:
2628 Res = C0->uadd_ov(*C1, Overflow);
2629 break;
2630 case Intrinsic::ssub_with_overflow:
2631 Res = C0->ssub_ov(*C1, Overflow);
2632 break;
2633 case Intrinsic::usub_with_overflow:
2634 Res = C0->usub_ov(*C1, Overflow);
2635 break;
2636 case Intrinsic::smul_with_overflow:
2637 Res = C0->smul_ov(*C1, Overflow);
2638 break;
2639 case Intrinsic::umul_with_overflow:
2640 Res = C0->umul_ov(*C1, Overflow);
2641 break;
2642 }
2643 Constant *Ops[] = {
2644 ConstantInt::get(Ty->getContext(), Res),
2645 ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
2646 };
2647 return ConstantStruct::get(cast<StructType>(Ty), Ops);
2648 }
2649 case Intrinsic::uadd_sat:
2650 case Intrinsic::sadd_sat:
2651 if (!C0 && !C1)
2652 return UndefValue::get(Ty);
2653 if (!C0 || !C1)
2654 return Constant::getAllOnesValue(Ty);
2655 if (IntrinsicID == Intrinsic::uadd_sat)
2656 return ConstantInt::get(Ty, C0->uadd_sat(*C1));
2657 else
2658 return ConstantInt::get(Ty, C0->sadd_sat(*C1));
2659 case Intrinsic::usub_sat:
2660 case Intrinsic::ssub_sat:
2661 if (!C0 && !C1)
2662 return UndefValue::get(Ty);
2663 if (!C0 || !C1)
2664 return Constant::getNullValue(Ty);
2665 if (IntrinsicID == Intrinsic::usub_sat)
2666 return ConstantInt::get(Ty, C0->usub_sat(*C1));
2667 else
2668 return ConstantInt::get(Ty, C0->ssub_sat(*C1));
2669 case Intrinsic::cttz:
2670 case Intrinsic::ctlz:
2671 assert(C1 && "Must be constant int");
2672
2673 // cttz(0, 1) and ctlz(0, 1) are undef.
2674 if (C1->isOneValue() && (!C0 || C0->isNullValue()))
2675 return UndefValue::get(Ty);
2676 if (!C0)
2677 return Constant::getNullValue(Ty);
2678 if (IntrinsicID == Intrinsic::cttz)
2679 return ConstantInt::get(Ty, C0->countTrailingZeros());
2680 else
2681 return ConstantInt::get(Ty, C0->countLeadingZeros());
2682
2683 case Intrinsic::abs:
2684 // Undef or minimum val operand with poison min --> undef
2685 assert(C1 && "Must be constant int");
2686 if (C1->isOneValue() && (!C0 || C0->isMinSignedValue()))
   [Step 39: Called C++ object pointer is null. This is the reported warning: the build
    defines NDEBUG, so the assert above is compiled out, and on the analyzed path
    getConstIntOrUndef stored nullptr to 'C1', so 'C1->isOneValue()' dereferences null]
2687 return UndefValue::get(Ty);
2688
2689 // Undef operand with no poison min --> 0 (sign bit must be clear)
2690 if (C1->isNullValue() && !C0)
2691 return Constant::getNullValue(Ty);
2692
2693 return ConstantInt::get(Ty, C0->abs());
2694 }
2695
2696 return nullptr;
2697 }
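The *_with_overflow cases above all share a compute-and-report-a-pair shape; a standalone model of the uadd_with_overflow fold on concrete operands, with a plain struct standing in for the {i32, i1} result:

    #include <cassert>
    #include <cstdint>

    struct ResultPair { uint32_t Res; bool Overflow; }; // models {i32, i1}

    // Mirrors the C0->uadd_ov(*C1, Overflow) step for 32-bit operands.
    static ResultPair uaddWithOverflow(uint32_t A, uint32_t B) {
      uint32_t Res = A + B;  // unsigned wraparound is well defined
      return {Res, Res < A}; // wrapped iff the sum got smaller
    }

    int main() {
      ResultPair R = uaddWithOverflow(0xFFFFFFFFu, 1u);
      assert(R.Overflow && R.Res == 0);
      ResultPair S = uaddWithOverflow(40u, 2u);
      assert(!S.Overflow && S.Res == 42);
      return 0;
    }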
2698
2699 // Support ConstantVector in case the vector has an Undef element.
2700 if ((isa<ConstantVector>(Operands[0]) ||
2701 isa<ConstantDataVector>(Operands[0])) &&
2702 // Check for default rounding mode.
2703 // FIXME: Support other rounding modes?
2704 isa<ConstantInt>(Operands[1]) &&
2705 cast<ConstantInt>(Operands[1])->getValue() == 4) {
2706 auto *Op = cast<Constant>(Operands[0]);
2707 switch (IntrinsicID) {
2708 default: break;
2709 case Intrinsic::x86_avx512_vcvtss2si32:
2710 case Intrinsic::x86_avx512_vcvtss2si64:
2711 case Intrinsic::x86_avx512_vcvtsd2si32:
2712 case Intrinsic::x86_avx512_vcvtsd2si64:
2713 if (ConstantFP *FPOp =
2714 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2715 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2716 /*roundTowardZero=*/false, Ty,
2717 /*IsSigned*/true);
2718 break;
2719 case Intrinsic::x86_avx512_vcvtss2usi32:
2720 case Intrinsic::x86_avx512_vcvtss2usi64:
2721 case Intrinsic::x86_avx512_vcvtsd2usi32:
2722 case Intrinsic::x86_avx512_vcvtsd2usi64:
2723 if (ConstantFP *FPOp =
2724 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2725 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2726 /*roundTowardZero=*/false, Ty,
2727 /*IsSigned*/false);
2728 break;
2729 case Intrinsic::x86_avx512_cvttss2si:
2730 case Intrinsic::x86_avx512_cvttss2si64:
2731 case Intrinsic::x86_avx512_cvttsd2si:
2732 case Intrinsic::x86_avx512_cvttsd2si64:
2733 if (ConstantFP *FPOp =
2734 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2735 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2736 /*roundTowardZero=*/true, Ty,
2737 /*IsSigned*/true);
2738 break;
2739 case Intrinsic::x86_avx512_cvttss2usi:
2740 case Intrinsic::x86_avx512_cvttss2usi64:
2741 case Intrinsic::x86_avx512_cvttsd2usi:
2742 case Intrinsic::x86_avx512_cvttsd2usi64:
2743 if (ConstantFP *FPOp =
2744 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2745 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2746 /*roundTowardZero=*/true, Ty,
2747 /*IsSigned*/false);
2748 break;
2749 }
2750 }
2751 return nullptr;
2752}
2753
2754static APFloat ConstantFoldAMDGCNCubeIntrinsic(Intrinsic::ID IntrinsicID,
2755 const APFloat &S0,
2756 const APFloat &S1,
2757 const APFloat &S2) {
2758 unsigned ID;
2759 const fltSemantics &Sem = S0.getSemantics();
2760 APFloat MA(Sem), SC(Sem), TC(Sem);
2761 if (abs(S2) >= abs(S0) && abs(S2) >= abs(S1)) {
2762 if (S2.isNegative() && S2.isNonZero() && !S2.isNaN()) {
2763 // S2 < 0
2764 ID = 5;
2765 SC = -S0;
2766 } else {
2767 ID = 4;
2768 SC = S0;
2769 }
2770 MA = S2;
2771 TC = -S1;
2772 } else if (abs(S1) >= abs(S0)) {
2773 if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
2774 // S1 < 0
2775 ID = 3;
2776 TC = -S2;
2777 } else {
2778 ID = 2;
2779 TC = S2;
2780 }
2781 MA = S1;
2782 SC = S0;
2783 } else {
2784 if (S0.isNegative() && S0.isNonZero() && !S0.isNaN()) {
2785 // S0 < 0
2786 ID = 1;
2787 SC = S2;
2788 } else {
2789 ID = 0;
2790 SC = -S2;
2791 }
2792 MA = S0;
2793 TC = -S1;
2794 }
2795 switch (IntrinsicID) {
2796 default:
2797 llvm_unreachable("unhandled amdgcn cube intrinsic");
2798 case Intrinsic::amdgcn_cubeid:
2799 return APFloat(Sem, ID);
2800 case Intrinsic::amdgcn_cubema:
2801 return MA + MA;
2802 case Intrinsic::amdgcn_cubesc:
2803 return SC;
2804 case Intrinsic::amdgcn_cubetc:
2805 return TC;
2806 }
2807}
2808
2809static Constant *ConstantFoldAMDGCNPermIntrinsic(ArrayRef<Constant *> Operands,
2810 Type *Ty) {
2811 const APInt *C0, *C1, *C2;
2812 if (!getConstIntOrUndef(Operands[0], C0) ||
2813 !getConstIntOrUndef(Operands[1], C1) ||
2814 !getConstIntOrUndef(Operands[2], C2))
2815 return nullptr;
2816
2817 if (!C2)
2818 return UndefValue::get(Ty);
2819
2820 APInt Val(32, 0);
2821 unsigned NumUndefBytes = 0;
2822 for (unsigned I = 0; I < 32; I += 8) {
2823 unsigned Sel = C2->extractBitsAsZExtValue(8, I);
2824 unsigned B = 0;
2825
2826 if (Sel >= 13)
2827 B = 0xff;
2828 else if (Sel == 12)
2829 B = 0x00;
2830 else {
2831 const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
2832 if (!Src)
2833 ++NumUndefBytes;
2834 else if (Sel < 8)
2835 B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
2836 else
2837 B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
2838 }
2839
2840 Val.insertBits(B, I, 8);
2841 }
2842
2843 if (NumUndefBytes == 4)
2844 return UndefValue::get(Ty);
2845
2846 return ConstantInt::get(Ty, Val);
2847}
2848
2849static Constant *ConstantFoldScalarCall3(StringRef Name,
2850 Intrinsic::ID IntrinsicID,
2851 Type *Ty,
2852 ArrayRef<Constant *> Operands,
2853 const TargetLibraryInfo *TLI,
2854 const CallBase *Call) {
2855 assert(Operands.size() == 3 && "Wrong number of operands.");
2856
2857 if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
2858 if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
2859 if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
2860 const APFloat &C1 = Op1->getValueAPF();
2861 const APFloat &C2 = Op2->getValueAPF();
2862 const APFloat &C3 = Op3->getValueAPF();
2863
2864 if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
2865 RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
2866 APFloat Res = C1;
2867 APFloat::opStatus St;
2868 switch (IntrinsicID) {
2869 default:
2870 return nullptr;
2871 case Intrinsic::experimental_constrained_fma:
2872 case Intrinsic::experimental_constrained_fmuladd:
2873 St = Res.fusedMultiplyAdd(C2, C3, RM);
2874 break;
2875 }
2876 if (mayFoldConstrained(
2877 const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
2878 return ConstantFP::get(Ty->getContext(), Res);
2879 return nullptr;
2880 }
2881
2882 switch (IntrinsicID) {
2883 default: break;
2884 case Intrinsic::amdgcn_fma_legacy: {
2885 // The legacy behaviour is that multiplying +/- 0.0 by anything, even
2886 // NaN or infinity, gives +0.0.
2887 if (C1.isZero() || C2.isZero()) {
2888 // It's tempting to just return C3 here, but that would give the
2889 // wrong result if C3 was -0.0.
2890 return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3);
2891 }
2892 LLVM_FALLTHROUGH;
2893 }
2894 case Intrinsic::fma:
2895 case Intrinsic::fmuladd: {
2896 APFloat V = C1;
2897 V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven);
2898 return ConstantFP::get(Ty->getContext(), V);
2899 }
2900 case Intrinsic::amdgcn_cubeid:
2901 case Intrinsic::amdgcn_cubema:
2902 case Intrinsic::amdgcn_cubesc:
2903 case Intrinsic::amdgcn_cubetc: {
2904 APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
2905 return ConstantFP::get(Ty->getContext(), V);
2906 }
2907 }
2908 }
2909 }
2910 }
2911
2912 if (IntrinsicID == Intrinsic::smul_fix ||
2913 IntrinsicID == Intrinsic::smul_fix_sat) {
2914 // poison * C -> poison
2915 // C * poison -> poison
2916 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2917 return PoisonValue::get(Ty);
2918
2919 const APInt *C0, *C1;
2920 if (!getConstIntOrUndef(Operands[0], C0) ||
2921 !getConstIntOrUndef(Operands[1], C1))
2922 return nullptr;
2923
2924 // undef * C -> 0
2925 // C * undef -> 0
2926 if (!C0 || !C1)
2927 return Constant::getNullValue(Ty);
2928
2929 // This code performs rounding towards negative infinity in case the result
2930 // cannot be represented exactly for the given scale. Targets that do care
2931 // about rounding should use a target hook for specifying how rounding
2932 // should be done, and provide their own folding to be consistent with
2933 // rounding. This is the same approach as used by
2934 // DAGTypeLegalizer::ExpandIntRes_MULFIX.
2935 unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue();
2936 unsigned Width = C0->getBitWidth();
2937 assert(Scale < Width && "Illegal scale.");
2938 unsigned ExtendedWidth = Width * 2;
2939 APInt Product = (C0->sextOrSelf(ExtendedWidth) *
2940 C1->sextOrSelf(ExtendedWidth)).ashr(Scale);
2941 if (IntrinsicID == Intrinsic::smul_fix_sat) {
2942 APInt Max = APInt::getSignedMaxValue(Width).sextOrSelf(ExtendedWidth);
2943 APInt Min = APInt::getSignedMinValue(Width).sextOrSelf(ExtendedWidth);
2944 Product = APIntOps::smin(Product, Max);
2945 Product = APIntOps::smax(Product, Min);
2946 }
2947 return ConstantInt::get(Ty->getContext(), Product.sextOrTrunc(Width));
2948 }
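A standalone model of the widening fixed-point multiply above, with Scale = 16 so the operands read as Q16.16 numbers (widths and values illustrative):

    #include <cassert>
    #include <cstdint>

    // Mirrors the smul_fix fold: multiply at double width, ashr by Scale,
    // truncate back; rounding is toward negative infinity.
    static int32_t smulFix(int32_t A, int32_t B, unsigned Scale) {
      int64_t Product = (int64_t)A * (int64_t)B; // sext to 2x width
      return (int32_t)(Product >> Scale);        // ashr(Scale), then trunc
    }

    int main() {
      int32_t A = 2 * 65536 + 32768; // 2.5 in Q16.16
      int32_t B = 3 * 65536;         // 3.0 in Q16.16
      assert(smulFix(A, B, 16) == 7 * 65536 + 32768); // 2.5 * 3.0 == 7.5
      return 0;
    }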
2949
2950 if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
2951 const APInt *C0, *C1, *C2;
2952 if (!getConstIntOrUndef(Operands[0], C0) ||
2953 !getConstIntOrUndef(Operands[1], C1) ||
2954 !getConstIntOrUndef(Operands[2], C2))
2955 return nullptr;
2956
2957 bool IsRight = IntrinsicID == Intrinsic::fshr;
2958 if (!C2)
2959 return Operands[IsRight ? 1 : 0];
2960 if (!C0 && !C1)
2961 return UndefValue::get(Ty);
2962
2963 // The shift amount is interpreted modulo the bitwidth. If the shift
2964 // amount is effectively 0, avoid UB due to the oversized inverse shift below.
2965 unsigned BitWidth = C2->getBitWidth();
2966 unsigned ShAmt = C2->urem(BitWidth);
2967 if (!ShAmt)
2968 return Operands[IsRight ? 1 : 0];
2969
2970 // (C0 << ShlAmt) | (C1 >> LshrAmt)
2971 unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
2972 unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
2973 if (!C0)
2974 return ConstantInt::get(Ty, C1->lshr(LshrAmt));
2975 if (!C1)
2976 return ConstantInt::get(Ty, C0->shl(ShlAmt));
2977 return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
2978 }
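A standalone check of the funnel-shift arithmetic above on i8 constants, including the modulo-bitwidth reduction of the shift amount (fshl shown; operand values arbitrary):

    #include <cassert>
    #include <cstdint>

    // Mirrors the fshl fold: (C0 << ShAmt) | (C1 >> (Width - ShAmt)),
    // with the shift amount taken modulo the bit width.
    static uint8_t fshl8(uint8_t C0, uint8_t C1, unsigned Amt) {
      unsigned ShAmt = Amt % 8;
      if (ShAmt == 0)
        return C0; // avoid the oversized inverse shift
      return (uint8_t)((C0 << ShAmt) | (C1 >> (8 - ShAmt)));
    }

    int main() {
      // fshl(i8 0xAB, i8 0xCD, 11): effective shift 3, so
      // (0xAB << 3) | (0xCD >> 5) == 0x58 | 0x06 == 0x5E.
      assert(fshl8(0xAB, 0xCD, 11) == 0x5E);
      return 0;
    }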
2979
2980 if (IntrinsicID == Intrinsic::amdgcn_perm)
2981 return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);
2982
2983 return nullptr;
2984}
2985
2986static Constant *ConstantFoldScalarCall(StringRef Name,
2987 Intrinsic::ID IntrinsicID,
2988 Type *Ty,
2989 ArrayRef<Constant *> Operands,
2990 const TargetLibraryInfo *TLI,
2991 const CallBase *Call) {
2992 if (Operands.size() == 1)
2993 return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);
2994
2995 if (Operands.size() == 2)
2996 return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call);
2997
2998 if (Operands.size() == 3)
2999 return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);
3000
3001 return nullptr;
3002}
3003
3004static Constant *ConstantFoldFixedVectorCall(
3005 StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy,
3006 ArrayRef<Constant *> Operands, const DataLayout &DL,
3007 const TargetLibraryInfo *TLI, const CallBase *Call) {
3008 SmallVector<Constant *, 4> Result(FVTy->getNumElements());
3009 SmallVector<Constant *, 4> Lane(Operands.size());
3010 Type *Ty = FVTy->getElementType();
3011
3012 switch (IntrinsicID) {
3013 case Intrinsic::masked_load: {
3014 auto *SrcPtr = Operands[0];
3015 auto *Mask = Operands[2];
3016 auto *Passthru = Operands[3];
3017
3018 Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, FVTy, DL);
3019
3020 SmallVector<Constant *, 32> NewElements;
3021 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
3022 auto *MaskElt = Mask->getAggregateElement(I);
3023 if (!MaskElt)
3024 break;
3025 auto *PassthruElt = Passthru->getAggregateElement(I);
3026 auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
3027 if (isa<UndefValue>(MaskElt)) {
3028 if (PassthruElt)
3029 NewElements.push_back(PassthruElt);
3030 else if (VecElt)
3031 NewElements.push_back(VecElt);
3032 else
3033 return nullptr;
// Without this continue, an undef mask lane falls through to the
// null/one tests below, fails both, and aborts the fold even though an
// element has already been chosen for this lane.
continue;
3034 }
3035 if (MaskElt->isNullValue()) {
3036 if (!PassthruElt)
3037 return nullptr;
3038 NewElements.push_back(PassthruElt);
3039 } else if (MaskElt->isOneValue()) {
3040 if (!VecElt)
3041 return nullptr;
3042 NewElements.push_back(VecElt);
3043 } else {
3044 return nullptr;
3045 }
3046 }
3047 if (NewElements.size() != FVTy->getNumElements())
3048 return nullptr;
3049 return ConstantVector::get(NewElements);
3050 }
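
For instance (illustrative IR), a masked load of <2 x i32> from a constant global holding <i32 7, i32 9>, with mask <i1 1, i1 0> and passthru <i32 0, i32 5>, folds lane-wise to <i32 7, i32 5>: the enabled lane takes the loaded element and the disabled lane takes the passthru element.
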
3051 case Intrinsic::arm_mve_vctp8:
3052 case Intrinsic::arm_mve_vctp16:
3053 case Intrinsic::arm_mve_vctp32:
3054 case Intrinsic::arm_mve_vctp64: {
3055 if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
3056 unsigned Lanes = FVTy->getNumElements();
3057 uint64_t Limit = Op->getZExtValue();
3058 // vctp64 is currently modelled as returning a v4i1, not a v2i1. Make
3059 // sure we get the limit right in that case and set all relevant lanes.
3060 if (IntrinsicID == Intrinsic::arm_mve_vctp64)
3061 Limit *= 2;
3062
3063 SmallVector<Constant *, 16> NCs;
3064 for (unsigned i = 0; i < Lanes; i++) {
3065 if (i < Limit)
3066 NCs.push_back(ConstantInt::getTrue(Ty));
3067 else
3068 NCs.push_back(ConstantInt::getFalse(Ty));
3069 }
3070 return ConstantVector::get(NCs);
3071 }
3072 break;
3073 }
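
Illustrative folds for the case above: arm.mve.vctp32(2), whose result is a <4 x i1>, becomes <i1 1, i1 1, i1 0, i1 0>; arm.mve.vctp64(1) doubles the limit to match its v4i1 modelling and likewise sets the first two lanes.
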
3074 case Intrinsic::get_active_lane_mask: {
3075 auto *Op0 = dyn_cast<ConstantInt>(Operands[0]);
3076 auto *Op1 = dyn_cast<ConstantInt>(Operands[1]);
3077 if (Op0 && Op1) {
3078 unsigned Lanes = FVTy->getNumElements();
3079 uint64_t Base = Op0->getZExtValue();
3080 uint64_t Limit = Op1->getZExtValue();
3081
3082 SmallVector<Constant *, 16> NCs;
3083 for (unsigned i = 0; i < Lanes; i++) {
3084 if (Base + i < Limit)
3085 NCs.push_back(ConstantInt::getTrue(Ty));
3086 else
3087 NCs.push_back(ConstantInt::getFalse(Ty));
3088 }
3089 return ConstantVector::get(NCs);
3090 }
3091 break;
3092 }
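
For example, get.active.lane.mask(i64 2, i64 5) producing <4 x i1> folds to <i1 1, i1 1, i1 1, i1 0>: bases 2, 3 and 4 are below the trip count 5, but 5 is not.
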
3093 default:
3094 break;
3095 }
3096
3097 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
3098 // Gather a column of constants.
3099 for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
3100 // Some intrinsics use a scalar type for certain arguments.
3101 if (hasVectorInstrinsicScalarOpd(IntrinsicID, J)) {
3102 Lane[J] = Operands[J];
3103 continue;
3104 }
3105
3106 Constant *Agg = Operands[J]->getAggregateElement(I);
3107 if (!Agg)
3108 return nullptr;
3109
3110 Lane[J] = Agg;
3111 }
3112
3113 // Use the regular scalar folding to simplify this column.
3114 Constant *Folded =
3115 ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
3116 if (!Folded)
3117 return nullptr;
3118 Result[I] = Folded;
3119 }
3120
3121 return ConstantVector::get(Result);
3122}
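
The generic lane loop above is what handles ordinary element-wise intrinsics. For instance (illustrative, assuming the scalar folder handles umax), umax(<2 x i32> <i32 1, i32 8>, <2 x i32> <i32 7, i32 3>) is folded one lane at a time through ConstantFoldScalarCall, giving <i32 7, i32 8>.
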
3123
3124static Constant *ConstantFoldScalableVectorCall(
3125 StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy,
3126 ArrayRef<Constant *> Operands, const DataLayout &DL,
3127 const TargetLibraryInfo *TLI, const CallBase *Call) {
3128 switch (IntrinsicID) {
3129 case Intrinsic::aarch64_sve_convert_from_svbool: {
3130 auto *Src = dyn_cast<Constant>(Operands[0]);
3131 if (!Src || !Src->isNullValue())
3132 break;
3133
3134 return ConstantInt::getFalse(SVTy);
3135 }
3136 default:
3137 break;
3138 }
3139 return nullptr;
3140}
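
For example, aarch64.sve.convert.from.svbool on an all-zero svbool source folds to the all-false value of the narrower scalable predicate type; any non-null source is left unfolded.
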
3141
3142} // end anonymous namespace
3143
3144Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
3145 ArrayRef<Constant *> Operands,
3146 const TargetLibraryInfo *TLI) {
3147 if (Call->isNoBuiltin())
3148 return nullptr;
3149 if (!F->hasName())
3150 return nullptr;
3151
3152 // If this is not an intrinsic and not recognized as a library call, bail out.
3153 if (F->getIntrinsicID() == Intrinsic::not_intrinsic) {
3154 if (!TLI)
3155 return nullptr;
3156 LibFunc LibF;
3157 if (!TLI->getLibFunc(*F, LibF))
3158 return nullptr;
3159 }
3160
3161 StringRef Name = F->getName();
3162 Type *Ty = F->getReturnType();
3163 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
3164 return ConstantFoldFixedVectorCall(
3165 Name, F->getIntrinsicID(), FVTy, Operands,
3166 F->getParent()->getDataLayout(), TLI, Call);
3167
3168 if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty))
3169 return ConstantFoldScalableVectorCall(
3170 Name, F->getIntrinsicID(), SVTy, Operands,
3171 F->getParent()->getDataLayout(), TLI, Call);
3172
3173 // TODO: If this is a library function, we already discovered that above,
3174 // so we should pass the LibFunc, not the name (and it might be better
3175 // still to separate intrinsic handling from libcalls).
3176 return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
3177 Call);
3178}
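
A minimal caller sketch (illustrative only; it assumes every argument of Call has already been proven to be a Constant, and TLI may be null when only intrinsics matter):

  SmallVector<Constant *, 4> Ops;
  for (Value *Arg : Call->args())
    Ops.push_back(cast<Constant>(Arg)); // caller guarantees constant args
  if (Constant *C = ConstantFoldCall(Call, Call->getCalledFunction(), Ops, TLI))
    Call->replaceAllUsesWith(C); // the call is now dead and can be erased
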
3179
3180bool llvm::isMathLibCallNoop(const CallBase *Call,
3181 const TargetLibraryInfo *TLI) {
3182 // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
3183 // (and to some extent ConstantFoldScalarCall).
3184 if (Call->isNoBuiltin() || Call->isStrictFP())
3185 return false;
3186 Function *F = Call->getCalledFunction();
3187 if (!F)
3188 return false;
3189
3190 LibFunc Func;
3191 if (!TLI || !TLI->getLibFunc(*F, Func))
3192 return false;
3193
3194 if (Call->getNumArgOperands() == 1) {
3195 if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
3196 const APFloat &Op = OpC->getValueAPF();
3197 switch (Func) {
3198 case LibFunc_logl:
3199 case LibFunc_log:
3200 case LibFunc_logf:
3201 case LibFunc_log2l:
3202 case LibFunc_log2:
3203 case LibFunc_log2f:
3204 case LibFunc_log10l:
3205 case LibFunc_log10:
3206 case LibFunc_log10f:
3207 return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
3208
3209 case LibFunc_expl:
3210 case LibFunc_exp:
3211 case LibFunc_expf:
3212 // FIXME: These boundaries are slightly conservative.
3213 if (OpC->getType()->isDoubleTy())
3214 return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
3215 if (OpC->getType()->isFloatTy())
3216 return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
3217 break;
3218
3219 case LibFunc_exp2l:
3220 case LibFunc_exp2:
3221 case LibFunc_exp2f:
3222 // FIXME: These boundaries are slightly conservative.
3223 if (OpC->getType()->isDoubleTy())
3224 return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0));
3225 if (OpC->getType()->isFloatTy())
3226 return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f));
3227 break;
3228
3229 case LibFunc_sinl:
3230 case LibFunc_sin:
3231 case LibFunc_sinf:
3232 case LibFunc_cosl:
3233 case LibFunc_cos:
3234 case LibFunc_cosf:
3235 return !Op.isInfinity();
3236
3237 case LibFunc_tanl:
3238 case LibFunc_tan:
3239 case LibFunc_tanf: {
3240 // FIXME: Stop using the host math library.
3241 // FIXME: The computation isn't done in the right precision.
3242 Type *Ty = OpC->getType();
3243 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
3244 return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
3245 break;
3246 }
3247
3248 case LibFunc_asinl:
3249 case LibFunc_asin:
3250 case LibFunc_asinf:
3251 case LibFunc_acosl:
3252 case LibFunc_acos:
3253 case LibFunc_acosf:
3254 return !(Op < APFloat(Op.getSemantics(), "-1") ||
3255 Op > APFloat(Op.getSemantics(), "1"));
3256
3257 case LibFunc_sinh:
3258 case LibFunc_cosh:
3259 case LibFunc_sinhf:
3260 case LibFunc_coshf:
3261 case LibFunc_sinhl:
3262 case LibFunc_coshl:
3263 // FIXME: These boundaries are slightly conservative.
3264 if (OpC->getType()->isDoubleTy())
3265 return !(Op < APFloat(-710.0) || Op > APFloat(710.0));
3266 if (OpC->getType()->isFloatTy())
3267 return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f));
3268 break;
3269
3270 case LibFunc_sqrtl:
3271 case LibFunc_sqrt:
3272 case LibFunc_sqrtf:
3273 return Op.isNaN() || Op.isZero() || !Op.isNegative();
3274
3275 // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
3276 // maybe others?
3277 default:
3278 break;
3279 }
3280 }
3281 }
3282
3283 if (Call->getNumArgOperands() == 2) {
3284 ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
3285 ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
3286 if (Op0C && Op1C) {
3287 const APFloat &Op0 = Op0C->getValueAPF();
3288 const APFloat &Op1 = Op1C->getValueAPF();
3289
3290 switch (Func) {
3291 case LibFunc_powl:
3292 case LibFunc_pow:
3293 case LibFunc_powf: {
3294 // FIXME: Stop using the host math library.
3295 // FIXME: The computation isn't done in the right precision.
3296 Type *Ty = Op0C->getType();
3297 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
3298 if (Ty == Op1C->getType())
3299 return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
3300 }
3301 break;
3302 }
3303
3304 case LibFunc_fmodl:
3305 case LibFunc_fmod:
3306 case LibFunc_fmodf:
3307 case LibFunc_remainderl:
3308 case LibFunc_remainder:
3309 case LibFunc_remainderf:
3310 return Op0.isNaN() || Op1.isNaN() ||
3311 (!Op0.isInfinity() && !Op1.isZero());
3312
3313 default:
3314 break;
3315 }
3316 }
3317 }
3318
3319 return false;
3320}
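
Worked examples against the cases above (illustrative): sqrt(4.0) and sqrt(-0.0) are no-ops, but sqrt(-1.0) is not; log(0.0) is not (pole at zero); exp(1000.0) is not, since the overflow would set errno, while exp(1.0) is.
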
3321
3322void TargetFolder::anchor() {}

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/IR/Type.h

1//===- llvm/Type.h - Classes for handling data types ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the declaration of the Type class. For more "Type"
10// stuff, look in DerivedTypes.h.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_TYPE_H
15#define LLVM_IR_TYPE_H
16
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/SmallPtrSet.h"
20#include "llvm/Support/CBindingWrapping.h"
21#include "llvm/Support/Casting.h"
22#include "llvm/Support/Compiler.h"
23#include "llvm/Support/ErrorHandling.h"
24#include "llvm/Support/TypeSize.h"
25#include <cassert>
26#include <cstdint>
27#include <iterator>
28
29namespace llvm {
30
31class IntegerType;
32class LLVMContext;
33class PointerType;
34class raw_ostream;
35class StringRef;
36
37/// The instances of the Type class are immutable: once they are created,
38/// they are never changed. Also note that only one instance of a particular
39/// type is ever created. Thus seeing if two types are equal is a matter of
40/// doing a trivial pointer comparison. To enforce that no two equal instances
41/// are created, Type instances can only be created via static factory methods
42/// in class Type and in derived classes. Once allocated, Types are never
43 /// freed.
44///
45class Type {
46public:
47 //===--------------------------------------------------------------------===//
48 /// Definitions of all of the base types for the Type system. Based on this
49 /// value, you can cast to a class defined in DerivedTypes.h.
50 /// Note: If you add an element to this, you need to add an element to the
51 /// Type::getPrimitiveType function, or else things will break!
52 /// Also update LLVMTypeKind and LLVMGetTypeKind() in the C binding.
53 ///
54 enum TypeID {
55 // PrimitiveTypes
56 HalfTyID = 0, ///< 16-bit floating point type
57 BFloatTyID, ///< 16-bit floating point type (7-bit significand)
58 FloatTyID, ///< 32-bit floating point type
59 DoubleTyID, ///< 64-bit floating point type
60 X86_FP80TyID, ///< 80-bit floating point type (X87)
61 FP128TyID, ///< 128-bit floating point type (112-bit significand)
62 PPC_FP128TyID, ///< 128-bit floating point type (two 64-bits, PowerPC)
63 VoidTyID, ///< type with no size
64 LabelTyID, ///< Labels
65 MetadataTyID, ///< Metadata
66 X86_MMXTyID, ///< MMX vectors (64 bits, X86 specific)
67 X86_AMXTyID, ///< AMX vectors (8192 bits, X86 specific)
68 TokenTyID, ///< Tokens
69
70 // Derived types... see DerivedTypes.h file.
71 IntegerTyID, ///< Arbitrary bit width integers
72 FunctionTyID, ///< Functions
73 PointerTyID, ///< Pointers
74 StructTyID, ///< Structures
75 ArrayTyID, ///< Arrays
76 FixedVectorTyID, ///< Fixed width SIMD vector type
77 ScalableVectorTyID ///< Scalable SIMD vector type
78 };
79
80private:
81 /// This refers to the LLVMContext in which this type was uniqued.
82 LLVMContext &Context;
83
84 TypeID ID : 8; // The current base type of this type.
85 unsigned SubclassData : 24; // Space for subclasses to store data.
86 // Note that this should be synchronized with
87 // MAX_INT_BITS value in IntegerType class.
88
89protected:
90 friend class LLVMContextImpl;
91
92 explicit Type(LLVMContext &C, TypeID tid)
93 : Context(C), ID(tid), SubclassData(0) {}
94 ~Type() = default;
95
96 unsigned getSubclassData() const { return SubclassData; }
97
98 void setSubclassData(unsigned val) {
99 SubclassData = val;
100 // Ensure we don't have any accidental truncation.
101 assert(getSubclassData() == val && "Subclass data too large for field");
102 }
103
104 /// Keeps track of how many Type*'s there are in the ContainedTys list.
105 unsigned NumContainedTys = 0;
106
107 /// A pointer to the array of Types contained by this Type. For example, this
108 /// includes the arguments of a function type, the elements of a structure,
109 /// the pointee of a pointer, the element type of an array, etc. This pointer
110 /// may be 0 for types that don't contain other types (Integer, Double,
111 /// Float).
112 Type * const *ContainedTys = nullptr;
113
114public:
115 /// Print the current type.
116 /// Omit the type details if \p NoDetails == true.
117 /// E.g., let %st = type { i32, i16 }
118 /// When \p NoDetails is true, we only print %st.
119 /// Put differently, \p NoDetails prints the type as if
120 /// inlined with the operands when printing an instruction.
121 void print(raw_ostream &O, bool IsForDebug = false,
122 bool NoDetails = false) const;
123
124 void dump() const;
125
126 /// Return the LLVMContext in which this type was uniqued.
127 LLVMContext &getContext() const { return Context; }
128
129 //===--------------------------------------------------------------------===//
130 // Accessors for working with types.
131 //
132
133 /// Return the type id for the type. This will return one of the TypeID enum
134 /// elements defined above.
135 TypeID getTypeID() const { return ID; }
136
137 /// Return true if this is 'void'.
138 bool isVoidTy() const { return getTypeID() == VoidTyID; }
139
140 /// Return true if this is 'half', a 16-bit IEEE fp type.
141 bool isHalfTy() const { return getTypeID() == HalfTyID; }
142
143 /// Return true if this is 'bfloat', a 16-bit bfloat type.
144 bool isBFloatTy() const { return getTypeID() == BFloatTyID; }
145
146 /// Return true if this is 'float', a 32-bit IEEE fp type.
147 bool isFloatTy() const { return getTypeID() == FloatTyID; }
148
149 /// Return true if this is 'double', a 64-bit IEEE fp type.
150 bool isDoubleTy() const { return getTypeID() == DoubleTyID; }
151
152 /// Return true if this is x86 long double.
153 bool isX86_FP80Ty() const { return getTypeID() == X86_FP80TyID; }
154
155 /// Return true if this is 'fp128'.
156 bool isFP128Ty() const { return getTypeID() == FP128TyID; }
157
158 /// Return true if this is powerpc long double.
159 bool isPPC_FP128Ty() const { return getTypeID() == PPC_FP128TyID; }
160
161 /// Return true if this is one of the seven built-in floating-point types.
162 bool isFloatingPointTy() const {
163 return getTypeID() == HalfTyID || getTypeID() == BFloatTyID ||
[2] Assuming the condition is false
[3] Assuming the condition is false
[9] Returning zero, which participates in a condition later
164 getTypeID() == FloatTyID || getTypeID() == DoubleTyID ||
[4] Assuming the condition is false
[5] Assuming the condition is false
165 getTypeID() == X86_FP80TyID || getTypeID() == FP128TyID ||
[6] Assuming the condition is false
[7] Assuming the condition is false
166 getTypeID() == PPC_FP128TyID;
[8] Assuming the condition is false
167 }
168
169 const fltSemantics &getFltSemantics() const {
170 switch (getTypeID()) {
171 case HalfTyID: return APFloat::IEEEhalf();
172 case BFloatTyID: return APFloat::BFloat();
173 case FloatTyID: return APFloat::IEEEsingle();
174 case DoubleTyID: return APFloat::IEEEdouble();
175 case X86_FP80TyID: return APFloat::x87DoubleExtended();
176 case FP128TyID: return APFloat::IEEEquad();
177 case PPC_FP128TyID: return APFloat::PPCDoubleDouble();
178 default: llvm_unreachable("Invalid floating type");
179 }
180 }
181
182 /// Return true if this is X86 MMX.
183 bool isX86_MMXTy() const { return getTypeID() == X86_MMXTyID; }
184
185 /// Return true if this is X86 AMX.
186 bool isX86_AMXTy() const { return getTypeID() == X86_AMXTyID; }
187
188 /// Return true if this is a FP type or a vector of FP.
189 bool isFPOrFPVectorTy() const { return getScalarType()->isFloatingPointTy(); }
190
191 /// Return true if this is 'label'.
192 bool isLabelTy() const { return getTypeID() == LabelTyID; }
193
194 /// Return true if this is 'metadata'.
195 bool isMetadataTy() const { return getTypeID() == MetadataTyID; }
196
197 /// Return true if this is 'token'.
198 bool isTokenTy() const { return getTypeID() == TokenTyID; }
199
200 /// True if this is an instance of IntegerType.
201 bool isIntegerTy() const { return getTypeID() == IntegerTyID; }
[14] Assuming the condition is true
[15] Returning the value 1, which participates in a condition later
[18] Assuming the condition is true
[19] Returning the value 1, which participates in a condition later
202
203 /// Return true if this is an IntegerType of the given width.
204 bool isIntegerTy(unsigned Bitwidth) const;
205
206 /// Return true if this is an integer type or a vector of integer types.
207 bool isIntOrIntVectorTy() const { return getScalarType()->isIntegerTy(); }
208
209 /// Return true if this is an integer type or a vector of integer types of
210 /// the given width.
211 bool isIntOrIntVectorTy(unsigned BitWidth) const {
212 return getScalarType()->isIntegerTy(BitWidth);
213 }
214
215 /// Return true if this is an integer type or a pointer type.
216 bool isIntOrPtrTy() const { return isIntegerTy() || isPointerTy(); }
217
218 /// True if this is an instance of FunctionType.
219 bool isFunctionTy() const { return getTypeID() == FunctionTyID; }
220
221 /// True if this is an instance of StructType.
222 bool isStructTy() const { return getTypeID() == StructTyID; }
223
224 /// True if this is an instance of ArrayType.
225 bool isArrayTy() const { return getTypeID() == ArrayTyID; }
226
227 /// True if this is an instance of PointerType.
228 bool isPointerTy() const { return getTypeID() == PointerTyID; }
229
230 /// True if this is an instance of an opaque PointerType.
231 bool isOpaquePointerTy() const;
232
233 /// Return true if this is a pointer type or a vector of pointer types.
234 bool isPtrOrPtrVectorTy() const { return getScalarType()->isPointerTy(); }
235
236 /// True if this is an instance of VectorType.
237 inline bool isVectorTy() const {
238 return getTypeID() == ScalableVectorTyID || getTypeID() == FixedVectorTyID;
239 }
240
241 /// Return true if this type could be converted with a lossless BitCast to
242 /// type 'Ty'. For example, i8* to i32*. BitCasts are valid for types of the
243 /// same size only where no re-interpretation of the bits is done.
244 /// Determine if this type could be losslessly bitcast to Ty
245 bool canLosslesslyBitCastTo(Type *Ty) const;
246
247 /// Return true if this type is empty, that is, it has no elements or all of
248 /// its elements are empty.
249 bool isEmptyTy() const;
250
251 /// Return true if the type is "first class", meaning it is a valid type for a
252 /// Value.
253 bool isFirstClassType() const {
254 return getTypeID() != FunctionTyID && getTypeID() != VoidTyID;
255 }
256
257 /// Return true if the type is a valid type for a register in codegen. This
258 /// includes all first-class types except struct and array types.
259 bool isSingleValueType() const {
260 return isFloatingPointTy() || isX86_MMXTy() || isIntegerTy() ||
261 isPointerTy() || isVectorTy() || isX86_AMXTy();
262 }
263
264 /// Return true if the type is an aggregate type. This means it is valid as
265 /// the first operand of an insertvalue or extractvalue instruction. This
266 /// includes struct and array types, but does not include vector types.
267 bool isAggregateType() const {
268 return getTypeID() == StructTyID || getTypeID() == ArrayTyID;
269 }
270
271 /// Return true if it makes sense to take the size of this type. To get the
272 /// actual size for a particular target, it is reasonable to use the
273 /// DataLayout subsystem to do this.
274 bool isSized(SmallPtrSetImpl<Type*> *Visited = nullptr) const {
275 // If it's a primitive, it is always sized.
276 if (getTypeID() == IntegerTyID || isFloatingPointTy() ||
277 getTypeID() == PointerTyID || getTypeID() == X86_MMXTyID ||
278 getTypeID() == X86_AMXTyID)
279 return true;
280 // If it is not something that can have a size (e.g. a function or label),
281 // it doesn't have a size.
282 if (getTypeID() != StructTyID && getTypeID() != ArrayTyID && !isVectorTy())
283 return false;
284 // Otherwise we have to try harder to decide.
285 return isSizedDerivedType(Visited);
286 }
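
Example (illustrative): i32 and float are trivially sized; label and metadata are not; a struct or array is sized iff all of its members are, which is what the out-of-line isSizedDerivedType call decides.
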
287
288 /// Return the basic size of this type if it is a primitive type. These are
289 /// fixed by LLVM and are not target-dependent.
290 /// This will return zero if the type does not have a size or is not a
291 /// primitive type.
292 ///
293 /// If this is a scalable vector type, the scalable property will be set and
294 /// the runtime size will be a positive integer multiple of the base size.
295 ///
296 /// Note that this may not reflect the size of memory allocated for an
297 /// instance of the type or the number of bytes that are written when an
298 /// instance of the type is stored to memory. The DataLayout class provides
299 /// additional query functions to provide this information.
300 ///
301 TypeSize getPrimitiveSizeInBits() const LLVM_READONLY;
302
303 /// If this is a vector type, return the getPrimitiveSizeInBits value for the
304 /// element type. Otherwise return the getPrimitiveSizeInBits value for this
305 /// type.
306 unsigned getScalarSizeInBits() const LLVM_READONLY;
307
308 /// Return the width of the mantissa of this type. This is only valid on
309 /// floating-point types. If the FP type does not have a stable mantissa (e.g.
310 /// ppc long double), this method returns -1.
311 int getFPMantissaWidth() const;
312
313 /// Return whether the type is IEEE compatible, as defined by the eponymous
314 /// method in APFloat.
315 bool isIEEE() const { return APFloat::getZero(getFltSemantics()).isIEEE(); }
316
317 /// If this is a vector type, return the element type, otherwise return
318 /// 'this'.
319 inline Type *getScalarType() const {
320 if (isVectorTy())
321 return getContainedType(0);
322 return const_cast<Type *>(this);
323 }
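
Example (illustrative): for <4 x i32> this returns i32, which is why isIntOrIntVectorTy(32) above holds for both i32 and <4 x i32>; for any non-vector type it simply returns the type itself.
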
324
325 //===--------------------------------------------------------------------===//
326 // Type Iteration support.
327 //
328 using subtype_iterator = Type * const *;
329
330 subtype_iterator subtype_begin() const { return ContainedTys; }
331 subtype_iterator subtype_end() const { return &ContainedTys[NumContainedTys];}
332 ArrayRef<Type*> subtypes() const {
333 return makeArrayRef(subtype_begin(), subtype_end());
334 }
335
336 using subtype_reverse_iterator = std::reverse_iterator<subtype_iterator>;
337
338 subtype_reverse_iterator subtype_rbegin() const {
339 return subtype_reverse_iterator(subtype_end());
340 }
341 subtype_reverse_iterator subtype_rend() const {
342 return subtype_reverse_iterator(subtype_begin());
343 }
344
345 /// This method is used to implement the type iterator (defined at the end of
346 /// the file). For derived types, this returns the types 'contained' in the
347 /// derived type.
348 Type *getContainedType(unsigned i) const {
349 assert(i < NumContainedTys && "Index out of range!");
350 return ContainedTys[i];
351 }
352
353 /// Return the number of types in the derived type.
354 unsigned getNumContainedTypes() const { return NumContainedTys; }
355
356 //===--------------------------------------------------------------------===//
357 // Helper methods corresponding to subclass methods. This forces a cast to
358 // the specified subclass and calls its accessor. "getArrayNumElements" (for
359 // example) is shorthand for cast<ArrayType>(Ty)->getNumElements(). This is
360 // only intended to cover the core methods that are frequently used, helper
361 // methods should not be added here.
362
363 inline unsigned getIntegerBitWidth() const;
364
365 inline Type *getFunctionParamType(unsigned i) const;
366 inline unsigned getFunctionNumParams() const;
367 inline bool isFunctionVarArg() const;
368
369 inline StringRef getStructName() const;
370 inline unsigned getStructNumElements() const;
371 inline Type *getStructElementType(unsigned N) const;
372
373 inline uint64_t getArrayNumElements() const;
374
375 Type *getArrayElementType() const {
376 assert(getTypeID() == ArrayTyID);
377 return ContainedTys[0];
378 }
379
380 Type *getPointerElementType() const {
381 assert(getTypeID() == PointerTyID);
382 return ContainedTys[0];
383 }
384
385 /// Given vector type, change the element type,
386 /// whilst keeping the old number of elements.
387 /// For non-vectors simply returns \p EltTy.
388 inline Type *getWithNewType(Type *EltTy) const;
389
390 /// Given an integer or vector type, change the lane bitwidth to NewBitwidth,
391 /// whilst keeping the old number of lanes.
392 inline Type *getWithNewBitWidth(unsigned NewBitWidth) const;
393
394 /// Given scalar/vector integer type, returns a type with elements twice as
395 /// wide as in the original type. For vectors, preserves element count.
396 inline Type *getExtendedType() const;
397
398 /// Get the address space of this pointer or pointer vector type.
399 inline unsigned getPointerAddressSpace() const;
400
401 //===--------------------------------------------------------------------===//
402 // Static members exported by the Type class itself. Useful for getting
403 // instances of Type.
404 //
405
406 /// Return a type based on an identifier.
407 static Type *getPrimitiveType(LLVMContext &C, TypeID IDNumber);
408
409 //===--------------------------------------------------------------------===//
410 // These are the builtin types that are always available.
411 //
412 static Type *getVoidTy(LLVMContext &C);
413 static Type *getLabelTy(LLVMContext &C);
414 static Type *getHalfTy(LLVMContext &C);
415 static Type *getBFloatTy(LLVMContext &C);
416 static Type *getFloatTy(LLVMContext &C);
417 static Type *getDoubleTy(LLVMContext &C);
418 static Type *getMetadataTy(LLVMContext &C);
419 static Type *getX86_FP80Ty(LLVMContext &C);
420 static Type *getFP128Ty(LLVMContext &C);
421 static Type *getPPC_FP128Ty(LLVMContext &C);
422 static Type *getX86_MMXTy(LLVMContext &C);
423 static Type *getX86_AMXTy(LLVMContext &C);
424 static Type *getTokenTy(LLVMContext &C);
425 static IntegerType *getIntNTy(LLVMContext &C, unsigned N);
426 static IntegerType *getInt1Ty(LLVMContext &C);
427 static IntegerType *getInt8Ty(LLVMContext &C);
428 static IntegerType *getInt16Ty(LLVMContext &C);
429 static IntegerType *getInt32Ty(LLVMContext &C);
430 static IntegerType *getInt64Ty(LLVMContext &C);
431 static IntegerType *getInt128Ty(LLVMContext &C);
432 template <typename ScalarTy> static Type *getScalarTy(LLVMContext &C) {
433 int noOfBits = sizeof(ScalarTy) * CHAR_BIT;
434 if (std::is_integral<ScalarTy>::value) {
435 return (Type*) Type::getIntNTy(C, noOfBits);
436 } else if (std::is_floating_point<ScalarTy>::value) {
437 switch (noOfBits) {
438 case 32:
439 return Type::getFloatTy(C);
440 case 64:
441 return Type::getDoubleTy(C);
442 }
443 }
444 llvm_unreachable("Unsupported type in Type::getScalarTy");
445 }
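
Usage sketch (illustrative): Type::getScalarTy<int32_t>(C) yields i32 and Type::getScalarTy<double>(C) yields double; a type such as long double (neither 32 nor 64 floating-point bits on most hosts) falls through to the unreachable.
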
446 static Type *getFloatingPointTy(LLVMContext &C, const fltSemantics &S) {
447 Type *Ty;
448 if (&S == &APFloat::IEEEhalf())
449 Ty = Type::getHalfTy(C);
450 else if (&S == &APFloat::BFloat())
451 Ty = Type::getBFloatTy(C);
452 else if (&S == &APFloat::IEEEsingle())
453 Ty = Type::getFloatTy(C);
454 else if (&S == &APFloat::IEEEdouble())
455 Ty = Type::getDoubleTy(C);
456 else if (&S == &APFloat::x87DoubleExtended())
457 Ty = Type::getX86_FP80Ty(C);
458 else if (&S == &APFloat::IEEEquad())
459 Ty = Type::getFP128Ty(C);
460 else {
461 assert(&S == &APFloat::PPCDoubleDouble() && "Unknown FP format");
462 Ty = Type::getPPC_FP128Ty(C);
463 }
464 return Ty;
465 }
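
Example (illustrative): getFloatingPointTy(C, APFloat::IEEEsingle()) returns the float type. Comparing &S by address is sound because each fltSemantics object is a unique singleton handed out by APFloat.
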
466
467 //===--------------------------------------------------------------------===//
468 // Convenience methods for getting pointer types with one of the above builtin
469 // types as pointee.
470 //
471 static PointerType *getHalfPtrTy(LLVMContext &C, unsigned AS = 0);
472 static PointerType *getBFloatPtrTy(LLVMContext &C, unsigned AS = 0);
473 static PointerType *getFloatPtrTy(LLVMContext &C, unsigned AS = 0);
474 static PointerType *getDoublePtrTy(LLVMContext &C, unsigned AS = 0);
475 static PointerType *getX86_FP80PtrTy(LLVMContext &C, unsigned AS = 0);
476 static PointerType *getFP128PtrTy(LLVMContext &C, unsigned AS = 0);
477 static PointerType *getPPC_FP128PtrTy(LLVMContext &C, unsigned AS = 0);
478 static PointerType *getX86_MMXPtrTy(LLVMContext &C, unsigned AS = 0);
479 static PointerType *getX86_AMXPtrTy(LLVMContext &C, unsigned AS = 0);
480 static PointerType *getIntNPtrTy(LLVMContext &C, unsigned N, unsigned AS = 0);
481 static PointerType *getInt1PtrTy(LLVMContext &C, unsigned AS = 0);
482 static PointerType *getInt8PtrTy(LLVMContext &C, unsigned AS = 0);
483 static PointerType *getInt16PtrTy(LLVMContext &C, unsigned AS = 0);
484 static PointerType *getInt32PtrTy(LLVMContext &C, unsigned AS = 0);
485 static PointerType *getInt64PtrTy(LLVMContext &C, unsigned AS = 0);
486
487 /// Return a pointer to the current type. This is equivalent to
488 /// PointerType::get(Foo, AddrSpace).
489 /// TODO: Remove this after opaque pointer transition is complete.
490 PointerType *getPointerTo(unsigned AddrSpace = 0) const;
491
492private:
493 /// Derived types like structures and arrays are sized iff all of the members
494 /// of the type are sized as well. Since asking for their size is relatively
495 /// uncommon, move this operation out-of-line.
496 bool isSizedDerivedType(SmallPtrSetImpl<Type*> *Visited = nullptr) const;
497};
498
499// Printing of types.
500inline raw_ostream &operator<<(raw_ostream &OS, const Type &T) {
501 T.print(OS);
502 return OS;
503}
504
505// allow isa<PointerType>(x) to work without DerivedTypes.h included.
506template <> struct isa_impl<PointerType, Type> {
507 static inline bool doit(const Type &Ty) {
508 return Ty.getTypeID() == Type::PointerTyID;
509 }
510};
511
512// Create wrappers for C Binding types (see CBindingWrapping.h).
513 DEFINE_ISA_CONVERSION_FUNCTIONS(Type, LLVMTypeRef)
514
515/* Specialized opaque type conversions.
516 */
517inline Type **unwrap(LLVMTypeRef* Tys) {
518 return reinterpret_cast<Type**>(Tys);
519}
520
521inline LLVMTypeRef *wrap(Type **Tys) {
522 return reinterpret_cast<LLVMTypeRef*>(const_cast<Type**>(Tys));
523}
524
525} // end namespace llvm
526
527#endif // LLVM_IR_TYPE_H