Bug Summary

File: build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Analysis/ConstantFolding.cpp
Warning: line 707, column 39
Called C++ object pointer is null
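
The flow behind this warning is easiest to see in isolation. The sketch below is a minimal, self-contained C++ reduction written for this report; the type and function names are illustrative stand-ins, not code from ConstantFolding.cpp. It mirrors the annotated path: with a zero offset, the helper returns its Base argument unchanged, so a null 'C' survives the null-check on 'AtOffset' and is then dereferenced (the report's step 9, at line 707).

#include <cstdio>

struct Ty { int Size; };
struct Constant {
  Ty *T;
  Ty *getType() { return T; } // a member call on a null 'this' is the crash
};

// Stand-in for getConstantAtOffset: a zero offset returns Base unchanged,
// so a null Base flows straight back to the caller (steps 4-5).
Constant *getAtOffset(Constant *Base, unsigned Offset) {
  return Offset == 0 ? Base : nullptr;
}

int fold(Constant *C, unsigned Offset) {
  if (Constant *At = getAtOffset(C, Offset)) // steps 7-8: 'At' assumed null
    return At->getType()->Size;
  return C->getType()->Size; // step 9: null 'C' dereferenced here
}

int main() {
  return fold(nullptr, 0); // crashes, reproducing the diagnosed path
}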

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ConstantFolding.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Analysis -I /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Analysis -I include -I /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-09-04-125545-48738-1 -x c++ /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Analysis/ConstantFolding.cpp
1//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines routines for folding instructions into constants.
10//
11// Also, to supplement the basic IR ConstantExpr simplifications,
12// this file defines some additional folding routines that can make use of
13// DataLayout information. These functions cannot go in IR due to library
14// dependency issues.
15//
16//===----------------------------------------------------------------------===//
17
18#include "llvm/Analysis/ConstantFolding.h"
19#include "llvm/ADT/APFloat.h"
20#include "llvm/ADT/APInt.h"
21#include "llvm/ADT/APSInt.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/DenseMap.h"
24#include "llvm/ADT/STLExtras.h"
25#include "llvm/ADT/SmallVector.h"
26#include "llvm/ADT/StringRef.h"
27#include "llvm/Analysis/TargetFolder.h"
28#include "llvm/Analysis/TargetLibraryInfo.h"
29#include "llvm/Analysis/ValueTracking.h"
30#include "llvm/Analysis/VectorUtils.h"
31#include "llvm/Config/config.h"
32#include "llvm/IR/Constant.h"
33#include "llvm/IR/ConstantFold.h"
34#include "llvm/IR/Constants.h"
35#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/DerivedTypes.h"
37#include "llvm/IR/Function.h"
38#include "llvm/IR/GlobalValue.h"
39#include "llvm/IR/GlobalVariable.h"
40#include "llvm/IR/InstrTypes.h"
41#include "llvm/IR/Instruction.h"
42#include "llvm/IR/Instructions.h"
43#include "llvm/IR/IntrinsicInst.h"
44#include "llvm/IR/Intrinsics.h"
45#include "llvm/IR/IntrinsicsAArch64.h"
46#include "llvm/IR/IntrinsicsAMDGPU.h"
47#include "llvm/IR/IntrinsicsARM.h"
48#include "llvm/IR/IntrinsicsWebAssembly.h"
49#include "llvm/IR/IntrinsicsX86.h"
50#include "llvm/IR/Operator.h"
51#include "llvm/IR/Type.h"
52#include "llvm/IR/Value.h"
53#include "llvm/Support/Casting.h"
54#include "llvm/Support/ErrorHandling.h"
55#include "llvm/Support/KnownBits.h"
56#include "llvm/Support/MathExtras.h"
57#include <cassert>
58#include <cerrno>
59#include <cfenv>
60#include <cmath>
61#include <cstdint>
62
63using namespace llvm;
64
65namespace {
66
67//===----------------------------------------------------------------------===//
68// Constant Folding internal helper functions
69//===----------------------------------------------------------------------===//
70
71static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
72 Constant *C, Type *SrcEltTy,
73 unsigned NumSrcElts,
74 const DataLayout &DL) {
75 // Now that we know that the input value is a vector of integers, just shift
76 // and insert them into our result.
77 unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
78 for (unsigned i = 0; i != NumSrcElts; ++i) {
79 Constant *Element;
80 if (DL.isLittleEndian())
81 Element = C->getAggregateElement(NumSrcElts - i - 1);
82 else
83 Element = C->getAggregateElement(i);
84
85 if (Element && isa<UndefValue>(Element)) {
86 Result <<= BitShift;
87 continue;
88 }
89
90 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
91 if (!ElementCI)
92 return ConstantExpr::getBitCast(C, DestTy);
93
94 Result <<= BitShift;
95 Result |= ElementCI->getValue().zext(Result.getBitWidth());
96 }
97
98 return nullptr;
99}
100
101/// Constant fold bitcast, symbolically evaluating it with DataLayout.
102/// This always returns a non-null constant, but it may be a
103/// ConstantExpr if unfoldable.
104Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
105 assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
106 "Invalid constantexpr bitcast!");
107
108 // Catch the obvious splat cases.
109 if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy))
110 return Res;
111
112 if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
113 // Handle a vector->scalar integer/fp cast.
114 if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
115 unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
116 Type *SrcEltTy = VTy->getElementType();
117
118 // If the vector is a vector of floating point, convert it to a vector of
119 // integers to simplify things.
120 if (SrcEltTy->isFloatingPointTy()) {
121 unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
122 auto *SrcIVTy = FixedVectorType::get(
123 IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
124 // Ask IR to do the conversion now that #elts line up.
125 C = ConstantExpr::getBitCast(C, SrcIVTy);
126 }
127
128 APInt Result(DL.getTypeSizeInBits(DestTy), 0);
129 if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
130 SrcEltTy, NumSrcElts, DL))
131 return CE;
132
133 if (isa<IntegerType>(DestTy))
134 return ConstantInt::get(DestTy, Result);
135
136 APFloat FP(DestTy->getFltSemantics(), Result);
137 return ConstantFP::get(DestTy->getContext(), FP);
138 }
139 }
140
141 // The code below only handles casts to vectors currently.
142 auto *DestVTy = dyn_cast<VectorType>(DestTy);
143 if (!DestVTy)
144 return ConstantExpr::getBitCast(C, DestTy);
145
146 // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
147 // vector so the code below can handle it uniformly.
148 if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
149 Constant *Ops = C; // don't take the address of C!
150 return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
151 }
152
153 // If this is a bitcast from constant vector -> vector, fold it.
154 if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
155 return ConstantExpr::getBitCast(C, DestTy);
156
157 // If the element types match, IR can fold it.
158 unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
159 unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
160 if (NumDstElt == NumSrcElt)
161 return ConstantExpr::getBitCast(C, DestTy);
162
163 Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
164 Type *DstEltTy = DestVTy->getElementType();
165
166 // Otherwise, we're changing the number of elements in a vector, which
167 // requires endianness information to do the right thing. For example,
168 // bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
169 // folds to (little endian):
170 // <4 x i32> <i32 0, i32 0, i32 1, i32 0>
171 // and to (big endian):
172 // <4 x i32> <i32 0, i32 0, i32 0, i32 1>
173
174 // First things first. We only want to think about integers here, so if
175 // we have something in FP form, recast it as an integer.
176 if (DstEltTy->isFloatingPointTy()) {
177 // Fold to a vector of integers with the same size as our FP type.
178 unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
179 auto *DestIVTy = FixedVectorType::get(
180 IntegerType::get(C->getContext(), FPWidth), NumDstElt);
181 // Recursively handle this integer conversion, if possible.
182 C = FoldBitCast(C, DestIVTy, DL);
183
184 // Finally, IR can handle this now that #elts line up.
185 return ConstantExpr::getBitCast(C, DestTy);
186 }
187
188 // Okay, we know the destination is integer, if the input is FP, convert
189 // it to integer first.
190 if (SrcEltTy->isFloatingPointTy()) {
191 unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
192 auto *SrcIVTy = FixedVectorType::get(
193 IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
194 // Ask IR to do the conversion now that #elts line up.
195 C = ConstantExpr::getBitCast(C, SrcIVTy);
196 // If IR wasn't able to fold it, bail out.
197 if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
198 !isa<ConstantDataVector>(C))
199 return C;
200 }
201
202 // Now we know that the input and output vectors are both integer vectors
203 // of the same size, and that their #elements is not the same. Do the
204 // conversion here, which depends on whether the input or output has
205 // more elements.
206 bool isLittleEndian = DL.isLittleEndian();
207
208 SmallVector<Constant*, 32> Result;
209 if (NumDstElt < NumSrcElt) {
210 // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
211 Constant *Zero = Constant::getNullValue(DstEltTy);
212 unsigned Ratio = NumSrcElt/NumDstElt;
213 unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
214 unsigned SrcElt = 0;
215 for (unsigned i = 0; i != NumDstElt; ++i) {
216 // Build each element of the result.
217 Constant *Elt = Zero;
218 unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
219 for (unsigned j = 0; j != Ratio; ++j) {
220 Constant *Src = C->getAggregateElement(SrcElt++);
221 if (Src && isa<UndefValue>(Src))
222 Src = Constant::getNullValue(
223 cast<VectorType>(C->getType())->getElementType());
224 else
225 Src = dyn_cast_or_null<ConstantInt>(Src);
226 if (!Src) // Reject constantexpr elements.
227 return ConstantExpr::getBitCast(C, DestTy);
228
229 // Zero extend the element to the right size.
230 Src = ConstantExpr::getZExt(Src, Elt->getType());
231
232 // Shift it to the right place, depending on endianness.
233 Src = ConstantExpr::getShl(Src,
234 ConstantInt::get(Src->getType(), ShiftAmt));
235 ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
236
237 // Mix it in.
238 Elt = ConstantExpr::getOr(Elt, Src);
239 }
240 Result.push_back(Elt);
241 }
242 return ConstantVector::get(Result);
243 }
244
245 // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
246 unsigned Ratio = NumDstElt/NumSrcElt;
247 unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);
248
249 // Loop over each source value, expanding into multiple results.
250 for (unsigned i = 0; i != NumSrcElt; ++i) {
251 auto *Element = C->getAggregateElement(i);
252
253 if (!Element) // Reject constantexpr elements.
254 return ConstantExpr::getBitCast(C, DestTy);
255
256 if (isa<UndefValue>(Element)) {
257 // Correctly propagate undef values.
258 Result.append(Ratio, UndefValue::get(DstEltTy));
259 continue;
260 }
261
262 auto *Src = dyn_cast<ConstantInt>(Element);
263 if (!Src)
264 return ConstantExpr::getBitCast(C, DestTy);
265
266 unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
267 for (unsigned j = 0; j != Ratio; ++j) {
268 // Shift the piece of the value into the right place, depending on
269 // endianness.
270 Constant *Elt = ConstantExpr::getLShr(Src,
271 ConstantInt::get(Src->getType(), ShiftAmt));
272 ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
273
274 // Truncate the element to an integer with the same pointer size and
275 // convert the element back to a pointer using an inttoptr.
276 if (DstEltTy->isPointerTy()) {
277 IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
278 Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
279 Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
280 continue;
281 }
282
283 // Truncate and remember this piece.
284 Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
285 }
286 }
287
288 return ConstantVector::get(Result);
289}
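
The endianness example in the comment above (source lines 166-172) can be verified with a small self-contained program. This sketch is ours and only replays the documented packing with host integers; it does not call into LLVM.

#include <cstdint>
#include <cstdio>

int main() {
  // bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  uint64_t Src[2] = {0, 1};
  uint32_t LE[4], BE[4];
  for (int i = 0; i != 2; ++i) {
    LE[2 * i] = uint32_t(Src[i]);           // little endian: low half first
    LE[2 * i + 1] = uint32_t(Src[i] >> 32);
    BE[2 * i] = uint32_t(Src[i] >> 32);     // big endian: high half first
    BE[2 * i + 1] = uint32_t(Src[i]);
  }
  std::printf("LE: <%u, %u, %u, %u>\n", LE[0], LE[1], LE[2], LE[3]); // 0 0 1 0
  std::printf("BE: <%u, %u, %u, %u>\n", BE[0], BE[1], BE[2], BE[3]); // 0 0 0 1
}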
290
291} // end anonymous namespace
292
293/// If this constant is a constant offset from a global, return the global and
294/// the constant. Because of constantexprs, this function is recursive.
295bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
296 APInt &Offset, const DataLayout &DL,
297 DSOLocalEquivalent **DSOEquiv) {
298 if (DSOEquiv)
299 *DSOEquiv = nullptr;
300
301 // Trivial case, constant is the global.
302 if ((GV = dyn_cast<GlobalValue>(C))) {
303 unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
304 Offset = APInt(BitWidth, 0);
305 return true;
306 }
307
308 if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
309 if (DSOEquiv)
310 *DSOEquiv = FoundDSOEquiv;
311 GV = FoundDSOEquiv->getGlobalValue();
312 unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
313 Offset = APInt(BitWidth, 0);
314 return true;
315 }
316
317 // Otherwise, if this isn't a constant expr, bail out.
318 auto *CE = dyn_cast<ConstantExpr>(C);
319 if (!CE) return false;
320
321 // Look through ptr->int and ptr->ptr casts.
322 if (CE->getOpcode() == Instruction::PtrToInt ||
323 CE->getOpcode() == Instruction::BitCast)
324 return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL,
325 DSOEquiv);
326
327 // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
328 auto *GEP = dyn_cast<GEPOperator>(CE);
329 if (!GEP)
330 return false;
331
332 unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
333 APInt TmpOffset(BitWidth, 0);
334
335 // If the base isn't a global+constant, we aren't either.
336 if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL,
337 DSOEquiv))
338 return false;
339
340 // Otherwise, add any offset that our operands provide.
341 if (!GEP->accumulateConstantOffset(DL, TmpOffset))
342 return false;
343
344 Offset = TmpOffset;
345 return true;
346}
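
For readers using this API, a hypothetical call site for IsConstantOffsetFromGlobal looks like the following. This assumes an LLVM 16 build environment; the wrapper function and its name are ours.

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/GlobalValue.h"

using namespace llvm;

// Returns true iff C is equivalent to &GV plus a constant byte Offset.
static bool decomposeGlobalOffset(Constant *C, const DataLayout &DL,
                                  GlobalValue *&GV, APInt &Offset) {
  // On success, Offset has the bit width of the index type for GV's
  // address space, as established by the function above.
  return IsConstantOffsetFromGlobal(C, GV, Offset, DL);
}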
347
348Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
349 const DataLayout &DL) {
350 do {
351 Type *SrcTy = C->getType();
352 if (SrcTy == DestTy)
353 return C;
354
355 TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
356 TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
357 if (!TypeSize::isKnownGE(SrcSize, DestSize))
358 return nullptr;
359
360 // Catch the obvious splat cases (since all-zeros can coerce non-integral
361 // pointers legally).
362 if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy))
363 return Res;
364
365 // If the type sizes are the same and a cast is legal, just directly
366 // cast the constant.
367 // But be careful not to coerce non-integral pointers illegally.
368 if (SrcSize == DestSize &&
369 DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==
370 DL.isNonIntegralPointerType(DestTy->getScalarType())) {
371 Instruction::CastOps Cast = Instruction::BitCast;
372 // If we are going from a pointer to int or vice versa, we spell the cast
373 // differently.
374 if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
375 Cast = Instruction::IntToPtr;
376 else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
377 Cast = Instruction::PtrToInt;
378
379 if (CastInst::castIsValid(Cast, C, DestTy))
380 return ConstantExpr::getCast(Cast, C, DestTy);
381 }
382
383 // If this isn't an aggregate type, there is nothing we can do to drill down
384 // and find a bitcastable constant.
385 if (!SrcTy->isAggregateType() && !SrcTy->isVectorTy())
386 return nullptr;
387
388 // We're simulating a load through a pointer that was bitcast to point to
389 // a different type, so we can try to walk down through the initial
390 // elements of an aggregate to see if some part of the aggregate is
391 // castable to implement the "load" semantic model.
392 if (SrcTy->isStructTy()) {
393 // Struct types might have leading zero-length elements like [0 x i32],
394 // which are certainly not what we are looking for, so skip them.
395 unsigned Elem = 0;
396 Constant *ElemC;
397 do {
398 ElemC = C->getAggregateElement(Elem++);
399 } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
400 C = ElemC;
401 } else {
402 // For non-byte-sized vector elements, the first element is not
403 // necessarily located at the vector base address.
404 if (auto *VT = dyn_cast<VectorType>(SrcTy))
405 if (!DL.typeSizeEqualsStoreSize(VT->getElementType()))
406 return nullptr;
407
408 C = C->getAggregateElement(0u);
409 }
410 } while (C);
411
412 return nullptr;
413}
414
415namespace {
416
417/// Recursive helper to read bits out of global. C is the constant being copied
418/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
419/// results into and BytesLeft is the number of bytes left in
420/// the CurPtr buffer. DL is the DataLayout.
421bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
422 unsigned BytesLeft, const DataLayout &DL) {
423 assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
424 "Out of range access");
425
426 // If this element is zero or undefined, we can just return since *CurPtr is
427 // zero initialized.
428 if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
429 return true;
430
431 if (auto *CI = dyn_cast<ConstantInt>(C)) {
432 if (CI->getBitWidth() > 64 ||
433 (CI->getBitWidth() & 7) != 0)
434 return false;
435
436 uint64_t Val = CI->getZExtValue();
437 unsigned IntBytes = unsigned(CI->getBitWidth()/8);
438
439 for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
440 int n = ByteOffset;
441 if (!DL.isLittleEndian())
442 n = IntBytes - n - 1;
443 CurPtr[i] = (unsigned char)(Val >> (n * 8));
444 ++ByteOffset;
445 }
446 return true;
447 }
448
449 if (auto *CFP = dyn_cast<ConstantFP>(C)) {
450 if (CFP->getType()->isDoubleTy()) {
451 C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
452 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
453 }
454 if (CFP->getType()->isFloatTy()){
455 C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
456 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
457 }
458 if (CFP->getType()->isHalfTy()){
459 C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
460 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
461 }
462 return false;
463 }
464
465 if (auto *CS = dyn_cast<ConstantStruct>(C)) {
466 const StructLayout *SL = DL.getStructLayout(CS->getType());
467 unsigned Index = SL->getElementContainingOffset(ByteOffset);
468 uint64_t CurEltOffset = SL->getElementOffset(Index);
469 ByteOffset -= CurEltOffset;
470
471 while (true) {
472 // If the element access is to the element itself and not to tail padding,
473 // read the bytes from the element.
474 uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());
475
476 if (ByteOffset < EltSize &&
477 !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
478 BytesLeft, DL))
479 return false;
480
481 ++Index;
482
483 // Check to see if we read from the last struct element, if so we're done.
484 if (Index == CS->getType()->getNumElements())
485 return true;
486
487 // If we read all of the bytes we needed from this element we're done.
488 uint64_t NextEltOffset = SL->getElementOffset(Index);
489
490 if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
491 return true;
492
493 // Move to the next element of the struct.
494 CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
495 BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
496 ByteOffset = 0;
497 CurEltOffset = NextEltOffset;
498 }
499 // not reached.
500 }
501
502 if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
503 isa<ConstantDataSequential>(C)) {
504 uint64_t NumElts;
505 Type *EltTy;
506 if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
507 NumElts = AT->getNumElements();
508 EltTy = AT->getElementType();
509 } else {
510 NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
511 EltTy = cast<FixedVectorType>(C->getType())->getElementType();
512 }
513 uint64_t EltSize = DL.getTypeAllocSize(EltTy);
514 uint64_t Index = ByteOffset / EltSize;
515 uint64_t Offset = ByteOffset - Index * EltSize;
516
517 for (; Index != NumElts; ++Index) {
518 if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
519 BytesLeft, DL))
520 return false;
521
522 uint64_t BytesWritten = EltSize - Offset;
523 assert(BytesWritten <= EltSize && "Not indexing into this element?");
524 if (BytesWritten >= BytesLeft)
525 return true;
526
527 Offset = 0;
528 BytesLeft -= BytesWritten;
529 CurPtr += BytesWritten;
530 }
531 return true;
532 }
533
534 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
535 if (CE->getOpcode() == Instruction::IntToPtr &&
536 CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
537 return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
538 BytesLeft, DL);
539 }
540 }
541
542 // Otherwise, unknown initializer type.
543 return false;
544}
545
546Constant *FoldReinterpretLoadFromConst(Constant *C, Type *LoadTy,
547 int64_t Offset, const DataLayout &DL) {
548 // Bail out early. We do not expect to load from a scalable global variable.
549 if (isa<ScalableVectorType>(LoadTy))
550 return nullptr;
551
552 auto *IntType = dyn_cast<IntegerType>(LoadTy);
553
554 // If this isn't an integer load we can't fold it directly.
555 if (!IntType) {
556 // If this is a non-integer load, we can try folding it as an int load and
557 // then bitcast the result. This can be useful for union cases. Note
558 // that address spaces don't matter here since this is not going to
559 // result in an actual new load.
560 if (!LoadTy->isFloatingPointTy() && !LoadTy->isPointerTy() &&
561 !LoadTy->isVectorTy())
562 return nullptr;
563
564 Type *MapTy = Type::getIntNTy(
565 C->getContext(), DL.getTypeSizeInBits(LoadTy).getFixedSize());
566 if (Constant *Res = FoldReinterpretLoadFromConst(C, MapTy, Offset, DL)) {
567 if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
568 !LoadTy->isX86_AMXTy())
569 // Materializing a zero can be done trivially without a bitcast
570 return Constant::getNullValue(LoadTy);
571 Type *CastTy = LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;
572 Res = FoldBitCast(Res, CastTy, DL);
573 if (LoadTy->isPtrOrPtrVectorTy()) {
574 // For a vector of pointers, we need to first convert to a vector of integers, then do a vector inttoptr.
575 if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
576 !LoadTy->isX86_AMXTy())
577 return Constant::getNullValue(LoadTy);
578 if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
579 // Be careful not to replace a load of an addrspace value with an inttoptr here
580 return nullptr;
581 Res = ConstantExpr::getCast(Instruction::IntToPtr, Res, LoadTy);
582 }
583 return Res;
584 }
585 return nullptr;
586 }
587
588 unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
589 if (BytesLoaded > 32 || BytesLoaded == 0)
590 return nullptr;
591
592 // If we're not accessing anything in this constant, the result is undefined.
593 if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
594 return PoisonValue::get(IntType);
595
596 // TODO: We should be able to support scalable types.
597 TypeSize InitializerSize = DL.getTypeAllocSize(C->getType());
598 if (InitializerSize.isScalable())
599 return nullptr;
600
601 // If we're not accessing anything in this constant, the result is undefined.
602 if (Offset >= (int64_t)InitializerSize.getFixedValue())
603 return PoisonValue::get(IntType);
604
605 unsigned char RawBytes[32] = {0};
606 unsigned char *CurPtr = RawBytes;
607 unsigned BytesLeft = BytesLoaded;
608
609 // If we're loading off the beginning of the global, some bytes may be valid.
610 if (Offset < 0) {
611 CurPtr += -Offset;
612 BytesLeft += Offset;
613 Offset = 0;
614 }
615
616 if (!ReadDataFromGlobal(C, Offset, CurPtr, BytesLeft, DL))
617 return nullptr;
618
619 APInt ResultVal = APInt(IntType->getBitWidth(), 0);
620 if (DL.isLittleEndian()) {
621 ResultVal = RawBytes[BytesLoaded - 1];
622 for (unsigned i = 1; i != BytesLoaded; ++i) {
623 ResultVal <<= 8;
624 ResultVal |= RawBytes[BytesLoaded - 1 - i];
625 }
626 } else {
627 ResultVal = RawBytes[0];
628 for (unsigned i = 1; i != BytesLoaded; ++i) {
629 ResultVal <<= 8;
630 ResultVal |= RawBytes[i];
631 }
632 }
633
634 return ConstantInt::get(IntType->getContext(), ResultVal);
635}
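
The ResultVal loop above (source lines 619-632) reassembles an integer from raw bytes in either byte order. A self-contained sketch of the same arithmetic, ours, using a fixed 4-byte example:

#include <cstdint>
#include <cstdio>

int main() {
  unsigned char RawBytes[4] = {0x01, 0x02, 0x03, 0x04};
  uint32_t LE = 0, BE = 0;
  for (int i = 3; i >= 0; --i) // little endian: last byte is most significant
    LE = (LE << 8) | RawBytes[i];
  for (int i = 0; i <= 3; ++i) // big endian: first byte is most significant
    BE = (BE << 8) | RawBytes[i];
  std::printf("LE=0x%08x BE=0x%08x\n", LE, BE); // LE=0x04030201 BE=0x01020304
}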
636
637} // anonymous namespace
638
639// If GV is a constant with an initializer read its representation starting
640// at Offset and return it as a constant array of unsigned char. Otherwise
641// return null.
642Constant *llvm::ReadByteArrayFromGlobal(const GlobalVariable *GV,
643 uint64_t Offset) {
644 if (!GV->isConstant() || !GV->hasDefinitiveInitializer())
645 return nullptr;
646
647 const DataLayout &DL = GV->getParent()->getDataLayout();
648 Constant *Init = const_cast<Constant *>(GV->getInitializer());
649 TypeSize InitSize = DL.getTypeAllocSize(Init->getType());
650 if (InitSize < Offset)
651 return nullptr;
652
653 uint64_t NBytes = InitSize - Offset;
654 if (NBytes > UINT16_MAX)
655 // Bail for large initializers in excess of 64K to avoid allocating
656 // too much memory.
657 // Offset is assumed to be less than or equal to InitSize (this
658 // is enforced in ReadDataFromGlobal).
659 return nullptr;
660
661 SmallVector<unsigned char, 256> RawBytes(static_cast<size_t>(NBytes));
662 unsigned char *CurPtr = RawBytes.data();
663
664 if (!ReadDataFromGlobal(Init, Offset, CurPtr, NBytes, DL))
665 return nullptr;
666
667 return ConstantDataArray::get(GV->getContext(), RawBytes);
668}
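
A hypothetical use of ReadByteArrayFromGlobal, again assuming an LLVM 16 build environment; the helper and its name are ours.

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GlobalVariable.h"

using namespace llvm;

// Reads the first initializer byte of GV, when the function above can
// materialize the representation (constant, definitive initializer, <= 64K).
static bool firstInitializerByte(const GlobalVariable *GV, unsigned char &Out) {
  Constant *Bytes = ReadByteArrayFromGlobal(GV, /*Offset=*/0);
  if (!Bytes)
    return false;
  auto *CDA = cast<ConstantDataArray>(Bytes);
  if (CDA->getNumElements() == 0)
    return false;
  Out = (unsigned char)CDA->getElementAsInteger(0);
  return true;
}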
669
670/// If this Offset points exactly to the start of an aggregate element, return
671/// that element, otherwise return nullptr.
672Constant *getConstantAtOffset(Constant *Base, APInt Offset,
673 const DataLayout &DL) {
674 if (Offset.isZero())
4. Taking true branch
675 return Base;
5. Returning pointer (loaded from 'Base'), which participates in a condition later
676
677 if (!isa<ConstantAggregate>(Base) && !isa<ConstantDataSequential>(Base))
678 return nullptr;
679
680 Type *ElemTy = Base->getType();
681 SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
682 if (!Offset.isZero() || !Indices[0].isZero())
683 return nullptr;
684
685 Constant *C = Base;
686 for (const APInt &Index : drop_begin(Indices)) {
687 if (Index.isNegative() || Index.getActiveBits() >= 32)
688 return nullptr;
689
690 C = C->getAggregateElement(Index.getZExtValue());
691 if (!C)
692 return nullptr;
693 }
694
695 return C;
696}
697
698Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
699 const APInt &Offset,
700 const DataLayout &DL) {
701 if (Constant *AtOffset = getConstantAtOffset(C, Offset, DL))
3. Calling 'getConstantAtOffset'
6. Returning from 'getConstantAtOffset'
7. Assuming 'AtOffset' is null
8. Taking false branch
702 if (Constant *Result = ConstantFoldLoadThroughBitcast(AtOffset, Ty, DL))
703 return Result;
704
705 // Explicitly check for out-of-bounds access, so we return poison even if the
706 // constant is a uniform value.
707 TypeSize Size = DL.getTypeAllocSize(C->getType());
9. Called C++ object pointer is null
708 if (!Size.isScalable() && Offset.sge(Size.getFixedSize()))
709 return PoisonValue::get(Ty);
710
711 // Try an offset-independent fold of a uniform value.
712 if (Constant *Result = ConstantFoldLoadFromUniformValue(C, Ty))
713 return Result;
714
715 // Try hard to fold loads from bitcasted strange and non-type-safe things.
716 if (Offset.getMinSignedBits() <= 64)
717 if (Constant *Result =
718 FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL))
719 return Result;
720
721 return nullptr;
722}
723
724Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
725 const DataLayout &DL) {
726 return ConstantFoldLoadFromConst(C, Ty, APInt(64, 0), DL);
1. Passing value via 1st parameter 'C'
2. Calling 'ConstantFoldLoadFromConst'
727}
728
729Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
730 APInt Offset,
731 const DataLayout &DL) {
732 C = cast<Constant>(C->stripAndAccumulateConstantOffsets(
733 DL, Offset, /* AllowNonInbounds */ true));
734
735 if (auto *GV = dyn_cast<GlobalVariable>(C))
736 if (GV->isConstant() && GV->hasDefinitiveInitializer())
737 if (Constant *Result = ConstantFoldLoadFromConst(GV->getInitializer(), Ty,
738 Offset, DL))
739 return Result;
740
741 // If this load comes from anywhere in a uniform constant global, the value
742 // is always the same, regardless of the loaded offset.
743 if (auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(C))) {
744 if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
745 if (Constant *Res =
746 ConstantFoldLoadFromUniformValue(GV->getInitializer(), Ty))
747 return Res;
748 }
749 }
750
751 return nullptr;
752}
753
754Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
755 const DataLayout &DL) {
756 APInt Offset(DL.getIndexTypeSizeInBits(C->getType()), 0);
757 return ConstantFoldLoadFromConstPtr(C, Ty, Offset, DL);
758}
759
760Constant *llvm::ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty) {
761 if (isa<PoisonValue>(C))
762 return PoisonValue::get(Ty);
763 if (isa<UndefValue>(C))
764 return UndefValue::get(Ty);
765 if (C->isNullValue() && !Ty->isX86_MMXTy() && !Ty->isX86_AMXTy())
766 return Constant::getNullValue(Ty);
767 if (C->isAllOnesValue() &&
768 (Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy()))
769 return Constant::getAllOnesValue(Ty);
770 return nullptr;
771}
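
ConstantFoldLoadFromUniformValue captures the fact that a load from a uniform initializer (zero, undef, poison, or all-ones) is offset-independent. A self-contained illustration of the all-zeros case, ours:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  static const unsigned char Zeros[64] = {}; // stand-in for a zeroinitializer
  uint32_t V;
  std::memcpy(&V, Zeros + 12, sizeof V); // any in-bounds offset, any scalar
  std::printf("%u\n", V);                // always 0
}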
772
773namespace {
774
775/// One of Op0/Op1 is a constant expression.
776/// Attempt to symbolically evaluate the result of a binary operator merging
777/// these together. If target data info is available, it is provided as DL,
778/// otherwise DL is null.
779Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
780 const DataLayout &DL) {
781 // SROA
782
783 // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
784 // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
785 // bits.
786
787 if (Opc == Instruction::And) {
788 KnownBits Known0 = computeKnownBits(Op0, DL);
789 KnownBits Known1 = computeKnownBits(Op1, DL);
790 if ((Known1.One | Known0.Zero).isAllOnes()) {
791 // All the bits of Op0 that the 'and' could be masking are already zero.
792 return Op0;
793 }
794 if ((Known0.One | Known1.Zero).isAllOnes()) {
795 // All the bits of Op1 that the 'and' could be masking are already zero.
796 return Op1;
797 }
798
799 Known0 &= Known1;
800 if (Known0.isConstant())
801 return ConstantInt::get(Op0->getType(), Known0.getConstant());
802 }
803
804 // If the constant expr is something like &A[123] - &A[4].f, fold this into a
805 // constant. This happens frequently when iterating over a global array.
806 if (Opc == Instruction::Sub) {
807 GlobalValue *GV1, *GV2;
808 APInt Offs1, Offs2;
809
810 if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
811 if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
812 unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());
813
814 // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
815 // PtrToInt may change the bitwidth so we have to convert to the right size
816 // first.
817 return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
818 Offs2.zextOrTrunc(OpSize));
819 }
820 }
821
822 return nullptr;
823}
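
The "&A[123] - &A[4].f" comment above describes folding a same-base pointer difference down to a constant. The equivalent source-level arithmetic, as a self-contained sketch of ours:

#include <cstdio>

int main() {
  static int A[5];
  // Same base &A, so only the constant byte offsets (12 and 4) survive.
  long ByteDiff = (char *)&A[3] - (char *)&A[1];
  std::printf("%ld\n", ByteDiff); // prints 8 on targets with 4-byte int
}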
824
825/// If array indices are not pointer-sized integers, explicitly cast them so
826/// that they aren't implicitly casted by the getelementptr.
827Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
828 Type *ResultTy, Optional<unsigned> InRangeIndex,
829 const DataLayout &DL, const TargetLibraryInfo *TLI) {
830 Type *IntIdxTy = DL.getIndexType(ResultTy);
831 Type *IntIdxScalarTy = IntIdxTy->getScalarType();
832
833 bool Any = false;
834 SmallVector<Constant*, 32> NewIdxs;
835 for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
836 if ((i == 1 ||
837 !isa<StructType>(GetElementPtrInst::getIndexedType(
838 SrcElemTy, Ops.slice(1, i - 1)))) &&
839 Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
840 Any = true;
841 Type *NewType = Ops[i]->getType()->isVectorTy()
842 ? IntIdxTy
843 : IntIdxScalarTy;
844 NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
845 true,
846 NewType,
847 true),
848 Ops[i], NewType));
849 } else
850 NewIdxs.push_back(Ops[i]);
851 }
852
853 if (!Any)
854 return nullptr;
855
856 Constant *C = ConstantExpr::getGetElementPtr(
857 SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
858 return ConstantFoldConstant(C, DL, TLI);
859}
860
861/// Strip the pointer casts, but preserve the address space information.
862Constant *StripPtrCastKeepAS(Constant *Ptr) {
863 assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
864 auto *OldPtrTy = cast<PointerType>(Ptr->getType());
865 Ptr = cast<Constant>(Ptr->stripPointerCasts());
866 auto *NewPtrTy = cast<PointerType>(Ptr->getType());
867
868 // Preserve the address space number of the pointer.
869 if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
870 Ptr = ConstantExpr::getPointerCast(
871 Ptr, PointerType::getWithSamePointeeType(NewPtrTy,
872 OldPtrTy->getAddressSpace()));
873 }
874 return Ptr;
875}
876
877/// If we can symbolically evaluate the GEP constant expression, do so.
878Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
879 ArrayRef<Constant *> Ops,
880 const DataLayout &DL,
881 const TargetLibraryInfo *TLI) {
882 const GEPOperator *InnermostGEP = GEP;
883 bool InBounds = GEP->isInBounds();
884
885 Type *SrcElemTy = GEP->getSourceElementType();
886 Type *ResElemTy = GEP->getResultElementType();
887 Type *ResTy = GEP->getType();
888 if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
889 return nullptr;
890
891 if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
892 GEP->getInRangeIndex(), DL, TLI))
893 return C;
894
895 Constant *Ptr = Ops[0];
896 if (!Ptr->getType()->isPointerTy())
897 return nullptr;
898
899 Type *IntIdxTy = DL.getIndexType(Ptr->getType());
900
901 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
902 if (!isa<ConstantInt>(Ops[i]))
903 return nullptr;
904
905 unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
906 APInt Offset =
907 APInt(BitWidth,
908 DL.getIndexedOffsetInType(
909 SrcElemTy,
910 makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
911 Ptr = StripPtrCastKeepAS(Ptr);
912
913 // If this is a GEP of a GEP, fold it all into a single GEP.
914 while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
915 InnermostGEP = GEP;
916 InBounds &= GEP->isInBounds();
917
918 SmallVector<Value *, 4> NestedOps(llvm::drop_begin(GEP->operands()));
919
920 // Do not try to incorporate the sub-GEP if some index is not a number.
921 bool AllConstantInt = true;
922 for (Value *NestedOp : NestedOps)
923 if (!isa<ConstantInt>(NestedOp)) {
924 AllConstantInt = false;
925 break;
926 }
927 if (!AllConstantInt)
928 break;
929
930 Ptr = cast<Constant>(GEP->getOperand(0));
931 SrcElemTy = GEP->getSourceElementType();
932 Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
933 Ptr = StripPtrCastKeepAS(Ptr);
934 }
935
936 // If the base value for this address is a literal integer value, fold the
937 // getelementptr to the resulting integer value casted to the pointer type.
938 APInt BasePtr(BitWidth, 0);
939 if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
940 if (CE->getOpcode() == Instruction::IntToPtr) {
941 if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
942 BasePtr = Base->getValue().zextOrTrunc(BitWidth);
943 }
944 }
945
946 auto *PTy = cast<PointerType>(Ptr->getType());
947 if ((Ptr->isNullValue() || BasePtr != 0) &&
948 !DL.isNonIntegralPointerType(PTy)) {
949 Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
950 return ConstantExpr::getIntToPtr(C, ResTy);
951 }
952
953 // Otherwise form a regular getelementptr. Recompute the indices so that
954 // we eliminate over-indexing of the notional static type array bounds.
955 // This makes it easy to determine if the getelementptr is "inbounds".
956 // Also, this helps GlobalOpt do SROA on GlobalVariables.
957
958 // For GEPs of GlobalValues, use the value type even for opaque pointers.
959 // Otherwise use an i8 GEP.
960 if (auto *GV = dyn_cast<GlobalValue>(Ptr))
961 SrcElemTy = GV->getValueType();
962 else if (!PTy->isOpaque())
963 SrcElemTy = PTy->getNonOpaquePointerElementType();
964 else
965 SrcElemTy = Type::getInt8Ty(Ptr->getContext());
966
967 if (!SrcElemTy->isSized())
968 return nullptr;
969
970 Type *ElemTy = SrcElemTy;
971 SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
972 if (Offset != 0)
973 return nullptr;
974
975 // Try to add additional zero indices to reach the desired result element
976 // type.
977 // TODO: Should we avoid extra zero indices if ResElemTy can't be reached and
978 // we'll have to insert a bitcast anyway?
979 while (ElemTy != ResElemTy) {
980 Type *NextTy = GetElementPtrInst::getTypeAtIndex(ElemTy, (uint64_t)0);
981 if (!NextTy)
982 break;
983
984 Indices.push_back(APInt::getZero(isa<StructType>(ElemTy) ? 32 : BitWidth));
985 ElemTy = NextTy;
986 }
987
988 SmallVector<Constant *, 32> NewIdxs;
989 for (const APInt &Index : Indices)
990 NewIdxs.push_back(ConstantInt::get(
991 Type::getIntNTy(Ptr->getContext(), Index.getBitWidth()), Index));
992
993 // Preserve the inrange index from the innermost GEP if possible. We must
994 // have calculated the same indices up to and including the inrange index.
995 Optional<unsigned> InRangeIndex;
996 if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
997 if (SrcElemTy == InnermostGEP->getSourceElementType() &&
998 NewIdxs.size() > *LastIRIndex) {
999 InRangeIndex = LastIRIndex;
1000 for (unsigned I = 0; I <= *LastIRIndex; ++I)
1001 if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
1002 return nullptr;
1003 }
1004
1005 // Create a GEP.
1006 Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
1007 InBounds, InRangeIndex);
1008 assert(
1009 cast<PointerType>(C->getType())->isOpaqueOrPointeeTypeMatches(ElemTy) &&
1010 "Computed GetElementPtr has unexpected type!");
1011
1012 // If we ended up indexing a member with a type that doesn't match
1013 // the type of what the original indices indexed, add a cast.
1014 if (C->getType() != ResTy)
1015 C = FoldBitCast(C, ResTy, DL);
1016
1017 return C;
1018}
1019
1020/// Attempt to constant fold an instruction with the
1021/// specified opcode and operands. If successful, the constant result is
1022/// returned, if not, null is returned. Note that this function can fail when
1023/// attempting to fold instructions like loads and stores, which have no
1024/// constant expression form.
1025Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
1026 ArrayRef<Constant *> Ops,
1027 const DataLayout &DL,
1028 const TargetLibraryInfo *TLI) {
1029 Type *DestTy = InstOrCE->getType();
1030
1031 if (Instruction::isUnaryOp(Opcode))
1032 return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);
1033
1034 if (Instruction::isBinaryOp(Opcode)) {
1035 switch (Opcode) {
1036 default:
1037 break;
1038 case Instruction::FAdd:
1039 case Instruction::FSub:
1040 case Instruction::FMul:
1041 case Instruction::FDiv:
1042 case Instruction::FRem:
1043 // Handle floating point instructions separately to account for denormals
1044 // TODO: If a constant expression is being folded rather than an
1045 // instruction, denormals will not be flushed/treated as zero
1046 if (const auto *I = dyn_cast<Instruction>(InstOrCE)) {
1047 return ConstantFoldFPInstOperands(Opcode, Ops[0], Ops[1], DL, I);
1048 }
1049 }
1050 return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);
1051 }
1052
1053 if (Instruction::isCast(Opcode))
1054 return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);
1055
1056 if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
1057 if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
1058 return C;
1059
1060 return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
1061 Ops.slice(1), GEP->isInBounds(),
1062 GEP->getInRangeIndex());
1063 }
1064
1065 if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE)) {
1066 if (CE->isCompare())
1067 return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
1068 DL, TLI);
1069 return CE->getWithOperands(Ops);
1070 }
1071
1072 switch (Opcode) {
1073 default: return nullptr;
1074 case Instruction::ICmp:
1075 case Instruction::FCmp: {
1076 auto *C = cast<CmpInst>(InstOrCE);
1077 return ConstantFoldCompareInstOperands(C->getPredicate(), Ops[0], Ops[1],
1078 DL, TLI, C);
1079 }
1080 case Instruction::Freeze:
1081 return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr;
1082 case Instruction::Call:
1083 if (auto *F = dyn_cast<Function>(Ops.back())) {
1084 const auto *Call = cast<CallBase>(InstOrCE);
1085 if (canConstantFoldCallTo(Call, F))
1086 return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
1087 }
1088 return nullptr;
1089 case Instruction::Select:
1090 return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
1091 case Instruction::ExtractElement:
1092 return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
1093 case Instruction::ExtractValue:
1094 return ConstantFoldExtractValueInstruction(
1095 Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
1096 case Instruction::InsertElement:
1097 return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
1098 case Instruction::InsertValue:
1099 return ConstantFoldInsertValueInstruction(
1100 Ops[0], Ops[1], cast<InsertValueInst>(InstOrCE)->getIndices());
1101 case Instruction::ShuffleVector:
1102 return ConstantExpr::getShuffleVector(
1103 Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
1104 case Instruction::Load: {
1105 const auto *LI = dyn_cast<LoadInst>(InstOrCE);
1106 if (LI->isVolatile())
1107 return nullptr;
1108 return ConstantFoldLoadFromConstPtr(Ops[0], LI->getType(), DL);
1109 }
1110 }
1111}
1112
1113} // end anonymous namespace
1114
1115//===----------------------------------------------------------------------===//
1116// Constant Folding public APIs
1117//===----------------------------------------------------------------------===//
1118
1119namespace {
1120
1121Constant *
1122ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
1123 const TargetLibraryInfo *TLI,
1124 SmallDenseMap<Constant *, Constant *> &FoldedOps) {
1125 if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
1126 return const_cast<Constant *>(C);
1127
1128 SmallVector<Constant *, 8> Ops;
1129 for (const Use &OldU : C->operands()) {
1130 Constant *OldC = cast<Constant>(&OldU);
1131 Constant *NewC = OldC;
1132 // Recursively fold the ConstantExpr's operands. If we have already folded
1133 // a ConstantExpr, we don't have to process it again.
1134 if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
1135 auto It = FoldedOps.find(OldC);
1136 if (It == FoldedOps.end()) {
1137 NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
1138 FoldedOps.insert({OldC, NewC});
1139 } else {
1140 NewC = It->second;
1141 }
1142 }
1143 Ops.push_back(NewC);
1144 }
1145
1146 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1147 if (Constant *Res =
1148 ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI))
1149 return Res;
1150 return const_cast<Constant *>(C);
1151 }
1152
1153 assert(isa<ConstantVector>(C));
1154 return ConstantVector::get(Ops);
1155}
1156
1157} // end anonymous namespace
1158
1159Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
1160 const TargetLibraryInfo *TLI) {
1161 // Handle PHI nodes quickly here...
1162 if (auto *PN = dyn_cast<PHINode>(I)) {
1163 Constant *CommonValue = nullptr;
1164
1165 SmallDenseMap<Constant *, Constant *> FoldedOps;
1166 for (Value *Incoming : PN->incoming_values()) {
1167 // If the incoming value is undef then skip it. Note that while we could
1168 // skip the value if it is equal to the phi node itself we choose not to
1169 // because that would break the rule that constant folding only applies if
1170 // all operands are constants.
1171 if (isa<UndefValue>(Incoming))
1172 continue;
1173 // If the incoming value is not a constant, then give up.
1174 auto *C = dyn_cast<Constant>(Incoming);
1175 if (!C)
1176 return nullptr;
1177 // Fold the PHI's operands.
1178 C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
1179 // If the incoming value is a different constant to
1180 // the one we saw previously, then give up.
1181 if (CommonValue && C != CommonValue)
1182 return nullptr;
1183 CommonValue = C;
1184 }
1185
1186 // If we reach here, all incoming values are the same constant or undef.
1187 return CommonValue ? CommonValue : UndefValue::get(PN->getType());
1188 }
1189
1190 // Scan the operand list, checking to see if they are all constants, if so,
1191 // hand off to ConstantFoldInstOperandsImpl.
1192 if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
1193 return nullptr;
1194
1195 SmallDenseMap<Constant *, Constant *> FoldedOps;
1196 SmallVector<Constant *, 8> Ops;
1197 for (const Use &OpU : I->operands()) {
1198 auto *Op = cast<Constant>(&OpU);
1199 // Fold the Instruction's operands.
1200 Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
1201 Ops.push_back(Op);
1202 }
1203
1204 return ConstantFoldInstOperands(I, Ops, DL, TLI);
1205}
1206
1207Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
1208 const TargetLibraryInfo *TLI) {
1209 SmallDenseMap<Constant *, Constant *> FoldedOps;
1210 return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
1211}
1212
1213Constant *llvm::ConstantFoldInstOperands(Instruction *I,
1214 ArrayRef<Constant *> Ops,
1215 const DataLayout &DL,
1216 const TargetLibraryInfo *TLI) {
1217 return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
1218}
1219
1220Constant *llvm::ConstantFoldCompareInstOperands(
1221 unsigned IntPredicate, Constant *Ops0, Constant *Ops1, const DataLayout &DL,
1222 const TargetLibraryInfo *TLI, const Instruction *I) {
1223 CmpInst::Predicate Predicate = (CmpInst::Predicate)IntPredicate;
1224 // fold: icmp (inttoptr x), null -> icmp x, 0
1225 // fold: icmp null, (inttoptr x) -> icmp 0, x
1226 // fold: icmp (ptrtoint x), 0 -> icmp x, null
1227 // fold: icmp 0, (ptrtoint x) -> icmp null, x
1228 // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
1229 // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
1230 //
1231 // FIXME: The following comment is out of date and the DataLayout is here now.
1232 // ConstantExpr::getCompare cannot do this, because it doesn't have DL
1233 // around to know if bit truncation is happening.
1234 if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
1235 if (Ops1->isNullValue()) {
1236 if (CE0->getOpcode() == Instruction::IntToPtr) {
1237 Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
1238 // Convert the integer value to the right size to ensure we get the
1239 // proper extension or truncation.
1240 Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
1241 IntPtrTy, false);
1242 Constant *Null = Constant::getNullValue(C->getType());
1243 return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
1244 }
1245
1246 // Only do this transformation if the int is IntPtrTy in size, otherwise
1247 // there is a truncation or extension that we aren't modeling.
1248 if (CE0->getOpcode() == Instruction::PtrToInt) {
1249 Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
1250 if (CE0->getType() == IntPtrTy) {
1251 Constant *C = CE0->getOperand(0);
1252 Constant *Null = Constant::getNullValue(C->getType());
1253 return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
1254 }
1255 }
1256 }
1257
1258 if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
1259 if (CE0->getOpcode() == CE1->getOpcode()) {
1260 if (CE0->getOpcode() == Instruction::IntToPtr) {
1261 Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
1262
1263 // Convert the integer value to the right size to ensure we get the
1264 // proper extension or truncation.
1265 Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
1266 IntPtrTy, false);
1267 Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
1268 IntPtrTy, false);
1269 return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
1270 }
1271
1272 // Only do this transformation if the int is IntPtrTy in size, otherwise
1273 // there is a truncation or extension that we aren't modeling.
1274 if (CE0->getOpcode() == Instruction::PtrToInt) {
1275 Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
1276 if (CE0->getType() == IntPtrTy &&
1277 CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
1278 return ConstantFoldCompareInstOperands(
1279 Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
1280 }
1281 }
1282 }
1283 }
1284
1285 // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
1286 // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
1287 if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
1288 CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
1289 Constant *LHS = ConstantFoldCompareInstOperands(
1290 Predicate, CE0->getOperand(0), Ops1, DL, TLI);
1291 Constant *RHS = ConstantFoldCompareInstOperands(
1292 Predicate, CE0->getOperand(1), Ops1, DL, TLI);
1293 unsigned OpC =
1294 Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
1295 return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
1296 }
1297
1298 // Convert pointer comparison (base+offset1) pred (base+offset2) into
1299 // offset1 pred offset2, for the case where the offset is inbounds. This
1300 // only works for equality and unsigned comparison, as inbounds permits
1301 // crossing the sign boundary. However, the offset comparison itself is
1302 // signed.
1303 if (Ops0->getType()->isPointerTy() && !ICmpInst::isSigned(Predicate)) {
1304 unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ops0->getType());
1305 APInt Offset0(IndexWidth, 0);
1306 Value *Stripped0 =
1307 Ops0->stripAndAccumulateInBoundsConstantOffsets(DL, Offset0);
1308 APInt Offset1(IndexWidth, 0);
1309 Value *Stripped1 =
1310 Ops1->stripAndAccumulateInBoundsConstantOffsets(DL, Offset1);
1311 if (Stripped0 == Stripped1)
1312 return ConstantExpr::getCompare(
1313 ICmpInst::getSignedPredicate(Predicate),
1314 ConstantInt::get(CE0->getContext(), Offset0),
1315 ConstantInt::get(CE0->getContext(), Offset1));
1316 }
1317 } else if (isa<ConstantExpr>(Ops1)) {
1318 // If RHS is a constant expression, but the left side isn't, swap the
1319 // operands and try again.
1320 Predicate = ICmpInst::getSwappedPredicate(Predicate);
1321 return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
1322 }
1323
1324 // Flush any denormal constant float input according to denormal handling
1325 // mode.
1326 Ops0 = FlushFPConstant(Ops0, I, /* IsOutput */ false);
1327 Ops1 = FlushFPConstant(Ops1, I, /* IsOutput */ false);
1328
1329 return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
1330}
1331
1332Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
1333 const DataLayout &DL) {
1334 assert(Instruction::isUnaryOp(Opcode));
1335
1336 return ConstantExpr::get(Opcode, Op);
1337}
1338
1339Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
1340 Constant *RHS,
1341 const DataLayout &DL) {
1342 assert(Instruction::isBinaryOp(Opcode));
1343 if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
1344 if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
1345 return C;
1346
1347 if (ConstantExpr::isDesirableBinOp(Opcode))
1348 return ConstantExpr::get(Opcode, LHS, RHS);
1349 return ConstantFoldBinaryInstruction(Opcode, LHS, RHS);
1350}
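// A minimal usage sketch (hypothetical caller; `Ctx` and `DL` are assumed
// to come from the enclosing module, not from this file):
//   Type *I32 = Type::getInt32Ty(Ctx);
//   Constant *Five = ConstantFoldBinaryOpOperands(
//       Instruction::Add, ConstantInt::get(I32, 2),
//       ConstantInt::get(I32, 3), DL);  // folds to i32 5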
1351
1352Constant *llvm::FlushFPConstant(Constant *Operand, const Instruction *I,
1353 bool IsOutput) {
1354 if (!I || !I->getParent() || !I->getFunction())
1355 return Operand;
1356
1357 ConstantFP *CFP = dyn_cast<ConstantFP>(Operand);
1358 if (!CFP)
1359 return Operand;
1360
1361 const APFloat &APF = CFP->getValueAPF();
1362 Type *Ty = CFP->getType();
1363 DenormalMode DenormMode =
1364 I->getFunction()->getDenormalMode(Ty->getFltSemantics());
1365 DenormalMode::DenormalModeKind Mode =
1366 IsOutput ? DenormMode.Output : DenormMode.Input;
1367 switch (Mode) {
1368 default:
1369 llvm_unreachable("unknown denormal mode");
1370 return Operand;
1371 case DenormalMode::IEEE:
1372 return Operand;
1373 case DenormalMode::PreserveSign:
1374 if (APF.isDenormal()) {
1375 return ConstantFP::get(
1376 Ty->getContext(),
1377 APFloat::getZero(Ty->getFltSemantics(), APF.isNegative()));
1378 }
1379 return Operand;
1380 case DenormalMode::PositiveZero:
1381 if (APF.isDenormal()) {
1382 return ConstantFP::get(Ty->getContext(),
1383 APFloat::getZero(Ty->getFltSemantics(), false));
1384 }
1385 return Operand;
1386 }
1387 return Operand;
1388}
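// For illustration: if the enclosing function carries, say,
// "denormal-fp-math"="preserve-sign,preserve-sign", a single-precision
// denormal input such as -0x1.0p-149 is flushed to -0.0f here; under a
// positive-zero mode it becomes +0.0f, and under the default IEEE mode
// the constant is returned unchanged.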
1389
1390Constant *llvm::ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
1391 Constant *RHS, const DataLayout &DL,
1392 const Instruction *I) {
1393 if (Instruction::isBinaryOp(Opcode)) {
1394 // Flush denormal inputs if needed.
1395 Constant *Op0 = FlushFPConstant(LHS, I, /* IsOutput */ false);
1396 Constant *Op1 = FlushFPConstant(RHS, I, /* IsOutput */ false);
1397
1398 // Calculate constant result.
1399 Constant *C = ConstantFoldBinaryOpOperands(Opcode, Op0, Op1, DL);
1400 if (!C)
1401 return nullptr;
1402
1403 // Flush denormal output if needed.
1404 return FlushFPConstant(C, I, /* IsOutput */ true);
1405 }
1406 // If the instruction lacks a parent/function and the denormal mode
1407 // cannot be determined, use the default (IEEE).
1408 return ConstantFoldBinaryOpOperands(Opcode, LHS, RHS, DL);
1409}
1410
1411Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
1412 Type *DestTy, const DataLayout &DL) {
1413 assert(Instruction::isCast(Opcode));
1414 switch (Opcode) {
1415 default:
1416 llvm_unreachable("Missing case");
1417 case Instruction::PtrToInt:
1418 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1419 Constant *FoldedValue = nullptr;
1420 // If the input is an inttoptr, eliminate the pair. This requires knowing
1421 // the width of a pointer, so it can't be done in ConstantExpr::getCast.
1422 if (CE->getOpcode() == Instruction::IntToPtr) {
1423 // zext/trunc the inttoptr to pointer size.
1424 FoldedValue = ConstantExpr::getIntegerCast(
1425 CE->getOperand(0), DL.getIntPtrType(CE->getType()),
1426 /*IsSigned=*/false);
1427 } else if (auto *GEP = dyn_cast<GEPOperator>(CE)) {
1428 // If we have GEP, we can perform the following folds:
1429 // (ptrtoint (gep null, x)) -> x
1430 // (ptrtoint (gep (gep null, x), y)) -> x + y, etc.
1431 unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
1432 APInt BaseOffset(BitWidth, 0);
1433 auto *Base = cast<Constant>(GEP->stripAndAccumulateConstantOffsets(
1434 DL, BaseOffset, /*AllowNonInbounds=*/true));
1435 if (Base->isNullValue()) {
1436 FoldedValue = ConstantInt::get(CE->getContext(), BaseOffset);
1437 } else {
1438 // ptrtoint (gep i8, Ptr, (sub 0, V)) -> sub (ptrtoint Ptr), V
1439 if (GEP->getNumIndices() == 1 &&
1440 GEP->getSourceElementType()->isIntegerTy(8)) {
1441 auto *Ptr = cast<Constant>(GEP->getPointerOperand());
1442 auto *Sub = dyn_cast<ConstantExpr>(GEP->getOperand(1));
1443 Type *IntIdxTy = DL.getIndexType(Ptr->getType());
1444 if (Sub && Sub->getType() == IntIdxTy &&
1445 Sub->getOpcode() == Instruction::Sub &&
1446 Sub->getOperand(0)->isNullValue())
1447 FoldedValue = ConstantExpr::getSub(
1448 ConstantExpr::getPtrToInt(Ptr, IntIdxTy), Sub->getOperand(1));
1449 }
1450 }
1451 }
1452 if (FoldedValue) {
1453 // Do a zext or trunc to get to the ptrtoint dest size.
1454 return ConstantExpr::getIntegerCast(FoldedValue, DestTy,
1455 /*IsSigned=*/false);
1456 }
1457 }
1458 return ConstantExpr::getCast(Opcode, C, DestTy);
1459 case Instruction::IntToPtr:
1460 // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
1461 // the int size is >= the ptr size and the address spaces are the same.
1462 // This requires knowing the width of a pointer, so it can't be done in
1463 // ConstantExpr::getCast.
1464 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1465 if (CE->getOpcode() == Instruction::PtrToInt) {
1466 Constant *SrcPtr = CE->getOperand(0);
1467 unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
1468 unsigned MidIntSize = CE->getType()->getScalarSizeInBits();
1469
1470 if (MidIntSize >= SrcPtrSize) {
1471 unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
1472 if (SrcAS == DestTy->getPointerAddressSpace())
1473 return FoldBitCast(CE->getOperand(0), DestTy, DL);
1474 }
1475 }
1476 }
1477
1478 return ConstantExpr::getCast(Opcode, C, DestTy);
1479 case Instruction::Trunc:
1480 case Instruction::ZExt:
1481 case Instruction::SExt:
1482 case Instruction::FPTrunc:
1483 case Instruction::FPExt:
1484 case Instruction::UIToFP:
1485 case Instruction::SIToFP:
1486 case Instruction::FPToUI:
1487 case Instruction::FPToSI:
1488 case Instruction::AddrSpaceCast:
1489 return ConstantExpr::getCast(Opcode, C, DestTy);
1490 case Instruction::BitCast:
1491 return FoldBitCast(C, DestTy, DL);
1492 }
1493}
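// Illustrative IR instances of the folds above:
//   ptrtoint (inttoptr (i64 42 to ptr)) to i64             -->  i64 42
//   ptrtoint (getelementptr (i8, ptr null, i64 16)) to i64 -->  i64 16
//   inttoptr (ptrtoint (ptr @g to i64)) to ptr             -->  ptr @g
// Each of these requires the DataLayout's pointer width, which is why
// they are handled here rather than in ConstantExpr::getCast.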
1494
1495//===----------------------------------------------------------------------===//
1496// Constant Folding for Calls
1497//
1498
1499bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
1500 if (Call->isNoBuiltin())
1501 return false;
1502 if (Call->getFunctionType() != F->getFunctionType())
1503 return false;
1504 switch (F->getIntrinsicID()) {
1505 // Operations that do not operate on floating-point numbers and do not
1506 // depend on the FP environment can be folded even in strictfp functions.
1507 case Intrinsic::bswap:
1508 case Intrinsic::ctpop:
1509 case Intrinsic::ctlz:
1510 case Intrinsic::cttz:
1511 case Intrinsic::fshl:
1512 case Intrinsic::fshr:
1513 case Intrinsic::launder_invariant_group:
1514 case Intrinsic::strip_invariant_group:
1515 case Intrinsic::masked_load:
1516 case Intrinsic::get_active_lane_mask:
1517 case Intrinsic::abs:
1518 case Intrinsic::smax:
1519 case Intrinsic::smin:
1520 case Intrinsic::umax:
1521 case Intrinsic::umin:
1522 case Intrinsic::sadd_with_overflow:
1523 case Intrinsic::uadd_with_overflow:
1524 case Intrinsic::ssub_with_overflow:
1525 case Intrinsic::usub_with_overflow:
1526 case Intrinsic::smul_with_overflow:
1527 case Intrinsic::umul_with_overflow:
1528 case Intrinsic::sadd_sat:
1529 case Intrinsic::uadd_sat:
1530 case Intrinsic::ssub_sat:
1531 case Intrinsic::usub_sat:
1532 case Intrinsic::smul_fix:
1533 case Intrinsic::smul_fix_sat:
1534 case Intrinsic::bitreverse:
1535 case Intrinsic::is_constant:
1536 case Intrinsic::vector_reduce_add:
1537 case Intrinsic::vector_reduce_mul:
1538 case Intrinsic::vector_reduce_and:
1539 case Intrinsic::vector_reduce_or:
1540 case Intrinsic::vector_reduce_xor:
1541 case Intrinsic::vector_reduce_smin:
1542 case Intrinsic::vector_reduce_smax:
1543 case Intrinsic::vector_reduce_umin:
1544 case Intrinsic::vector_reduce_umax:
1545 // Target intrinsics
1546 case Intrinsic::amdgcn_perm:
1547 case Intrinsic::arm_mve_vctp8:
1548 case Intrinsic::arm_mve_vctp16:
1549 case Intrinsic::arm_mve_vctp32:
1550 case Intrinsic::arm_mve_vctp64:
1551 case Intrinsic::aarch64_sve_convert_from_svbool:
1552 // WebAssembly float semantics are always known
1553 case Intrinsic::wasm_trunc_signed:
1554 case Intrinsic::wasm_trunc_unsigned:
1555 return true;
1556
1557 // Floating-point operations cannot be folded in strictfp functions in the
1558 // general case. They can be folded if the FP environment is known to the compiler.
1559 case Intrinsic::minnum:
1560 case Intrinsic::maxnum:
1561 case Intrinsic::minimum:
1562 case Intrinsic::maximum:
1563 case Intrinsic::log:
1564 case Intrinsic::log2:
1565 case Intrinsic::log10:
1566 case Intrinsic::exp:
1567 case Intrinsic::exp2:
1568 case Intrinsic::sqrt:
1569 case Intrinsic::sin:
1570 case Intrinsic::cos:
1571 case Intrinsic::pow:
1572 case Intrinsic::powi:
1573 case Intrinsic::fma:
1574 case Intrinsic::fmuladd:
1575 case Intrinsic::fptoui_sat:
1576 case Intrinsic::fptosi_sat:
1577 case Intrinsic::convert_from_fp16:
1578 case Intrinsic::convert_to_fp16:
1579 case Intrinsic::amdgcn_cos:
1580 case Intrinsic::amdgcn_cubeid:
1581 case Intrinsic::amdgcn_cubema:
1582 case Intrinsic::amdgcn_cubesc:
1583 case Intrinsic::amdgcn_cubetc:
1584 case Intrinsic::amdgcn_fmul_legacy:
1585 case Intrinsic::amdgcn_fma_legacy:
1586 case Intrinsic::amdgcn_fract:
1587 case Intrinsic::amdgcn_ldexp:
1588 case Intrinsic::amdgcn_sin:
1589 // The intrinsics below depend on the rounding mode in MXCSR.
1590 case Intrinsic::x86_sse_cvtss2si:
1591 case Intrinsic::x86_sse_cvtss2si64:
1592 case Intrinsic::x86_sse_cvttss2si:
1593 case Intrinsic::x86_sse_cvttss2si64:
1594 case Intrinsic::x86_sse2_cvtsd2si:
1595 case Intrinsic::x86_sse2_cvtsd2si64:
1596 case Intrinsic::x86_sse2_cvttsd2si:
1597 case Intrinsic::x86_sse2_cvttsd2si64:
1598 case Intrinsic::x86_avx512_vcvtss2si32:
1599 case Intrinsic::x86_avx512_vcvtss2si64:
1600 case Intrinsic::x86_avx512_cvttss2si:
1601 case Intrinsic::x86_avx512_cvttss2si64:
1602 case Intrinsic::x86_avx512_vcvtsd2si32:
1603 case Intrinsic::x86_avx512_vcvtsd2si64:
1604 case Intrinsic::x86_avx512_cvttsd2si:
1605 case Intrinsic::x86_avx512_cvttsd2si64:
1606 case Intrinsic::x86_avx512_vcvtss2usi32:
1607 case Intrinsic::x86_avx512_vcvtss2usi64:
1608 case Intrinsic::x86_avx512_cvttss2usi:
1609 case Intrinsic::x86_avx512_cvttss2usi64:
1610 case Intrinsic::x86_avx512_vcvtsd2usi32:
1611 case Intrinsic::x86_avx512_vcvtsd2usi64:
1612 case Intrinsic::x86_avx512_cvttsd2usi:
1613 case Intrinsic::x86_avx512_cvttsd2usi64:
1614 return !Call->isStrictFP();
1615
1616 // Sign operations are actually bitwise operations; they do not raise
1617 // exceptions even for SNaNs.
1618 case Intrinsic::fabs:
1619 case Intrinsic::copysign:
1620 // Non-constrained variants of rounding operations imply the default FP
1621 // environment, so they can be folded in any case.
1622 case Intrinsic::ceil:
1623 case Intrinsic::floor:
1624 case Intrinsic::round:
1625 case Intrinsic::roundeven:
1626 case Intrinsic::trunc:
1627 case Intrinsic::nearbyint:
1628 case Intrinsic::rint:
1629 // Constrained intrinsics can be folded if the FP environment is known
1630 // to the compiler.
1631 case Intrinsic::experimental_constrained_fma:
1632 case Intrinsic::experimental_constrained_fmuladd:
1633 case Intrinsic::experimental_constrained_fadd:
1634 case Intrinsic::experimental_constrained_fsub:
1635 case Intrinsic::experimental_constrained_fmul:
1636 case Intrinsic::experimental_constrained_fdiv:
1637 case Intrinsic::experimental_constrained_frem:
1638 case Intrinsic::experimental_constrained_ceil:
1639 case Intrinsic::experimental_constrained_floor:
1640 case Intrinsic::experimental_constrained_round:
1641 case Intrinsic::experimental_constrained_roundeven:
1642 case Intrinsic::experimental_constrained_trunc:
1643 case Intrinsic::experimental_constrained_nearbyint:
1644 case Intrinsic::experimental_constrained_rint:
1645 case Intrinsic::experimental_constrained_fcmp:
1646 case Intrinsic::experimental_constrained_fcmps:
1647 return true;
1648 default:
1649 return false;
1650 case Intrinsic::not_intrinsic: break;
1651 }
1652
1653 if (!F->hasName() || Call->isStrictFP())
1654 return false;
1655
1656 // In these cases, checking the length is required. We don't want to
1657 // return true for a name like "cos\0blah", which strcmp would report
1658 // equal to "cos" but which has length 8.
1659 StringRef Name = F->getName();
1660 switch (Name[0]) {
1661 default:
1662 return false;
1663 case 'a':
1664 return Name == "acos" || Name == "acosf" ||
1665 Name == "asin" || Name == "asinf" ||
1666 Name == "atan" || Name == "atanf" ||
1667 Name == "atan2" || Name == "atan2f";
1668 case 'c':
1669 return Name == "ceil" || Name == "ceilf" ||
1670 Name == "cos" || Name == "cosf" ||
1671 Name == "cosh" || Name == "coshf";
1672 case 'e':
1673 return Name == "exp" || Name == "expf" ||
1674 Name == "exp2" || Name == "exp2f";
1675 case 'f':
1676 return Name == "fabs" || Name == "fabsf" ||
1677 Name == "floor" || Name == "floorf" ||
1678 Name == "fmod" || Name == "fmodf";
1679 case 'l':
1680 return Name == "log" || Name == "logf" ||
1681 Name == "log2" || Name == "log2f" ||
1682 Name == "log10" || Name == "log10f";
1683 case 'n':
1684 return Name == "nearbyint" || Name == "nearbyintf";
1685 case 'p':
1686 return Name == "pow" || Name == "powf";
1687 case 'r':
1688 return Name == "remainder" || Name == "remainderf" ||
1689 Name == "rint" || Name == "rintf" ||
1690 Name == "round" || Name == "roundf";
1691 case 's':
1692 return Name == "sin" || Name == "sinf" ||
1693 Name == "sinh" || Name == "sinhf" ||
1694 Name == "sqrt" || Name == "sqrtf";
1695 case 't':
1696 return Name == "tan" || Name == "tanf" ||
1697 Name == "tanh" || Name == "tanhf" ||
1698 Name == "trunc" || Name == "truncf";
1699 case '_':
1700 // Check for various function names that get used for the math functions
1701 // when the header files are preprocessed with the macro
1702 // __FINITE_MATH_ONLY__ enabled.
1703 // The '12' here is the length of the shortest name that can match.
1704 // We need to check the size before looking at Name[1] and Name[2]
1705 // so we may as well check a limit that will eliminate mismatches.
1706 if (Name.size() < 12 || Name[1] != '_')
1707 return false;
1708 switch (Name[2]) {
1709 default:
1710 return false;
1711 case 'a':
1712 return Name == "__acos_finite" || Name == "__acosf_finite" ||
1713 Name == "__asin_finite" || Name == "__asinf_finite" ||
1714 Name == "__atan2_finite" || Name == "__atan2f_finite";
1715 case 'c':
1716 return Name == "__cosh_finite" || Name == "__coshf_finite";
1717 case 'e':
1718 return Name == "__exp_finite" || Name == "__expf_finite" ||
1719 Name == "__exp2_finite" || Name == "__exp2f_finite";
1720 case 'l':
1721 return Name == "__log_finite" || Name == "__logf_finite" ||
1722 Name == "__log10_finite" || Name == "__log10f_finite";
1723 case 'p':
1724 return Name == "__pow_finite" || Name == "__powf_finite";
1725 case 's':
1726 return Name == "__sinh_finite" || Name == "__sinhf_finite";
1727 }
1728 }
1729}
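// A minimal usage sketch (hypothetical call site; `CB`, `ArgConstants` and
// `TLI` are assumed to be supplied by the caller):
//   if (Function *F = CB->getCalledFunction())
//     if (canConstantFoldCallTo(CB, F))
//       if (Constant *C = ConstantFoldCall(CB, F, ArgConstants, &TLI))
//         CB->replaceAllUsesWith(C);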
1730
1731namespace {
1732
1733Constant *GetConstantFoldFPValue(double V, Type *Ty) {
1734 if (Ty->isHalfTy() || Ty->isFloatTy()) {
1735 APFloat APF(V);
1736 bool unused;
1737 APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
1738 return ConstantFP::get(Ty->getContext(), APF);
1739 }
1740 if (Ty->isDoubleTy())
1741 return ConstantFP::get(Ty->getContext(), APFloat(V));
1742 llvm_unreachable("Can only constant fold half/float/double");
1743}
1744
1745/// Clear the floating-point exception state.
1746inline void llvm_fenv_clearexcept() {
1747#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
1748 feclearexcept(FE_ALL_EXCEPT);
1749#endif
1750 errno = 0;
1751}
1752
1753/// Test if a floating-point exception was raised.
1754inline bool llvm_fenv_testexcept() {
1755 int errno_val = errno;
1756 if (errno_val == ERANGE || errno_val == EDOM)
1757 return true;
1758#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
1759 if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
1760 return true;
1761#endif
1762 return false;
1763}
1764
1765Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V,
1766 Type *Ty) {
1767 llvm_fenv_clearexcept();
1768 double Result = NativeFP(V.convertToDouble());
1769 if (llvm_fenv_testexcept()) {
1770 llvm_fenv_clearexcept();
1771 return nullptr;
1772 }
1773
1774 return GetConstantFoldFPValue(Result, Ty);
1775}
1776
1777Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
1778 const APFloat &V, const APFloat &W, Type *Ty) {
1779 llvm_fenv_clearexcept();
1780 double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
1781 if (llvm_fenv_testexcept()) {
1782 llvm_fenv_clearexcept();
1783 return nullptr;
1784 }
1785
1786 return GetConstantFoldFPValue(Result, Ty);
1787}
1788
1789Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
1790 FixedVectorType *VT = dyn_cast<FixedVectorType>(Op->getType());
1791 if (!VT)
1792 return nullptr;
1793
1794 // This isn't strictly necessary, but handle the special/common case of zero:
1795 // all integer reductions of a zero input produce zero.
1796 if (isa<ConstantAggregateZero>(Op))
1797 return ConstantInt::get(VT->getElementType(), 0);
1798
1799 // This is the same as the underlying binops - poison propagates.
1800 if (isa<PoisonValue>(Op) || Op->containsPoisonElement())
1801 return PoisonValue::get(VT->getElementType());
1802
1803 // TODO: Handle undef.
1804 if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))
1805 return nullptr;
1806
1807 auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
1808 if (!EltC)
1809 return nullptr;
1810
1811 APInt Acc = EltC->getValue();
1812 for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) {
1813 if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
1814 return nullptr;
1815 const APInt &X = EltC->getValue();
1816 switch (IID) {
1817 case Intrinsic::vector_reduce_add:
1818 Acc = Acc + X;
1819 break;
1820 case Intrinsic::vector_reduce_mul:
1821 Acc = Acc * X;
1822 break;
1823 case Intrinsic::vector_reduce_and:
1824 Acc = Acc & X;
1825 break;
1826 case Intrinsic::vector_reduce_or:
1827 Acc = Acc | X;
1828 break;
1829 case Intrinsic::vector_reduce_xor:
1830 Acc = Acc ^ X;
1831 break;
1832 case Intrinsic::vector_reduce_smin:
1833 Acc = APIntOps::smin(Acc, X);
1834 break;
1835 case Intrinsic::vector_reduce_smax:
1836 Acc = APIntOps::smax(Acc, X);
1837 break;
1838 case Intrinsic::vector_reduce_umin:
1839 Acc = APIntOps::umin(Acc, X);
1840 break;
1841 case Intrinsic::vector_reduce_umax:
1842 Acc = APIntOps::umax(Acc, X);
1843 break;
1844 }
1845 }
1846
1847 return ConstantInt::get(Op->getContext(), Acc);
1848}
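// For example (illustrative IR):
//   call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> <i32 1, i32 2, i32 3, i32 4>)
// folds here to i32 10, while any poison element makes the whole
// reduction poison, matching the scalar binop semantics noted above.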
1849
1850/// Attempt to fold an SSE floating point to integer conversion of a constant
1851/// floating point. If roundTowardZero is false, the default IEEE rounding is
1852/// used (toward nearest, ties to even). This matches the behavior of the
1853/// non-truncating SSE instructions in the default rounding mode. The desired
1854/// integer type Ty is used to select how many bits are available for the
1855/// result. Returns null if the conversion cannot be performed, otherwise
1856/// returns the Constant value resulting from the conversion.
1857Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
1858 Type *Ty, bool IsSigned) {
1859 // All of these conversion intrinsics form an integer of at most 64 bits.
1860 unsigned ResultWidth = Ty->getIntegerBitWidth();
1861 assert(ResultWidth <= 64 &&
1862        "Can only constant fold conversions to 64 and 32 bit ints");
1863
1864 uint64_t UIntVal;
1865 bool isExact = false;
1866 APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero
1867 : APFloat::rmNearestTiesToEven;
1868 APFloat::opStatus status =
1869 Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
1870 IsSigned, mode, &isExact);
1871 if (status != APFloat::opOK &&
1872 (!roundTowardZero || status != APFloat::opInexact))
1873 return nullptr;
1874 return ConstantInt::get(Ty, UIntVal, IsSigned);
1875}
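// For example (illustrative): cvtss2si applied to 1.5f rounds to nearest,
// ties to even, yielding 2, while the truncating cvttss2si yields 1; a
// value outside the target integer range makes convertToInteger report
// opInvalidOp, so the function returns null and no fold happens.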
1876
1877double getValueAsDouble(ConstantFP *Op) {
1878 Type *Ty = Op->getType();
1879
1880 if (Ty->isBFloatTy() || Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())
1881 return Op->getValueAPF().convertToDouble();
1882
1883 bool unused;
1884 APFloat APF = Op->getValueAPF();
1885 APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
1886 return APF.convertToDouble();
1887}
1888
1889static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
1890 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1891 C = &CI->getValue();
1892 return true;
1893 }
1894 if (isa<UndefValue>(Op)) {
1895 C = nullptr;
1896 return true;
1897 }
1898 return false;
1899}
1900
1901/// Checks if the given intrinsic call, which evaluates to a constant, is
1902/// allowed to be folded.
1903///
1904/// \param CI Constrained intrinsic call.
1905/// \param St Exception flags raised during constant evaluation.
1906static bool mayFoldConstrained(ConstrainedFPIntrinsic *CI,
1907 APFloat::opStatus St) {
1908 Optional<RoundingMode> ORM = CI->getRoundingMode();
1909 Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
1910
1911 // If the operation does not change exception status flags, it is safe
1912 // to fold.
1913 if (St == APFloat::opStatus::opOK)
1914 return true;
1915
1916 // If evaluation raised an FP exception, the result can depend on the
1917 // rounding mode. If the latter is unknown, folding is not possible.
1918 if (ORM && *ORM == RoundingMode::Dynamic)
1919 return false;
1920
1921 // If FP exceptions are ignored, fold the call, even if such an exception
1922 // is raised.
1923 if (EB && *EB != fp::ExceptionBehavior::ebStrict)
1924 return true;
1925
1926 // Leave the calculation for runtime so that the exception flags are set
1927 // correctly in hardware.
1928 return false;
1929}
1930
1931/// Returns the rounding mode that should be used for constant evaluation.
1932static RoundingMode
1933getEvaluationRoundingMode(const ConstrainedFPIntrinsic *CI) {
1934 Optional<RoundingMode> ORM = CI->getRoundingMode();
1935 if (!ORM || *ORM == RoundingMode::Dynamic)
1936 // Even if the rounding mode is unknown, try evaluating the operation.
1937 // If it does not raise the inexact exception, rounding was not applied,
1938 // so the result is exact and does not depend on the rounding mode;
1939 // whether other FP exceptions are raised does not depend on it either.
1940 return RoundingMode::NearestTiesToEven;
1941 return *ORM;
1942}
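// For example (illustrative): under a dynamic rounding mode, evaluating a
// constrained fadd of 1.0 and 2.0 with NearestTiesToEven raises no flags,
// so the exact result 3.0 is valid for every rounding mode and may be
// folded; an evaluation that does raise inexact is rejected afterwards by
// mayFoldConstrained.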
1943
1944static Constant *ConstantFoldScalarCall1(StringRef Name,
1945 Intrinsic::ID IntrinsicID,
1946 Type *Ty,
1947 ArrayRef<Constant *> Operands,
1948 const TargetLibraryInfo *TLI,
1949 const CallBase *Call) {
1950 assert(Operands.size() == 1 && "Wrong number of operands.");
1951
1952 if (IntrinsicID == Intrinsic::is_constant) {
1953 // We know we have a "Constant" argument. But we want to only
1954 // return true for manifest constants, not those that depend on
1955 // constants with unknowable values, e.g. GlobalValue or BlockAddress.
1956 if (Operands[0]->isManifestConstant())
1957 return ConstantInt::getTrue(Ty->getContext());
1958 return nullptr;
1959 }
1960 if (isa<UndefValue>(Operands[0])) {
1961 // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
1962 // ctpop() is between 0 and bitwidth, pick 0 for undef.
1963 // fptoui.sat and fptosi.sat can always fold to zero (for a zero input).
1964 if (IntrinsicID == Intrinsic::cos ||
1965 IntrinsicID == Intrinsic::ctpop ||
1966 IntrinsicID == Intrinsic::fptoui_sat ||
1967 IntrinsicID == Intrinsic::fptosi_sat)
1968 return Constant::getNullValue(Ty);
1969 if (IntrinsicID == Intrinsic::bswap ||
1970 IntrinsicID == Intrinsic::bitreverse ||
1971 IntrinsicID == Intrinsic::launder_invariant_group ||
1972 IntrinsicID == Intrinsic::strip_invariant_group)
1973 return Operands[0];
1974 }
1975
1976 if (isa<ConstantPointerNull>(Operands[0])) {
1977 // launder(null) == null == strip(null) iff in addrspace 0
1978 if (IntrinsicID == Intrinsic::launder_invariant_group ||
1979 IntrinsicID == Intrinsic::strip_invariant_group) {
1980 // If the instruction has not yet been placed in a basic block (e.g. when
1981 // cloning a function during inlining), Call's caller may not be available,
1982 // so check Call's BB first before querying Call->getCaller.
1983 const Function *Caller =
1984 Call->getParent() ? Call->getCaller() : nullptr;
1985 if (Caller &&
1986 !NullPointerIsDefined(
1987 Caller, Operands[0]->getType()->getPointerAddressSpace())) {
1988 return Operands[0];
1989 }
1990 return nullptr;
1991 }
1992 }
1993
1994 if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
1995 if (IntrinsicID == Intrinsic::convert_to_fp16) {
1996 APFloat Val(Op->getValueAPF());
1997
1998 bool lost = false;
1999 Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
2000
2001 return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
2002 }
2003
2004 APFloat U = Op->getValueAPF();
2005
2006 if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
2007 IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
2008 bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;
2009
2010 if (U.isNaN())
2011 return nullptr;
2012
2013 unsigned Width = Ty->getIntegerBitWidth();
2014 APSInt Int(Width, !Signed);
2015 bool IsExact = false;
2016 APFloat::opStatus Status =
2017 U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
2018
2019 if (Status == APFloat::opOK || Status == APFloat::opInexact)
2020 return ConstantInt::get(Ty, Int);
2021
2022 return nullptr;
2023 }
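    // For example (illustrative): wasm.trunc.signed on 2.9f converts with
    // status opInexact and folds to i32 2 above, while a NaN input returns
    // null here and the trapping semantics are left to runtime.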
2024
2025 if (IntrinsicID == Intrinsic::fptoui_sat ||
2026 IntrinsicID == Intrinsic::fptosi_sat) {
2027 // convertToInteger() already has the desired saturation semantics.
2028 APSInt Int(Ty->getIntegerBitWidth(),
2029 IntrinsicID == Intrinsic::fptoui_sat);
2030 bool IsExact;
2031 U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
2032 return ConstantInt::get(Ty, Int);
2033 }
2034
2035 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
2036 return nullptr;
2037
2038 // Use internal versions of these intrinsics.
2039
2040 if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
2041 U.roundToIntegral(APFloat::rmNearestTiesToEven);
2042 return ConstantFP::get(Ty->getContext(), U);
2043 }
2044
2045 if (IntrinsicID == Intrinsic::round) {
2046 U.roundToIntegral(APFloat::rmNearestTiesToAway);
2047 return ConstantFP::get(Ty->getContext(), U);
2048 }
2049
2050 if (IntrinsicID == Intrinsic::roundeven) {
2051 U.roundToIntegral(APFloat::rmNearestTiesToEven);
2052 return ConstantFP::get(Ty->getContext(), U);
2053 }
2054
2055 if (IntrinsicID == Intrinsic::ceil) {
2056 U.roundToIntegral(APFloat::rmTowardPositive);
2057 return ConstantFP::get(Ty->getContext(), U);
2058 }
2059
2060 if (IntrinsicID == Intrinsic::floor) {
2061 U.roundToIntegral(APFloat::rmTowardNegative);
2062 return ConstantFP::get(Ty->getContext(), U);
2063 }
2064
2065 if (IntrinsicID == Intrinsic::trunc) {
2066 U.roundToIntegral(APFloat::rmTowardZero);
2067 return ConstantFP::get(Ty->getContext(), U);
2068 }
2069
2070 if (IntrinsicID == Intrinsic::fabs) {
2071 U.clearSign();
2072 return ConstantFP::get(Ty->getContext(), U);
2073 }
2074
2075 if (IntrinsicID == Intrinsic::amdgcn_fract) {
2076 // The v_fract instruction behaves like the OpenCL spec, which defines
2077 // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator is
2078 // there to prevent fract(-small) from returning 1.0. It returns the
2079 // largest positive floating-point number less than 1.0."
2080 APFloat FloorU(U);
2081 FloorU.roundToIntegral(APFloat::rmTowardNegative);
2082 APFloat FractU(U - FloorU);
2083 APFloat AlmostOne(U.getSemantics(), 1);
2084 AlmostOne.next(/*nextDown*/ true);
2085 return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne));
2086 }
2087
2088 // Rounding operations (floor, trunc, ceil, round and nearbyint) do not
2089 // raise FP exceptions, unless the argument is a signaling NaN.
2090
2091 Optional<APFloat::roundingMode> RM;
2092 switch (IntrinsicID) {
2093 default:
2094 break;
2095 case Intrinsic::experimental_constrained_nearbyint:
2096 case Intrinsic::experimental_constrained_rint: {
2097 auto CI = cast<ConstrainedFPIntrinsic>(Call);
2098 RM = CI->getRoundingMode();
2099 if (!RM || *RM == RoundingMode::Dynamic)
2100 return nullptr;
2101 break;
2102 }
2103 case Intrinsic::experimental_constrained_round:
2104 RM = APFloat::rmNearestTiesToAway;
2105 break;
2106 case Intrinsic::experimental_constrained_ceil:
2107 RM = APFloat::rmTowardPositive;
2108 break;
2109 case Intrinsic::experimental_constrained_floor:
2110 RM = APFloat::rmTowardNegative;
2111 break;
2112 case Intrinsic::experimental_constrained_trunc:
2113 RM = APFloat::rmTowardZero;
2114 break;
2115 }
2116 if (RM) {
2117 auto CI = cast<ConstrainedFPIntrinsic>(Call);
2118 if (U.isFinite()) {
2119 APFloat::opStatus St = U.roundToIntegral(*RM);
2120 if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
2121 St == APFloat::opInexact) {
2122 Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2123 if (EB && *EB == fp::ebStrict)
2124 return nullptr;
2125 }
2126 } else if (U.isSignaling()) {
2127 Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2128 if (EB && *EB != fp::ebIgnore)
2129 return nullptr;
2130 U = APFloat::getQNaN(U.getSemantics());
2131 }
2132 return ConstantFP::get(Ty->getContext(), U);
2133 }
2134
2135 /// We only fold functions with finite arguments. Folding NaN and inf is
2136 /// likely to be aborted with an exception anyway, and some host libms
2137 /// have known errors raising exceptions.
2138 if (!U.isFinite())
2139 return nullptr;
2140
2141 /// Currently APFloat versions of these functions do not exist, so we use
2142 /// the host native double versions. Float versions are not called
2143 /// directly, but for all of these it holds that (float)(f((double)arg)) ==
2144 /// f(arg). Long double is not supported yet.
2145 const APFloat &APF = Op->getValueAPF();
2146
2147 switch (IntrinsicID) {
2148 default: break;
2149 case Intrinsic::log:
2150 return ConstantFoldFP(log, APF, Ty);
2151 case Intrinsic::log2:
2152 // TODO: What about hosts that lack a C99 library?
2153 return ConstantFoldFP(log2, APF, Ty);
2154 case Intrinsic::log10:
2155 // TODO: What about hosts that lack a C99 library?
2156 return ConstantFoldFP(log10, APF, Ty);
2157 case Intrinsic::exp:
2158 return ConstantFoldFP(exp, APF, Ty);
2159 case Intrinsic::exp2:
2160 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2161 return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
2162 case Intrinsic::sin:
2163 return ConstantFoldFP(sin, APF, Ty);
2164 case Intrinsic::cos:
2165 return ConstantFoldFP(cos, APF, Ty);
2166 case Intrinsic::sqrt:
2167 return ConstantFoldFP(sqrt, APF, Ty);
2168 case Intrinsic::amdgcn_cos:
2169 case Intrinsic::amdgcn_sin: {
2170 double V = getValueAsDouble(Op);
2171 if (V < -256.0 || V > 256.0)
2172 // The gfx8 and gfx9 architectures handle arguments outside the range
2173 // [-256, 256] differently. This should be a rare case so bail out
2174 // rather than trying to handle the difference.
2175 return nullptr;
2176 bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
2177 double V4 = V * 4.0;
2178 if (V4 == floor(V4)) {
2179 // Force exact results for quarter-integer inputs.
2180 const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
2181 V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
2182 } else {
2183 if (IsCos)
2184 V = cos(V * 2.0 * numbers::pi);
2185 else
2186 V = sin(V * 2.0 * numbers::pi);
2187 }
2188 return GetConstantFoldFPValue(V, Ty);
2189 }
2190 }
2191
2192 if (!TLI)
2193 return nullptr;
2194
2195 LibFunc Func = NotLibFunc;
2196 if (!TLI->getLibFunc(Name, Func))
2197 return nullptr;
2198
2199 switch (Func) {
2200 default:
2201 break;
2202 case LibFunc_acos:
2203 case LibFunc_acosf:
2204 case LibFunc_acos_finite:
2205 case LibFunc_acosf_finite:
2206 if (TLI->has(Func))
2207 return ConstantFoldFP(acos, APF, Ty);
2208 break;
2209 case LibFunc_asin:
2210 case LibFunc_asinf:
2211 case LibFunc_asin_finite:
2212 case LibFunc_asinf_finite:
2213 if (TLI->has(Func))
2214 return ConstantFoldFP(asin, APF, Ty);
2215 break;
2216 case LibFunc_atan:
2217 case LibFunc_atanf:
2218 if (TLI->has(Func))
2219 return ConstantFoldFP(atan, APF, Ty);
2220 break;
2221 case LibFunc_ceil:
2222 case LibFunc_ceilf:
2223 if (TLI->has(Func)) {
2224 U.roundToIntegral(APFloat::rmTowardPositive);
2225 return ConstantFP::get(Ty->getContext(), U);
2226 }
2227 break;
2228 case LibFunc_cos:
2229 case LibFunc_cosf:
2230 if (TLI->has(Func))
2231 return ConstantFoldFP(cos, APF, Ty);
2232 break;
2233 case LibFunc_cosh:
2234 case LibFunc_coshf:
2235 case LibFunc_cosh_finite:
2236 case LibFunc_coshf_finite:
2237 if (TLI->has(Func))
2238 return ConstantFoldFP(cosh, APF, Ty);
2239 break;
2240 case LibFunc_exp:
2241 case LibFunc_expf:
2242 case LibFunc_exp_finite:
2243 case LibFunc_expf_finite:
2244 if (TLI->has(Func))
2245 return ConstantFoldFP(exp, APF, Ty);
2246 break;
2247 case LibFunc_exp2:
2248 case LibFunc_exp2f:
2249 case LibFunc_exp2_finite:
2250 case LibFunc_exp2f_finite:
2251 if (TLI->has(Func))
2252 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2253 return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
2254 break;
2255 case LibFunc_fabs:
2256 case LibFunc_fabsf:
2257 if (TLI->has(Func)) {
2258 U.clearSign();
2259 return ConstantFP::get(Ty->getContext(), U);
2260 }
2261 break;
2262 case LibFunc_floor:
2263 case LibFunc_floorf:
2264 if (TLI->has(Func)) {
2265 U.roundToIntegral(APFloat::rmTowardNegative);
2266 return ConstantFP::get(Ty->getContext(), U);
2267 }
2268 break;
2269 case LibFunc_log:
2270 case LibFunc_logf:
2271 case LibFunc_log_finite:
2272 case LibFunc_logf_finite:
2273 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2274 return ConstantFoldFP(log, APF, Ty);
2275 break;
2276 case LibFunc_log2:
2277 case LibFunc_log2f:
2278 case LibFunc_log2_finite:
2279 case LibFunc_log2f_finite:
2280 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2281 // TODO: What about hosts that lack a C99 library?
2282 return ConstantFoldFP(log2, APF, Ty);
2283 break;
2284 case LibFunc_log10:
2285 case LibFunc_log10f:
2286 case LibFunc_log10_finite:
2287 case LibFunc_log10f_finite:
2288 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2289 // TODO: What about hosts that lack a C99 library?
2290 return ConstantFoldFP(log10, APF, Ty);
2291 break;
2292 case LibFunc_nearbyint:
2293 case LibFunc_nearbyintf:
2294 case LibFunc_rint:
2295 case LibFunc_rintf:
2296 if (TLI->has(Func)) {
2297 U.roundToIntegral(APFloat::rmNearestTiesToEven);
2298 return ConstantFP::get(Ty->getContext(), U);
2299 }
2300 break;
2301 case LibFunc_round:
2302 case LibFunc_roundf:
2303 if (TLI->has(Func)) {
2304 U.roundToIntegral(APFloat::rmNearestTiesToAway);
2305 return ConstantFP::get(Ty->getContext(), U);
2306 }
2307 break;
2308 case LibFunc_sin:
2309 case LibFunc_sinf:
2310 if (TLI->has(Func))
2311 return ConstantFoldFP(sin, APF, Ty);
2312 break;
2313 case LibFunc_sinh:
2314 case LibFunc_sinhf:
2315 case LibFunc_sinh_finite:
2316 case LibFunc_sinhf_finite:
2317 if (TLI->has(Func))
2318 return ConstantFoldFP(sinh, APF, Ty);
2319 break;
2320 case LibFunc_sqrt:
2321 case LibFunc_sqrtf:
2322 if (!APF.isNegative() && TLI->has(Func))
2323 return ConstantFoldFP(sqrt, APF, Ty);
2324 break;
2325 case LibFunc_tan:
2326 case LibFunc_tanf:
2327 if (TLI->has(Func))
2328 return ConstantFoldFP(tan, APF, Ty);
2329 break;
2330 case LibFunc_tanh:
2331 case LibFunc_tanhf:
2332 if (TLI->has(Func))
2333 return ConstantFoldFP(tanh, APF, Ty);
2334 break;
2335 case LibFunc_trunc:
2336 case LibFunc_truncf:
2337 if (TLI->has(Func)) {
2338 U.roundToIntegral(APFloat::rmTowardZero);
2339 return ConstantFP::get(Ty->getContext(), U);
2340 }
2341 break;
2342 }
2343 return nullptr;
2344 }
2345
2346 if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
2347 switch (IntrinsicID) {
2348 case Intrinsic::bswap:
2349 return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
2350 case Intrinsic::ctpop:
2351 return ConstantInt::get(Ty, Op->getValue().countPopulation());
2352 case Intrinsic::bitreverse:
2353 return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
2354 case Intrinsic::convert_from_fp16: {
2355 APFloat Val(APFloat::IEEEhalf(), Op->getValue());
2356
2357 bool lost = false;
2358 APFloat::opStatus status = Val.convert(
2359 Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);
2360
2361 // Conversion is always precise.
2362 (void)status;
2363 assert(status != APFloat::opInexact && !lost &&
2364        "Precision lost during fp16 constfolding");
2365
2366 return ConstantFP::get(Ty->getContext(), Val);
2367 }
2368 default:
2369 return nullptr;
2370 }
2371 }
2372
2373 switch (IntrinsicID) {
2374 default: break;
2375 case Intrinsic::vector_reduce_add:
2376 case Intrinsic::vector_reduce_mul:
2377 case Intrinsic::vector_reduce_and:
2378 case Intrinsic::vector_reduce_or:
2379 case Intrinsic::vector_reduce_xor:
2380 case Intrinsic::vector_reduce_smin:
2381 case Intrinsic::vector_reduce_smax:
2382 case Intrinsic::vector_reduce_umin:
2383 case Intrinsic::vector_reduce_umax:
2384 if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))
2385 return C;
2386 break;
2387 }
2388
2389 // Support ConstantVector in case we have an Undef element at the top level.
2390 if (isa<ConstantVector>(Operands[0]) ||
2391 isa<ConstantDataVector>(Operands[0])) {
2392 auto *Op = cast<Constant>(Operands[0]);
2393 switch (IntrinsicID) {
2394 default: break;
2395 case Intrinsic::x86_sse_cvtss2si:
2396 case Intrinsic::x86_sse_cvtss2si64:
2397 case Intrinsic::x86_sse2_cvtsd2si:
2398 case Intrinsic::x86_sse2_cvtsd2si64:
2399 if (ConstantFP *FPOp =
2400 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2401 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2402 /*roundTowardZero=*/false, Ty,
2403 /*IsSigned*/true);
2404 break;
2405 case Intrinsic::x86_sse_cvttss2si:
2406 case Intrinsic::x86_sse_cvttss2si64:
2407 case Intrinsic::x86_sse2_cvttsd2si:
2408 case Intrinsic::x86_sse2_cvttsd2si64:
2409 if (ConstantFP *FPOp =
2410 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2411 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2412 /*roundTowardZero=*/true, Ty,
2413 /*IsSigned*/true);
2414 break;
2415 }
2416 }
2417
2418 return nullptr;
2419}
2420
2421static Constant *evaluateCompare(const APFloat &Op1, const APFloat &Op2,
2422 const ConstrainedFPIntrinsic *Call) {
2423 APFloat::opStatus St = APFloat::opOK;
2424 auto *FCmp = cast<ConstrainedFPCmpIntrinsic>(Call);
2425 FCmpInst::Predicate Cond = FCmp->getPredicate();
2426 if (FCmp->isSignaling()) {
2427 if (Op1.isNaN() || Op2.isNaN())
2428 St = APFloat::opInvalidOp;
2429 } else {
2430 if (Op1.isSignaling() || Op2.isSignaling())
2431 St = APFloat::opInvalidOp;
2432 }
2433 bool Result = FCmpInst::compare(Op1, Op2, Cond);
2434 if (mayFoldConstrained(const_cast<ConstrainedFPCmpIntrinsic *>(FCmp), St))
2435 return ConstantInt::get(Call->getType()->getScalarType(), Result);
2436 return nullptr;
2437}
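// For example (illustrative): a quiet NaN operand to the signaling
// llvm.experimental.constrained.fcmps sets opInvalidOp above; with
// "fpexcept.strict" the comparison is then left for runtime, while with
// "fpexcept.ignore" it still folds to the computed i1 result.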
2438
2439static Constant *ConstantFoldScalarCall2(StringRef Name,
2440 Intrinsic::ID IntrinsicID,
2441 Type *Ty,
2442 ArrayRef<Constant *> Operands,
2443 const TargetLibraryInfo *TLI,
2444 const CallBase *Call) {
2445 assert(Operands.size() == 2 && "Wrong number of operands.");
2446
2447 if (Ty->isFloatingPointTy()) {
2448 // TODO: We should have undef handling for all of the FP intrinsics that
2449 // this function attempts to fold.
2450 bool IsOp0Undef = isa<UndefValue>(Operands[0]);
2451 bool IsOp1Undef = isa<UndefValue>(Operands[1]);
2452 switch (IntrinsicID) {
2453 case Intrinsic::maxnum:
2454 case Intrinsic::minnum:
2455 case Intrinsic::maximum:
2456 case Intrinsic::minimum:
2457 // If one argument is undef, return the other argument.
2458 if (IsOp0Undef)
2459 return Operands[1];
2460 if (IsOp1Undef)
2461 return Operands[0];
2462 break;
2463 }
2464 }
2465
2466 if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
2467 const APFloat &Op1V = Op1->getValueAPF();
2468
2469 if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
2470 if (Op2->getType() != Op1->getType())
2471 return nullptr;
2472 const APFloat &Op2V = Op2->getValueAPF();
2473
2474 if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
2475 RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
2476 APFloat Res = Op1V;
2477 APFloat::opStatus St;
2478 switch (IntrinsicID) {
2479 default:
2480 return nullptr;
2481 case Intrinsic::experimental_constrained_fadd:
2482 St = Res.add(Op2V, RM);
2483 break;
2484 case Intrinsic::experimental_constrained_fsub:
2485 St = Res.subtract(Op2V, RM);
2486 break;
2487 case Intrinsic::experimental_constrained_fmul:
2488 St = Res.multiply(Op2V, RM);
2489 break;
2490 case Intrinsic::experimental_constrained_fdiv:
2491 St = Res.divide(Op2V, RM);
2492 break;
2493 case Intrinsic::experimental_constrained_frem:
2494 St = Res.mod(Op2V);
2495 break;
2496 case Intrinsic::experimental_constrained_fcmp:
2497 case Intrinsic::experimental_constrained_fcmps:
2498 return evaluateCompare(Op1V, Op2V, ConstrIntr);
2499 }
2500 if (mayFoldConstrained(const_cast<ConstrainedFPIntrinsic *>(ConstrIntr),
2501 St))
2502 return ConstantFP::get(Ty->getContext(), Res);
2503 return nullptr;
2504 }
2505
2506 switch (IntrinsicID) {
2507 default:
2508 break;
2509 case Intrinsic::copysign:
2510 return ConstantFP::get(Ty->getContext(), APFloat::copySign(Op1V, Op2V));
2511 case Intrinsic::minnum:
2512 return ConstantFP::get(Ty->getContext(), minnum(Op1V, Op2V));
2513 case Intrinsic::maxnum:
2514 return ConstantFP::get(Ty->getContext(), maxnum(Op1V, Op2V));
2515 case Intrinsic::minimum:
2516 return ConstantFP::get(Ty->getContext(), minimum(Op1V, Op2V));
2517 case Intrinsic::maximum:
2518 return ConstantFP::get(Ty->getContext(), maximum(Op1V, Op2V));
2519 }
2520
2521 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
2522 return nullptr;
2523
2524 switch (IntrinsicID) {
2525 default:
2526 break;
2527 case Intrinsic::pow:
2528 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2529 case Intrinsic::amdgcn_fmul_legacy:
2530 // The legacy behaviour is that multiplying +/- 0.0 by anything, even
2531 // NaN or infinity, gives +0.0.
2532 if (Op1V.isZero() || Op2V.isZero())
2533 return ConstantFP::getNullValue(Ty);
2534 return ConstantFP::get(Ty->getContext(), Op1V * Op2V);
2535 }
2536
2537 if (!TLI)
2538 return nullptr;
2539
2540 LibFunc Func = NotLibFunc;
2541 if (!TLI->getLibFunc(Name, Func))
2542 return nullptr;
2543
2544 switch (Func) {
2545 default:
2546 break;
2547 case LibFunc_pow:
2548 case LibFunc_powf:
2549 case LibFunc_pow_finite:
2550 case LibFunc_powf_finite:
2551 if (TLI->has(Func))
2552 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2553 break;
2554 case LibFunc_fmod:
2555 case LibFunc_fmodf:
2556 if (TLI->has(Func)) {
2557 APFloat V = Op1->getValueAPF();
2558 if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
2559 return ConstantFP::get(Ty->getContext(), V);
2560 }
2561 break;
2562 case LibFunc_remainder:
2563 case LibFunc_remainderf:
2564 if (TLI->has(Func)) {
2565 APFloat V = Op1->getValueAPF();
2566 if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
2567 return ConstantFP::get(Ty->getContext(), V);
2568 }
2569 break;
2570 case LibFunc_atan2:
2571 case LibFunc_atan2f:
2572 // atan2(+/-0.0, +/-0.0) is known to raise an exception on some libm
2573 // (Solaris), so we do not assume a known result for that.
2574 if (Op1V.isZero() && Op2V.isZero())
2575 return nullptr;
2576 [[fallthrough]];
2577 case LibFunc_atan2_finite:
2578 case LibFunc_atan2f_finite:
2579 if (TLI->has(Func))
2580 return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
2581 break;
2582 }
2583 } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
2584 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
2585 return nullptr;
2586 if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
2587 return ConstantFP::get(
2588 Ty->getContext(),
2589 APFloat((float)std::pow((float)Op1V.convertToDouble(),
2590 (int)Op2C->getZExtValue())));
2591 if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
2592 return ConstantFP::get(
2593 Ty->getContext(),
2594 APFloat((float)std::pow((float)Op1V.convertToDouble(),
2595 (int)Op2C->getZExtValue())));
2596 if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
2597 return ConstantFP::get(
2598 Ty->getContext(),
2599 APFloat((double)std::pow(Op1V.convertToDouble(),
2600 (int)Op2C->getZExtValue())));
2601
2602 if (IntrinsicID == Intrinsic::amdgcn_ldexp) {
2603 // FIXME: Should flush denorms depending on FP mode, but that's ignored
2604 // everywhere else.
2605
2606 // scalbn is equivalent to ldexp with float radix 2
2607 APFloat Result = scalbn(Op1->getValueAPF(), Op2C->getSExtValue(),
2608 APFloat::rmNearestTiesToEven);
2609 return ConstantFP::get(Ty->getContext(), Result);
2610 }
2611 }
2612 return nullptr;
2613 }
2614
2615 if (Operands[0]->getType()->isIntegerTy() &&
2616 Operands[1]->getType()->isIntegerTy()) {
2617 const APInt *C0, *C1;
2618 if (!getConstIntOrUndef(Operands[0], C0) ||
2619 !getConstIntOrUndef(Operands[1], C1))
2620 return nullptr;
2621
2622 switch (IntrinsicID) {
2623 default: break;
2624 case Intrinsic::smax:
2625 case Intrinsic::smin:
2626 case Intrinsic::umax:
2627 case Intrinsic::umin:
2628 // This is the same as for binary ops - poison propagates.
2629 // TODO: Poison handling should be consolidated.
2630 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2631 return PoisonValue::get(Ty);
2632
2633 if (!C0 && !C1)
2634 return UndefValue::get(Ty);
2635 if (!C0 || !C1)
2636 return MinMaxIntrinsic::getSaturationPoint(IntrinsicID, Ty);
2637 return ConstantInt::get(
2638 Ty, ICmpInst::compare(*C0, *C1,
2639 MinMaxIntrinsic::getPredicate(IntrinsicID))
2640 ? *C0
2641 : *C1);
2642
2643 case Intrinsic::usub_with_overflow:
2644 case Intrinsic::ssub_with_overflow:
2645 // X - undef -> { 0, false }
2646 // undef - X -> { 0, false }
2647 if (!C0 || !C1)
2648 return Constant::getNullValue(Ty);
2649 [[fallthrough]];
2650 case Intrinsic::uadd_with_overflow:
2651 case Intrinsic::sadd_with_overflow:
2652 // X + undef -> { -1, false }
2653 // undef + x -> { -1, false }
2654 if (!C0 || !C1) {
2655 return ConstantStruct::get(
2656 cast<StructType>(Ty),
2657 {Constant::getAllOnesValue(Ty->getStructElementType(0)),
2658 Constant::getNullValue(Ty->getStructElementType(1))});
2659 }
2660 [[fallthrough]];
2661 case Intrinsic::smul_with_overflow:
2662 case Intrinsic::umul_with_overflow: {
2663 // undef * X -> { 0, false }
2664 // X * undef -> { 0, false }
2665 if (!C0 || !C1)
2666 return Constant::getNullValue(Ty);
2667
2668 APInt Res;
2669 bool Overflow;
2670 switch (IntrinsicID) {
2671 default: llvm_unreachable("Invalid case");
2672 case Intrinsic::sadd_with_overflow:
2673 Res = C0->sadd_ov(*C1, Overflow);
2674 break;
2675 case Intrinsic::uadd_with_overflow:
2676 Res = C0->uadd_ov(*C1, Overflow);
2677 break;
2678 case Intrinsic::ssub_with_overflow:
2679 Res = C0->ssub_ov(*C1, Overflow);
2680 break;
2681 case Intrinsic::usub_with_overflow:
2682 Res = C0->usub_ov(*C1, Overflow);
2683 break;
2684 case Intrinsic::smul_with_overflow:
2685 Res = C0->smul_ov(*C1, Overflow);
2686 break;
2687 case Intrinsic::umul_with_overflow:
2688 Res = C0->umul_ov(*C1, Overflow);
2689 break;
2690 }
2691 Constant *Ops[] = {
2692 ConstantInt::get(Ty->getContext(), Res),
2693 ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
2694 };
2695 return ConstantStruct::get(cast<StructType>(Ty), Ops);
2696 }
2697 case Intrinsic::uadd_sat:
2698 case Intrinsic::sadd_sat:
2699 // This is the same as for binary ops - poison propagates.
2700 // TODO: Poison handling should be consolidated.
2701 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2702 return PoisonValue::get(Ty);
2703
2704 if (!C0 && !C1)
2705 return UndefValue::get(Ty);
2706 if (!C0 || !C1)
2707 return Constant::getAllOnesValue(Ty);
2708 if (IntrinsicID == Intrinsic::uadd_sat)
2709 return ConstantInt::get(Ty, C0->uadd_sat(*C1));
2710 else
2711 return ConstantInt::get(Ty, C0->sadd_sat(*C1));
2712 case Intrinsic::usub_sat:
2713 case Intrinsic::ssub_sat:
2714 // This is the same as for binary ops - poison propagates.
2715 // TODO: Poison handling should be consolidated.
2716 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2717 return PoisonValue::get(Ty);
2718
2719 if (!C0 && !C1)
2720 return UndefValue::get(Ty);
2721 if (!C0 || !C1)
2722 return Constant::getNullValue(Ty);
2723 if (IntrinsicID == Intrinsic::usub_sat)
2724 return ConstantInt::get(Ty, C0->usub_sat(*C1));
2725 else
2726 return ConstantInt::get(Ty, C0->ssub_sat(*C1));
2727 case Intrinsic::cttz:
2728 case Intrinsic::ctlz:
2729 assert(C1 && "Must be constant int");
2730
2731 // cttz(0, 1) and ctlz(0, 1) are poison.
2732 if (C1->isOne() && (!C0 || C0->isZero()))
2733 return PoisonValue::get(Ty);
2734 if (!C0)
2735 return Constant::getNullValue(Ty);
2736 if (IntrinsicID == Intrinsic::cttz)
2737 return ConstantInt::get(Ty, C0->countTrailingZeros());
2738 else
2739 return ConstantInt::get(Ty, C0->countLeadingZeros());
2740
2741 case Intrinsic::abs:
2742 assert(C1 && "Must be constant int");
2743 assert((C1->isOne() || C1->isZero()) && "Must be 0 or 1");
2744
2745 // Undef or minimum val operand with poison min --> undef
2746 if (C1->isOne() && (!C0 || C0->isMinSignedValue()))
2747 return UndefValue::get(Ty);
2748
2749 // Undef operand with no poison min --> 0 (sign bit must be clear)
2750 if (!C0)
2751 return Constant::getNullValue(Ty);
2752
2753 return ConstantInt::get(Ty, C0->abs());
2754 }
2755
2756 return nullptr;
2757 }
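  // For example (illustrative): llvm.smax(i8 undef, i8 5) folds above to
  // the saturation point i8 127, since undef may be chosen to be INT8_MAX,
  // and llvm.uadd.sat(i8 undef, i8 5) folds to the all-ones value.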
2758
2759 // Support ConstantVector in case we have an Undef element at the top level.
2760 if ((isa<ConstantVector>(Operands[0]) ||
2761 isa<ConstantDataVector>(Operands[0])) &&
2762 // Check for default rounding mode.
2763 // FIXME: Support other rounding modes?
2764 isa<ConstantInt>(Operands[1]) &&
2765 cast<ConstantInt>(Operands[1])->getValue() == 4) {
2766 auto *Op = cast<Constant>(Operands[0]);
2767 switch (IntrinsicID) {
2768 default: break;
2769 case Intrinsic::x86_avx512_vcvtss2si32:
2770 case Intrinsic::x86_avx512_vcvtss2si64:
2771 case Intrinsic::x86_avx512_vcvtsd2si32:
2772 case Intrinsic::x86_avx512_vcvtsd2si64:
2773 if (ConstantFP *FPOp =
2774 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2775 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2776 /*roundTowardZero=*/false, Ty,
2777 /*IsSigned*/true);
2778 break;
2779 case Intrinsic::x86_avx512_vcvtss2usi32:
2780 case Intrinsic::x86_avx512_vcvtss2usi64:
2781 case Intrinsic::x86_avx512_vcvtsd2usi32:
2782 case Intrinsic::x86_avx512_vcvtsd2usi64:
2783 if (ConstantFP *FPOp =
2784 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2785 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2786 /*roundTowardZero=*/false, Ty,
2787 /*IsSigned*/false);
2788 break;
2789 case Intrinsic::x86_avx512_cvttss2si:
2790 case Intrinsic::x86_avx512_cvttss2si64:
2791 case Intrinsic::x86_avx512_cvttsd2si:
2792 case Intrinsic::x86_avx512_cvttsd2si64:
2793 if (ConstantFP *FPOp =
2794 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2795 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2796 /*roundTowardZero=*/true, Ty,
2797 /*IsSigned*/true);
2798 break;
2799 case Intrinsic::x86_avx512_cvttss2usi:
2800 case Intrinsic::x86_avx512_cvttss2usi64:
2801 case Intrinsic::x86_avx512_cvttsd2usi:
2802 case Intrinsic::x86_avx512_cvttsd2usi64:
2803 if (ConstantFP *FPOp =
2804 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2805 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2806 /*roundTowardZero=*/true, Ty,
2807 /*IsSigned*/false);
2808 break;
2809 }
2810 }
2811 return nullptr;
2812}
2813
2814static APFloat ConstantFoldAMDGCNCubeIntrinsic(Intrinsic::ID IntrinsicID,
2815 const APFloat &S0,
2816 const APFloat &S1,
2817 const APFloat &S2) {
2818 unsigned ID;
2819 const fltSemantics &Sem = S0.getSemantics();
2820 APFloat MA(Sem), SC(Sem), TC(Sem);
2821 if (abs(S2) >= abs(S0) && abs(S2) >= abs(S1)) {
2822 if (S2.isNegative() && S2.isNonZero() && !S2.isNaN()) {
2823 // S2 < 0
2824 ID = 5;
2825 SC = -S0;
2826 } else {
2827 ID = 4;
2828 SC = S0;
2829 }
2830 MA = S2;
2831 TC = -S1;
2832 } else if (abs(S1) >= abs(S0)) {
2833 if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
2834 // S1 < 0
2835 ID = 3;
2836 TC = -S2;
2837 } else {
2838 ID = 2;
2839 TC = S2;
2840 }
2841 MA = S1;
2842 SC = S0;
2843 } else {
2844 if (S0.isNegative() && S0.isNonZero() && !S0.isNaN()) {
2845 // S0 < 0
2846 ID = 1;
2847 SC = S2;
2848 } else {
2849 ID = 0;
2850 SC = -S2;
2851 }
2852 MA = S0;
2853 TC = -S1;
2854 }
2855 switch (IntrinsicID) {
2856 default:
2857 llvm_unreachable("unhandled amdgcn cube intrinsic");
2858 case Intrinsic::amdgcn_cubeid:
2859 return APFloat(Sem, ID);
2860 case Intrinsic::amdgcn_cubema:
2861 return MA + MA;
2862 case Intrinsic::amdgcn_cubesc:
2863 return SC;
2864 case Intrinsic::amdgcn_cubetc:
2865 return TC;
2866 }
2867}
2868
2869static Constant *ConstantFoldAMDGCNPermIntrinsic(ArrayRef<Constant *> Operands,
2870 Type *Ty) {
2871 const APInt *C0, *C1, *C2;
2872 if (!getConstIntOrUndef(Operands[0], C0) ||
2873 !getConstIntOrUndef(Operands[1], C1) ||
2874 !getConstIntOrUndef(Operands[2], C2))
2875 return nullptr;
2876
2877 if (!C2)
2878 return UndefValue::get(Ty);
2879
2880 APInt Val(32, 0);
2881 unsigned NumUndefBytes = 0;
2882 for (unsigned I = 0; I < 32; I += 8) {
2883 unsigned Sel = C2->extractBitsAsZExtValue(8, I);
2884 unsigned B = 0;
2885
2886 if (Sel >= 13)
2887 B = 0xff;
2888 else if (Sel == 12)
2889 B = 0x00;
2890 else {
2891 const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
2892 if (!Src)
2893 ++NumUndefBytes;
2894 else if (Sel < 8)
2895 B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
2896 else
2897 B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
2898 }
2899
2900 Val.insertBits(B, I, 8);
2901 }
2902
2903 if (NumUndefBytes == 4)
2904 return UndefValue::get(Ty);
2905
2906 return ConstantInt::get(Ty, Val);
2907}
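
A worked byte-selection example (editorial annotation, not analyzer output): for C0 = 0xAABBCCDD, C1 = 0x11223344 and selector C2 = 0x07060100, selector bytes 0x00 and 0x01 pick bytes 0 and 1 of C1 (0x44, 0x33), while 0x06 and 0x07 pick bytes 2 and 3 of C0 (0xBB, 0xAA), so the call folds to 0xAABB3344. Selector values 12 and 13-15 yield constant 0x00 and 0xFF bytes, and 8-11 replicate a sign bit (bit 15 or 31 of the chosen source) across the byte.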
2908
2909static Constant *ConstantFoldScalarCall3(StringRef Name,
2910 Intrinsic::ID IntrinsicID,
2911 Type *Ty,
2912 ArrayRef<Constant *> Operands,
2913 const TargetLibraryInfo *TLI,
2914 const CallBase *Call) {
2915 assert(Operands.size() == 3 && "Wrong number of operands.")(static_cast <bool> (Operands.size() == 3 && "Wrong number of operands."
) ? void (0) : __assert_fail ("Operands.size() == 3 && \"Wrong number of operands.\""
, "llvm/lib/Analysis/ConstantFolding.cpp", 2915, __extension__
__PRETTY_FUNCTION__))
;
2916
2917 if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
2918 if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
2919 if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
2920 const APFloat &C1 = Op1->getValueAPF();
2921 const APFloat &C2 = Op2->getValueAPF();
2922 const APFloat &C3 = Op3->getValueAPF();
2923
2924 if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
2925 RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
2926 APFloat Res = C1;
2927 APFloat::opStatus St;
2928 switch (IntrinsicID) {
2929 default:
2930 return nullptr;
2931 case Intrinsic::experimental_constrained_fma:
2932 case Intrinsic::experimental_constrained_fmuladd:
2933 St = Res.fusedMultiplyAdd(C2, C3, RM);
2934 break;
2935 }
2936 if (mayFoldConstrained(
2937 const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
2938 return ConstantFP::get(Ty->getContext(), Res);
2939 return nullptr;
2940 }
2941
2942 switch (IntrinsicID) {
2943 default: break;
2944 case Intrinsic::amdgcn_fma_legacy: {
2945 // The legacy behaviour is that multiplying +/- 0.0 by anything, even
2946 // NaN or infinity, gives +0.0.
2947 if (C1.isZero() || C2.isZero()) {
2948 // It's tempting to just return C3 here, but that would give the
2949 // wrong result if C3 was -0.0.
2950 return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3);
2951 }
2952 [[fallthrough]];
2953 }
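// A worked instance of the zero-multiply rule above (editorial
// annotation, not part of the analyzed file): amdgcn.fma.legacy with
// C1 = +0.0, C2 = NaN, C3 = -0.0 folds to +0.0, because
// APFloat(0.0f) + (-0.0f) is +0.0 under round-to-nearest, whereas
// returning C3 unchanged would wrongly give -0.0.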
2954 case Intrinsic::fma:
2955 case Intrinsic::fmuladd: {
2956 APFloat V = C1;
2957 V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven);
2958 return ConstantFP::get(Ty->getContext(), V);
2959 }
2960 case Intrinsic::amdgcn_cubeid:
2961 case Intrinsic::amdgcn_cubema:
2962 case Intrinsic::amdgcn_cubesc:
2963 case Intrinsic::amdgcn_cubetc: {
2964 APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
2965 return ConstantFP::get(Ty->getContext(), V);
2966 }
2967 }
2968 }
2969 }
2970 }
2971
2972 if (IntrinsicID == Intrinsic::smul_fix ||
2973 IntrinsicID == Intrinsic::smul_fix_sat) {
2974 // poison * C -> poison
2975 // C * poison -> poison
2976 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2977 return PoisonValue::get(Ty);
2978
2979 const APInt *C0, *C1;
2980 if (!getConstIntOrUndef(Operands[0], C0) ||
2981 !getConstIntOrUndef(Operands[1], C1))
2982 return nullptr;
2983
2984 // undef * C -> 0
2985 // C * undef -> 0
2986 if (!C0 || !C1)
2987 return Constant::getNullValue(Ty);
2988
2989 // This code performs rounding towards negative infinity in case the result
2990 // cannot be represented exactly for the given scale. Targets that do care
2991 // about rounding should use a target hook for specifying how rounding
2992 // should be done, and provide their own folding to be consistent with
2993 // rounding. This is the same approach as used by
2994 // DAGTypeLegalizer::ExpandIntRes_MULFIX.
2995 unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue();
2996 unsigned Width = C0->getBitWidth();
2997 assert(Scale < Width && "Illegal scale.")(static_cast <bool> (Scale < Width && "Illegal scale."
) ? void (0) : __assert_fail ("Scale < Width && \"Illegal scale.\""
, "llvm/lib/Analysis/ConstantFolding.cpp", 2997, __extension__
__PRETTY_FUNCTION__))
;
2998 unsigned ExtendedWidth = Width * 2;
2999 APInt Product =
3000 (C0->sext(ExtendedWidth) * C1->sext(ExtendedWidth)).ashr(Scale);
3001 if (IntrinsicID == Intrinsic::smul_fix_sat) {
3002 APInt Max = APInt::getSignedMaxValue(Width).sext(ExtendedWidth);
3003 APInt Min = APInt::getSignedMinValue(Width).sext(ExtendedWidth);
3004 Product = APIntOps::smin(Product, Max);
3005 Product = APIntOps::smax(Product, Min);
3006 }
3007 return ConstantInt::get(Ty->getContext(), Product.sextOrTrunc(Width));
3008 }
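
A worked fixed-point example (editorial annotation): for llvm.smul.fix.i8 with Scale = 4 (Q4.4 format), C0 = 48 (3.0) and C1 = 40 (2.5), the sign-extended 16-bit product 48 * 40 = 1920 shifted right by 4 is 120 (7.5), which truncates to i8 exactly. With C0 = 96 (6.0) the shifted product is 240, above i8's signed maximum, so llvm.smul.fix.sat.i8 clamps to 127 while llvm.smul.fix.i8 wraps to -16.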
3009
3010 if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
3011 const APInt *C0, *C1, *C2;
3012 if (!getConstIntOrUndef(Operands[0], C0) ||
3013 !getConstIntOrUndef(Operands[1], C1) ||
3014 !getConstIntOrUndef(Operands[2], C2))
3015 return nullptr;
3016
3017 bool IsRight = IntrinsicID == Intrinsic::fshr;
3018 if (!C2)
3019 return Operands[IsRight ? 1 : 0];
3020 if (!C0 && !C1)
3021 return UndefValue::get(Ty);
3022
3023 // The shift amount is interpreted as modulo the bitwidth. If the shift
3024 // amount is effectively 0, avoid UB due to oversized inverse shift below.
3025 unsigned BitWidth = C2->getBitWidth();
3026 unsigned ShAmt = C2->urem(BitWidth);
3027 if (!ShAmt)
3028 return Operands[IsRight ? 1 : 0];
3029
3030 // (C0 << ShlAmt) | (C1 >> LshrAmt)
3031 unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
3032 unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
3033 if (!C0)
3034 return ConstantInt::get(Ty, C1->lshr(LshrAmt));
3035 if (!C1)
3036 return ConstantInt::get(Ty, C0->shl(ShlAmt));
3037 return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
3038 }
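
A worked funnel-shift example (editorial annotation): llvm.fshl.i8(0x96, 0xE1, 3) yields (0x96 << 3) | (0xE1 >> 5) = 0xB0 | 0x07 = 0xB7, i.e. the top byte of the 16-bit concatenation 0x96E1 shifted left by 3. A shift amount of 11 produces the same result, since 11 urem 8 = 3.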
3039
3040 if (IntrinsicID == Intrinsic::amdgcn_perm)
3041 return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);
3042
3043 return nullptr;
3044}
3045
3046static Constant *ConstantFoldScalarCall(StringRef Name,
3047 Intrinsic::ID IntrinsicID,
3048 Type *Ty,
3049 ArrayRef<Constant *> Operands,
3050 const TargetLibraryInfo *TLI,
3051 const CallBase *Call) {
3052 if (Operands.size() == 1)
3053 return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);
3054
3055 if (Operands.size() == 2)
3056 return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call);
3057
3058 if (Operands.size() == 3)
3059 return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);
3060
3061 return nullptr;
3062}
3063
3064static Constant *ConstantFoldFixedVectorCall(
3065 StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy,
3066 ArrayRef<Constant *> Operands, const DataLayout &DL,
3067 const TargetLibraryInfo *TLI, const CallBase *Call) {
3068 SmallVector<Constant *, 4> Result(FVTy->getNumElements());
3069 SmallVector<Constant *, 4> Lane(Operands.size());
3070 Type *Ty = FVTy->getElementType();
3071
3072 switch (IntrinsicID) {
3073 case Intrinsic::masked_load: {
3074 auto *SrcPtr = Operands[0];
3075 auto *Mask = Operands[2];
3076 auto *Passthru = Operands[3];
3077
3078 Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, FVTy, DL);
3079
3080 SmallVector<Constant *, 32> NewElements;
3081 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
3082 auto *MaskElt = Mask->getAggregateElement(I);
3083 if (!MaskElt)
3084 break;
3085 auto *PassthruElt = Passthru->getAggregateElement(I);
3086 auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
3087 if (isa<UndefValue>(MaskElt)) {
3088 if (PassthruElt)
3089 NewElements.push_back(PassthruElt);
3090 else if (VecElt)
3091 NewElements.push_back(VecElt);
3092 else
3093 return nullptr;
3094 }
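// (Editorial fix: move to the next lane once an undef mask bit has been
// handled; without this, control falls into the checks below, which
// treat the undef element as neither 0 nor 1 and abandon the fold.)
continue;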
3095 if (MaskElt->isNullValue()) {
3096 if (!PassthruElt)
3097 return nullptr;
3098 NewElements.push_back(PassthruElt);
3099 } else if (MaskElt->isOneValue()) {
3100 if (!VecElt)
3101 return nullptr;
3102 NewElements.push_back(VecElt);
3103 } else {
3104 return nullptr;
3105 }
3106 }
3107 if (NewElements.size() != FVTy->getNumElements())
3108 return nullptr;
3109 return ConstantVector::get(NewElements);
3110 }
3111 case Intrinsic::arm_mve_vctp8:
3112 case Intrinsic::arm_mve_vctp16:
3113 case Intrinsic::arm_mve_vctp32:
3114 case Intrinsic::arm_mve_vctp64: {
3115 if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
3116 unsigned Lanes = FVTy->getNumElements();
3117 uint64_t Limit = Op->getZExtValue();
3118
3119 SmallVector<Constant *, 16> NCs;
3120 for (unsigned i = 0; i < Lanes; i++) {
3121 if (i < Limit)
3122 NCs.push_back(ConstantInt::getTrue(Ty));
3123 else
3124 NCs.push_back(ConstantInt::getFalse(Ty));
3125 }
3126 return ConstantVector::get(NCs);
3127 }
3128 return nullptr;
3129 }
3130 case Intrinsic::get_active_lane_mask: {
3131 auto *Op0 = dyn_cast<ConstantInt>(Operands[0]);
3132 auto *Op1 = dyn_cast<ConstantInt>(Operands[1]);
3133 if (Op0 && Op1) {
3134 unsigned Lanes = FVTy->getNumElements();
3135 uint64_t Base = Op0->getZExtValue();
3136 uint64_t Limit = Op1->getZExtValue();
3137
3138 SmallVector<Constant *, 16> NCs;
3139 for (unsigned i = 0; i < Lanes; i++) {
3140 if (Base + i < Limit)
3141 NCs.push_back(ConstantInt::getTrue(Ty));
3142 else
3143 NCs.push_back(ConstantInt::getFalse(Ty));
3144 }
3145 return ConstantVector::get(NCs);
3146 }
3147 return nullptr;
3148 }
3149 default:
3150 break;
3151 }
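// Worked examples for the two mask cases above (editorial annotation,
// not part of the analyzed file): arm.mve.vctp8 with a limit of 3 folds
// to a <16 x i1> whose first three lanes are true, and
// get.active.lane.mask(i64 2, i64 5) producing <8 x i1> folds to
// <1,1,1,0,0,0,0,0>, since Base + i < Limit holds only for lanes 0-2.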
3152
3153 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
3154 // Gather a column of constants.
3155 for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
3156 // Some intrinsics use a scalar type for certain arguments.
3157 if (isVectorIntrinsicWithScalarOpAtArg(IntrinsicID, J)) {
3158 Lane[J] = Operands[J];
3159 continue;
3160 }
3161
3162 Constant *Agg = Operands[J]->getAggregateElement(I);
3163 if (!Agg)
3164 return nullptr;
3165
3166 Lane[J] = Agg;
3167 }
3168
3169 // Use the regular scalar folding to simplify this column.
3170 Constant *Folded =
3171 ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
3172 if (!Folded)
3173 return nullptr;
3174 Result[I] = Folded;
3175 }
3176
3177 return ConstantVector::get(Result);
3178}
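
As an illustration of the per-lane fallback above (editorial annotation): llvm.smax.v4i32(<1, 5, 3, 7>, <4, 2, 6, 0>) is folded by running the scalar smax fold on each column, giving <4, 5, 6, 7>. For intrinsics with a scalar operand, such as the i32 exponent of llvm.powi, isVectorIntrinsicWithScalarOpAtArg keeps that operand unsplit and passes it to every lane unchanged.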
3179
3180static Constant *ConstantFoldScalableVectorCall(
3181 StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy,
3182 ArrayRef<Constant *> Operands, const DataLayout &DL,
3183 const TargetLibraryInfo *TLI, const CallBase *Call) {
3184 switch (IntrinsicID) {
3185 case Intrinsic::aarch64_sve_convert_from_svbool: {
3186 auto *Src = dyn_cast<Constant>(Operands[0]);
3187 if (!Src || !Src->isNullValue())
3188 break;
3189
3190 return ConstantInt::getFalse(SVTy);
3191 }
3192 default:
3193 break;
3194 }
3195 return nullptr;
3196}
3197
3198} // end anonymous namespace
3199
3200Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
3201 ArrayRef<Constant *> Operands,
3202 const TargetLibraryInfo *TLI) {
3203 if (Call->isNoBuiltin())
3204 return nullptr;
3205 if (!F->hasName())
3206 return nullptr;
3207
3208 // If this is not an intrinsic and not recognized as a library call, bail out.
3209 if (F->getIntrinsicID() == Intrinsic::not_intrinsic) {
3210 if (!TLI)
3211 return nullptr;
3212 LibFunc LibF;
3213 if (!TLI->getLibFunc(*F, LibF))
3214 return nullptr;
3215 }
3216
3217 StringRef Name = F->getName();
3218 Type *Ty = F->getReturnType();
3219 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
3220 return ConstantFoldFixedVectorCall(
3221 Name, F->getIntrinsicID(), FVTy, Operands,
3222 F->getParent()->getDataLayout(), TLI, Call);
3223
3224 if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty))
3225 return ConstantFoldScalableVectorCall(
3226 Name, F->getIntrinsicID(), SVTy, Operands,
3227 F->getParent()->getDataLayout(), TLI, Call);
3228
3229 // TODO: If this is a library function, we already discovered that above,
3230 // so we should pass the LibFunc, not the name (and it might be better
3231 // still to separate intrinsic handling from libcalls).
3232 return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
3233 Call);
3234}
3235
3236bool llvm::isMathLibCallNoop(const CallBase *Call,
3237 const TargetLibraryInfo *TLI) {
3238 // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
3239 // (and to some extent ConstantFoldScalarCall).
3240 if (Call->isNoBuiltin() || Call->isStrictFP())
3241 return false;
3242 Function *F = Call->getCalledFunction();
3243 if (!F)
3244 return false;
3245
3246 LibFunc Func;
3247 if (!TLI || !TLI->getLibFunc(*F, Func))
3248 return false;
3249
3250 if (Call->arg_size() == 1) {
3251 if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
3252 const APFloat &Op = OpC->getValueAPF();
3253 switch (Func) {
3254 case LibFunc_logl:
3255 case LibFunc_log:
3256 case LibFunc_logf:
3257 case LibFunc_log2l:
3258 case LibFunc_log2:
3259 case LibFunc_log2f:
3260 case LibFunc_log10l:
3261 case LibFunc_log10:
3262 case LibFunc_log10f:
3263 return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
3264
3265 case LibFunc_expl:
3266 case LibFunc_exp:
3267 case LibFunc_expf:
3268 // FIXME: These boundaries are slightly conservative.
3269 if (OpC->getType()->isDoubleTy())
3270 return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
3271 if (OpC->getType()->isFloatTy())
3272 return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
3273 break;
3274
3275 case LibFunc_exp2l:
3276 case LibFunc_exp2:
3277 case LibFunc_exp2f:
3278 // FIXME: These boundaries are slightly conservative.
3279 if (OpC->getType()->isDoubleTy())
3280 return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0));
3281 if (OpC->getType()->isFloatTy())
3282 return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f));
3283 break;
3284
3285 case LibFunc_sinl:
3286 case LibFunc_sin:
3287 case LibFunc_sinf:
3288 case LibFunc_cosl:
3289 case LibFunc_cos:
3290 case LibFunc_cosf:
3291 return !Op.isInfinity();
3292
3293 case LibFunc_tanl:
3294 case LibFunc_tan:
3295 case LibFunc_tanf: {
3296 // FIXME: Stop using the host math library.
3297 // FIXME: The computation isn't done in the right precision.
3298 Type *Ty = OpC->getType();
3299 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
3300 return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
3301 break;
3302 }
3303
3304 case LibFunc_atan:
3305 case LibFunc_atanf:
3306 case LibFunc_atanl:
3307 // Per POSIX, this MAY fail if Op is denormal. We choose not to fail.
3308 return true;
3309
3310
3311 case LibFunc_asinl:
3312 case LibFunc_asin:
3313 case LibFunc_asinf:
3314 case LibFunc_acosl:
3315 case LibFunc_acos:
3316 case LibFunc_acosf:
3317 return !(Op < APFloat(Op.getSemantics(), "-1") ||
3318 Op > APFloat(Op.getSemantics(), "1"));
3319
3320 case LibFunc_sinh:
3321 case LibFunc_cosh:
3322 case LibFunc_sinhf:
3323 case LibFunc_coshf:
3324 case LibFunc_sinhl:
3325 case LibFunc_coshl:
3326 // FIXME: These boundaries are slightly conservative.
3327 if (OpC->getType()->isDoubleTy())
3328 return !(Op < APFloat(-710.0) || Op > APFloat(710.0));
3329 if (OpC->getType()->isFloatTy())
3330 return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f));
3331 break;
3332
3333 case LibFunc_sqrtl:
3334 case LibFunc_sqrt:
3335 case LibFunc_sqrtf:
3336 return Op.isNaN() || Op.isZero() || !Op.isNegative();
3337
3338 // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
3339 // maybe others?
3340 default:
3341 break;
3342 }
3343 }
3344 }
3345
3346 if (Call->arg_size() == 2) {
3347 ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
3348 ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
3349 if (Op0C && Op1C) {
3350 const APFloat &Op0 = Op0C->getValueAPF();
3351 const APFloat &Op1 = Op1C->getValueAPF();
3352
3353 switch (Func) {
3354 case LibFunc_powl:
3355 case LibFunc_pow:
3356 case LibFunc_powf: {
3357 // FIXME: Stop using the host math library.
3358 // FIXME: The computation isn't done in the right precision.
3359 Type *Ty = Op0C->getType();
3360 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
3361 if (Ty == Op1C->getType())
3362 return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
3363 }
3364 break;
3365 }
3366
3367 case LibFunc_fmodl:
3368 case LibFunc_fmod:
3369 case LibFunc_fmodf:
3370 case LibFunc_remainderl:
3371 case LibFunc_remainder:
3372 case LibFunc_remainderf:
3373 return Op0.isNaN() || Op1.isNaN() ||
3374 (!Op0.isInfinity() && !Op1.isZero());
3375
3376 case LibFunc_atan2:
3377 case LibFunc_atan2f:
3378 case LibFunc_atan2l:
3379 // Although IEEE-754 says atan2(+/-0.0, +/-0.0) are well-defined, and
3380 // GLIBC and MSVC do not appear to raise an error on those, we
3381 // cannot rely on that behavior. POSIX and C11 say that a domain error
3382 // may occur, so allow for that possibility.
3383 return !Op0.isZero() || !Op1.isZero();
3384
3385 default:
3386 break;
3387 }
3388 }
3389 }
3390
3391 return false;
3392}
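
For instance (editorial annotation): isMathLibCallNoop returns true for log(2.0), a finite positive argument, and for sqrt(-0.0), but false for log(-1.0) or sqrt(-2.0), either of which may set errno and so cannot be treated as a removable no-op call.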
3393
3394void TargetFolder::anchor() {}