1 //===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines routines for folding instructions into constants.
10 //
11 // Also, to supplement the basic IR ConstantExpr simplifications,
12 // this file defines some additional folding routines that can make use of
13 // DataLayout information. These functions cannot go in IR due to library
14 // dependency issues.
15 //
16 //===----------------------------------------------------------------------===//
17 
18 #include "llvm/Analysis/ConstantFolding.h"
19 #include "llvm/ADT/APFloat.h"
20 #include "llvm/ADT/APInt.h"
21 #include "llvm/ADT/ArrayRef.h"
22 #include "llvm/ADT/DenseMap.h"
23 #include "llvm/ADT/STLExtras.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/StringRef.h"
26 #include "llvm/Analysis/TargetLibraryInfo.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/Config/config.h"
29 #include "llvm/IR/Constant.h"
30 #include "llvm/IR/Constants.h"
31 #include "llvm/IR/DataLayout.h"
32 #include "llvm/IR/DerivedTypes.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/GlobalValue.h"
35 #include "llvm/IR/GlobalVariable.h"
36 #include "llvm/IR/InstrTypes.h"
37 #include "llvm/IR/Instruction.h"
38 #include "llvm/IR/Instructions.h"
39 #include "llvm/IR/Operator.h"
40 #include "llvm/IR/Type.h"
41 #include "llvm/IR/Value.h"
42 #include "llvm/Support/Casting.h"
43 #include "llvm/Support/ErrorHandling.h"
44 #include "llvm/Support/KnownBits.h"
45 #include "llvm/Support/MathExtras.h"
46 #include <cassert>
47 #include <cerrno>
48 #include <cfenv>
49 #include <cmath>
50 #include <cstddef>
51 #include <cstdint>
52 
53 using namespace llvm;
54 
55 namespace {
56 
57 //===----------------------------------------------------------------------===//
58 // Constant Folding internal helper functions
59 //===----------------------------------------------------------------------===//
60 
61 static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
62  Constant *C, Type *SrcEltTy,
63  unsigned NumSrcElts,
64  const DataLayout &DL) {
65  // Now that we know that the input value is a vector of integers, just shift
66  // and insert them into our result.
67  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
68  for (unsigned i = 0; i != NumSrcElts; ++i) {
69  Constant *Element;
70  if (DL.isLittleEndian())
71  Element = C->getAggregateElement(NumSrcElts - i - 1);
72  else
73  Element = C->getAggregateElement(i);
74 
75  if (Element && isa<UndefValue>(Element)) {
76  Result <<= BitShift;
77  continue;
78  }
79 
80  auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
81  if (!ElementCI)
82  return ConstantExpr::getBitCast(C, DestTy);
83 
84  Result <<= BitShift;
85  Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
86  }
87 
88  return nullptr;
89 }
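// Worked example (illustrative): on a little-endian target, folding
//   bitcast (<4 x i8> <i8 1, i8 2, i8 3, i8 4> to i32)
// walks the elements from last to first in the helper above, shifting by 8
// bits each step, and yields the APInt 0x04030201 -- element 0 lands in the
// least-significant byte, matching the in-memory layout a bitcast preserves.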
90 
91 /// Constant fold bitcast, symbolically evaluating it with DataLayout.
92 /// This always returns a non-null constant, but it may be a
93 /// ConstantExpr if unfoldable.
94 Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
95  // Catch the obvious splat cases.
96  if (C->isNullValue() && !DestTy->isX86_MMXTy())
97  return Constant::getNullValue(DestTy);
98  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
99  !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
100  return Constant::getAllOnesValue(DestTy);
101 
102  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
103  // Handle a vector->scalar integer/fp cast.
104  if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
105  unsigned NumSrcElts = VTy->getNumElements();
106  Type *SrcEltTy = VTy->getElementType();
107 
108  // If the vector is a vector of floating point, convert it to a vector of
109  // ints to simplify things.
110  if (SrcEltTy->isFloatingPointTy()) {
111  unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
112  Type *SrcIVTy =
113  VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
114  // Ask IR to do the conversion now that #elts line up.
115  C = ConstantExpr::getBitCast(C, SrcIVTy);
116  }
117 
118  APInt Result(DL.getTypeSizeInBits(DestTy), 0);
119  if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
120  SrcEltTy, NumSrcElts, DL))
121  return CE;
122 
123  if (isa<IntegerType>(DestTy))
124  return ConstantInt::get(DestTy, Result);
125 
126  APFloat FP(DestTy->getFltSemantics(), Result);
127  return ConstantFP::get(DestTy->getContext(), FP);
128  }
129  }
130 
131  // The code below only handles casts to vectors currently.
132  auto *DestVTy = dyn_cast<VectorType>(DestTy);
133  if (!DestVTy)
134  return ConstantExpr::getBitCast(C, DestTy);
135 
136  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
137  // vector so the code below can handle it uniformly.
138  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
139  Constant *Ops = C; // don't take the address of C!
140  return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
141  }
142 
143  // If this is a bitcast from constant vector -> vector, fold it.
144  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
145  return ConstantExpr::getBitCast(C, DestTy);
146 
147  // If the element types match, IR can fold it.
148  unsigned NumDstElt = DestVTy->getNumElements();
149  unsigned NumSrcElt = C->getType()->getVectorNumElements();
150  if (NumDstElt == NumSrcElt)
151  return ConstantExpr::getBitCast(C, DestTy);
152 
153  Type *SrcEltTy = C->getType()->getVectorElementType();
154  Type *DstEltTy = DestVTy->getElementType();
155 
156  // Otherwise, we're changing the number of elements in a vector, which
157  // requires endianness information to do the right thing. For example,
158  // bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
159  // folds to (little endian):
160  // <4 x i32> <i32 0, i32 0, i32 1, i32 0>
161  // and to (big endian):
162  // <4 x i32> <i32 0, i32 0, i32 0, i32 1>
163 
164  // First things first: we only want to think about integers here, so if
165  // we have something in FP form, recast it as an integer.
166  if (DstEltTy->isFloatingPointTy()) {
167  // Fold to a vector of integers with the same size as our FP type.
168  unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
169  Type *DestIVTy =
170  VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
171  // Recursively handle this integer conversion, if possible.
172  C = FoldBitCast(C, DestIVTy, DL);
173 
174  // Finally, IR can handle this now that #elts line up.
175  return ConstantExpr::getBitCast(C, DestTy);
176  }
177 
178  // Okay, we know the destination is an integer vector; if the input is FP,
179  // convert it to integer first.
180  if (SrcEltTy->isFloatingPointTy()) {
181  unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
182  Type *SrcIVTy =
183  VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
184  // Ask IR to do the conversion now that #elts line up.
185  C = ConstantExpr::getBitCast(C, SrcIVTy);
186  // If IR wasn't able to fold it, bail out.
187  if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
188  !isa<ConstantDataVector>(C))
189  return C;
190  }
191 
192  // Now we know that the input and output vectors are both integer vectors
193  // of the same size, and that their #elements is not the same. Do the
194  // conversion here, which depends on whether the input or output has
195  // more elements.
196  bool isLittleEndian = DL.isLittleEndian();
197 
198  SmallVector<Constant*, 32> Result;
199  if (NumDstElt < NumSrcElt) {
200  // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
201  Constant *Zero = Constant::getNullValue(DstEltTy);
202  unsigned Ratio = NumSrcElt/NumDstElt;
203  unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
204  unsigned SrcElt = 0;
205  for (unsigned i = 0; i != NumDstElt; ++i) {
206  // Build each element of the result.
207  Constant *Elt = Zero;
208  unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
209  for (unsigned j = 0; j != Ratio; ++j) {
210  Constant *Src = C->getAggregateElement(SrcElt++);
211  if (Src && isa<UndefValue>(Src))
212  Src = Constant::getNullValue(C->getType()->getVectorElementType());
213  else
214  Src = dyn_cast_or_null<ConstantInt>(Src);
215  if (!Src) // Reject constantexpr elements.
216  return ConstantExpr::getBitCast(C, DestTy);
217 
218  // Zero extend the element to the right size.
219  Src = ConstantExpr::getZExt(Src, Elt->getType());
220 
221  // Shift it to the right place, depending on endianness.
222  Src = ConstantExpr::getShl(Src,
223  ConstantInt::get(Src->getType(), ShiftAmt));
224  ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
225 
226  // Mix it in.
227  Elt = ConstantExpr::getOr(Elt, Src);
228  }
229  Result.push_back(Elt);
230  }
231  return ConstantVector::get(Result);
232  }
233 
234  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
235  unsigned Ratio = NumDstElt/NumSrcElt;
236  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);
237 
238  // Loop over each source value, expanding into multiple results.
239  for (unsigned i = 0; i != NumSrcElt; ++i) {
240  auto *Element = C->getAggregateElement(i);
241 
242  if (!Element) // Reject constantexpr elements.
243  return ConstantExpr::getBitCast(C, DestTy);
244 
245  if (isa<UndefValue>(Element)) {
246  // Correctly propagate undef values.
247  Result.append(Ratio, UndefValue::get(DstEltTy));
248  continue;
249  }
250 
251  auto *Src = dyn_cast<ConstantInt>(Element);
252  if (!Src)
253  return ConstantExpr::getBitCast(C, DestTy);
254 
255  unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
256  for (unsigned j = 0; j != Ratio; ++j) {
257  // Shift the piece of the value into the right place, depending on
258  // endianness.
259  Constant *Elt = ConstantExpr::getLShr(Src,
260  ConstantInt::get(Src->getType(), ShiftAmt));
261  ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
262 
263  // Truncate the element to an integer with the same pointer size and
264  // convert the element back to a pointer using an inttoptr.
265  if (DstEltTy->isPointerTy()) {
266  IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
267  Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
268  Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
269  continue;
270  }
271 
272  // Truncate and remember this piece.
273  Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
274  }
275  }
276 
277  return ConstantVector::get(Result);
278 }
279 
280 } // end anonymous namespace
281 
282 /// If this constant is a constant offset from a global, return the global and
283 /// the constant. Because of constantexprs, this function is recursive.
284 bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
285  APInt &Offset, const DataLayout &DL) {
286  // Trivial case, constant is the global.
287  if ((GV = dyn_cast<GlobalValue>(C))) {
288  unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
289  Offset = APInt(BitWidth, 0);
290  return true;
291  }
292 
293  // Otherwise, if this isn't a constant expr, bail out.
294  auto *CE = dyn_cast<ConstantExpr>(C);
295  if (!CE) return false;
296 
297  // Look through ptr->int and ptr->ptr casts.
298  if (CE->getOpcode() == Instruction::PtrToInt ||
299  CE->getOpcode() == Instruction::BitCast)
300  return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL);
301 
302  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
303  auto *GEP = dyn_cast<GEPOperator>(CE);
304  if (!GEP)
305  return false;
306 
307  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
308  APInt TmpOffset(BitWidth, 0);
309 
310  // If the base isn't a global+constant, we aren't either.
311  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL))
312  return false;
313 
314  // Otherwise, add any offset that our operands provide.
315  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
316  return false;
317 
318  Offset = TmpOffset;
319  return true;
320 }
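// Worked example (illustrative): with 4-byte i32 elements,
//   i32* getelementptr ([5 x i32], [5 x i32]* @a, i32 0, i32 3)
// decomposes into GV = @a and Offset = 12 bytes.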
321 
322 Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
323  const DataLayout &DL) {
324  do {
325  Type *SrcTy = C->getType();
326 
327  // If the type sizes are the same and a cast is legal, just directly
328  // cast the constant.
329  if (DL.getTypeSizeInBits(DestTy) == DL.getTypeSizeInBits(SrcTy)) {
330  Instruction::CastOps Cast = Instruction::BitCast;
331  // If we are going from a pointer to int or vice versa, we spell the cast
332  // differently.
333  if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
334  Cast = Instruction::IntToPtr;
335  else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
336  Cast = Instruction::PtrToInt;
337 
338  if (CastInst::castIsValid(Cast, C, DestTy))
339  return ConstantExpr::getCast(Cast, C, DestTy);
340  }
341 
342  // If this isn't an aggregate type, there is nothing we can do to drill down
343  // and find a bitcastable constant.
344  if (!SrcTy->isAggregateType())
345  return nullptr;
346 
347  // We're simulating a load through a pointer that was bitcast to point to
348  // a different type, so we can try to walk down through the initial
349  // elements of an aggregate to see if some part of the aggregate is
350  // castable to implement the "load" semantic model.
351  if (SrcTy->isStructTy()) {
352  // Struct types might have leading zero-length elements like [0 x i32],
353  // which are certainly not what we are looking for, so skip them.
354  unsigned Elem = 0;
355  Constant *ElemC;
356  do {
357  ElemC = C->getAggregateElement(Elem++);
358  } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()) == 0);
359  C = ElemC;
360  } else {
361  C = C->getAggregateElement(0u);
362  }
363  } while (C);
364 
365  return nullptr;
366 }
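// Worked example (illustrative): for @g = constant { i32, i32 } { i32 7, i32 9 },
// a load of type i32 simulated through a bitcast pointer drills into the first
// struct element above and folds to i32 7.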
367 
368 namespace {
369 
370 /// Recursive helper to read bits out of a global. C is the constant being copied
371 /// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
372 /// results into and BytesLeft is the number of bytes left in
373 /// the CurPtr buffer. DL is the DataLayout.
374 bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
375  unsigned BytesLeft, const DataLayout &DL) {
376  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
377  "Out of range access");
378 
379  // If this element is zero or undefined, we can just return since *CurPtr is
380  // zero initialized.
381  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
382  return true;
383 
384  if (auto *CI = dyn_cast<ConstantInt>(C)) {
385  if (CI->getBitWidth() > 64 ||
386  (CI->getBitWidth() & 7) != 0)
387  return false;
388 
389  uint64_t Val = CI->getZExtValue();
390  unsigned IntBytes = unsigned(CI->getBitWidth()/8);
391 
392  for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
393  int n = ByteOffset;
394  if (!DL.isLittleEndian())
395  n = IntBytes - n - 1;
396  CurPtr[i] = (unsigned char)(Val >> (n * 8));
397  ++ByteOffset;
398  }
399  return true;
400  }
401 
402  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
403  if (CFP->getType()->isDoubleTy()) {
404  C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
405  return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
406  }
407  if (CFP->getType()->isFloatTy()){
408  C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
409  return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
410  }
411  if (CFP->getType()->isHalfTy()){
412  C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
413  return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
414  }
415  return false;
416  }
417 
418  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
419  const StructLayout *SL = DL.getStructLayout(CS->getType());
420  unsigned Index = SL->getElementContainingOffset(ByteOffset);
421  uint64_t CurEltOffset = SL->getElementOffset(Index);
422  ByteOffset -= CurEltOffset;
423 
424  while (true) {
425  // If the element access is to the element itself and not to tail padding,
426  // read the bytes from the element.
427  uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());
428 
429  if (ByteOffset < EltSize &&
430  !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
431  BytesLeft, DL))
432  return false;
433 
434  ++Index;
435 
436  // Check to see if we read from the last struct element; if so, we're done.
437  if (Index == CS->getType()->getNumElements())
438  return true;
439 
440  // If we read all of the bytes we needed from this element we're done.
441  uint64_t NextEltOffset = SL->getElementOffset(Index);
442 
443  if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
444  return true;
445 
446  // Move to the next element of the struct.
447  CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
448  BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
449  ByteOffset = 0;
450  CurEltOffset = NextEltOffset;
451  }
452  // not reached.
453  }
454 
455  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
456  isa<ConstantDataSequential>(C)) {
457  Type *EltTy = C->getType()->getSequentialElementType();
458  uint64_t EltSize = DL.getTypeAllocSize(EltTy);
459  uint64_t Index = ByteOffset / EltSize;
460  uint64_t Offset = ByteOffset - Index * EltSize;
461  uint64_t NumElts;
462  if (auto *AT = dyn_cast<ArrayType>(C->getType()))
463  NumElts = AT->getNumElements();
464  else
465  NumElts = C->getType()->getVectorNumElements();
466 
467  for (; Index != NumElts; ++Index) {
468  if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
469  BytesLeft, DL))
470  return false;
471 
472  uint64_t BytesWritten = EltSize - Offset;
473  assert(BytesWritten <= EltSize && "Not indexing into this element?");
474  if (BytesWritten >= BytesLeft)
475  return true;
476 
477  Offset = 0;
478  BytesLeft -= BytesWritten;
479  CurPtr += BytesWritten;
480  }
481  return true;
482  }
483 
484  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
485  if (CE->getOpcode() == Instruction::IntToPtr &&
486  CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
487  return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
488  BytesLeft, DL);
489  }
490  }
491 
492  // Otherwise, unknown initializer type.
493  return false;
494 }
495 
496 Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
497  const DataLayout &DL) {
498  auto *PTy = cast<PointerType>(C->getType());
499  auto *IntType = dyn_cast<IntegerType>(LoadTy);
500 
501  // If this isn't an integer load we can't fold it directly.
502  if (!IntType) {
503  unsigned AS = PTy->getAddressSpace();
504 
505  // If this is a float/double load, we can try folding it as an int32/64 load
506  // and then bitcast the result. This can be useful for union cases. Note
507  // that address spaces don't matter here since we're not going to produce
508  // an actual new load.
509  Type *MapTy;
510  if (LoadTy->isHalfTy())
511  MapTy = Type::getInt16Ty(C->getContext());
512  else if (LoadTy->isFloatTy())
513  MapTy = Type::getInt32Ty(C->getContext());
514  else if (LoadTy->isDoubleTy())
515  MapTy = Type::getInt64Ty(C->getContext());
516  else if (LoadTy->isVectorTy()) {
517  MapTy = PointerType::getIntNTy(C->getContext(),
518  DL.getTypeAllocSizeInBits(LoadTy));
519  } else
520  return nullptr;
521 
522  C = FoldBitCast(C, MapTy->getPointerTo(AS), DL);
523  if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL))
524  return FoldBitCast(Res, LoadTy, DL);
525  return nullptr;
526  }
527 
528  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
529  if (BytesLoaded > 32 || BytesLoaded == 0)
530  return nullptr;
531 
532  GlobalValue *GVal;
533  APInt OffsetAI;
534  if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL))
535  return nullptr;
536 
537  auto *GV = dyn_cast<GlobalVariable>(GVal);
538  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
539  !GV->getInitializer()->getType()->isSized())
540  return nullptr;
541 
542  int64_t Offset = OffsetAI.getSExtValue();
543  int64_t InitializerSize = DL.getTypeAllocSize(GV->getInitializer()->getType());
544 
545  // If we're not accessing anything in this constant, the result is undefined.
546  if (Offset + BytesLoaded <= 0)
547  return UndefValue::get(IntType);
548 
549  // If we're not accessing anything in this constant, the result is undefined.
550  if (Offset >= InitializerSize)
551  return UndefValue::get(IntType);
552 
553  unsigned char RawBytes[32] = {0};
554  unsigned char *CurPtr = RawBytes;
555  unsigned BytesLeft = BytesLoaded;
556 
557  // If we're loading off the beginning of the global, some bytes may be valid.
558  if (Offset < 0) {
559  CurPtr += -Offset;
560  BytesLeft += Offset;
561  Offset = 0;
562  }
563 
564  if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL))
565  return nullptr;
566 
567  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
568  if (DL.isLittleEndian()) {
569  ResultVal = RawBytes[BytesLoaded - 1];
570  for (unsigned i = 1; i != BytesLoaded; ++i) {
571  ResultVal <<= 8;
572  ResultVal |= RawBytes[BytesLoaded - 1 - i];
573  }
574  } else {
575  ResultVal = RawBytes[0];
576  for (unsigned i = 1; i != BytesLoaded; ++i) {
577  ResultVal <<= 8;
578  ResultVal |= RawBytes[i];
579  }
580  }
581 
582  return ConstantInt::get(IntType->getContext(), ResultVal);
583 }
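// Worked example (illustrative): for @g = constant [2 x i8] [i8 1, i8 2], an
// i16 load of @g on a little-endian target reads the raw bytes {1, 2} and
// reassembles them into i16 0x0201.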
584 
585 Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy,
586  const DataLayout &DL) {
587  auto *SrcPtr = CE->getOperand(0);
588  auto *SrcPtrTy = dyn_cast<PointerType>(SrcPtr->getType());
589  if (!SrcPtrTy)
590  return nullptr;
591  Type *SrcTy = SrcPtrTy->getPointerElementType();
592 
593  Constant *C = ConstantFoldLoadFromConstPtr(SrcPtr, SrcTy, DL);
594  if (!C)
595  return nullptr;
596 
597  return llvm::ConstantFoldLoadThroughBitcast(C, DestTy, DL);
598 }
599 
600 } // end anonymous namespace
601 
602 Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
603  const DataLayout &DL) {
604  // First, try the easy cases:
605  if (auto *GV = dyn_cast<GlobalVariable>(C))
606  if (GV->isConstant() && GV->hasDefinitiveInitializer())
607  return GV->getInitializer();
608 
609  if (auto *GA = dyn_cast<GlobalAlias>(C))
610  if (GA->getAliasee() && !GA->isInterposable())
611  return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL);
612 
613  // If the loaded value isn't a constant expr, we can't handle it.
614  auto *CE = dyn_cast<ConstantExpr>(C);
615  if (!CE)
616  return nullptr;
617 
618  if (CE->getOpcode() == Instruction::GetElementPtr) {
619  if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
620  if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
621  if (Constant *V =
622  ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
623  return V;
624  }
625  }
626  }
627 
628  if (CE->getOpcode() == Instruction::BitCast)
629  if (Constant *LoadedC = ConstantFoldLoadThroughBitcastExpr(CE, Ty, DL))
630  return LoadedC;
631 
632  // Instead of loading a constant C string, use the corresponding integer
633  // value directly if the string length is small enough.
634  StringRef Str;
635  if (getConstantStringInfo(CE, Str) && !Str.empty()) {
636  size_t StrLen = Str.size();
637  unsigned NumBits = Ty->getPrimitiveSizeInBits();
638  // Replace load with immediate integer if the result is an integer or fp
639  // value.
640  if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
641  (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
642  APInt StrVal(NumBits, 0);
643  APInt SingleChar(NumBits, 0);
644  if (DL.isLittleEndian()) {
645  for (unsigned char C : reverse(Str.bytes())) {
646  SingleChar = static_cast<uint64_t>(C);
647  StrVal = (StrVal << 8) | SingleChar;
648  }
649  } else {
650  for (unsigned char C : Str.bytes()) {
651  SingleChar = static_cast<uint64_t>(C);
652  StrVal = (StrVal << 8) | SingleChar;
653  }
654  // Append NULL at the end.
655  SingleChar = 0;
656  StrVal = (StrVal << 8) | SingleChar;
657  }
658 
659  Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
660  if (Ty->isFloatingPointTy())
661  Res = ConstantExpr::getBitCast(Res, Ty);
662  return Res;
663  }
664  }
665 
666  // If this load comes from anywhere in a constant global, and if the global
667  // is all undef or zero, we know what it loads.
668  if (auto *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
669  if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
670  if (GV->getInitializer()->isNullValue())
671  return Constant::getNullValue(Ty);
672  if (isa<UndefValue>(GV->getInitializer()))
673  return UndefValue::get(Ty);
674  }
675  }
676 
677  // Try hard to fold loads from bitcasted strange and non-type-safe things.
678  return FoldReinterpretLoadFromConstPtr(CE, Ty, DL);
679 }
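// Worked example (illustrative) of the small-string fold above: for
// @s = constant [4 x i8] c"abc\00", an i32 load of @s on a little-endian
// target folds to i32 0x00636261, the integer whose bytes are 'a', 'b', 'c', 0.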
680 
681 namespace {
682 
683 Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout &DL) {
684  if (LI->isVolatile()) return nullptr;
685 
686  if (auto *C = dyn_cast<Constant>(LI->getOperand(0)))
687  return ConstantFoldLoadFromConstPtr(C, LI->getType(), DL);
688 
689  return nullptr;
690 }
691 
692 /// One of Op0/Op1 is a constant expression.
693 /// Attempt to symbolically evaluate the result of a binary operator merging
694 /// these together. The DataLayout for the target is provided as DL.
696 Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
697  const DataLayout &DL) {
698  // SROA
699 
700  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
701  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
702  // bits.
703 
704  if (Opc == Instruction::And) {
705  KnownBits Known0 = computeKnownBits(Op0, DL);
706  KnownBits Known1 = computeKnownBits(Op1, DL);
707  if ((Known1.One | Known0.Zero).isAllOnesValue()) {
708  // All the bits of Op0 that the 'and' could be masking are already zero.
709  return Op0;
710  }
711  if ((Known0.One | Known1.Zero).isAllOnesValue()) {
712  // All the bits of Op1 that the 'and' could be masking are already zero.
713  return Op1;
714  }
715 
716  Known0.Zero |= Known1.Zero;
717  Known0.One &= Known1.One;
718  if (Known0.isConstant())
719  return ConstantInt::get(Op0->getType(), Known0.getConstant());
720  }
721 
722  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
723  // constant. This happens frequently when iterating over a global array.
724  if (Opc == Instruction::Sub) {
725  GlobalValue *GV1, *GV2;
726  APInt Offs1, Offs2;
727 
728  if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
729  if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
730  unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());
731 
732  // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
733  // PtrToInt may change the bitwidth so we have to convert to the right size
734  // first.
735  return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
736  Offs2.zextOrTrunc(OpSize));
737  }
738  }
739 
740  return nullptr;
741 }
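// Worked example (illustrative): with 4-byte i32 elements,
//   sub (ptrtoint (i32* getelementptr ([10 x i32], [10 x i32]* @A, i32 0, i32 3)),
//        ptrtoint (i32* getelementptr ([10 x i32], [10 x i32]* @A, i32 0, i32 1)))
// folds to 8, because both operands are constant offsets from the same global.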
742 
743 /// If array indices are not pointer-sized integers, explicitly cast them so
744 /// that they aren't implicitly cast by the getelementptr.
745 Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
746  Type *ResultTy, Optional<unsigned> InRangeIndex,
747  const DataLayout &DL, const TargetLibraryInfo *TLI) {
748  Type *IntPtrTy = DL.getIntPtrType(ResultTy);
749  Type *IntPtrScalarTy = IntPtrTy->getScalarType();
750 
751  bool Any = false;
752  SmallVector<Constant *, 32> NewIdxs;
753  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
754  if ((i == 1 ||
755  !isa<StructType>(GetElementPtrInst::getIndexedType(
756  SrcElemTy, Ops.slice(1, i - 1)))) &&
757  Ops[i]->getType()->getScalarType() != IntPtrScalarTy) {
758  Any = true;
759  Type *NewType = Ops[i]->getType()->isVectorTy()
760  ? IntPtrTy
761  : IntPtrTy->getScalarType();
762  NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
763  true,
764  NewType,
765  true),
766  Ops[i], NewType));
767  } else
768  NewIdxs.push_back(Ops[i]);
769  }
770 
771  if (!Any)
772  return nullptr;
773 
774  Constant *C = ConstantExpr::getGetElementPtr(
775  SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
776  if (Constant *Folded = ConstantFoldConstant(C, DL, TLI))
777  C = Folded;
778 
779  return C;
780 }
781 
782 /// Strip the pointer casts, but preserve the address space information.
783 Constant* StripPtrCastKeepAS(Constant* Ptr, Type *&ElemTy) {
784  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
785  auto *OldPtrTy = cast<PointerType>(Ptr->getType());
786  Ptr = Ptr->stripPointerCasts();
787  auto *NewPtrTy = cast<PointerType>(Ptr->getType());
788 
789  ElemTy = NewPtrTy->getPointerElementType();
790 
791  // Preserve the address space number of the pointer.
792  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
793  NewPtrTy = ElemTy->getPointerTo(OldPtrTy->getAddressSpace());
794  Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
795  }
796  return Ptr;
797 }
798 
799 /// If we can symbolically evaluate the GEP constant expression, do so.
800 Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
801  ArrayRef<Constant *> Ops,
802  const DataLayout &DL,
803  const TargetLibraryInfo *TLI) {
804  const GEPOperator *InnermostGEP = GEP;
805  bool InBounds = GEP->isInBounds();
806 
807  Type *SrcElemTy = GEP->getSourceElementType();
808  Type *ResElemTy = GEP->getResultElementType();
809  Type *ResTy = GEP->getType();
810  if (!SrcElemTy->isSized())
811  return nullptr;
812 
813  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
814  GEP->getInRangeIndex(), DL, TLI))
815  return C;
816 
817  Constant *Ptr = Ops[0];
818  if (!Ptr->getType()->isPointerTy())
819  return nullptr;
820 
821  Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());
822 
823  // If this is a constant expr gep that is effectively computing an
824  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
825  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
826  if (!isa<ConstantInt>(Ops[i])) {
827 
828  // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
829  // "inttoptr (sub (ptrtoint Ptr), V)"
830  if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
831  auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
832  assert((!CE || CE->getType() == IntPtrTy) &&
833  "CastGEPIndices didn't canonicalize index types!");
834  if (CE && CE->getOpcode() == Instruction::Sub &&
835  CE->getOperand(0)->isNullValue()) {
836  Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
837  Res = ConstantExpr::getSub(Res, CE->getOperand(1));
838  Res = ConstantExpr::getIntToPtr(Res, ResTy);
839  if (auto *FoldedRes = ConstantFoldConstant(Res, DL, TLI))
840  Res = FoldedRes;
841  return Res;
842  }
843  }
844  return nullptr;
845  }
846 
847  unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
848  APInt Offset =
849  APInt(BitWidth,
850  DL.getIndexedOffsetInType(
851  SrcElemTy,
852  makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
853  Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
854 
855  // If this is a GEP of a GEP, fold it all into a single GEP.
856  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
857  InnermostGEP = GEP;
858  InBounds &= GEP->isInBounds();
859 
860  SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());
861 
862  // Do not try to incorporate the sub-GEP if some index is not a number.
863  bool AllConstantInt = true;
864  for (Value *NestedOp : NestedOps)
865  if (!isa<ConstantInt>(NestedOp)) {
866  AllConstantInt = false;
867  break;
868  }
869  if (!AllConstantInt)
870  break;
871 
872  Ptr = cast<Constant>(GEP->getOperand(0));
873  SrcElemTy = GEP->getSourceElementType();
874  Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
875  Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
876  }
877 
878  // If the base value for this address is a literal integer value, fold the
879  // getelementptr to the resulting integer value casted to the pointer type.
880  APInt BasePtr(BitWidth, 0);
881  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
882  if (CE->getOpcode() == Instruction::IntToPtr) {
883  if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
884  BasePtr = Base->getValue().zextOrTrunc(BitWidth);
885  }
886  }
887 
888  auto *PTy = cast<PointerType>(Ptr->getType());
889  if ((Ptr->isNullValue() || BasePtr != 0) &&
890  !DL.isNonIntegralPointerType(PTy)) {
891  Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
892  return ConstantExpr::getIntToPtr(C, ResTy);
893  }
894 
895  // Otherwise form a regular getelementptr. Recompute the indices so that
896  // we eliminate over-indexing of the notional static type array bounds.
897  // This makes it easy to determine if the getelementptr is "inbounds".
898  // Also, this helps GlobalOpt do SROA on GlobalVariables.
899  Type *Ty = PTy;
900  SmallVector<Constant *, 32> NewIdxs;
901 
902  do {
903  if (!Ty->isStructTy()) {
904  if (Ty->isPointerTy()) {
905  // The only pointer indexing we'll do is on the first index of the GEP.
906  if (!NewIdxs.empty())
907  break;
908 
909  Ty = SrcElemTy;
910 
911  // Only handle pointers to sized types, not pointers to functions.
912  if (!Ty->isSized())
913  return nullptr;
914  } else if (auto *ATy = dyn_cast<SequentialType>(Ty)) {
915  Ty = ATy->getElementType();
916  } else {
917  // We've reached some non-indexable type.
918  break;
919  }
920 
921  // Determine which element of the array the offset points into.
922  APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty));
923  if (ElemSize == 0) {
924  // The element size is 0. This may be [0 x Ty]*, so just use a zero
925  // index for this level and proceed to the next level to see if it can
926  // accommodate the offset.
927  NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
928  } else {
929  // The element size is non-zero; divide the offset by the element
930  // size (rounding down) to compute the index at this level.
931  bool Overflow;
932  APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow);
933  if (Overflow)
934  break;
935  Offset -= NewIdx * ElemSize;
936  NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
937  }
938  } else {
939  auto *STy = cast<StructType>(Ty);
940  // If we end up with an offset that isn't valid for this struct type, we
941  // can't re-form this GEP in a regular form, so bail out. The pointer
942  // operand likely went through casts that are necessary to make the GEP
943  // sensible.
944  const StructLayout &SL = *DL.getStructLayout(STy);
945  if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes()))
946  break;
947 
948  // Determine which field of the struct the offset points into. The
949  // getZExtValue is fine as we've already ensured that the offset is
950  // within the range representable by the StructLayout API.
951  unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
952  NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
953  ElIdx));
954  Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
955  Ty = STy->getTypeAtIndex(ElIdx);
956  }
957  } while (Ty != ResElemTy);
958 
959  // If we haven't used up the entire offset by descending the static
960  // type, then the offset is pointing into the middle of an indivisible
961  // member, so we can't simplify it.
962  if (Offset != 0)
963  return nullptr;
964 
965  // Preserve the inrange index from the innermost GEP if possible. We must
966  // have calculated the same indices up to and including the inrange index.
967  Optional<unsigned> InRangeIndex;
968  if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
969  if (SrcElemTy == InnermostGEP->getSourceElementType() &&
970  NewIdxs.size() > *LastIRIndex) {
971  InRangeIndex = LastIRIndex;
972  for (unsigned I = 0; I <= *LastIRIndex; ++I)
973  if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
974  return nullptr;
975  }
976 
977  // Create a GEP.
978  Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
979  InBounds, InRangeIndex);
980  assert(C->getType()->getPointerElementType() == Ty &&
981  "Computed GetElementPtr has unexpected type!");
982 
983  // If we ended up indexing a member with a type that doesn't match
984  // the type of what the original indices indexed, add a cast.
985  if (Ty != ResElemTy)
986  C = FoldBitCast(C, ResTy, DL);
987 
988  return C;
989 }
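// Worked example (illustrative, assuming 64-bit pointers and 4-byte i32):
//   getelementptr (i8, i8* bitcast ([10 x i32]* @A to i8*), i64 8)
// is re-formed over @A's own type as
//   bitcast (i32* getelementptr ([10 x i32], [10 x i32]* @A, i64 0, i64 2) to i8*)
// by converting the accumulated byte offset back into structured indices.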
990 
991 /// Attempt to constant fold an instruction with the
992 /// specified opcode and operands. If successful, the constant result is
993 /// returned, if not, null is returned. Note that this function can fail when
994 /// attempting to fold instructions like loads and stores, which have no
995 /// constant expression form.
996 Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
997  ArrayRef<Constant *> Ops,
998  const DataLayout &DL,
999  const TargetLibraryInfo *TLI) {
1000  Type *DestTy = InstOrCE->getType();
1001 
1002  // Handle easy binops first.
1003  if (Instruction::isBinaryOp(Opcode))
1004  return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);
1005 
1006  if (Instruction::isCast(Opcode))
1007  return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);
1008 
1009  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
1010  if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
1011  return C;
1012 
1013  return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
1014  Ops.slice(1), GEP->isInBounds(),
1015  GEP->getInRangeIndex());
1016  }
1017 
1018  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
1019  return CE->getWithOperands(Ops);
1020 
1021  switch (Opcode) {
1022  default: return nullptr;
1023  case Instruction::ICmp:
1024  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
1025  case Instruction::Call:
1026  if (auto *F = dyn_cast<Function>(Ops.back())) {
1027  const auto *Call = cast<CallBase>(InstOrCE);
1028  if (canConstantFoldCallTo(Call, F))
1029  return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
1030  }
1031  return nullptr;
1032  case Instruction::Select:
1033  return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
1034  case Instruction::ExtractElement:
1035  return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
1036  case Instruction::InsertElement:
1037  return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
1038  case Instruction::ShuffleVector:
1039  return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
1040  }
1041 }
1042 
1043 } // end anonymous namespace
1044 
1045 //===----------------------------------------------------------------------===//
1046 // Constant Folding public APIs
1047 //===----------------------------------------------------------------------===//
1048 
1049 namespace {
1050 
1051 Constant *
1052 ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
1053  const TargetLibraryInfo *TLI,
1054  SmallDenseMap<Constant *, Constant *> &FoldedOps) {
1055  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
1056  return nullptr;
1057 
1058  SmallVector<Constant *, 8> Ops;
1059  for (const Use &NewU : C->operands()) {
1060  auto *NewC = cast<Constant>(&NewU);
1061  // Recursively fold the ConstantExpr's operands. If we have already folded
1062  // a ConstantExpr, we don't have to process it again.
1063  if (isa<ConstantVector>(NewC) || isa<ConstantExpr>(NewC)) {
1064  auto It = FoldedOps.find(NewC);
1065  if (It == FoldedOps.end()) {
1066  if (auto *FoldedC =
1067  ConstantFoldConstantImpl(NewC, DL, TLI, FoldedOps)) {
1068  FoldedOps.insert({NewC, FoldedC});
1069  NewC = FoldedC;
1070  } else {
1071  FoldedOps.insert({NewC, NewC});
1072  }
1073  } else {
1074  NewC = It->second;
1075  }
1076  }
1077  Ops.push_back(NewC);
1078  }
1079 
1080  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1081  if (CE->isCompare())
1082  return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
1083  DL, TLI);
1084 
1085  return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
1086  }
1087 
1088  assert(isa<ConstantVector>(C));
1089  return ConstantVector::get(Ops);
1090 }
1091 
1092 } // end anonymous namespace
1093 
1094 Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
1095  const TargetLibraryInfo *TLI) {
1096  // Handle PHI nodes quickly here...
1097  if (auto *PN = dyn_cast<PHINode>(I)) {
1098  Constant *CommonValue = nullptr;
1099 
1100  SmallDenseMap<Constant *, Constant *> FoldedOps;
1101  for (Value *Incoming : PN->incoming_values()) {
1102  // If the incoming value is undef then skip it. Note that while we could
1103  // skip the value if it is equal to the phi node itself, we choose not to
1104  // because that would break the rule that constant folding only applies if
1105  // all operands are constants.
1106  if (isa<UndefValue>(Incoming))
1107  continue;
1108  // If the incoming value is not a constant, then give up.
1109  auto *C = dyn_cast<Constant>(Incoming);
1110  if (!C)
1111  return nullptr;
1112  // Fold the PHI's operands.
1113  if (auto *FoldedC = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps))
1114  C = FoldedC;
1115  // If the incoming value is a different constant to
1116  // the one we saw previously, then give up.
1117  if (CommonValue && C != CommonValue)
1118  return nullptr;
1119  CommonValue = C;
1120  }
1121 
1122  // If we reach here, all incoming values are the same constant or undef.
1123  return CommonValue ? CommonValue : UndefValue::get(PN->getType());
1124  }
1125 
1126  // Scan the operand list, checking to see if they are all constants, if so,
1127  // hand off to ConstantFoldInstOperandsImpl.
1128  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
1129  return nullptr;
1130 
1131  SmallDenseMap<Constant *, Constant *> FoldedOps;
1132  SmallVector<Constant *, 8> Ops;
1133  for (const Use &OpU : I->operands()) {
1134  auto *Op = cast<Constant>(&OpU);
1135  // Fold the Instruction's operands.
1136  if (auto *FoldedOp = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps))
1137  Op = FoldedOp;
1138 
1139  Ops.push_back(Op);
1140  }
1141 
1142  if (const auto *CI = dyn_cast<CmpInst>(I))
1143  return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
1144  DL, TLI);
1145 
1146  if (const auto *LI = dyn_cast<LoadInst>(I))
1147  return ConstantFoldLoadInst(LI, DL);
1148 
1149  if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
1150  return ConstantExpr::getInsertValue(
1151  cast<Constant>(IVI->getAggregateOperand()),
1152  cast<Constant>(IVI->getInsertedValueOperand()),
1153  IVI->getIndices());
1154  }
1155 
1156  if (auto *EVI = dyn_cast<ExtractValueInst>(I)) {
1157  return ConstantExpr::getExtractValue(
1158  cast<Constant>(EVI->getAggregateOperand()),
1159  EVI->getIndices());
1160  }
1161 
1162  return ConstantFoldInstOperands(I, Ops, DL, TLI);
1163 }
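// Usage sketch (illustrative): a pass that wants to fold away an instruction
// whose operands are all constants would typically do
//   if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
//     I->replaceAllUsesWith(C);
//     I->eraseFromParent();
//   }
// where DL is the module's DataLayout and TLI may be null.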
1164 
1165 Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
1166  const TargetLibraryInfo *TLI) {
1167  SmallDenseMap<Constant *, Constant *> FoldedOps;
1168  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
1169 }
1170 
1171 Constant *llvm::ConstantFoldInstOperands(Instruction *I,
1172  ArrayRef<Constant *> Ops,
1173  const DataLayout &DL,
1174  const TargetLibraryInfo *TLI) {
1175  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
1176 }
1177 
1178 Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
1179  Constant *Ops0, Constant *Ops1,
1180  const DataLayout &DL,
1181  const TargetLibraryInfo *TLI) {
1182  // fold: icmp (inttoptr x), null -> icmp x, 0
1183  // fold: icmp null, (inttoptr x) -> icmp 0, x
1184  // fold: icmp (ptrtoint x), 0 -> icmp x, null
1185  // fold: icmp 0, (ptrtoint x) -> icmp null, x
1186  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
1187  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
1188  //
1189  // FIXME: The following comment is out of date and the DataLayout is here now.
1190  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
1191  // around to know if bit truncation is happening.
1192  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
1193  if (Ops1->isNullValue()) {
1194  if (CE0->getOpcode() == Instruction::IntToPtr) {
1195  Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
1196  // Convert the integer value to the right size to ensure we get the
1197  // proper extension or truncation.
1198  Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
1199  IntPtrTy, false);
1200  Constant *Null = Constant::getNullValue(C->getType());
1201  return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
1202  }
1203 
1204  // Only do this transformation if the int is intptrty in size, otherwise
1205  // there is a truncation or extension that we aren't modeling.
1206  if (CE0->getOpcode() == Instruction::PtrToInt) {
1207  Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
1208  if (CE0->getType() == IntPtrTy) {
1209  Constant *C = CE0->getOperand(0);
1210  Constant *Null = Constant::getNullValue(C->getType());
1211  return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
1212  }
1213  }
1214  }
1215 
1216  if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
1217  if (CE0->getOpcode() == CE1->getOpcode()) {
1218  if (CE0->getOpcode() == Instruction::IntToPtr) {
1219  Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
1220 
1221  // Convert the integer value to the right size to ensure we get the
1222  // proper extension or truncation.
1223  Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
1224  IntPtrTy, false);
1225  Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
1226  IntPtrTy, false);
1227  return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
1228  }
1229 
1230  // Only do this transformation if the int is intptrty in size, otherwise
1231  // there is a truncation or extension that we aren't modeling.
1232  if (CE0->getOpcode() == Instruction::PtrToInt) {
1233  Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
1234  if (CE0->getType() == IntPtrTy &&
1235  CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
1236  return ConstantFoldCompareInstOperands(
1237  Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
1238  }
1239  }
1240  }
1241  }
1242 
1243  // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
1244  // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
1245  if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
1246  CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
1247  Constant *LHS = ConstantFoldCompareInstOperands(
1248  Predicate, CE0->getOperand(0), Ops1, DL, TLI);
1249  Constant *RHS = ConstantFoldCompareInstOperands(
1250  Predicate, CE0->getOperand(1), Ops1, DL, TLI);
1251  unsigned OpC =
1252  Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
1253  return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
1254  }
1255  } else if (isa<ConstantExpr>(Ops1)) {
1256  // If RHS is a constant expression, but the left side isn't, swap the
1257  // operands and try again.
1258  Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate);
1259  return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
1260  }
1261 
1262  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
1263 }
1264 
1265 Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
1266  Constant *RHS,
1267  const DataLayout &DL) {
1269  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
1270  if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
1271  return C;
1272 
1273  return ConstantExpr::get(Opcode, LHS, RHS);
1274 }
1275 
1276 Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
1277  Type *DestTy, const DataLayout &DL) {
1278  assert(Instruction::isCast(Opcode));
1279  switch (Opcode) {
1280  default:
1281  llvm_unreachable("Missing case");
1282  case Instruction::PtrToInt:
1283  // If the input is an inttoptr, eliminate the pair. This requires knowing
1284  // the width of a pointer, so it can't be done in ConstantExpr::getCast.
1285  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1286  if (CE->getOpcode() == Instruction::IntToPtr) {
1287  Constant *Input = CE->getOperand(0);
1288  unsigned InWidth = Input->getType()->getScalarSizeInBits();
1289  unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
1290  if (PtrWidth < InWidth) {
1291  Constant *Mask =
1292  ConstantInt::get(CE->getContext(),
1293  APInt::getLowBitsSet(InWidth, PtrWidth));
1294  Input = ConstantExpr::getAnd(Input, Mask);
1295  }
1296  // Do a zext or trunc to get to the dest size.
1297  return ConstantExpr::getIntegerCast(Input, DestTy, false);
1298  }
1299  }
1300  return ConstantExpr::getCast(Opcode, C, DestTy);
1301  case Instruction::IntToPtr:
1302  // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
1303  // the int size is >= the ptr size and the address spaces are the same.
1304  // This requires knowing the width of a pointer, so it can't be done in
1305  // ConstantExpr::getCast.
1306  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1307  if (CE->getOpcode() == Instruction::PtrToInt) {
1308  Constant *SrcPtr = CE->getOperand(0);
1309  unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
1310  unsigned MidIntSize = CE->getType()->getScalarSizeInBits();
1311 
1312  if (MidIntSize >= SrcPtrSize) {
1313  unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
1314  if (SrcAS == DestTy->getPointerAddressSpace())
1315  return FoldBitCast(CE->getOperand(0), DestTy, DL);
1316  }
1317  }
1318  }
1319 
1320  return ConstantExpr::getCast(Opcode, C, DestTy);
1321  case Instruction::Trunc:
1322  case Instruction::ZExt:
1323  case Instruction::SExt:
1324  case Instruction::FPTrunc:
1325  case Instruction::FPExt:
1326  case Instruction::UIToFP:
1327  case Instruction::SIToFP:
1328  case Instruction::FPToUI:
1329  case Instruction::FPToSI:
1330  case Instruction::AddrSpaceCast:
1331  return ConstantExpr::getCast(Opcode, C, DestTy);
1332  case Instruction::BitCast:
1333  return FoldBitCast(C, DestTy, DL);
1334  }
1335 }
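// Worked example (illustrative): on a target with 32-bit pointers,
//   ptrtoint (i8* inttoptr (i64 C to i8*) to i32)
// folds to the low 32 bits of C: the wider integer is first masked down to
// the pointer width and then integer-cast to the destination type.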
1336 
1337 Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
1338  ConstantExpr *CE) {
1339  if (!CE->getOperand(1)->isNullValue())
1340  return nullptr; // Do not allow stepping over the value!
1341 
1342  // Loop over all of the operands, tracking down which value we are
1343  // addressing.
1344  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
1345  C = C->getAggregateElement(CE->getOperand(i));
1346  if (!C)
1347  return nullptr;
1348  }
1349  return C;
1350 }
1351 
1352 Constant *
1353 llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
1354  ArrayRef<Constant *> Indices) {
1355  // Loop over all of the operands, tracking down which value we are
1356  // addressing.
1357  for (Constant *Index : Indices) {
1358  C = C->getAggregateElement(Index);
1359  if (!C)
1360  return nullptr;
1361  }
1362  return C;
1363 }
1364 
1365 //===----------------------------------------------------------------------===//
1366 // Constant Folding for Calls
1367 //
1368 
1369 bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
1370  if (Call->isNoBuiltin() || Call->isStrictFP())
1371  return false;
1372  switch (F->getIntrinsicID()) {
1373  case Intrinsic::fabs:
1374  case Intrinsic::minnum:
1375  case Intrinsic::maxnum:
1376  case Intrinsic::minimum:
1377  case Intrinsic::maximum:
1378  case Intrinsic::log:
1379  case Intrinsic::log2:
1380  case Intrinsic::log10:
1381  case Intrinsic::exp:
1382  case Intrinsic::exp2:
1383  case Intrinsic::floor:
1384  case Intrinsic::ceil:
1385  case Intrinsic::sqrt:
1386  case Intrinsic::sin:
1387  case Intrinsic::cos:
1388  case Intrinsic::trunc:
1389  case Intrinsic::rint:
1390  case Intrinsic::nearbyint:
1391  case Intrinsic::pow:
1392  case Intrinsic::powi:
1393  case Intrinsic::bswap:
1394  case Intrinsic::ctpop:
1395  case Intrinsic::ctlz:
1396  case Intrinsic::cttz:
1397  case Intrinsic::fshl:
1398  case Intrinsic::fshr:
1399  case Intrinsic::fma:
1400  case Intrinsic::fmuladd:
1401  case Intrinsic::copysign:
1402  case Intrinsic::launder_invariant_group:
1403  case Intrinsic::strip_invariant_group:
1404  case Intrinsic::round:
1405  case Intrinsic::masked_load:
1406  case Intrinsic::sadd_with_overflow:
1407  case Intrinsic::uadd_with_overflow:
1408  case Intrinsic::ssub_with_overflow:
1409  case Intrinsic::usub_with_overflow:
1410  case Intrinsic::smul_with_overflow:
1411  case Intrinsic::umul_with_overflow:
1412  case Intrinsic::sadd_sat:
1413  case Intrinsic::uadd_sat:
1414  case Intrinsic::ssub_sat:
1415  case Intrinsic::usub_sat:
1416  case Intrinsic::convert_from_fp16:
1417  case Intrinsic::convert_to_fp16:
1418  case Intrinsic::bitreverse:
1419  case Intrinsic::x86_sse_cvtss2si:
1420  case Intrinsic::x86_sse_cvtss2si64:
1421  case Intrinsic::x86_sse_cvttss2si:
1422  case Intrinsic::x86_sse_cvttss2si64:
1423  case Intrinsic::x86_sse2_cvtsd2si:
1424  case Intrinsic::x86_sse2_cvtsd2si64:
1425  case Intrinsic::x86_sse2_cvttsd2si:
1426  case Intrinsic::x86_sse2_cvttsd2si64:
1427  case Intrinsic::x86_avx512_vcvtss2si32:
1428  case Intrinsic::x86_avx512_vcvtss2si64:
1429  case Intrinsic::x86_avx512_cvttss2si:
1430  case Intrinsic::x86_avx512_cvttss2si64:
1431  case Intrinsic::x86_avx512_vcvtsd2si32:
1432  case Intrinsic::x86_avx512_vcvtsd2si64:
1433  case Intrinsic::x86_avx512_cvttsd2si:
1434  case Intrinsic::x86_avx512_cvttsd2si64:
1435  case Intrinsic::x86_avx512_vcvtss2usi32:
1436  case Intrinsic::x86_avx512_vcvtss2usi64:
1437  case Intrinsic::x86_avx512_cvttss2usi:
1438  case Intrinsic::x86_avx512_cvttss2usi64:
1439  case Intrinsic::x86_avx512_vcvtsd2usi32:
1440  case Intrinsic::x86_avx512_vcvtsd2usi64:
1441  case Intrinsic::x86_avx512_cvttsd2usi:
1442  case Intrinsic::x86_avx512_cvttsd2usi64:
1443  case Intrinsic::is_constant:
1444  return true;
1445  default:
1446  return false;
1447  case Intrinsic::not_intrinsic: break;
1448  }
1449 
1450  if (!F->hasName())
1451  return false;
1452  StringRef Name = F->getName();
1453 
1454  // In these cases, the check of the length is required. We don't want to
1455  // return true for a name like "cos\0blah", which strcmp would consider
1456  // equal to "cos" but which has length 8.
1457  switch (Name[0]) {
1458  default:
1459  return false;
1460  case 'a':
1461  return Name == "acos" || Name == "asin" || Name == "atan" ||
1462  Name == "atan2" || Name == "acosf" || Name == "asinf" ||
1463  Name == "atanf" || Name == "atan2f";
1464  case 'c':
1465  return Name == "ceil" || Name == "cos" || Name == "cosh" ||
1466  Name == "ceilf" || Name == "cosf" || Name == "coshf";
1467  case 'e':
1468  return Name == "exp" || Name == "exp2" || Name == "expf" || Name == "exp2f";
1469  case 'f':
1470  return Name == "fabs" || Name == "floor" || Name == "fmod" ||
1471  Name == "fabsf" || Name == "floorf" || Name == "fmodf";
1472  case 'l':
1473  return Name == "log" || Name == "log10" || Name == "logf" ||
1474  Name == "log10f";
1475  case 'p':
1476  return Name == "pow" || Name == "powf";
1477  case 'r':
1478  return Name == "round" || Name == "roundf";
1479  case 's':
1480  return Name == "sin" || Name == "sinh" || Name == "sqrt" ||
1481  Name == "sinf" || Name == "sinhf" || Name == "sqrtf";
1482  case 't':
1483  return Name == "tan" || Name == "tanh" || Name == "tanf" || Name == "tanhf";
1484  case '_':
1485 
1486  // Check for various function names that get used for the math functions
1487  // when the header files are preprocessed with the macro
1488  // __FINITE_MATH_ONLY__ enabled.
1489  // The '12' here is the length of the shortest name that can match.
1490  // We need to check the size before looking at Name[1] and Name[2]
1491  // so we may as well check a limit that will eliminate mismatches.
1492  if (Name.size() < 12 || Name[1] != '_')
1493  return false;
1494  switch (Name[2]) {
1495  default:
1496  return false;
1497  case 'a':
1498  return Name == "__acos_finite" || Name == "__acosf_finite" ||
1499  Name == "__asin_finite" || Name == "__asinf_finite" ||
1500  Name == "__atan2_finite" || Name == "__atan2f_finite";
1501  case 'c':
1502  return Name == "__cosh_finite" || Name == "__coshf_finite";
1503  case 'e':
1504  return Name == "__exp_finite" || Name == "__expf_finite" ||
1505  Name == "__exp2_finite" || Name == "__exp2f_finite";
1506  case 'l':
1507  return Name == "__log_finite" || Name == "__logf_finite" ||
1508  Name == "__log10_finite" || Name == "__log10f_finite";
1509  case 'p':
1510  return Name == "__pow_finite" || Name == "__powf_finite";
1511  case 's':
1512  return Name == "__sinh_finite" || Name == "__sinhf_finite";
1513  }
1514  }
1515 }
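// Usage sketch (illustrative): callers normally pair this predicate with
// ConstantFoldCall, e.g.
//   if (canConstantFoldCallTo(Call, F))
//     if (Constant *Folded = ConstantFoldCall(Call, F, ConstantArgs, TLI))
//       Call->replaceAllUsesWith(Folded);
// where ConstantArgs holds the call's arguments as Constants.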
1516 
1517 namespace {
1518 
1519 Constant *GetConstantFoldFPValue(double V, Type *Ty) {
1520  if (Ty->isHalfTy() || Ty->isFloatTy()) {
1521  APFloat APF(V);
1522  bool unused;
1523  APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
1524  return ConstantFP::get(Ty->getContext(), APF);
1525  }
1526  if (Ty->isDoubleTy())
1527  return ConstantFP::get(Ty->getContext(), APFloat(V));
1528  llvm_unreachable("Can only constant fold half/float/double");
1529 }
1530 
1531 /// Clear the floating-point exception state.
1532 inline void llvm_fenv_clearexcept() {
1533 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
1534  feclearexcept(FE_ALL_EXCEPT);
1535 #endif
1536  errno = 0;
1537 }
1538 
1539 /// Test if a floating-point exception was raised.
1540 inline bool llvm_fenv_testexcept() {
1541  int errno_val = errno;
1542  if (errno_val == ERANGE || errno_val == EDOM)
1543  return true;
1544 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
1545  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
1546  return true;
1547 #endif
1548  return false;
1549 }
1550 
1551 Constant *ConstantFoldFP(double (*NativeFP)(double), double V, Type *Ty) {
1552  llvm_fenv_clearexcept();
1553  V = NativeFP(V);
1554  if (llvm_fenv_testexcept()) {
1555  llvm_fenv_clearexcept();
1556  return nullptr;
1557  }
1558 
1559  return GetConstantFoldFPValue(V, Ty);
1560 }
1561 
1562 Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), double V,
1563  double W, Type *Ty) {
1564  llvm_fenv_clearexcept();
1565  V = NativeFP(V, W);
1566  if (llvm_fenv_testexcept()) {
1567  llvm_fenv_clearexcept();
1568  return nullptr;
1569  }
1570 
1571  return GetConstantFoldFPValue(V, Ty);
1572 }
1573 
1574 /// Attempt to fold an SSE floating point to integer conversion of a constant
1575 /// floating point. If roundTowardZero is false, the default IEEE rounding is
1576 /// used (toward nearest, ties to even). This matches the behavior of the
1577 /// non-truncating SSE instructions in the default rounding mode. The desired
1578 /// integer type Ty is used to select how many bits are available for the
1579 /// result. Returns null if the conversion cannot be performed, otherwise
1580 /// returns the Constant value resulting from the conversion.
1581 Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
1582  Type *Ty, bool IsSigned) {
1583  // All of these conversion intrinsics form an integer of at most 64bits.
1584  unsigned ResultWidth = Ty->getIntegerBitWidth();
1585  assert(ResultWidth <= 64 &&
1586  "Can only constant fold conversions to 64 and 32 bit ints");
1587 
1588  uint64_t UIntVal;
1589  bool isExact = false;
1590  APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero
1591  : APFloat::rmNearestTiesToEven;
1592  APFloat::opStatus status =
1593  Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
1594  IsSigned, mode, &isExact);
1595  if (status != APFloat::opOK &&
1596  (!roundTowardZero || status != APFloat::opInexact))
1597  return nullptr;
1598  return ConstantInt::get(Ty, UIntVal, IsSigned);
1599 }
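// Example of the helper above: for Val == 2.5 and a 32-bit result type,
// roundTowardZero == false folds to 2 (round to nearest, ties to even) and
// roundTowardZero == true (the cvtt* forms) also folds to 2; for Val == 2.7
// the results are 3 and 2 respectively.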
1600 
1601 double getValueAsDouble(ConstantFP *Op) {
1602  Type *Ty = Op->getType();
1603 
1604  if (Ty->isFloatTy())
1605  return Op->getValueAPF().convertToFloat();
1606 
1607  if (Ty->isDoubleTy())
1608  return Op->getValueAPF().convertToDouble();
1609 
1610  bool unused;
1611  APFloat APF = Op->getValueAPF();
1612  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
1613  return APF.convertToDouble();
1614 }
1615 
1616 static bool isManifestConstant(const Constant *c) {
1617  if (isa<ConstantData>(c)) {
1618  return true;
1619  } else if (isa<ConstantAggregate>(c) || isa<ConstantExpr>(c)) {
1620  for (const Value *subc : c->operand_values()) {
1621  if (!isManifestConstant(cast<Constant>(subc)))
1622  return false;
1623  }
1624  return true;
1625  }
1626  return false;
1627 }
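// For instance, a ConstantInt such as i32 42 or a ConstantDataVector of floats
// is manifest, but a ConstantExpr like ptrtoint applied to a global is not,
// because the global's address is unknown until link/load time.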
1628 
1629 static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
1630  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1631  C = &CI->getValue();
1632  return true;
1633  }
1634  if (isa<UndefValue>(Op)) {
1635  C = nullptr;
1636  return true;
1637  }
1638  return false;
1639 }
1640 
1641 Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
1642  ArrayRef<Constant *> Operands,
1643  const TargetLibraryInfo *TLI,
1644  const CallBase *Call) {
1645  if (Operands.size() == 1) {
1646  if (IntrinsicID == Intrinsic::is_constant) {
1647  // We know we have a "Constant" argument. But we want to only
1648  // return true for manifest constants, not those that depend on
1649  // constants with unknowable values, e.g. GlobalValue or BlockAddress.
1650  if (isManifestConstant(Operands[0]))
1651  return ConstantInt::getTrue(Ty->getContext());
1652  return nullptr;
1653  }
1654  if (isa<UndefValue>(Operands[0])) {
1655  // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
1656  // ctpop() is between 0 and bitwidth, pick 0 for undef.
1657  if (IntrinsicID == Intrinsic::cos ||
1658  IntrinsicID == Intrinsic::ctpop)
1659  return Constant::getNullValue(Ty);
1660  if (IntrinsicID == Intrinsic::bswap ||
1661  IntrinsicID == Intrinsic::bitreverse ||
1662  IntrinsicID == Intrinsic::launder_invariant_group ||
1663  IntrinsicID == Intrinsic::strip_invariant_group)
1664  return Operands[0];
1665  }
1666 
1667  if (isa<ConstantPointerNull>(Operands[0])) {
1668  // launder(null) == null == strip(null) iff in addrspace 0
1669  if (IntrinsicID == Intrinsic::launder_invariant_group ||
1670  IntrinsicID == Intrinsic::strip_invariant_group) {
1671  // If instruction is not yet put in a basic block (e.g. when cloning
1672  // a function during inlining), Call's caller may not be available.
1673  // So check Call's BB first before querying Call->getCaller.
1674  const Function *Caller =
1675  Call->getParent() ? Call->getCaller() : nullptr;
1676  if (Caller &&
1677  !NullPointerIsDefined(
1678  Caller, Operands[0]->getType()->getPointerAddressSpace())) {
1679  return Operands[0];
1680  }
1681  return nullptr;
1682  }
1683  }
1684 
1685  if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
1686  if (IntrinsicID == Intrinsic::convert_to_fp16) {
1687  APFloat Val(Op->getValueAPF());
1688 
1689  bool lost = false;
1690  Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
1691 
1692  return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
1693  }
1694 
1695  if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
1696  return nullptr;
1697 
1698  if (IntrinsicID == Intrinsic::round) {
1699  APFloat V = Op->getValueAPF();
1700  V.roundToIntegral(APFloat::rmNearestTiesToAway);
1701  return ConstantFP::get(Ty->getContext(), V);
1702  }
1703 
1704  if (IntrinsicID == Intrinsic::floor) {
1705  APFloat V = Op->getValueAPF();
1706  V.roundToIntegral(APFloat::rmTowardNegative);
1707  return ConstantFP::get(Ty->getContext(), V);
1708  }
1709 
1710  if (IntrinsicID == Intrinsic::ceil) {
1711  APFloat V = Op->getValueAPF();
1712  V.roundToIntegral(APFloat::rmTowardPositive);
1713  return ConstantFP::get(Ty->getContext(), V);
1714  }
1715 
1716  if (IntrinsicID == Intrinsic::trunc) {
1717  APFloat V = Op->getValueAPF();
1718  V.roundToIntegral(APFloat::rmTowardZero);
1719  return ConstantFP::get(Ty->getContext(), V);
1720  }
1721 
1722  if (IntrinsicID == Intrinsic::rint) {
1723  APFloat V = Op->getValueAPF();
1724  V.roundToIntegral(APFloat::rmNearestTiesToEven);
1725  return ConstantFP::get(Ty->getContext(), V);
1726  }
1727 
1728  if (IntrinsicID == Intrinsic::nearbyint) {
1729  APFloat V = Op->getValueAPF();
1730  V.roundToIntegral(APFloat::rmNearestTiesToEven);
1731  return ConstantFP::get(Ty->getContext(), V);
1732  }
1733 
1734  /// We only fold functions with finite arguments. Folding NaN and inf is
1735  /// likely to be aborted with an exception anyway, and some host libms
1736  /// have known errors raising exceptions.
1737  if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
1738  return nullptr;
1739 
1740  /// Currently APFloat versions of these functions do not exist, so we use
1741  /// the host native double versions. Float versions are not called
1742  /// directly but for all these it is true (float)(f((double)arg)) ==
1743  /// f(arg). Long double not supported yet.
1744  double V = getValueAsDouble(Op);
1745 
1746  switch (IntrinsicID) {
1747  default: break;
1748  case Intrinsic::fabs:
1749  return ConstantFoldFP(fabs, V, Ty);
1750  case Intrinsic::log2:
1751  return ConstantFoldFP(Log2, V, Ty);
1752  case Intrinsic::log:
1753  return ConstantFoldFP(log, V, Ty);
1754  case Intrinsic::log10:
1755  return ConstantFoldFP(log10, V, Ty);
1756  case Intrinsic::exp:
1757  return ConstantFoldFP(exp, V, Ty);
1758  case Intrinsic::exp2:
1759  return ConstantFoldFP(exp2, V, Ty);
1760  case Intrinsic::sin:
1761  return ConstantFoldFP(sin, V, Ty);
1762  case Intrinsic::cos:
1763  return ConstantFoldFP(cos, V, Ty);
1764  case Intrinsic::sqrt:
1765  return ConstantFoldFP(sqrt, V, Ty);
1766  }
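// If the call was not one of the intrinsics above, the code below falls back
// to matching the libm-style name against TargetLibraryInfo; e.g. a call to
// cosf(0.0f) with LibFunc_cosf available folds to 1.0f via the host cos().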
1767 
1768  if (!TLI)
1769  return nullptr;
1770 
1771  char NameKeyChar = Name[0];
1772  if (Name[0] == '_' && Name.size() > 2 && Name[1] == '_')
1773  NameKeyChar = Name[2];
1774 
1775  switch (NameKeyChar) {
1776  case 'a':
1777  if ((Name == "acos" && TLI->has(LibFunc_acos)) ||
1778  (Name == "acosf" && TLI->has(LibFunc_acosf)) ||
1779  (Name == "__acos_finite" && TLI->has(LibFunc_acos_finite)) ||
1780  (Name == "__acosf_finite" && TLI->has(LibFunc_acosf_finite)))
1781  return ConstantFoldFP(acos, V, Ty);
1782  else if ((Name == "asin" && TLI->has(LibFunc_asin)) ||
1783  (Name == "asinf" && TLI->has(LibFunc_asinf)) ||
1784  (Name == "__asin_finite" && TLI->has(LibFunc_asin_finite)) ||
1785  (Name == "__asinf_finite" && TLI->has(LibFunc_asinf_finite)))
1786  return ConstantFoldFP(asin, V, Ty);
1787  else if ((Name == "atan" && TLI->has(LibFunc_atan)) ||
1788  (Name == "atanf" && TLI->has(LibFunc_atanf)))
1789  return ConstantFoldFP(atan, V, Ty);
1790  break;
1791  case 'c':
1792  if ((Name == "ceil" && TLI->has(LibFunc_ceil)) ||
1793  (Name == "ceilf" && TLI->has(LibFunc_ceilf)))
1794  return ConstantFoldFP(ceil, V, Ty);
1795  else if ((Name == "cos" && TLI->has(LibFunc_cos)) ||
1796  (Name == "cosf" && TLI->has(LibFunc_cosf)))
1797  return ConstantFoldFP(cos, V, Ty);
1798  else if ((Name == "cosh" && TLI->has(LibFunc_cosh)) ||
1799  (Name == "coshf" && TLI->has(LibFunc_coshf)) ||
1800  (Name == "__cosh_finite" && TLI->has(LibFunc_cosh_finite)) ||
1801  (Name == "__coshf_finite" && TLI->has(LibFunc_coshf_finite)))
1802  return ConstantFoldFP(cosh, V, Ty);
1803  break;
1804  case 'e':
1805  if ((Name == "exp" && TLI->has(LibFunc_exp)) ||
1806  (Name == "expf" && TLI->has(LibFunc_expf)) ||
1807  (Name == "__exp_finite" && TLI->has(LibFunc_exp_finite)) ||
1808  (Name == "__expf_finite" && TLI->has(LibFunc_expf_finite)))
1809  return ConstantFoldFP(exp, V, Ty);
1810  if ((Name == "exp2" && TLI->has(LibFunc_exp2)) ||
1811  (Name == "exp2f" && TLI->has(LibFunc_exp2f)) ||
1812  (Name == "__exp2_finite" && TLI->has(LibFunc_exp2_finite)) ||
1813  (Name == "__exp2f_finite" && TLI->has(LibFunc_exp2f_finite)))
1814  // Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
1815  // C99 library.
1816  return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
1817  break;
1818  case 'f':
1819  if ((Name == "fabs" && TLI->has(LibFunc_fabs)) ||
1820  (Name == "fabsf" && TLI->has(LibFunc_fabsf)))
1821  return ConstantFoldFP(fabs, V, Ty);
1822  else if ((Name == "floor" && TLI->has(LibFunc_floor)) ||
1823  (Name == "floorf" && TLI->has(LibFunc_floorf)))
1824  return ConstantFoldFP(floor, V, Ty);
1825  break;
1826  case 'l':
1827  if ((Name == "log" && V > 0 && TLI->has(LibFunc_log)) ||
1828  (Name == "logf" && V > 0 && TLI->has(LibFunc_logf)) ||
1829  (Name == "__log_finite" && V > 0 &&
1830  TLI->has(LibFunc_log_finite)) ||
1831  (Name == "__logf_finite" && V > 0 &&
1832  TLI->has(LibFunc_logf_finite)))
1833  return ConstantFoldFP(log, V, Ty);
1834  else if ((Name == "log10" && V > 0 && TLI->has(LibFunc_log10)) ||
1835  (Name == "log10f" && V > 0 && TLI->has(LibFunc_log10f)) ||
1836  (Name == "__log10_finite" && V > 0 &&
1837  TLI->has(LibFunc_log10_finite)) ||
1838  (Name == "__log10f_finite" && V > 0 &&
1839  TLI->has(LibFunc_log10f_finite)))
1840  return ConstantFoldFP(log10, V, Ty);
1841  break;
1842  case 'r':
1843  if ((Name == "round" && TLI->has(LibFunc_round)) ||
1844  (Name == "roundf" && TLI->has(LibFunc_roundf)))
1845  return ConstantFoldFP(round, V, Ty);
1846  break;
1847  case 's':
1848  if ((Name == "sin" && TLI->has(LibFunc_sin)) ||
1849  (Name == "sinf" && TLI->has(LibFunc_sinf)))
1850  return ConstantFoldFP(sin, V, Ty);
1851  else if ((Name == "sinh" && TLI->has(LibFunc_sinh)) ||
1852  (Name == "sinhf" && TLI->has(LibFunc_sinhf)) ||
1853  (Name == "__sinh_finite" && TLI->has(LibFunc_sinh_finite)) ||
1854  (Name == "__sinhf_finite" && TLI->has(LibFunc_sinhf_finite)))
1855  return ConstantFoldFP(sinh, V, Ty);
1856  else if ((Name == "sqrt" && V >= 0 && TLI->has(LibFunc_sqrt)) ||
1857  (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc_sqrtf)))
1858  return ConstantFoldFP(sqrt, V, Ty);
1859  break;
1860  case 't':
1861  if ((Name == "tan" && TLI->has(LibFunc_tan)) ||
1862  (Name == "tanf" && TLI->has(LibFunc_tanf)))
1863  return ConstantFoldFP(tan, V, Ty);
1864  else if ((Name == "tanh" && TLI->has(LibFunc_tanh)) ||
1865  (Name == "tanhf" && TLI->has(LibFunc_tanhf)))
1866  return ConstantFoldFP(tanh, V, Ty);
1867  break;
1868  default:
1869  break;
1870  }
1871  return nullptr;
1872  }
1873 
1874  if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
1875  switch (IntrinsicID) {
1876  case Intrinsic::bswap:
1877  return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
1878  case Intrinsic::ctpop:
1879  return ConstantInt::get(Ty, Op->getValue().countPopulation());
1880  case Intrinsic::bitreverse:
1881  return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
1882  case Intrinsic::convert_from_fp16: {
1883  APFloat Val(APFloat::IEEEhalf(), Op->getValue());
1884 
1885  bool lost = false;
1886  APFloat::opStatus status = Val.convert(
1887  Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);
1888 
1889  // Conversion is always precise.
1890  (void)status;
1891  assert(status == APFloat::opOK && !lost &&
1892  "Precision lost during fp16 constfolding");
1893 
1894  return ConstantFP::get(Ty->getContext(), Val);
1895  }
1896  default:
1897  return nullptr;
1898  }
1899  }
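// Examples for the integer cases above: llvm.bswap.i32(0x11223344) folds to
// 0x44332211, llvm.ctpop.i32(0xF0F0) folds to 8, and llvm.bitreverse.i8(0x01)
// folds to 0x80.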
1900 
1901  // Support ConstantVector in case we have an Undef in the top.
1902  if (isa<ConstantVector>(Operands[0]) ||
1903  isa<ConstantDataVector>(Operands[0])) {
1904  auto *Op = cast<Constant>(Operands[0]);
1905  switch (IntrinsicID) {
1906  default: break;
1907  case Intrinsic::x86_sse_cvtss2si:
1908  case Intrinsic::x86_sse_cvtss2si64:
1909  case Intrinsic::x86_sse2_cvtsd2si:
1910  case Intrinsic::x86_sse2_cvtsd2si64:
1911  if (ConstantFP *FPOp =
1912  dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
1913  return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
1914  /*roundTowardZero=*/false, Ty,
1915  /*IsSigned*/true);
1916  break;
1917  case Intrinsic::x86_sse_cvttss2si:
1918  case Intrinsic::x86_sse_cvttss2si64:
1919  case Intrinsic::x86_sse2_cvttsd2si:
1920  case Intrinsic::x86_sse2_cvttsd2si64:
1921  if (ConstantFP *FPOp =
1922  dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
1923  return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
1924  /*roundTowardZero=*/true, Ty,
1925  /*IsSigned*/true);
1926  break;
1927  }
1928  }
1929 
1930  return nullptr;
1931  }
1932 
1933  if (Operands.size() == 2) {
1934  if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
1935  if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
1936  return nullptr;
1937  double Op1V = getValueAsDouble(Op1);
1938 
1939  if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
1940  if (Op2->getType() != Op1->getType())
1941  return nullptr;
1942 
1943  double Op2V = getValueAsDouble(Op2);
1944  if (IntrinsicID == Intrinsic::pow) {
1945  return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
1946  }
1947  if (IntrinsicID == Intrinsic::copysign) {
1948  APFloat V1 = Op1->getValueAPF();
1949  const APFloat &V2 = Op2->getValueAPF();
1950  V1.copySign(V2);
1951  return ConstantFP::get(Ty->getContext(), V1);
1952  }
1953 
1954  if (IntrinsicID == Intrinsic::minnum) {
1955  const APFloat &C1 = Op1->getValueAPF();
1956  const APFloat &C2 = Op2->getValueAPF();
1957  return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
1958  }
1959 
1960  if (IntrinsicID == Intrinsic::maxnum) {
1961  const APFloat &C1 = Op1->getValueAPF();
1962  const APFloat &C2 = Op2->getValueAPF();
1963  return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
1964  }
1965 
1966  if (IntrinsicID == Intrinsic::minimum) {
1967  const APFloat &C1 = Op1->getValueAPF();
1968  const APFloat &C2 = Op2->getValueAPF();
1969  return ConstantFP::get(Ty->getContext(), minimum(C1, C2));
1970  }
1971 
1972  if (IntrinsicID == Intrinsic::maximum) {
1973  const APFloat &C1 = Op1->getValueAPF();
1974  const APFloat &C2 = Op2->getValueAPF();
1975  return ConstantFP::get(Ty->getContext(), maximum(C1, C2));
1976  }
1977 
1978  if (!TLI)
1979  return nullptr;
1980  if ((Name == "pow" && TLI->has(LibFunc_pow)) ||
1981  (Name == "powf" && TLI->has(LibFunc_powf)) ||
1982  (Name == "__pow_finite" && TLI->has(LibFunc_pow_finite)) ||
1983  (Name == "__powf_finite" && TLI->has(LibFunc_powf_finite)))
1984  return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
1985  if ((Name == "fmod" && TLI->has(LibFunc_fmod)) ||
1986  (Name == "fmodf" && TLI->has(LibFunc_fmodf)))
1987  return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
1988  if ((Name == "atan2" && TLI->has(LibFunc_atan2)) ||
1989  (Name == "atan2f" && TLI->has(LibFunc_atan2f)) ||
1990  (Name == "__atan2_finite" && TLI->has(LibFunc_atan2_finite)) ||
1991  (Name == "__atan2f_finite" && TLI->has(LibFunc_atan2f_finite)))
1992  return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
1993  } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
1994  if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
1995  return ConstantFP::get(Ty->getContext(),
1996  APFloat((float)std::pow((float)Op1V,
1997  (int)Op2C->getZExtValue())));
1998  if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
1999  return ConstantFP::get(Ty->getContext(),
2000  APFloat((float)std::pow((float)Op1V,
2001  (int)Op2C->getZExtValue())));
2002  if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
2003  return ConstantFP::get(Ty->getContext(),
2004  APFloat((double)std::pow((double)Op1V,
2005  (int)Op2C->getZExtValue())));
2006  }
2007  return nullptr;
2008  }
2009 
2010  if (Operands[0]->getType()->isIntegerTy() &&
2011  Operands[1]->getType()->isIntegerTy()) {
2012  const APInt *C0, *C1;
2013  if (!getConstIntOrUndef(Operands[0], C0) ||
2014  !getConstIntOrUndef(Operands[1], C1))
2015  return nullptr;
2016 
2017  switch (IntrinsicID) {
2018  default: break;
2019  case Intrinsic::smul_with_overflow:
2020  case Intrinsic::umul_with_overflow:
2021  // Even if both operands are undef, we cannot fold muls to undef
2022  // in the general case. For example, on i2 there are no inputs
2023  // that would produce { i2 -1, i1 true } as the result.
2024  if (!C0 || !C1)
2025  return Constant::getNullValue(Ty);
2026  LLVM_FALLTHROUGH;
2027  case Intrinsic::sadd_with_overflow:
2028  case Intrinsic::uadd_with_overflow:
2029  case Intrinsic::ssub_with_overflow:
2030  case Intrinsic::usub_with_overflow: {
2031  if (!C0 || !C1)
2032  return UndefValue::get(Ty);
2033 
2034  APInt Res;
2035  bool Overflow;
2036  switch (IntrinsicID) {
2037  default: llvm_unreachable("Invalid case");
2038  case Intrinsic::sadd_with_overflow:
2039  Res = C0->sadd_ov(*C1, Overflow);
2040  break;
2041  case Intrinsic::uadd_with_overflow:
2042  Res = C0->uadd_ov(*C1, Overflow);
2043  break;
2044  case Intrinsic::ssub_with_overflow:
2045  Res = C0->ssub_ov(*C1, Overflow);
2046  break;
2047  case Intrinsic::usub_with_overflow:
2048  Res = C0->usub_ov(*C1, Overflow);
2049  break;
2050  case Intrinsic::smul_with_overflow:
2051  Res = C0->smul_ov(*C1, Overflow);
2052  break;
2053  case Intrinsic::umul_with_overflow:
2054  Res = C0->umul_ov(*C1, Overflow);
2055  break;
2056  }
2057  Constant *Ops[] = {
2058  ConstantInt::get(Ty->getContext(), Res),
2059  ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
2060  };
2061  return ConstantStruct::get(cast<StructType>(Ty), Ops);
2062  }
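// Worked example: @llvm.sadd.with.overflow.i8(i8 100, i8 100) folds to the
// struct { i8 -56, i1 true }, since 100 + 100 = 200 wraps to -56 in 8 bits
// and therefore overflows.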
2063  case Intrinsic::uadd_sat:
2064  case Intrinsic::sadd_sat:
2065  if (!C0 && !C1)
2066  return UndefValue::get(Ty);
2067  if (!C0 || !C1)
2068  return Constant::getAllOnesValue(Ty);
2069  if (IntrinsicID == Intrinsic::uadd_sat)
2070  return ConstantInt::get(Ty, C0->uadd_sat(*C1));
2071  else
2072  return ConstantInt::get(Ty, C0->sadd_sat(*C1));
2073  case Intrinsic::usub_sat:
2074  case Intrinsic::ssub_sat:
2075  if (!C0 && !C1)
2076  return UndefValue::get(Ty);
2077  if (!C0 || !C1)
2078  return Constant::getNullValue(Ty);
2079  if (IntrinsicID == Intrinsic::usub_sat)
2080  return ConstantInt::get(Ty, C0->usub_sat(*C1));
2081  else
2082  return ConstantInt::get(Ty, C0->ssub_sat(*C1));
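// Examples for the saturating cases above: @llvm.uadd.sat.i8(i8 200, i8 100)
// clamps to 255, and @llvm.ssub.sat.i8(i8 -100, i8 100) clamps to -128.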
2083  case Intrinsic::cttz:
2084  case Intrinsic::ctlz:
2085  assert(C1 && "Must be constant int");
2086 
2087  // cttz(0, 1) and ctlz(0, 1) are undef.
2088  if (C1->isOneValue() && (!C0 || C0->isNullValue()))
2089  return UndefValue::get(Ty);
2090  if (!C0)
2091  return Constant::getNullValue(Ty);
2092  if (IntrinsicID == Intrinsic::cttz)
2093  return ConstantInt::get(Ty, C0->countTrailingZeros());
2094  else
2095  return ConstantInt::get(Ty, C0->countLeadingZeros());
2096  }
2097 
2098  return nullptr;
2099  }
2100 
2101  // Support ConstantVector in case we have an Undef in the top.
2102  if ((isa<ConstantVector>(Operands[0]) ||
2103  isa<ConstantDataVector>(Operands[0])) &&
2104  // Check for default rounding mode.
2105  // FIXME: Support other rounding modes?
2106  isa<ConstantInt>(Operands[1]) &&
2107  cast<ConstantInt>(Operands[1])->getValue() == 4) {
2108  auto *Op = cast<Constant>(Operands[0]);
2109  switch (IntrinsicID) {
2110  default: break;
2111  case Intrinsic::x86_avx512_vcvtss2si32:
2112  case Intrinsic::x86_avx512_vcvtss2si64:
2113  case Intrinsic::x86_avx512_vcvtsd2si32:
2114  case Intrinsic::x86_avx512_vcvtsd2si64:
2115  if (ConstantFP *FPOp =
2116  dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2117  return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2118  /*roundTowardZero=*/false, Ty,
2119  /*IsSigned*/true);
2120  break;
2121  case Intrinsic::x86_avx512_vcvtss2usi32:
2122  case Intrinsic::x86_avx512_vcvtss2usi64:
2123  case Intrinsic::x86_avx512_vcvtsd2usi32:
2124  case Intrinsic::x86_avx512_vcvtsd2usi64:
2125  if (ConstantFP *FPOp =
2126  dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2127  return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2128  /*roundTowardZero=*/false, Ty,
2129  /*IsSigned*/false);
2130  break;
2131  case Intrinsic::x86_avx512_cvttss2si:
2132  case Intrinsic::x86_avx512_cvttss2si64:
2133  case Intrinsic::x86_avx512_cvttsd2si:
2134  case Intrinsic::x86_avx512_cvttsd2si64:
2135  if (ConstantFP *FPOp =
2136  dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2137  return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2138  /*roundTowardZero=*/true, Ty,
2139  /*IsSigned*/true);
2140  break;
2141  case Intrinsic::x86_avx512_cvttss2usi:
2142  case Intrinsic::x86_avx512_cvttss2usi64:
2143  case Intrinsic::x86_avx512_cvttsd2usi:
2144  case Intrinsic::x86_avx512_cvttsd2usi64:
2145  if (ConstantFP *FPOp =
2146  dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2147  return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2148  /*roundTowardZero=*/true, Ty,
2149  /*IsSigned*/false);
2150  break;
2151  }
2152  }
2153  return nullptr;
2154  }
2155 
2156  if (Operands.size() != 3)
2157  return nullptr;
2158 
2159  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
2160  if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
2161  if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
2162  switch (IntrinsicID) {
2163  default: break;
2164  case Intrinsic::fma:
2165  case Intrinsic::fmuladd: {
2166  APFloat V = Op1->getValueAPF();
2167  APFloat::opStatus s = V.fusedMultiplyAdd(Op2->getValueAPF(),
2168  Op3->getValueAPF(),
2169  APFloat::rmNearestTiesToEven);
2170  if (s != APFloat::opInvalidOp)
2171  return ConstantFP::get(Ty->getContext(), V);
2172 
2173  return nullptr;
2174  }
2175  }
2176  }
2177  }
2178  }
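// For example, @llvm.fma.f64(2.0, 3.0, 1.0) folds to 7.0; an invalid
// operation such as fma(inf, 0.0, x) raises opInvalidOp and is left unfolded.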
2179 
2180  if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
2181  const APInt *C0, *C1, *C2;
2182  if (!getConstIntOrUndef(Operands[0], C0) ||
2183  !getConstIntOrUndef(Operands[1], C1) ||
2184  !getConstIntOrUndef(Operands[2], C2))
2185  return nullptr;
2186 
2187  bool IsRight = IntrinsicID == Intrinsic::fshr;
2188  if (!C2)
2189  return Operands[IsRight ? 1 : 0];
2190  if (!C0 && !C1)
2191  return UndefValue::get(Ty);
2192 
2193  // The shift amount is interpreted as modulo the bitwidth. If the shift
2194  // amount is effectively 0, avoid UB due to oversized inverse shift below.
2195  unsigned BitWidth = C2->getBitWidth();
2196  unsigned ShAmt = C2->urem(BitWidth);
2197  if (!ShAmt)
2198  return Operands[IsRight ? 1 : 0];
2199 
2200  // (C0 << ShlAmt) | (C1 >> LshrAmt)
2201  unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
2202  unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
2203  if (!C0)
2204  return ConstantInt::get(Ty, C1->lshr(LshrAmt));
2205  if (!C1)
2206  return ConstantInt::get(Ty, C0->shl(ShlAmt));
2207  return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
2208  }
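// Worked example: @llvm.fshl.i8(i8 0xAB, i8 0xCD, i8 4) folds to
// (0xAB << 4) | (0xCD >> 4) = 0xB0 | 0x0C = 0xBC, and a shift amount that is
// a multiple of the bit width returns the first operand unchanged.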
2209 
2210  return nullptr;
2211 }
2212 
2213 Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID,
2214  VectorType *VTy, ArrayRef<Constant *> Operands,
2215  const DataLayout &DL,
2216  const TargetLibraryInfo *TLI,
2217  const CallBase *Call) {
2218  SmallVector<Constant *, 32> Result(VTy->getNumElements());
2219  SmallVector<Constant *, 4> Lane(Operands.size());
2220  Type *Ty = VTy->getElementType();
2221 
2222  if (IntrinsicID == Intrinsic::masked_load) {
2223  auto *SrcPtr = Operands[0];
2224  auto *Mask = Operands[2];
2225  auto *Passthru = Operands[3];
2226 
2227  Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, VTy, DL);
2228 
2229  SmallVector<Constant *, 32> NewElements;
2230  for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
2231  auto *MaskElt = Mask->getAggregateElement(I);
2232  if (!MaskElt)
2233  break;
2234  auto *PassthruElt = Passthru->getAggregateElement(I);
2235  auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
2236  if (isa<UndefValue>(MaskElt)) {
2237  if (PassthruElt)
2238  NewElements.push_back(PassthruElt);
2239  else if (VecElt)
2240  NewElements.push_back(VecElt);
2241  else
2242  return nullptr;
2243  }
2244  if (MaskElt->isNullValue()) {
2245  if (!PassthruElt)
2246  return nullptr;
2247  NewElements.push_back(PassthruElt);
2248  } else if (MaskElt->isOneValue()) {
2249  if (!VecElt)
2250  return nullptr;
2251  NewElements.push_back(VecElt);
2252  } else {
2253  return nullptr;
2254  }
2255  }
2256  if (NewElements.size() != VTy->getNumElements())
2257  return nullptr;
2258  return ConstantVector::get(NewElements);
2259  }
2260 
2261  for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
2262  // Gather a column of constants.
2263  for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
2264  // These intrinsics use a scalar type for their second argument.
2265  if (J == 1 &&
2266  (IntrinsicID == Intrinsic::cttz || IntrinsicID == Intrinsic::ctlz ||
2267  IntrinsicID == Intrinsic::powi)) {
2268  Lane[J] = Operands[J];
2269  continue;
2270  }
2271 
2272  Constant *Agg = Operands[J]->getAggregateElement(I);
2273  if (!Agg)
2274  return nullptr;
2275 
2276  Lane[J] = Agg;
2277  }
2278 
2279  // Use the regular scalar folding to simplify this column.
2280  Constant *Folded =
2281  ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
2282  if (!Folded)
2283  return nullptr;
2284  Result[I] = Folded;
2285  }
2286 
2287  return ConstantVector::get(Result);
2288 }
2289 
2290 } // end anonymous namespace
2291 
2292 Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
2293  ArrayRef<Constant *> Operands,
2294  const TargetLibraryInfo *TLI) {
2295  if (Call->isNoBuiltin() || Call->isStrictFP())
2296  return nullptr;
2297  if (!F->hasName())
2298  return nullptr;
2299  StringRef Name = F->getName();
2300 
2301  Type *Ty = F->getReturnType();
2302 
2303  if (auto *VTy = dyn_cast<VectorType>(Ty))
2304  return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
2305  F->getParent()->getDataLayout(), TLI, Call);
2306 
2307  return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
2308  Call);
2309 }
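// Typical use from a pass (a minimal sketch; the surrounding pass boilerplate
// and the TLI pointer are assumed): given a CallBase *CB whose arguments are
// all Constants,
//
//   SmallVector<Constant *, 4> Args;
//   for (Value *A : CB->args())
//     Args.push_back(cast<Constant>(A));
//   if (Constant *C = ConstantFoldCall(CB, CB->getCalledFunction(), Args, TLI))
//     CB->replaceAllUsesWith(C);
//
// canConstantFoldCallTo() can be checked first to avoid building the operand
// list for calls that can never fold.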
2310 
2311 bool llvm::isMathLibCallNoop(const CallBase *Call,
2312  const TargetLibraryInfo *TLI) {
2313  // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
2314  // (and to some extent ConstantFoldScalarCall).
2315  if (Call->isNoBuiltin() || Call->isStrictFP())
2316  return false;
2317  Function *F = Call->getCalledFunction();
2318  if (!F)
2319  return false;
2320 
2321  LibFunc Func;
2322  if (!TLI || !TLI->getLibFunc(*F, Func))
2323  return false;
2324 
2325  if (Call->getNumArgOperands() == 1) {
2326  if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
2327  const APFloat &Op = OpC->getValueAPF();
2328  switch (Func) {
2329  case LibFunc_logl:
2330  case LibFunc_log:
2331  case LibFunc_logf:
2332  case LibFunc_log2l:
2333  case LibFunc_log2:
2334  case LibFunc_log2f:
2335  case LibFunc_log10l:
2336  case LibFunc_log10:
2337  case LibFunc_log10f:
2338  return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
2339 
2340  case LibFunc_expl:
2341  case LibFunc_exp:
2342  case LibFunc_expf:
2343  // FIXME: These boundaries are slightly conservative.
2344  if (OpC->getType()->isDoubleTy())
2345  return Op.compare(APFloat(-745.0)) != APFloat::cmpLessThan &&
2346  Op.compare(APFloat(709.0)) != APFloat::cmpGreaterThan;
2347  if (OpC->getType()->isFloatTy())
2348  return Op.compare(APFloat(-103.0f)) != APFloat::cmpLessThan &&
2349  Op.compare(APFloat(88.0f)) != APFloat::cmpGreaterThan;
2350  break;
2351 
2352  case LibFunc_exp2l:
2353  case LibFunc_exp2:
2354  case LibFunc_exp2f:
2355  // FIXME: These boundaries are slightly conservative.
2356  if (OpC->getType()->isDoubleTy())
2357  return Op.compare(APFloat(-1074.0)) != APFloat::cmpLessThan &&
2358  Op.compare(APFloat(1023.0)) != APFloat::cmpGreaterThan;
2359  if (OpC->getType()->isFloatTy())
2360  return Op.compare(APFloat(-149.0f)) != APFloat::cmpLessThan &&
2361  Op.compare(APFloat(127.0f)) != APFloat::cmpGreaterThan;
2362  break;
2363 
2364  case LibFunc_sinl:
2365  case LibFunc_sin:
2366  case LibFunc_sinf:
2367  case LibFunc_cosl:
2368  case LibFunc_cos:
2369  case LibFunc_cosf:
2370  return !Op.isInfinity();
2371 
2372  case LibFunc_tanl:
2373  case LibFunc_tan:
2374  case LibFunc_tanf: {
2375  // FIXME: Stop using the host math library.
2376  // FIXME: The computation isn't done in the right precision.
2377  Type *Ty = OpC->getType();
2378  if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
2379  double OpV = getValueAsDouble(OpC);
2380  return ConstantFoldFP(tan, OpV, Ty) != nullptr;
2381  }
2382  break;
2383  }
2384 
2385  case LibFunc_asinl:
2386  case LibFunc_asin:
2387  case LibFunc_asinf:
2388  case LibFunc_acosl:
2389  case LibFunc_acos:
2390  case LibFunc_acosf:
2391  return Op.compare(APFloat(Op.getSemantics(), "-1")) !=
2392  APFloat::cmpLessThan &&
2393  Op.compare(APFloat(Op.getSemantics(), "1")) !=
2394  APFloat::cmpGreaterThan;
2395 
2396  case LibFunc_sinh:
2397  case LibFunc_cosh:
2398  case LibFunc_sinhf:
2399  case LibFunc_coshf:
2400  case LibFunc_sinhl:
2401  case LibFunc_coshl:
2402  // FIXME: These boundaries are slightly conservative.
2403  if (OpC->getType()->isDoubleTy())
2404  return Op.compare(APFloat(-710.0)) != APFloat::cmpLessThan &&
2405  Op.compare(APFloat(710.0)) != APFloat::cmpGreaterThan;
2406  if (OpC->getType()->isFloatTy())
2407  return Op.compare(APFloat(-89.0f)) != APFloat::cmpLessThan &&
2408  Op.compare(APFloat(89.0f)) != APFloat::cmpGreaterThan;
2409  break;
2410 
2411  case LibFunc_sqrtl:
2412  case LibFunc_sqrt:
2413  case LibFunc_sqrtf:
2414  return Op.isNaN() || Op.isZero() || !Op.isNegative();
2415 
2416  // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
2417  // maybe others?
2418  default:
2419  break;
2420  }
2421  }
2422  }
2423 
2424  if (Call->getNumArgOperands() == 2) {
2425  ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
2426  ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
2427  if (Op0C && Op1C) {
2428  const APFloat &Op0 = Op0C->getValueAPF();
2429  const APFloat &Op1 = Op1C->getValueAPF();
2430 
2431  switch (Func) {
2432  case LibFunc_powl:
2433  case LibFunc_pow:
2434  case LibFunc_powf: {
2435  // FIXME: Stop using the host math library.
2436  // FIXME: The computation isn't done in the right precision.
2437  Type *Ty = Op0C->getType();
2438  if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
2439  if (Ty == Op1C->getType()) {
2440  double Op0V = getValueAsDouble(Op0C);
2441  double Op1V = getValueAsDouble(Op1C);
2442  return ConstantFoldBinaryFP(pow, Op0V, Op1V, Ty) != nullptr;
2443  }
2444  }
2445  break;
2446  }
2447 
2448  case LibFunc_fmodl:
2449  case LibFunc_fmod:
2450  case LibFunc_fmodf:
2451  return Op0.isNaN() || Op1.isNaN() ||
2452  (!Op0.isInfinity() && !Op1.isZero());
2453 
2454  default:
2455  break;
2456  }
2457  }
2458  }
2459 
2460  return false;
2461 }