ConstantFolding.cpp
1 //===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines routines for folding instructions into constants.
11 //
12 // Also, to supplement the basic IR ConstantExpr simplifications,
13 // this file defines some additional folding routines that can make use of
14 // DataLayout information. These functions cannot go in IR due to library
15 // dependency issues.
16 //
17 //===----------------------------------------------------------------------===//
18 
19 #include "llvm/Analysis/ConstantFolding.h"
20 #include "llvm/ADT/APFloat.h"
21 #include "llvm/ADT/APInt.h"
22 #include "llvm/ADT/ArrayRef.h"
23 #include "llvm/ADT/DenseMap.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/StringRef.h"
27 #include "llvm/Analysis/TargetLibraryInfo.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/Config/config.h"
30 #include "llvm/IR/Constant.h"
31 #include "llvm/IR/Constants.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/DerivedTypes.h"
34 #include "llvm/IR/Function.h"
35 #include "llvm/IR/GlobalValue.h"
36 #include "llvm/IR/GlobalVariable.h"
37 #include "llvm/IR/InstrTypes.h"
38 #include "llvm/IR/Instruction.h"
39 #include "llvm/IR/Instructions.h"
40 #include "llvm/IR/Operator.h"
41 #include "llvm/IR/Type.h"
42 #include "llvm/IR/Value.h"
43 #include "llvm/Support/Casting.h"
44 #include "llvm/Support/ErrorHandling.h"
45 #include "llvm/Support/KnownBits.h"
46 #include "llvm/Support/MathExtras.h"
47 #include <cassert>
48 #include <cerrno>
49 #include <cfenv>
50 #include <cmath>
51 #include <cstddef>
52 #include <cstdint>
53 
54 using namespace llvm;
55 
56 namespace {
57 
58 //===----------------------------------------------------------------------===//
59 // Constant Folding internal helper functions
60 //===----------------------------------------------------------------------===//
61 
62 static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
63  Constant *C, Type *SrcEltTy,
64  unsigned NumSrcElts,
65  const DataLayout &DL) {
66  // Now that we know that the input value is a vector of integers, just shift
67  // and insert them into our result.
68  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
69  for (unsigned i = 0; i != NumSrcElts; ++i) {
70  Constant *Element;
71  if (DL.isLittleEndian())
72  Element = C->getAggregateElement(NumSrcElts - i - 1);
73  else
74  Element = C->getAggregateElement(i);
75 
76  if (Element && isa<UndefValue>(Element)) {
77  Result <<= BitShift;
78  continue;
79  }
80 
81  auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
82  if (!ElementCI)
83  return ConstantExpr::getBitCast(C, DestTy);
84 
85  Result <<= BitShift;
86  Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
87  }
88 
89  return nullptr;
90 }
91 
92 /// Constant fold bitcast, symbolically evaluating it with DataLayout.
93 /// This always returns a non-null constant, but it may be a
94 /// ConstantExpr if unfoldable.
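/// For example, on a little-endian target, bitcasting
/// <2 x i32> <i32 1, i32 2> to i64 folds to i64 0x0000000200000001,
/// since element 0 occupies the least significant bits.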
95 Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
96  // Catch the obvious splat cases.
97  if (C->isNullValue() && !DestTy->isX86_MMXTy())
98  return Constant::getNullValue(DestTy);
99  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
100  !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
101  return Constant::getAllOnesValue(DestTy);
102 
103  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
104  // Handle a vector->scalar integer/fp cast.
105  if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
106  unsigned NumSrcElts = VTy->getNumElements();
107  Type *SrcEltTy = VTy->getElementType();
108 
109  // If the vector is a vector of floating point, convert it to vector of int
110  // to simplify things.
111  if (SrcEltTy->isFloatingPointTy()) {
112  unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
113  Type *SrcIVTy =
114  VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
115  // Ask IR to do the conversion now that #elts line up.
116  C = ConstantExpr::getBitCast(C, SrcIVTy);
117  }
118 
119  APInt Result(DL.getTypeSizeInBits(DestTy), 0);
120  if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
121  SrcEltTy, NumSrcElts, DL))
122  return CE;
123 
124  if (isa<IntegerType>(DestTy))
125  return ConstantInt::get(DestTy, Result);
126 
127  APFloat FP(DestTy->getFltSemantics(), Result);
128  return ConstantFP::get(DestTy->getContext(), FP);
129  }
130  }
131 
132  // The code below only handles casts to vectors currently.
133  auto *DestVTy = dyn_cast<VectorType>(DestTy);
134  if (!DestVTy)
135  return ConstantExpr::getBitCast(C, DestTy);
136 
137  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
138  // vector so the code below can handle it uniformly.
139  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
140  Constant *Ops = C; // don't take the address of C!
141  return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
142  }
143 
144  // If this is a bitcast from constant vector -> vector, fold it.
145  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
146  return ConstantExpr::getBitCast(C, DestTy);
147 
148  // If the element types match, IR can fold it.
149  unsigned NumDstElt = DestVTy->getNumElements();
150  unsigned NumSrcElt = C->getType()->getVectorNumElements();
151  if (NumDstElt == NumSrcElt)
152  return ConstantExpr::getBitCast(C, DestTy);
153 
154  Type *SrcEltTy = C->getType()->getVectorElementType();
155  Type *DstEltTy = DestVTy->getElementType();
156 
157  // Otherwise, we're changing the number of elements in a vector, which
158  // requires endianness information to do the right thing. For example,
159  // bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
160  // folds to (little endian):
161  // <4 x i32> <i32 0, i32 0, i32 1, i32 0>
162  // and to (big endian):
163  // <4 x i32> <i32 0, i32 0, i32 0, i32 1>
164 
165  // First things first: we only want to think about integers here, so if
166  // we have something in FP form, recast it as integer.
167  if (DstEltTy->isFloatingPointTy()) {
168  // Fold to a vector of integers with the same size as our FP type.
169  unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
170  Type *DestIVTy =
171  VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
172  // Recursively handle this integer conversion, if possible.
173  C = FoldBitCast(C, DestIVTy, DL);
174 
175  // Finally, IR can handle this now that #elts line up.
176  return ConstantExpr::getBitCast(C, DestTy);
177  }
178 
179  // Okay, we know the destination is integer, if the input is FP, convert
180  // it to integer first.
181  if (SrcEltTy->isFloatingPointTy()) {
182  unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
183  Type *SrcIVTy =
184  VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
185  // Ask IR to do the conversion now that #elts line up.
186  C = ConstantExpr::getBitCast(C, SrcIVTy);
187  // If IR wasn't able to fold it, bail out.
188  if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
189  !isa<ConstantDataVector>(C))
190  return C;
191  }
192 
193  // Now we know that the input and output vectors are both integer vectors
194  // of the same size, and that their #elements is not the same. Do the
195  // conversion here, which depends on whether the input or output has
196  // more elements.
197  bool isLittleEndian = DL.isLittleEndian();
198 
199  SmallVector<Constant *, 32> Result;
200  if (NumDstElt < NumSrcElt) {
201  // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
202  Constant *Zero = Constant::getNullValue(DstEltTy);
203  unsigned Ratio = NumSrcElt/NumDstElt;
204  unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
205  unsigned SrcElt = 0;
206  for (unsigned i = 0; i != NumDstElt; ++i) {
207  // Build each element of the result.
208  Constant *Elt = Zero;
209  unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
210  for (unsigned j = 0; j != Ratio; ++j) {
211  Constant *Src = C->getAggregateElement(SrcElt++);
212  if (Src && isa<UndefValue>(Src))
213  Src = Constant::getNullValue(C->getType()->getVectorElementType());
214  else
215  Src = dyn_cast_or_null<ConstantInt>(Src);
216  if (!Src) // Reject constantexpr elements.
217  return ConstantExpr::getBitCast(C, DestTy);
218 
219  // Zero extend the element to the right size.
220  Src = ConstantExpr::getZExt(Src, Elt->getType());
221 
222  // Shift it to the right place, depending on endianness.
223  Src = ConstantExpr::getShl(Src,
224  ConstantInt::get(Src->getType(), ShiftAmt));
225  ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
226 
227  // Mix it in.
228  Elt = ConstantExpr::getOr(Elt, Src);
229  }
230  Result.push_back(Elt);
231  }
232  return ConstantVector::get(Result);
233  }
234 
235  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
236  unsigned Ratio = NumDstElt/NumSrcElt;
237  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);
238 
239  // Loop over each source value, expanding into multiple results.
240  for (unsigned i = 0; i != NumSrcElt; ++i) {
241  auto *Element = C->getAggregateElement(i);
242 
243  if (!Element) // Reject constantexpr elements.
244  return ConstantExpr::getBitCast(C, DestTy);
245 
246  if (isa<UndefValue>(Element)) {
247  // Correctly Propagate undef values.
248  Result.append(Ratio, UndefValue::get(DstEltTy));
249  continue;
250  }
251 
252  auto *Src = dyn_cast<ConstantInt>(Element);
253  if (!Src)
254  return ConstantExpr::getBitCast(C, DestTy);
255 
256  unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
257  for (unsigned j = 0; j != Ratio; ++j) {
258  // Shift the piece of the value into the right place, depending on
259  // endianness.
260  Constant *Elt = ConstantExpr::getLShr(Src,
261  ConstantInt::get(Src->getType(), ShiftAmt));
262  ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
263 
264  // Truncate the element to an integer with the same pointer size and
265  // convert the element back to a pointer using an inttoptr.
266  if (DstEltTy->isPointerTy()) {
267  IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
268  Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
269  Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
270  continue;
271  }
272 
273  // Truncate and remember this piece.
274  Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
275  }
276  }
277 
278  return ConstantVector::get(Result);
279 }
280 
281 } // end anonymous namespace
282 
283 /// If this constant is a constant offset from a global, return the global and
284 /// the constant. Because of constantexprs, this function is recursive.
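/// For example, given i32* getelementptr ([5 x i32], [5 x i32]* @a, i32 0, i32 3),
/// GV is set to @a and Offset to 12 (three 4-byte elements).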
285 bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
286  APInt &Offset, const DataLayout &DL) {
287  // Trivial case, constant is the global.
288  if ((GV = dyn_cast<GlobalValue>(C))) {
289  unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
290  Offset = APInt(BitWidth, 0);
291  return true;
292  }
293 
294  // Otherwise, if this isn't a constant expr, bail out.
295  auto *CE = dyn_cast<ConstantExpr>(C);
296  if (!CE) return false;
297 
298  // Look through ptr->int and ptr->ptr casts.
299  if (CE->getOpcode() == Instruction::PtrToInt ||
300  CE->getOpcode() == Instruction::BitCast)
301  return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL);
302 
303  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
304  auto *GEP = dyn_cast<GEPOperator>(CE);
305  if (!GEP)
306  return false;
307 
308  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
309  APInt TmpOffset(BitWidth, 0);
310 
311  // If the base isn't a global+constant, we aren't either.
312  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL))
313  return false;
314 
315  // Otherwise, add any offset that our operands provide.
316  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
317  return false;
318 
319  Offset = TmpOffset;
320  return true;
321 }
322 
323 Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
324  const DataLayout &DL) {
325  do {
326  Type *SrcTy = C->getType();
327 
328  // If the type sizes are the same and a cast is legal, just directly
329  // cast the constant.
330  if (DL.getTypeSizeInBits(DestTy) == DL.getTypeSizeInBits(SrcTy)) {
331  Instruction::CastOps Cast = Instruction::BitCast;
332  // If we are going from a pointer to int or vice versa, we spell the cast
333  // differently.
334  if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
335  Cast = Instruction::IntToPtr;
336  else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
337  Cast = Instruction::PtrToInt;
338 
339  if (CastInst::castIsValid(Cast, C, DestTy))
340  return ConstantExpr::getCast(Cast, C, DestTy);
341  }
342 
343  // If this isn't an aggregate type, there is nothing we can do to drill down
344  // and find a bitcastable constant.
345  if (!SrcTy->isAggregateType())
346  return nullptr;
347 
348  // We're simulating a load through a pointer that was bitcast to point to
349  // a different type, so we can try to walk down through the initial
350  // elements of an aggregate to see if some part of the aggregate is
351  // castable to implement the "load" semantic model.
352  C = C->getAggregateElement(0u);
353  } while (C);
354 
355  return nullptr;
356 }
357 
358 namespace {
359 
360 /// Recursive helper to read bits out of global. C is the constant being copied
361 /// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
362 /// results into and BytesLeft is the number of bytes left in
363 /// the CurPtr buffer. DL is the DataLayout.
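/// For example, copying an i16 with value 0x0102 on a little-endian target
/// writes the bytes {0x02, 0x01} into CurPtr.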
364 bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
365  unsigned BytesLeft, const DataLayout &DL) {
366  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
367  "Out of range access");
368 
369  // If this element is zero or undefined, we can just return since *CurPtr is
370  // zero initialized.
371  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
372  return true;
373 
374  if (auto *CI = dyn_cast<ConstantInt>(C)) {
375  if (CI->getBitWidth() > 64 ||
376  (CI->getBitWidth() & 7) != 0)
377  return false;
378 
379  uint64_t Val = CI->getZExtValue();
380  unsigned IntBytes = unsigned(CI->getBitWidth()/8);
381 
382  for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
383  int n = ByteOffset;
384  if (!DL.isLittleEndian())
385  n = IntBytes - n - 1;
386  CurPtr[i] = (unsigned char)(Val >> (n * 8));
387  ++ByteOffset;
388  }
389  return true;
390  }
391 
392  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
393  if (CFP->getType()->isDoubleTy()) {
394  C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
395  return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
396  }
397  if (CFP->getType()->isFloatTy()){
398  C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
399  return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
400  }
401  if (CFP->getType()->isHalfTy()){
402  C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
403  return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
404  }
405  return false;
406  }
407 
408  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
409  const StructLayout *SL = DL.getStructLayout(CS->getType());
410  unsigned Index = SL->getElementContainingOffset(ByteOffset);
411  uint64_t CurEltOffset = SL->getElementOffset(Index);
412  ByteOffset -= CurEltOffset;
413 
414  while (true) {
415  // If the element access is to the element itself and not to tail padding,
416  // read the bytes from the element.
417  uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());
418 
419  if (ByteOffset < EltSize &&
420  !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
421  BytesLeft, DL))
422  return false;
423 
424  ++Index;
425 
426  // Check to see if we read from the last struct element; if so, we're done.
427  if (Index == CS->getType()->getNumElements())
428  return true;
429 
430  // If we read all of the bytes we needed from this element we're done.
431  uint64_t NextEltOffset = SL->getElementOffset(Index);
432 
433  if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
434  return true;
435 
436  // Move to the next element of the struct.
437  CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
438  BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
439  ByteOffset = 0;
440  CurEltOffset = NextEltOffset;
441  }
442  // not reached.
443  }
444 
445  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
446  isa<ConstantDataSequential>(C)) {
447  Type *EltTy = C->getType()->getSequentialElementType();
448  uint64_t EltSize = DL.getTypeAllocSize(EltTy);
449  uint64_t Index = ByteOffset / EltSize;
450  uint64_t Offset = ByteOffset - Index * EltSize;
451  uint64_t NumElts;
452  if (auto *AT = dyn_cast<ArrayType>(C->getType()))
453  NumElts = AT->getNumElements();
454  else
455  NumElts = C->getType()->getVectorNumElements();
456 
457  for (; Index != NumElts; ++Index) {
458  if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
459  BytesLeft, DL))
460  return false;
461 
462  uint64_t BytesWritten = EltSize - Offset;
463  assert(BytesWritten <= EltSize && "Not indexing into this element?");
464  if (BytesWritten >= BytesLeft)
465  return true;
466 
467  Offset = 0;
468  BytesLeft -= BytesWritten;
469  CurPtr += BytesWritten;
470  }
471  return true;
472  }
473 
474  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
475  if (CE->getOpcode() == Instruction::IntToPtr &&
476  CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
477  return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
478  BytesLeft, DL);
479  }
480  }
481 
482  // Otherwise, unknown initializer type.
483  return false;
484 }
485 
486 Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
487  const DataLayout &DL) {
488  auto *PTy = cast<PointerType>(C->getType());
489  auto *IntType = dyn_cast<IntegerType>(LoadTy);
490 
491  // If this isn't an integer load we can't fold it directly.
492  if (!IntType) {
493  unsigned AS = PTy->getAddressSpace();
494 
495  // If this is a float/double load, we can try folding it as an int32/64 load
496  // and then bitcast the result. This can be useful for union cases. Note
497  // that address spaces don't matter here since we're not going to result in
498  // an actual new load.
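 // For example, a float load is folded as an i32 load of the same bytes and
 // the resulting i32 is then bitcast back to float.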
499  Type *MapTy;
500  if (LoadTy->isHalfTy())
501  MapTy = Type::getInt16Ty(C->getContext());
502  else if (LoadTy->isFloatTy())
503  MapTy = Type::getInt32Ty(C->getContext());
504  else if (LoadTy->isDoubleTy())
505  MapTy = Type::getInt64Ty(C->getContext());
506  else if (LoadTy->isVectorTy()) {
507  MapTy = PointerType::getIntNTy(C->getContext(),
508  DL.getTypeAllocSizeInBits(LoadTy));
509  } else
510  return nullptr;
511 
512  C = FoldBitCast(C, MapTy->getPointerTo(AS), DL);
513  if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL))
514  return FoldBitCast(Res, LoadTy, DL);
515  return nullptr;
516  }
517 
518  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
519  if (BytesLoaded > 32 || BytesLoaded == 0)
520  return nullptr;
521 
522  GlobalValue *GVal;
523  APInt OffsetAI;
524  if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL))
525  return nullptr;
526 
527  auto *GV = dyn_cast<GlobalVariable>(GVal);
528  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
529  !GV->getInitializer()->getType()->isSized())
530  return nullptr;
531 
532  int64_t Offset = OffsetAI.getSExtValue();
533  int64_t InitializerSize = DL.getTypeAllocSize(GV->getInitializer()->getType());
534 
535  // If we're not accessing anything in this constant, the result is undefined.
536  if (Offset + BytesLoaded <= 0)
537  return UndefValue::get(IntType);
538 
539  // If we're not accessing anything in this constant, the result is undefined.
540  if (Offset >= InitializerSize)
541  return UndefValue::get(IntType);
542 
543  unsigned char RawBytes[32] = {0};
544  unsigned char *CurPtr = RawBytes;
545  unsigned BytesLeft = BytesLoaded;
546 
547  // If we're loading off the beginning of the global, some bytes may be valid.
548  if (Offset < 0) {
549  CurPtr += -Offset;
550  BytesLeft += Offset;
551  Offset = 0;
552  }
553 
554  if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL))
555  return nullptr;
556 
557  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
558  if (DL.isLittleEndian()) {
559  ResultVal = RawBytes[BytesLoaded - 1];
560  for (unsigned i = 1; i != BytesLoaded; ++i) {
561  ResultVal <<= 8;
562  ResultVal |= RawBytes[BytesLoaded - 1 - i];
563  }
564  } else {
565  ResultVal = RawBytes[0];
566  for (unsigned i = 1; i != BytesLoaded; ++i) {
567  ResultVal <<= 8;
568  ResultVal |= RawBytes[i];
569  }
570  }
571 
572  return ConstantInt::get(IntType->getContext(), ResultVal);
573 }
574 
575 Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy,
576  const DataLayout &DL) {
577  auto *SrcPtr = CE->getOperand(0);
578  auto *SrcPtrTy = dyn_cast<PointerType>(SrcPtr->getType());
579  if (!SrcPtrTy)
580  return nullptr;
581  Type *SrcTy = SrcPtrTy->getPointerElementType();
582 
583  Constant *C = ConstantFoldLoadFromConstPtr(SrcPtr, SrcTy, DL);
584  if (!C)
585  return nullptr;
586 
587  return llvm::ConstantFoldLoadThroughBitcast(C, DestTy, DL);
588 }
589 
590 } // end anonymous namespace
591 
592 Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
593  const DataLayout &DL) {
594  // First, try the easy cases:
595  if (auto *GV = dyn_cast<GlobalVariable>(C))
596  if (GV->isConstant() && GV->hasDefinitiveInitializer())
597  return GV->getInitializer();
598 
599  if (auto *GA = dyn_cast<GlobalAlias>(C))
600  if (GA->getAliasee() && !GA->isInterposable())
601  return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL);
602 
603  // If the loaded value isn't a constant expr, we can't handle it.
604  auto *CE = dyn_cast<ConstantExpr>(C);
605  if (!CE)
606  return nullptr;
607 
608  if (CE->getOpcode() == Instruction::GetElementPtr) {
609  if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
610  if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
611  if (Constant *V =
612  ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
613  return V;
614  }
615  }
616  }
617 
618  if (CE->getOpcode() == Instruction::BitCast)
619  if (Constant *LoadedC = ConstantFoldLoadThroughBitcastExpr(CE, Ty, DL))
620  return LoadedC;
621 
622  // Instead of loading constant c string, use corresponding integer value
623  // directly if string length is small enough.
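 // For example, loading an i32 through a pointer to the constant string
 // "abc" (with its nul terminator) folds to i32 0x00636261 on little-endian
 // targets and to i32 0x61626300 on big-endian targets.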
624  StringRef Str;
625  if (getConstantStringInfo(CE, Str) && !Str.empty()) {
626  size_t StrLen = Str.size();
627  unsigned NumBits = Ty->getPrimitiveSizeInBits();
628  // Replace load with immediate integer if the result is an integer or fp
629  // value.
630  if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
631  (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
632  APInt StrVal(NumBits, 0);
633  APInt SingleChar(NumBits, 0);
634  if (DL.isLittleEndian()) {
635  for (unsigned char C : reverse(Str.bytes())) {
636  SingleChar = static_cast<uint64_t>(C);
637  StrVal = (StrVal << 8) | SingleChar;
638  }
639  } else {
640  for (unsigned char C : Str.bytes()) {
641  SingleChar = static_cast<uint64_t>(C);
642  StrVal = (StrVal << 8) | SingleChar;
643  }
644  // Append NULL at the end.
645  SingleChar = 0;
646  StrVal = (StrVal << 8) | SingleChar;
647  }
648 
649  Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
650  if (Ty->isFloatingPointTy())
651  Res = ConstantExpr::getBitCast(Res, Ty);
652  return Res;
653  }
654  }
655 
656  // If this load comes from anywhere in a constant global, and if the global
657  // is all undef or zero, we know what it loads.
658  if (auto *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
659  if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
660  if (GV->getInitializer()->isNullValue())
661  return Constant::getNullValue(Ty);
662  if (isa<UndefValue>(GV->getInitializer()))
663  return UndefValue::get(Ty);
664  }
665  }
666 
667  // Try hard to fold loads from bitcasted strange and non-type-safe things.
668  return FoldReinterpretLoadFromConstPtr(CE, Ty, DL);
669 }
670 
671 namespace {
672 
673 Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout &DL) {
674  if (LI->isVolatile()) return nullptr;
675 
676  if (auto *C = dyn_cast<Constant>(LI->getOperand(0)))
677  return ConstantFoldLoadFromConstPtr(C, LI->getType(), DL);
678 
679  return nullptr;
680 }
681 
682 /// One of Op0/Op1 is a constant expression.
683 /// Attempt to symbolically evaluate the result of a binary operator merging
684 /// these together. If target data info is available, it is provided as DL,
685 /// otherwise DL is null.
686 Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
687  const DataLayout &DL) {
688  // SROA
689 
690  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
691  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
692  // bits.
693 
694  if (Opc == Instruction::And) {
695  KnownBits Known0 = computeKnownBits(Op0, DL);
696  KnownBits Known1 = computeKnownBits(Op1, DL);
697  if ((Known1.One | Known0.Zero).isAllOnesValue()) {
698  // All the bits of Op0 that the 'and' could be masking are already zero.
699  return Op0;
700  }
701  if ((Known0.One | Known1.Zero).isAllOnesValue()) {
702  // All the bits of Op1 that the 'and' could be masking are already zero.
703  return Op1;
704  }
705 
706  Known0.Zero |= Known1.Zero;
707  Known0.One &= Known1.One;
708  if (Known0.isConstant())
709  return ConstantInt::get(Op0->getType(), Known0.getConstant());
710  }
711 
712  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
713  // constant. This happens frequently when iterating over a global array.
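 // For example, (ptrtoint (getelementptr i32, i32* @A, i64 4)) - (ptrtoint @A)
 // folds to 16 when i32 is 4 bytes.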
714  if (Opc == Instruction::Sub) {
715  GlobalValue *GV1, *GV2;
716  APInt Offs1, Offs2;
717 
718  if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
719  if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
720  unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());
721 
722  // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
723  // PtrToInt may change the bitwidth so we have to convert to the right size
724  // first.
725  return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
726  Offs2.zextOrTrunc(OpSize));
727  }
728  }
729 
730  return nullptr;
731 }
732 
733 /// If array indices are not pointer-sized integers, explicitly cast them so
734 /// that they aren't implicitly casted by the getelementptr.
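/// For example, an i16 array index is sign-extended to i64 on a target with
/// 64-bit pointers before the GEP is re-formed.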
735 Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
736  Type *ResultTy, Optional<unsigned> InRangeIndex,
737  const DataLayout &DL, const TargetLibraryInfo *TLI) {
738  Type *IntPtrTy = DL.getIntPtrType(ResultTy);
739  Type *IntPtrScalarTy = IntPtrTy->getScalarType();
740 
741  bool Any = false;
742  SmallVector<Constant *, 32> NewIdxs;
743  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
744  if ((i == 1 ||
745  !isa<StructType>(GetElementPtrInst::getIndexedType(
746  SrcElemTy, Ops.slice(1, i - 1)))) &&
747  Ops[i]->getType()->getScalarType() != IntPtrScalarTy) {
748  Any = true;
749  Type *NewType = Ops[i]->getType()->isVectorTy()
750  ? IntPtrTy
751  : IntPtrTy->getScalarType();
752  NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
753  true,
754  NewType,
755  true),
756  Ops[i], NewType));
757  } else
758  NewIdxs.push_back(Ops[i]);
759  }
760 
761  if (!Any)
762  return nullptr;
763 
764  Constant *C = ConstantExpr::getGetElementPtr(
765  SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
766  if (Constant *Folded = ConstantFoldConstant(C, DL, TLI))
767  C = Folded;
768 
769  return C;
770 }
771 
772 /// Strip the pointer casts, but preserve the address space information.
773 Constant* StripPtrCastKeepAS(Constant* Ptr, Type *&ElemTy) {
774  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
775  auto *OldPtrTy = cast<PointerType>(Ptr->getType());
776  Ptr = Ptr->stripPointerCasts();
777  auto *NewPtrTy = cast<PointerType>(Ptr->getType());
778 
779  ElemTy = NewPtrTy->getPointerElementType();
780 
781  // Preserve the address space number of the pointer.
782  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
783  NewPtrTy = ElemTy->getPointerTo(OldPtrTy->getAddressSpace());
784  Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
785  }
786  return Ptr;
787 }
788 
789 /// If we can symbolically evaluate the GEP constant expression, do so.
790 Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
791  ArrayRef<Constant *> Ops,
792  const DataLayout &DL,
793  const TargetLibraryInfo *TLI) {
794  const GEPOperator *InnermostGEP = GEP;
795  bool InBounds = GEP->isInBounds();
796 
797  Type *SrcElemTy = GEP->getSourceElementType();
798  Type *ResElemTy = GEP->getResultElementType();
799  Type *ResTy = GEP->getType();
800  if (!SrcElemTy->isSized())
801  return nullptr;
802 
803  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
804  GEP->getInRangeIndex(), DL, TLI))
805  return C;
806 
807  Constant *Ptr = Ops[0];
808  if (!Ptr->getType()->isPointerTy())
809  return nullptr;
810 
811  Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());
812 
813  // If this is a constant expr gep that is effectively computing an
814  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
815  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
816  if (!isa<ConstantInt>(Ops[i])) {
817 
818  // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
819  // "inttoptr (sub (ptrtoint Ptr), V)"
820  if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
821  auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
822  assert((!CE || CE->getType() == IntPtrTy) &&
823  "CastGEPIndices didn't canonicalize index types!");
824  if (CE && CE->getOpcode() == Instruction::Sub &&
825  CE->getOperand(0)->isNullValue()) {
826  Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
827  Res = ConstantExpr::getSub(Res, CE->getOperand(1));
828  Res = ConstantExpr::getIntToPtr(Res, ResTy);
829  if (auto *FoldedRes = ConstantFoldConstant(Res, DL, TLI))
830  Res = FoldedRes;
831  return Res;
832  }
833  }
834  return nullptr;
835  }
836 
837  unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
838  APInt Offset =
839  APInt(BitWidth,
840  DL.getIndexedOffsetInType(
841  SrcElemTy,
842  makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
843  Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
844 
845  // If this is a GEP of a GEP, fold it all into a single GEP.
846  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
847  InnermostGEP = GEP;
848  InBounds &= GEP->isInBounds();
849 
850  SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());
851 
852  // Do not try to incorporate the sub-GEP if some index is not a number.
853  bool AllConstantInt = true;
854  for (Value *NestedOp : NestedOps)
855  if (!isa<ConstantInt>(NestedOp)) {
856  AllConstantInt = false;
857  break;
858  }
859  if (!AllConstantInt)
860  break;
861 
862  Ptr = cast<Constant>(GEP->getOperand(0));
863  SrcElemTy = GEP->getSourceElementType();
864  Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
865  Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
866  }
867 
868  // If the base value for this address is a literal integer value, fold the
869  // getelementptr to the resulting integer value casted to the pointer type.
870  APInt BasePtr(BitWidth, 0);
871  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
872  if (CE->getOpcode() == Instruction::IntToPtr) {
873  if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
874  BasePtr = Base->getValue().zextOrTrunc(BitWidth);
875  }
876  }
877 
878  auto *PTy = cast<PointerType>(Ptr->getType());
879  if ((Ptr->isNullValue() || BasePtr != 0) &&
880  !DL.isNonIntegralPointerType(PTy)) {
881  Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
882  return ConstantExpr::getIntToPtr(C, ResTy);
883  }
884 
885  // Otherwise form a regular getelementptr. Recompute the indices so that
886  // we eliminate over-indexing of the notional static type array bounds.
887  // This makes it easy to determine if the getelementptr is "inbounds".
888  // Also, this helps GlobalOpt do SROA on GlobalVariables.
889  Type *Ty = PTy;
890  SmallVector<Constant *, 32> NewIdxs;
891 
892  do {
893  if (!Ty->isStructTy()) {
894  if (Ty->isPointerTy()) {
895  // The only pointer indexing we'll do is on the first index of the GEP.
896  if (!NewIdxs.empty())
897  break;
898 
899  Ty = SrcElemTy;
900 
901  // Only handle pointers to sized types, not pointers to functions.
902  if (!Ty->isSized())
903  return nullptr;
904  } else if (auto *ATy = dyn_cast<SequentialType>(Ty)) {
905  Ty = ATy->getElementType();
906  } else {
907  // We've reached some non-indexable type.
908  break;
909  }
910 
911  // Determine which element of the array the offset points into.
912  APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty));
913  if (ElemSize == 0) {
914  // The element size is 0. This may be [0 x Ty]*, so just use a zero
915  // index for this level and proceed to the next level to see if it can
916  // accommodate the offset.
917  NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
918  } else {
919  // The element size is non-zero; divide the offset by the element
920  // size (rounding down) to compute the index at this level.
921  bool Overflow;
922  APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow);
923  if (Overflow)
924  break;
925  Offset -= NewIdx * ElemSize;
926  NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
927  }
928  } else {
929  auto *STy = cast<StructType>(Ty);
930  // If we end up with an offset that isn't valid for this struct type, we
931  // can't re-form this GEP in a regular form, so bail out. The pointer
932  // operand likely went through casts that are necessary to make the GEP
933  // sensible.
934  const StructLayout &SL = *DL.getStructLayout(STy);
935  if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes()))
936  break;
937 
938  // Determine which field of the struct the offset points into. The
939  // getZExtValue is fine as we've already ensured that the offset is
940  // within the range representable by the StructLayout API.
941  unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
942  NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
943  ElIdx));
944  Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
945  Ty = STy->getTypeAtIndex(ElIdx);
946  }
947  } while (Ty != ResElemTy);
948 
949  // If we haven't used up the entire offset by descending the static
950  // type, then the offset is pointing into the middle of an indivisible
951  // member, so we can't simplify it.
952  if (Offset != 0)
953  return nullptr;
954 
955  // Preserve the inrange index from the innermost GEP if possible. We must
956  // have calculated the same indices up to and including the inrange index.
957  Optional<unsigned> InRangeIndex;
958  if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
959  if (SrcElemTy == InnermostGEP->getSourceElementType() &&
960  NewIdxs.size() > *LastIRIndex) {
961  InRangeIndex = LastIRIndex;
962  for (unsigned I = 0; I <= *LastIRIndex; ++I)
963  if (NewIdxs[I] != InnermostGEP->getOperand(I + 1)) {
964  InRangeIndex = None;
965  break;
966  }
967  }
968 
969  // Create a GEP.
970  Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
971  InBounds, InRangeIndex);
972  assert(C->getType()->getPointerElementType() == Ty &&
973  "Computed GetElementPtr has unexpected type!");
974 
975  // If we ended up indexing a member with a type that doesn't match
976  // the type of what the original indices indexed, add a cast.
977  if (Ty != ResElemTy)
978  C = FoldBitCast(C, ResTy, DL);
979 
980  return C;
981 }
982 
983 /// Attempt to constant fold an instruction with the
984 /// specified opcode and operands. If successful, the constant result is
985 /// returned, if not, null is returned. Note that this function can fail when
986 /// attempting to fold instructions like loads and stores, which have no
987 /// constant expression form.
988 ///
989 /// TODO: This function neither utilizes nor preserves nsw/nuw/inbounds/inrange
990 /// etc information, due to only being passed an opcode and operands. Constant
991 /// folding using this function strips this information.
992 ///
993 Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
994  ArrayRef<Constant *> Ops,
995  const DataLayout &DL,
996  const TargetLibraryInfo *TLI) {
997  Type *DestTy = InstOrCE->getType();
998 
999  // Handle easy binops first.
1000  if (Instruction::isBinaryOp(Opcode))
1001  return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);
1002 
1003  if (Instruction::isCast(Opcode))
1004  return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);
1005 
1006  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
1007  if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
1008  return C;
1009 
1010  return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
1011  Ops.slice(1), GEP->isInBounds(),
1012  GEP->getInRangeIndex());
1013  }
1014 
1015  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
1016  return CE->getWithOperands(Ops);
1017 
1018  switch (Opcode) {
1019  default: return nullptr;
1020  case Instruction::ICmp:
1021  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
1022  case Instruction::Call:
1023  if (auto *F = dyn_cast<Function>(Ops.back())) {
1024  ImmutableCallSite CS(cast<CallInst>(InstOrCE));
1025  if (canConstantFoldCallTo(CS, F))
1026  return ConstantFoldCall(CS, F, Ops.slice(0, Ops.size() - 1), TLI);
1027  }
1028  return nullptr;
1029  case Instruction::Select:
1030  return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
1031  case Instruction::ExtractElement:
1032  return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
1033  case Instruction::InsertElement:
1034  return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
1035  case Instruction::ShuffleVector:
1036  return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
1037  }
1038 }
1039 
1040 } // end anonymous namespace
1041 
1042 //===----------------------------------------------------------------------===//
1043 // Constant Folding public APIs
1044 //===----------------------------------------------------------------------===//
1045 
1046 namespace {
1047 
1048 Constant *
1049 ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
1050  const TargetLibraryInfo *TLI,
1051  SmallDenseMap<Constant *, Constant *> &FoldedOps) {
1052  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
1053  return nullptr;
1054 
1055  SmallVector<Constant *, 8> Ops;
1056  for (const Use &NewU : C->operands()) {
1057  auto *NewC = cast<Constant>(&NewU);
1058  // Recursively fold the ConstantExpr's operands. If we have already folded
1059  // a ConstantExpr, we don't have to process it again.
1060  if (isa<ConstantVector>(NewC) || isa<ConstantExpr>(NewC)) {
1061  auto It = FoldedOps.find(NewC);
1062  if (It == FoldedOps.end()) {
1063  if (auto *FoldedC =
1064  ConstantFoldConstantImpl(NewC, DL, TLI, FoldedOps)) {
1065  FoldedOps.insert({NewC, FoldedC});
1066  NewC = FoldedC;
1067  } else {
1068  FoldedOps.insert({NewC, NewC});
1069  }
1070  } else {
1071  NewC = It->second;
1072  }
1073  }
1074  Ops.push_back(NewC);
1075  }
1076 
1077  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1078  if (CE->isCompare())
1079  return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
1080  DL, TLI);
1081 
1082  return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
1083  }
1084 
1085  assert(isa<ConstantVector>(C));
1086  return ConstantVector::get(Ops);
1087 }
1088 
1089 } // end anonymous namespace
1090 
1091 Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
1092  const TargetLibraryInfo *TLI) {
1093  // Handle PHI nodes quickly here...
1094  if (auto *PN = dyn_cast<PHINode>(I)) {
1095  Constant *CommonValue = nullptr;
1096 
1097  SmallDenseMap<Constant *, Constant *> FoldedOps;
1098  for (Value *Incoming : PN->incoming_values()) {
1099  // If the incoming value is undef then skip it. Note that while we could
1100  // skip the value if it is equal to the phi node itself we choose not to
1101  // because that would break the rule that constant folding only applies if
1102  // all operands are constants.
1103  if (isa<UndefValue>(Incoming))
1104  continue;
1105  // If the incoming value is not a constant, then give up.
1106  auto *C = dyn_cast<Constant>(Incoming);
1107  if (!C)
1108  return nullptr;
1109  // Fold the PHI's operands.
1110  if (auto *FoldedC = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps))
1111  C = FoldedC;
1112  // If the incoming value is a different constant to
1113  // the one we saw previously, then give up.
1114  if (CommonValue && C != CommonValue)
1115  return nullptr;
1116  CommonValue = C;
1117  }
1118 
1119  // If we reach here, all incoming values are the same constant or undef.
1120  return CommonValue ? CommonValue : UndefValue::get(PN->getType());
1121  }
1122 
1123  // Scan the operand list, checking to see if they are all constants, if so,
1124  // hand off to ConstantFoldInstOperandsImpl.
1125  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
1126  return nullptr;
1127 
1128  SmallDenseMap<Constant *, Constant *> FoldedOps;
1129  SmallVector<Constant *, 8> Ops;
1130  for (const Use &OpU : I->operands()) {
1131  auto *Op = cast<Constant>(&OpU);
1132  // Fold the Instruction's operands.
1133  if (auto *FoldedOp = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps))
1134  Op = FoldedOp;
1135 
1136  Ops.push_back(Op);
1137  }
1138 
1139  if (const auto *CI = dyn_cast<CmpInst>(I))
1140  return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
1141  DL, TLI);
1142 
1143  if (const auto *LI = dyn_cast<LoadInst>(I))
1144  return ConstantFoldLoadInst(LI, DL);
1145 
1146  if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
1147  return ConstantExpr::getInsertValue(
1148  cast<Constant>(IVI->getAggregateOperand()),
1149  cast<Constant>(IVI->getInsertedValueOperand()),
1150  IVI->getIndices());
1151  }
1152 
1153  if (auto *EVI = dyn_cast<ExtractValueInst>(I)) {
1154  return ConstantExpr::getExtractValue(
1155  cast<Constant>(EVI->getAggregateOperand()),
1156  EVI->getIndices());
1157  }
1158 
1159  return ConstantFoldInstOperands(I, Ops, DL, TLI);
1160 }
1161 
1162 Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
1163  const TargetLibraryInfo *TLI) {
1164  SmallDenseMap<Constant *, Constant *> FoldedOps;
1165  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
1166 }
1167 
1168 Constant *llvm::ConstantFoldInstOperands(Instruction *I,
1169  ArrayRef<Constant *> Ops,
1170  const DataLayout &DL,
1171  const TargetLibraryInfo *TLI) {
1172  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
1173 }
1174 
1175 Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
1176  Constant *Ops0, Constant *Ops1,
1177  const DataLayout &DL,
1178  const TargetLibraryInfo *TLI) {
1179  // fold: icmp (inttoptr x), null -> icmp x, 0
1180  // fold: icmp null, (inttoptr x) -> icmp 0, x
1181  // fold: icmp (ptrtoint x), 0 -> icmp x, null
1182  // fold: icmp 0, (ptrtoint x) -> icmp null, x
1183  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
1184  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
1185  //
1186  // FIXME: The following comment is out of date and the DataLayout is here now.
1187  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
1188  // around to know if bit truncation is happening.
1189  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
1190  if (Ops1->isNullValue()) {
1191  if (CE0->getOpcode() == Instruction::IntToPtr) {
1192  Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
1193  // Convert the integer value to the right size to ensure we get the
1194  // proper extension or truncation.
1195  Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
1196  IntPtrTy, false);
1197  Constant *Null = Constant::getNullValue(C->getType());
1198  return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
1199  }
1200 
1201  // Only do this transformation if the int is intptrty in size, otherwise
1202  // there is a truncation or extension that we aren't modeling.
1203  if (CE0->getOpcode() == Instruction::PtrToInt) {
1204  Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
1205  if (CE0->getType() == IntPtrTy) {
1206  Constant *C = CE0->getOperand(0);
1207  Constant *Null = Constant::getNullValue(C->getType());
1208  return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
1209  }
1210  }
1211  }
1212 
1213  if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
1214  if (CE0->getOpcode() == CE1->getOpcode()) {
1215  if (CE0->getOpcode() == Instruction::IntToPtr) {
1216  Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
1217 
1218  // Convert the integer value to the right size to ensure we get the
1219  // proper extension or truncation.
1220  Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
1221  IntPtrTy, false);
1222  Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
1223  IntPtrTy, false);
1224  return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
1225  }
1226 
1227  // Only do this transformation if the int is intptrty in size, otherwise
1228  // there is a truncation or extension that we aren't modeling.
1229  if (CE0->getOpcode() == Instruction::PtrToInt) {
1230  Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
1231  if (CE0->getType() == IntPtrTy &&
1232  CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
1233  return ConstantFoldCompareInstOperands(
1234  Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
1235  }
1236  }
1237  }
1238  }
1239 
1240  // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
1241  // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
1242  if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
1243  CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
1244  Constant *LHS = ConstantFoldCompareInstOperands(
1245  Predicate, CE0->getOperand(0), Ops1, DL, TLI);
1246  Constant *RHS = ConstantFoldCompareInstOperands(
1247  Predicate, CE0->getOperand(1), Ops1, DL, TLI);
1248  unsigned OpC =
1249  Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
1250  return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
1251  }
1252  } else if (isa<ConstantExpr>(Ops1)) {
1253  // If RHS is a constant expression, but the left side isn't, swap the
1254  // operands and try again.
1255  Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate);
1256  return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
1257  }
1258 
1259  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
1260 }
1261 
1262 Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
1263  Constant *RHS,
1264  const DataLayout &DL) {
1266  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
1267  if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
1268  return C;
1269 
1270  return ConstantExpr::get(Opcode, LHS, RHS);
1271 }
1272 
1273 Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
1274  Type *DestTy, const DataLayout &DL) {
1275  assert(Instruction::isCast(Opcode));
1276  switch (Opcode) {
1277  default:
1278  llvm_unreachable("Missing case");
1279  case Instruction::PtrToInt:
1280  // If the input is a inttoptr, eliminate the pair. This requires knowing
1281  // the width of a pointer, so it can't be done in ConstantExpr::getCast.
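 // For example, with 64-bit pointers, folding
 // ptrtoint (inttoptr (i64 C to i8*)) to i32 gives the same result as
 // trunc i64 C to i32.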
1282  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1283  if (CE->getOpcode() == Instruction::IntToPtr) {
1284  Constant *Input = CE->getOperand(0);
1285  unsigned InWidth = Input->getType()->getScalarSizeInBits();
1286  unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
1287  if (PtrWidth < InWidth) {
1288  Constant *Mask =
1289  ConstantInt::get(CE->getContext(),
1290  APInt::getLowBitsSet(InWidth, PtrWidth));
1291  Input = ConstantExpr::getAnd(Input, Mask);
1292  }
1293  // Do a zext or trunc to get to the dest size.
1294  return ConstantExpr::getIntegerCast(Input, DestTy, false);
1295  }
1296  }
1297  return ConstantExpr::getCast(Opcode, C, DestTy);
1298  case Instruction::IntToPtr:
1299  // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
1300  // the int size is >= the ptr size and the address spaces are the same.
1301  // This requires knowing the width of a pointer, so it can't be done in
1302  // ConstantExpr::getCast.
1303  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1304  if (CE->getOpcode() == Instruction::PtrToInt) {
1305  Constant *SrcPtr = CE->getOperand(0);
1306  unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
1307  unsigned MidIntSize = CE->getType()->getScalarSizeInBits();
1308 
1309  if (MidIntSize >= SrcPtrSize) {
1310  unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
1311  if (SrcAS == DestTy->getPointerAddressSpace())
1312  return FoldBitCast(CE->getOperand(0), DestTy, DL);
1313  }
1314  }
1315  }
1316 
1317  return ConstantExpr::getCast(Opcode, C, DestTy);
1318  case Instruction::Trunc:
1319  case Instruction::ZExt:
1320  case Instruction::SExt:
1321  case Instruction::FPTrunc:
1322  case Instruction::FPExt:
1323  case Instruction::UIToFP:
1324  case Instruction::SIToFP:
1325  case Instruction::FPToUI:
1326  case Instruction::FPToSI:
1327  case Instruction::AddrSpaceCast:
1328  return ConstantExpr::getCast(Opcode, C, DestTy);
1329  case Instruction::BitCast:
1330  return FoldBitCast(C, DestTy, DL);
1331  }
1332 }
1333 
1334 Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
1335  ConstantExpr *CE) {
1336  if (!CE->getOperand(1)->isNullValue())
1337  return nullptr; // Do not allow stepping over the value!
1338 
1339  // Loop over all of the operands, tracking down which value we are
1340  // addressing.
1341  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
1342  C = C->getAggregateElement(CE->getOperand(i));
1343  if (!C)
1344  return nullptr;
1345  }
1346  return C;
1347 }
1348 
1349 Constant *
1350 llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
1351  ArrayRef<Constant *> Indices) {
1352  // Loop over all of the operands, tracking down which value we are
1353  // addressing.
1354  for (Constant *Index : Indices) {
1355  C = C->getAggregateElement(Index);
1356  if (!C)
1357  return nullptr;
1358  }
1359  return C;
1360 }
1361 
1362 //===----------------------------------------------------------------------===//
1363 // Constant Folding for Calls
1364 //
1365 
1366 bool llvm::canConstantFoldCallTo(ImmutableCallSite CS, const Function *F) {
1367  if (CS.isNoBuiltin() || CS.isStrictFP())
1368  return false;
1369  switch (F->getIntrinsicID()) {
1370  case Intrinsic::fabs:
1371  case Intrinsic::minnum:
1372  case Intrinsic::maxnum:
1373  case Intrinsic::log:
1374  case Intrinsic::log2:
1375  case Intrinsic::log10:
1376  case Intrinsic::exp:
1377  case Intrinsic::exp2:
1378  case Intrinsic::floor:
1379  case Intrinsic::ceil:
1380  case Intrinsic::sqrt:
1381  case Intrinsic::sin:
1382  case Intrinsic::cos:
1383  case Intrinsic::trunc:
1384  case Intrinsic::rint:
1385  case Intrinsic::nearbyint:
1386  case Intrinsic::pow:
1387  case Intrinsic::powi:
1388  case Intrinsic::bswap:
1389  case Intrinsic::ctpop:
1390  case Intrinsic::ctlz:
1391  case Intrinsic::cttz:
1392  case Intrinsic::fma:
1393  case Intrinsic::fmuladd:
1394  case Intrinsic::copysign:
1395  case Intrinsic::launder_invariant_group:
1396  case Intrinsic::round:
1397  case Intrinsic::masked_load:
1398  case Intrinsic::sadd_with_overflow:
1399  case Intrinsic::uadd_with_overflow:
1400  case Intrinsic::ssub_with_overflow:
1401  case Intrinsic::usub_with_overflow:
1402  case Intrinsic::smul_with_overflow:
1403  case Intrinsic::umul_with_overflow:
1404  case Intrinsic::convert_from_fp16:
1405  case Intrinsic::convert_to_fp16:
1406  case Intrinsic::bitreverse:
1407  case Intrinsic::x86_sse_cvtss2si:
1408  case Intrinsic::x86_sse_cvtss2si64:
1409  case Intrinsic::x86_sse_cvttss2si:
1410  case Intrinsic::x86_sse_cvttss2si64:
1411  case Intrinsic::x86_sse2_cvtsd2si:
1412  case Intrinsic::x86_sse2_cvtsd2si64:
1413  case Intrinsic::x86_sse2_cvttsd2si:
1414  case Intrinsic::x86_sse2_cvttsd2si64:
1415  return true;
1416  default:
1417  return false;
1418  case Intrinsic::not_intrinsic: break;
1419  }
1420 
1421  if (!F->hasName())
1422  return false;
1423  StringRef Name = F->getName();
1424 
1425  // In these cases, the check of the length is required. We don't want to
1426  // return true for a name like "cos\0blah" which strcmp would return equal to
1427  // "cos", but has length 8.
1428  switch (Name[0]) {
1429  default:
1430  return false;
1431  case 'a':
1432  return Name == "acos" || Name == "asin" || Name == "atan" ||
1433  Name == "atan2" || Name == "acosf" || Name == "asinf" ||
1434  Name == "atanf" || Name == "atan2f";
1435  case 'c':
1436  return Name == "ceil" || Name == "cos" || Name == "cosh" ||
1437  Name == "ceilf" || Name == "cosf" || Name == "coshf";
1438  case 'e':
1439  return Name == "exp" || Name == "exp2" || Name == "expf" || Name == "exp2f";
1440  case 'f':
1441  return Name == "fabs" || Name == "floor" || Name == "fmod" ||
1442  Name == "fabsf" || Name == "floorf" || Name == "fmodf";
1443  case 'l':
1444  return Name == "log" || Name == "log10" || Name == "logf" ||
1445  Name == "log10f";
1446  case 'p':
1447  return Name == "pow" || Name == "powf";
1448  case 'r':
1449  return Name == "round" || Name == "roundf";
1450  case 's':
1451  return Name == "sin" || Name == "sinh" || Name == "sqrt" ||
1452  Name == "sinf" || Name == "sinhf" || Name == "sqrtf";
1453  case 't':
1454  return Name == "tan" || Name == "tanh" || Name == "tanf" || Name == "tanhf";
1455  case '_':
1456 
1457  // Check for various function names that get used for the math functions
1458  // when the header files are preprocessed with the macro
1459  // __FINITE_MATH_ONLY__ enabled.
1460  // The '12' here is the length of the shortest name that can match.
1461  // We need to check the size before looking at Name[1] and Name[2]
1462  // so we may as well check a limit that will eliminate mismatches.
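 // For example, glibc's math headers redirect exp(x) to __exp_finite(x)
 // under __FINITE_MATH_ONLY__, which is why those names are matched below.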
1463  if (Name.size() < 12 || Name[1] != '_')
1464  return false;
1465  switch (Name[2]) {
1466  default:
1467  return false;
1468  case 'a':
1469  return Name == "__acos_finite" || Name == "__acosf_finite" ||
1470  Name == "__asin_finite" || Name == "__asinf_finite" ||
1471  Name == "__atan2_finite" || Name == "__atan2f_finite";
1472  case 'c':
1473  return Name == "__cosh_finite" || Name == "__coshf_finite";
1474  case 'e':
1475  return Name == "__exp_finite" || Name == "__expf_finite" ||
1476  Name == "__exp2_finite" || Name == "__exp2f_finite";
1477  case 'l':
1478  return Name == "__log_finite" || Name == "__logf_finite" ||
1479  Name == "__log10_finite" || Name == "__log10f_finite";
1480  case 'p':
1481  return Name == "__pow_finite" || Name == "__powf_finite";
1482  case 's':
1483  return Name == "__sinh_finite" || Name == "__sinhf_finite";
1484  }
1485  }
1486 }
1487 
1488 namespace {
1489 
1490 Constant *GetConstantFoldFPValue(double V, Type *Ty) {
1491  if (Ty->isHalfTy()) {
1492  APFloat APF(V);
1493  bool unused;
1494  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &unused);
1495  return ConstantFP::get(Ty->getContext(), APF);
1496  }
1497  if (Ty->isFloatTy())
1498  return ConstantFP::get(Ty->getContext(), APFloat((float)V));
1499  if (Ty->isDoubleTy())
1500  return ConstantFP::get(Ty->getContext(), APFloat(V));
1501  llvm_unreachable("Can only constant fold half/float/double");
1502 }
1503 
1504 /// Clear the floating-point exception state.
1505 inline void llvm_fenv_clearexcept() {
1506 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
1507  feclearexcept(FE_ALL_EXCEPT);
1508 #endif
1509  errno = 0;
1510 }
1511 
1512 /// Test if a floating-point exception was raised.
1513 inline bool llvm_fenv_testexcept() {
1514  int errno_val = errno;
1515  if (errno_val == ERANGE || errno_val == EDOM)
1516  return true;
1517 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
1518  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
1519  return true;
1520 #endif
1521  return false;
1522 }
1523 
1524 Constant *ConstantFoldFP(double (*NativeFP)(double), double V, Type *Ty) {
1525  llvm_fenv_clearexcept();
1526  V = NativeFP(V);
1527  if (llvm_fenv_testexcept()) {
1528  llvm_fenv_clearexcept();
1529  return nullptr;
1530  }
1531 
1532  return GetConstantFoldFPValue(V, Ty);
1533 }
1534 
1535 Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), double V,
1536  double W, Type *Ty) {
1537  llvm_fenv_clearexcept();
1538  V = NativeFP(V, W);
1539  if (llvm_fenv_testexcept()) {
1540  llvm_fenv_clearexcept();
1541  return nullptr;
1542  }
1543 
1544  return GetConstantFoldFPValue(V, Ty);
1545 }
1546 
1547 /// Attempt to fold an SSE floating point to integer conversion of a constant
1548 /// floating point. If roundTowardZero is false, the default IEEE rounding is
1549 /// used (toward nearest, ties to even). This matches the behavior of the
1550 /// non-truncating SSE instructions in the default rounding mode. The desired
1551 /// integer type Ty is used to select how many bits are available for the
1552 /// result. Returns null if the conversion cannot be performed, otherwise
1553 /// returns the Constant value resulting from the conversion.
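/// For example, folding cvtss2si of 3.7 yields 4 (round to nearest, ties to
/// even), while cvttss2si of 3.7 yields 3 (truncation toward zero).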
1554 Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
1555  Type *Ty) {
1556  // All of these conversion intrinsics form an integer of at most 64bits.
1557  unsigned ResultWidth = Ty->getIntegerBitWidth();
1558  assert(ResultWidth <= 64 &&
1559  "Can only constant fold conversions to 64 and 32 bit ints");
1560 
1561  uint64_t UIntVal;
1562  bool isExact = false;
1563  APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero
1564  : APFloat::rmNearestTiesToEven;
1565  APFloat::opStatus status =
1566  Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
1567  /*isSigned=*/true, mode, &isExact);
1568  if (status != APFloat::opOK &&
1569  (!roundTowardZero || status != APFloat::opInexact))
1570  return nullptr;
1571  return ConstantInt::get(Ty, UIntVal, /*isSigned=*/true);
1572 }
1573 
1574 double getValueAsDouble(ConstantFP *Op) {
1575  Type *Ty = Op->getType();
1576 
1577  if (Ty->isFloatTy())
1578  return Op->getValueAPF().convertToFloat();
1579 
1580  if (Ty->isDoubleTy())
1581  return Op->getValueAPF().convertToDouble();
1582 
1583  bool unused;
1584  APFloat APF = Op->getValueAPF();
1585  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
1586  return APF.convertToDouble();
1587 }
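// Note (descriptive): for half the value is widened to IEEEdouble via
// APFloat::convert; the widening is exact, so the returned double carries the
// original value without rounding.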
1588 
1589 Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
1590  ArrayRef<Constant *> Operands,
1591  const TargetLibraryInfo *TLI) {
1592  if (Operands.size() == 1) {
1593  if (isa<UndefValue>(Operands[0])) {
1594  // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN
1595  if (IntrinsicID == Intrinsic::cos)
1596  return Constant::getNullValue(Ty);
1597  if (IntrinsicID == Intrinsic::bswap ||
1598  IntrinsicID == Intrinsic::bitreverse ||
1599  IntrinsicID == Intrinsic::launder_invariant_group)
1600  return Operands[0];
1601  }
1602 
1603  if (isa<ConstantPointerNull>(Operands[0]) &&
1604  Operands[0]->getType()->getPointerAddressSpace() == 0) {
1605  // launder(null) == null iff in addrspace 0
1606  if (IntrinsicID == Intrinsic::launder_invariant_group)
1607  return Operands[0];
1608  return nullptr;
1609  }
1610 
1611  if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
1612  if (IntrinsicID == Intrinsic::convert_to_fp16) {
1613  APFloat Val(Op->getValueAPF());
1614 
1615  bool lost = false;
1616  Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
1617 
1618  return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
1619  }
1620 
1621  if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
1622  return nullptr;
1623 
1624  if (IntrinsicID == Intrinsic::round) {
1625  APFloat V = Op->getValueAPF();
1626  V.roundToIntegral(APFloat::rmNearestTiesToAway);
1627  return ConstantFP::get(Ty->getContext(), V);
1628  }
1629 
1630  if (IntrinsicID == Intrinsic::floor) {
1631  APFloat V = Op->getValueAPF();
1632  V.roundToIntegral(APFloat::rmTowardNegative);
1633  return ConstantFP::get(Ty->getContext(), V);
1634  }
1635 
1636  if (IntrinsicID == Intrinsic::ceil) {
1637  APFloat V = Op->getValueAPF();
1638  V.roundToIntegral(APFloat::rmTowardPositive);
1639  return ConstantFP::get(Ty->getContext(), V);
1640  }
1641 
1642  if (IntrinsicID == Intrinsic::trunc) {
1643  APFloat V = Op->getValueAPF();
1644  V.roundToIntegral(APFloat::rmTowardZero);
1645  return ConstantFP::get(Ty->getContext(), V);
1646  }
1647 
1648  if (IntrinsicID == Intrinsic::rint) {
1649  APFloat V = Op->getValueAPF();
1650  V.roundToIntegral(APFloat::rmNearestTiesToEven);
1651  return ConstantFP::get(Ty->getContext(), V);
1652  }
1653 
1654  if (IntrinsicID == Intrinsic::nearbyint) {
1655  APFloat V = Op->getValueAPF();
1656  V.roundToIntegral(APFloat::rmNearestTiesToEven);
1657  return ConstantFP::get(Ty->getContext(), V);
1658  }
1659 
1660  /// We only fold functions with finite arguments. Folding NaN and inf is
1661  /// likely to be aborted with an exception anyway, and some host libms
1662  /// have known errors raising exceptions.
1663  if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
1664  return nullptr;
1665 
1666  /// Currently APFloat versions of these functions do not exist, so we use
1667  /// the host native double versions. Float versions are not called
1668  /// directly but for all these it is true (float)(f((double)arg)) ==
1669  /// f(arg). Long double not supported yet.
1670  double V = getValueAsDouble(Op);
1671 
1672  switch (IntrinsicID) {
1673  default: break;
1674  case Intrinsic::fabs:
1675  return ConstantFoldFP(fabs, V, Ty);
1676  case Intrinsic::log2:
1677  return ConstantFoldFP(Log2, V, Ty);
1678  case Intrinsic::log:
1679  return ConstantFoldFP(log, V, Ty);
1680  case Intrinsic::log10:
1681  return ConstantFoldFP(log10, V, Ty);
1682  case Intrinsic::exp:
1683  return ConstantFoldFP(exp, V, Ty);
1684  case Intrinsic::exp2:
1685  return ConstantFoldFP(exp2, V, Ty);
1686  case Intrinsic::sin:
1687  return ConstantFoldFP(sin, V, Ty);
1688  case Intrinsic::cos:
1689  return ConstantFoldFP(cos, V, Ty);
1690  case Intrinsic::sqrt:
1691  return ConstantFoldFP(sqrt, V, Ty);
1692  }
1693 
1694  if (!TLI)
1695  return nullptr;
1696 
1697  char NameKeyChar = Name[0];
1698  if (Name[0] == '_' && Name.size() > 2 && Name[1] == '_')
1699  NameKeyChar = Name[2];
1700 
1701  switch (NameKeyChar) {
1702  case 'a':
1703  if ((Name == "acos" && TLI->has(LibFunc_acos)) ||
1704  (Name == "acosf" && TLI->has(LibFunc_acosf)) ||
1705  (Name == "__acos_finite" && TLI->has(LibFunc_acos_finite)) ||
1706  (Name == "__acosf_finite" && TLI->has(LibFunc_acosf_finite)))
1707  return ConstantFoldFP(acos, V, Ty);
1708  else if ((Name == "asin" && TLI->has(LibFunc_asin)) ||
1709  (Name == "asinf" && TLI->has(LibFunc_asinf)) ||
1710  (Name == "__asin_finite" && TLI->has(LibFunc_asin_finite)) ||
1711  (Name == "__asinf_finite" && TLI->has(LibFunc_asinf_finite)))
1712  return ConstantFoldFP(asin, V, Ty);
1713  else if ((Name == "atan" && TLI->has(LibFunc_atan)) ||
1714  (Name == "atanf" && TLI->has(LibFunc_atanf)))
1715  return ConstantFoldFP(atan, V, Ty);
1716  break;
1717  case 'c':
1718  if ((Name == "ceil" && TLI->has(LibFunc_ceil)) ||
1719  (Name == "ceilf" && TLI->has(LibFunc_ceilf)))
1720  return ConstantFoldFP(ceil, V, Ty);
1721  else if ((Name == "cos" && TLI->has(LibFunc_cos)) ||
1722  (Name == "cosf" && TLI->has(LibFunc_cosf)))
1723  return ConstantFoldFP(cos, V, Ty);
1724  else if ((Name == "cosh" && TLI->has(LibFunc_cosh)) ||
1725  (Name == "coshf" && TLI->has(LibFunc_coshf)) ||
1726  (Name == "__cosh_finite" && TLI->has(LibFunc_cosh_finite)) ||
1727  (Name == "__coshf_finite" && TLI->has(LibFunc_coshf_finite)))
1728  return ConstantFoldFP(cosh, V, Ty);
1729  break;
1730  case 'e':
1731  if ((Name == "exp" && TLI->has(LibFunc_exp)) ||
1732  (Name == "expf" && TLI->has(LibFunc_expf)) ||
1733  (Name == "__exp_finite" && TLI->has(LibFunc_exp_finite)) ||
1734  (Name == "__expf_finite" && TLI->has(LibFunc_expf_finite)))
1735  return ConstantFoldFP(exp, V, Ty);
1736  if ((Name == "exp2" && TLI->has(LibFunc_exp2)) ||
1737  (Name == "exp2f" && TLI->has(LibFunc_exp2f)) ||
1738  (Name == "__exp2_finite" && TLI->has(LibFunc_exp2_finite)) ||
1739  (Name == "__exp2f_finite" && TLI->has(LibFunc_exp2f_finite)))
1740  // Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
1741  // C99 library.
1742  return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
1743  break;
1744  case 'f':
1745  if ((Name == "fabs" && TLI->has(LibFunc_fabs)) ||
1746  (Name == "fabsf" && TLI->has(LibFunc_fabsf)))
1747  return ConstantFoldFP(fabs, V, Ty);
1748  else if ((Name == "floor" && TLI->has(LibFunc_floor)) ||
1749  (Name == "floorf" && TLI->has(LibFunc_floorf)))
1750  return ConstantFoldFP(floor, V, Ty);
1751  break;
1752  case 'l':
1753  if ((Name == "log" && V > 0 && TLI->has(LibFunc_log)) ||
1754  (Name == "logf" && V > 0 && TLI->has(LibFunc_logf)) ||
1755  (Name == "__log_finite" && V > 0 &&
1756  TLI->has(LibFunc_log_finite)) ||
1757  (Name == "__logf_finite" && V > 0 &&
1758  TLI->has(LibFunc_logf_finite)))
1759  return ConstantFoldFP(log, V, Ty);
1760  else if ((Name == "log10" && V > 0 && TLI->has(LibFunc_log10)) ||
1761  (Name == "log10f" && V > 0 && TLI->has(LibFunc_log10f)) ||
1762  (Name == "__log10_finite" && V > 0 &&
1763  TLI->has(LibFunc_log10_finite)) ||
1764  (Name == "__log10f_finite" && V > 0 &&
1765  TLI->has(LibFunc_log10f_finite)))
1766  return ConstantFoldFP(log10, V, Ty);
1767  break;
1768  case 'r':
1769  if ((Name == "round" && TLI->has(LibFunc_round)) ||
1770  (Name == "roundf" && TLI->has(LibFunc_roundf)))
1771  return ConstantFoldFP(round, V, Ty);
1772  break;
1773  case 's':
1774  if ((Name == "sin" && TLI->has(LibFunc_sin)) ||
1775  (Name == "sinf" && TLI->has(LibFunc_sinf)))
1776  return ConstantFoldFP(sin, V, Ty);
1777  else if ((Name == "sinh" && TLI->has(LibFunc_sinh)) ||
1778  (Name == "sinhf" && TLI->has(LibFunc_sinhf)) ||
1779  (Name == "__sinh_finite" && TLI->has(LibFunc_sinh_finite)) ||
1780  (Name == "__sinhf_finite" && TLI->has(LibFunc_sinhf_finite)))
1781  return ConstantFoldFP(sinh, V, Ty);
1782  else if ((Name == "sqrt" && V >= 0 && TLI->has(LibFunc_sqrt)) ||
1783  (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc_sqrtf)))
1784  return ConstantFoldFP(sqrt, V, Ty);
1785  break;
1786  case 't':
1787  if ((Name == "tan" && TLI->has(LibFunc_tan)) ||
1788  (Name == "tanf" && TLI->has(LibFunc_tanf)))
1789  return ConstantFoldFP(tan, V, Ty);
1790  else if ((Name == "tanh" && TLI->has(LibFunc_tanh)) ||
1791  (Name == "tanhf" && TLI->has(LibFunc_tanhf)))
1792  return ConstantFoldFP(tanh, V, Ty);
1793  break;
1794  default:
1795  break;
1796  }
1797  return nullptr;
1798  }
1799 
1800  if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
1801  switch (IntrinsicID) {
1802  case Intrinsic::bswap:
1803  return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
1804  case Intrinsic::ctpop:
1805  return ConstantInt::get(Ty, Op->getValue().countPopulation());
1806  case Intrinsic::bitreverse:
1807  return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
1808  case Intrinsic::convert_from_fp16: {
1809  APFloat Val(APFloat::IEEEhalf(), Op->getValue());
1810 
1811  bool lost = false;
1812  APFloat::opStatus status = Val.convert(
1813  Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);
1814 
1815  // Conversion is always precise.
1816  (void)status;
1817  assert(status == APFloat::opOK && !lost &&
1818  "Precision lost during fp16 constfolding");
1819 
1820  return ConstantFP::get(Ty->getContext(), Val);
1821  }
1822  default:
1823  return nullptr;
1824  }
1825  }
1826 
1827  // Support ConstantVector in case we have an Undef in the top.
1828  if (isa<ConstantVector>(Operands[0]) ||
1829  isa<ConstantDataVector>(Operands[0])) {
1830  auto *Op = cast<Constant>(Operands[0]);
1831  switch (IntrinsicID) {
1832  default: break;
1833  case Intrinsic::x86_sse_cvtss2si:
1834  case Intrinsic::x86_sse_cvtss2si64:
1835  case Intrinsic::x86_sse2_cvtsd2si:
1836  case Intrinsic::x86_sse2_cvtsd2si64:
1837  if (ConstantFP *FPOp =
1838  dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
1839  return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
1840  /*roundTowardZero=*/false, Ty);
1841  break;
1842  case Intrinsic::x86_sse_cvttss2si:
1843  case Intrinsic::x86_sse_cvttss2si64:
1844  case Intrinsic::x86_sse2_cvttsd2si:
1845  case Intrinsic::x86_sse2_cvttsd2si64:
1846  if (ConstantFP *FPOp =
1847  dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
1848  return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
1849  /*roundTowardZero=*/true, Ty);
1850  break;
1851  }
1852  }
1853 
1854  return nullptr;
1855  }
1856 
1857  if (Operands.size() == 2) {
1858  if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
1859  if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
1860  return nullptr;
1861  double Op1V = getValueAsDouble(Op1);
1862 
1863  if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
1864  if (Op2->getType() != Op1->getType())
1865  return nullptr;
1866 
1867  double Op2V = getValueAsDouble(Op2);
1868  if (IntrinsicID == Intrinsic::pow) {
1869  return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
1870  }
1871  if (IntrinsicID == Intrinsic::copysign) {
1872  APFloat V1 = Op1->getValueAPF();
1873  const APFloat &V2 = Op2->getValueAPF();
1874  V1.copySign(V2);
1875  return ConstantFP::get(Ty->getContext(), V1);
1876  }
1877 
1878  if (IntrinsicID == Intrinsic::minnum) {
1879  const APFloat &C1 = Op1->getValueAPF();
1880  const APFloat &C2 = Op2->getValueAPF();
1881  return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
1882  }
1883 
1884  if (IntrinsicID == Intrinsic::maxnum) {
1885  const APFloat &C1 = Op1->getValueAPF();
1886  const APFloat &C2 = Op2->getValueAPF();
1887  return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
1888  }
1889 
1890  if (!TLI)
1891  return nullptr;
1892  if ((Name == "pow" && TLI->has(LibFunc_pow)) ||
1893  (Name == "powf" && TLI->has(LibFunc_powf)) ||
1894  (Name == "__pow_finite" && TLI->has(LibFunc_pow_finite)) ||
1895  (Name == "__powf_finite" && TLI->has(LibFunc_powf_finite)))
1896  return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
1897  if ((Name == "fmod" && TLI->has(LibFunc_fmod)) ||
1898  (Name == "fmodf" && TLI->has(LibFunc_fmodf)))
1899  return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
1900  if ((Name == "atan2" && TLI->has(LibFunc_atan2)) ||
1901  (Name == "atan2f" && TLI->has(LibFunc_atan2f)) ||
1902  (Name == "__atan2_finite" && TLI->has(LibFunc_atan2_finite)) ||
1903  (Name == "__atan2f_finite" && TLI->has(LibFunc_atan2f_finite)))
1904  return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
1905  } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
1906  if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
1907  return ConstantFP::get(Ty->getContext(),
1908  APFloat((float)std::pow((float)Op1V,
1909  (int)Op2C->getZExtValue())));
1910  if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
1911  return ConstantFP::get(Ty->getContext(),
1912  APFloat((float)std::pow((float)Op1V,
1913  (int)Op2C->getZExtValue())));
1914  if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
1915  return ConstantFP::get(Ty->getContext(),
1916  APFloat((double)std::pow((double)Op1V,
1917  (int)Op2C->getZExtValue())));
1918  }
1919  return nullptr;
1920  }
1921 
1922  if (auto *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
1923  if (auto *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
1924  switch (IntrinsicID) {
1925  default: break;
1926  case Intrinsic::sadd_with_overflow:
1927  case Intrinsic::uadd_with_overflow:
1928  case Intrinsic::ssub_with_overflow:
1929  case Intrinsic::usub_with_overflow:
1930  case Intrinsic::smul_with_overflow:
1931  case Intrinsic::umul_with_overflow: {
1932  APInt Res;
1933  bool Overflow;
1934  switch (IntrinsicID) {
1935  default: llvm_unreachable("Invalid case");
1936  case Intrinsic::sadd_with_overflow:
1937  Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
1938  break;
1939  case Intrinsic::uadd_with_overflow:
1940  Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow);
1941  break;
1942  case Intrinsic::ssub_with_overflow:
1943  Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow);
1944  break;
1945  case Intrinsic::usub_with_overflow:
1946  Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow);
1947  break;
1948  case Intrinsic::smul_with_overflow:
1949  Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow);
1950  break;
1951  case Intrinsic::umul_with_overflow:
1952  Res = Op1->getValue().umul_ov(Op2->getValue(), Overflow);
1953  break;
1954  }
1955  Constant *Ops[] = {
1956  ConstantInt::get(Ty->getContext(), Res),
1957  ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
1958  };
1959  return ConstantStruct::get(cast<StructType>(Ty), Ops);
1960  }
1961  case Intrinsic::cttz:
1962  if (Op2->isOne() && Op1->isZero()) // cttz(0, 1) is undef.
1963  return UndefValue::get(Ty);
1964  return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros());
1965  case Intrinsic::ctlz:
1966  if (Op2->isOne() && Op1->isZero()) // ctlz(0, 1) is undef.
1967  return UndefValue::get(Ty);
1968  return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
1969  }
1970  }
1971 
1972  return nullptr;
1973  }
1974  return nullptr;
1975  }
1976 
1977  if (Operands.size() != 3)
1978  return nullptr;
1979 
1980  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
1981  if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
1982  if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
1983  switch (IntrinsicID) {
1984  default: break;
1985  case Intrinsic::fma:
1986  case Intrinsic::fmuladd: {
1987  APFloat V = Op1->getValueAPF();
1988  APFloat::opStatus s = V.fusedMultiplyAdd(Op2->getValueAPF(),
1989  Op3->getValueAPF(),
1990  APFloat::rmNearestTiesToEven);
1991  if (s != APFloat::opInvalidOp)
1992  return ConstantFP::get(Ty->getContext(), V);
1993 
1994  return nullptr;
1995  }
1996  }
1997  }
1998  }
1999  }
2000 
2001  return nullptr;
2002 }
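// Worked example for the *_with_overflow cases above (informal IR; a sketch):
//
//   call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 100, i8 100)
//
// folds to the struct constant { i8 -56, i1 true }, since 100 + 100 wraps to
// -56 in eight bits and signed overflow occurred. Similarly cttz(i32 0, i1 1)
// and ctlz(i32 0, i1 1) fold to undef, because a true second operand asserts
// that the input is non-zero.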
2003 
2004 Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID,
2005  VectorType *VTy, ArrayRef<Constant *> Operands,
2006  const DataLayout &DL,
2007  const TargetLibraryInfo *TLI) {
2008  SmallVector<Constant *, 4> Result(VTy->getNumElements());
2009  SmallVector<Constant *, 4> Lane(Operands.size());
2010  Type *Ty = VTy->getElementType();
2011 
2012  if (IntrinsicID == Intrinsic::masked_load) {
2013  auto *SrcPtr = Operands[0];
2014  auto *Mask = Operands[2];
2015  auto *Passthru = Operands[3];
2016 
2017  Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, VTy, DL);
2018 
2019  SmallVector<Constant *, 32> NewElements;
2020  for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
2021  auto *MaskElt = Mask->getAggregateElement(I);
2022  if (!MaskElt)
2023  break;
2024  auto *PassthruElt = Passthru->getAggregateElement(I);
2025  auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
2026  if (isa<UndefValue>(MaskElt)) {
2027  if (PassthruElt)
2028  NewElements.push_back(PassthruElt);
2029  else if (VecElt)
2030  NewElements.push_back(VecElt);
2031  else
2032  return nullptr;
2033  }
2034  if (MaskElt->isNullValue()) {
2035  if (!PassthruElt)
2036  return nullptr;
2037  NewElements.push_back(PassthruElt);
2038  } else if (MaskElt->isOneValue()) {
2039  if (!VecElt)
2040  return nullptr;
2041  NewElements.push_back(VecElt);
2042  } else {
2043  return nullptr;
2044  }
2045  }
2046  if (NewElements.size() != VTy->getNumElements())
2047  return nullptr;
2048  return ConstantVector::get(NewElements);
2049  }
2050 
2051  for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
2052  // Gather a column of constants.
2053  for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
2054  // These intrinsics use a scalar type for their second argument.
2055  if (J == 1 &&
2056  (IntrinsicID == Intrinsic::cttz || IntrinsicID == Intrinsic::ctlz ||
2057  IntrinsicID == Intrinsic::powi)) {
2058  Lane[J] = Operands[J];
2059  continue;
2060  }
2061 
2062  Constant *Agg = Operands[J]->getAggregateElement(I);
2063  if (!Agg)
2064  return nullptr;
2065 
2066  Lane[J] = Agg;
2067  }
2068 
2069  // Use the regular scalar folding to simplify this column.
2070  Constant *Folded = ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI);
2071  if (!Folded)
2072  return nullptr;
2073  Result[I] = Folded;
2074  }
2075 
2076  return ConstantVector::get(Result);
2077 }
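// Per-lane sketch of the loop above (informal IR):
//
//   call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> <i32 1, i32 8>, i1 false)
//
// is folded one lane at a time through ConstantFoldScalarCall, yielding
// <i32 31, i32 28>; the scalar i1 operand is reused unchanged for every lane.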
2078 
2079 } // end anonymous namespace
2080 
2081 Constant *
2082 llvm::ConstantFoldCall(ImmutableCallSite CS, Function *F,
2083  ArrayRef<Constant *> Operands,
2084  const TargetLibraryInfo *TLI) {
2085  if (CS.isNoBuiltin() || CS.isStrictFP())
2086  return nullptr;
2087  if (!F->hasName())
2088  return nullptr;
2089  StringRef Name = F->getName();
2090 
2091  Type *Ty = F->getReturnType();
2092 
2093  if (auto *VTy = dyn_cast<VectorType>(Ty))
2094  return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
2095  F->getParent()->getDataLayout(), TLI);
2096 
2097  return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI);
2098 }
2099 
2100 bool llvm::isMathLibCallNoop(CallSite CS, const TargetLibraryInfo *TLI) {
2101  // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
2102  // (and to some extent ConstantFoldScalarCall).
2103  if (CS.isNoBuiltin() || CS.isStrictFP())
2104  return false;
2105  Function *F = CS.getCalledFunction();
2106  if (!F)
2107  return false;
2108 
2109  LibFunc Func;
2110  if (!TLI || !TLI->getLibFunc(*F, Func))
2111  return false;
2112 
2113  if (CS.getNumArgOperands() == 1) {
2114  if (ConstantFP *OpC = dyn_cast<ConstantFP>(CS.getArgOperand(0))) {
2115  const APFloat &Op = OpC->getValueAPF();
2116  switch (Func) {
2117  case LibFunc_logl:
2118  case LibFunc_log:
2119  case LibFunc_logf:
2120  case LibFunc_log2l:
2121  case LibFunc_log2:
2122  case LibFunc_log2f:
2123  case LibFunc_log10l:
2124  case LibFunc_log10:
2125  case LibFunc_log10f:
2126  return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
2127 
2128  case LibFunc_expl:
2129  case LibFunc_exp:
2130  case LibFunc_expf:
2131  // FIXME: These boundaries are slightly conservative.
2132  if (OpC->getType()->isDoubleTy())
2133  return Op.compare(APFloat(-745.0)) != APFloat::cmpLessThan &&
2134  Op.compare(APFloat(709.0)) != APFloat::cmpGreaterThan;
2135  if (OpC->getType()->isFloatTy())
2136  return Op.compare(APFloat(-103.0f)) != APFloat::cmpLessThan &&
2137  Op.compare(APFloat(88.0f)) != APFloat::cmpGreaterThan;
2138  break;
2139 
2140  case LibFunc_exp2l:
2141  case LibFunc_exp2:
2142  case LibFunc_exp2f:
2143  // FIXME: These boundaries are slightly conservative.
2144  if (OpC->getType()->isDoubleTy())
2145  return Op.compare(APFloat(-1074.0)) != APFloat::cmpLessThan &&
2146  Op.compare(APFloat(1023.0)) != APFloat::cmpGreaterThan;
2147  if (OpC->getType()->isFloatTy())
2148  return Op.compare(APFloat(-149.0f)) != APFloat::cmpLessThan &&
2149  Op.compare(APFloat(127.0f)) != APFloat::cmpGreaterThan;
2150  break;
2151 
2152  case LibFunc_sinl:
2153  case LibFunc_sin:
2154  case LibFunc_sinf:
2155  case LibFunc_cosl:
2156  case LibFunc_cos:
2157  case LibFunc_cosf:
2158  return !Op.isInfinity();
2159 
2160  case LibFunc_tanl:
2161  case LibFunc_tan:
2162  case LibFunc_tanf: {
2163  // FIXME: Stop using the host math library.
2164  // FIXME: The computation isn't done in the right precision.
2165  Type *Ty = OpC->getType();
2166  if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
2167  double OpV = getValueAsDouble(OpC);
2168  return ConstantFoldFP(tan, OpV, Ty) != nullptr;
2169  }
2170  break;
2171  }
2172 
2173  case LibFunc_asinl:
2174  case LibFunc_asin:
2175  case LibFunc_asinf:
2176  case LibFunc_acosl:
2177  case LibFunc_acos:
2178  case LibFunc_acosf:
2179  return Op.compare(APFloat(Op.getSemantics(), "-1")) !=
2180  APFloat::cmpLessThan &&
2181  Op.compare(APFloat(Op.getSemantics(), "1")) !=
2182  APFloat::cmpGreaterThan;
2183 
2184  case LibFunc_sinh:
2185  case LibFunc_cosh:
2186  case LibFunc_sinhf:
2187  case LibFunc_coshf:
2188  case LibFunc_sinhl:
2189  case LibFunc_coshl:
2190  // FIXME: These boundaries are slightly conservative.
2191  if (OpC->getType()->isDoubleTy())
2192  return Op.compare(APFloat(-710.0)) != APFloat::cmpLessThan &&
2193  Op.compare(APFloat(710.0)) != APFloat::cmpGreaterThan;
2194  if (OpC->getType()->isFloatTy())
2195  return Op.compare(APFloat(-89.0f)) != APFloat::cmpLessThan &&
2196  Op.compare(APFloat(89.0f)) != APFloat::cmpGreaterThan;
2197  break;
2198 
2199  case LibFunc_sqrtl:
2200  case LibFunc_sqrt:
2201  case LibFunc_sqrtf:
2202  return Op.isNaN() || Op.isZero() || !Op.isNegative();
2203 
2204  // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
2205  // maybe others?
2206  default:
2207  break;
2208  }
2209  }
2210  }
2211 
2212  if (CS.getNumArgOperands() == 2) {
2213  ConstantFP *Op0C = dyn_cast<ConstantFP>(CS.getArgOperand(0));
2214  ConstantFP *Op1C = dyn_cast<ConstantFP>(CS.getArgOperand(1));
2215  if (Op0C && Op1C) {
2216  const APFloat &Op0 = Op0C->getValueAPF();
2217  const APFloat &Op1 = Op1C->getValueAPF();
2218 
2219  switch (Func) {
2220  case LibFunc_powl:
2221  case LibFunc_pow:
2222  case LibFunc_powf: {
2223  // FIXME: Stop using the host math library.
2224  // FIXME: The computation isn't done in the right precision.
2225  Type *Ty = Op0C->getType();
2226  if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
2227  if (Ty == Op1C->getType()) {
2228  double Op0V = getValueAsDouble(Op0C);
2229  double Op1V = getValueAsDouble(Op1C);
2230  return ConstantFoldBinaryFP(pow, Op0V, Op1V, Ty) != nullptr;
2231  }
2232  }
2233  break;
2234  }
2235 
2236  case LibFunc_fmodl:
2237  case LibFunc_fmod:
2238  case LibFunc_fmodf:
2239  return Op0.isNaN() || Op1.isNaN() ||
2240  (!Op0.isInfinity() && !Op1.isZero());
2241 
2242  default:
2243  break;
2244  }
2245  }
2246  }
2247 
2248  return false;
2249 }
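// Examples of the domain checks above (a sketch): sqrt(-1.0) is not treated
// as a no-op (a negative, non-zero, non-NaN input may set errno), while
// sqrt(4.0) is; log(0.0) is rejected because of the pole at zero, but
// log(NaN) is accepted since NaN passes quietly through the libm call.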