LCOV - code coverage report

Current view: top level - lib/Analysis - ConstantFolding.cpp (source / functions)
Test:         llvm-toolchain.info
Date:         2018-10-20 13:21:21

                   Hit    Total    Coverage
  Lines:           828      896      92.4 %
  Functions:        33       34      97.1 %

          Line data    Source code
       1             : //===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
       2             : //
       3             : //                     The LLVM Compiler Infrastructure
       4             : //
       5             : // This file is distributed under the University of Illinois Open Source
       6             : // License. See LICENSE.TXT for details.
       7             : //
       8             : //===----------------------------------------------------------------------===//
       9             : //
      10             : // This file defines routines for folding instructions into constants.
      11             : //
      12             : // Also, to supplement the basic IR ConstantExpr simplifications,
      13             : // this file defines some additional folding routines that can make use of
      14             : // DataLayout information. These functions cannot go in IR due to library
      15             : // dependency issues.
      16             : //
      17             : //===----------------------------------------------------------------------===//
      18             : 
      19             : #include "llvm/Analysis/ConstantFolding.h"
      20             : #include "llvm/ADT/APFloat.h"
      21             : #include "llvm/ADT/APInt.h"
      22             : #include "llvm/ADT/ArrayRef.h"
      23             : #include "llvm/ADT/DenseMap.h"
      24             : #include "llvm/ADT/STLExtras.h"
      25             : #include "llvm/ADT/SmallVector.h"
      26             : #include "llvm/ADT/StringRef.h"
      27             : #include "llvm/Analysis/TargetLibraryInfo.h"
      28             : #include "llvm/Analysis/ValueTracking.h"
      29             : #include "llvm/Config/config.h"
      30             : #include "llvm/IR/Constant.h"
      31             : #include "llvm/IR/Constants.h"
      32             : #include "llvm/IR/DataLayout.h"
      33             : #include "llvm/IR/DerivedTypes.h"
      34             : #include "llvm/IR/Function.h"
      35             : #include "llvm/IR/GlobalValue.h"
      36             : #include "llvm/IR/GlobalVariable.h"
      37             : #include "llvm/IR/InstrTypes.h"
      38             : #include "llvm/IR/Instruction.h"
      39             : #include "llvm/IR/Instructions.h"
      40             : #include "llvm/IR/Operator.h"
      41             : #include "llvm/IR/Type.h"
      42             : #include "llvm/IR/Value.h"
      43             : #include "llvm/Support/Casting.h"
      44             : #include "llvm/Support/ErrorHandling.h"
      45             : #include "llvm/Support/KnownBits.h"
      46             : #include "llvm/Support/MathExtras.h"
      47             : #include <cassert>
      48             : #include <cerrno>
      49             : #include <cfenv>
      50             : #include <cmath>
      51             : #include <cstddef>
      52             : #include <cstdint>
      53             : 
      54             : using namespace llvm;
      55             : 
      56             : namespace {
      57             : 
      58             : //===----------------------------------------------------------------------===//
      59             : // Constant Folding internal helper functions
      60             : //===----------------------------------------------------------------------===//
      61             : 
      62        5583 : static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
      63             :                                         Constant *C, Type *SrcEltTy,
      64             :                                         unsigned NumSrcElts,
      65             :                                         const DataLayout &DL) {
      66             :   // Now that we know that the input value is a vector of integers, just shift
      67             :   // and insert them into our result.
      68        5583 :   unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
      69       39529 :   for (unsigned i = 0; i != NumSrcElts; ++i) {
      70             :     Constant *Element;
      71       33947 :     if (DL.isLittleEndian())
      72       32705 :       Element = C->getAggregateElement(NumSrcElts - i - 1);
      73             :     else
      74        1242 :       Element = C->getAggregateElement(i);
      75             : 
      76       33947 :     if (Element && isa<UndefValue>(Element)) {
      77         790 :       Result <<= BitShift;
      78             :       continue;
      79             :     }
      80             : 
      81             :     auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
      82             :     if (!ElementCI)
      83           1 :       return ConstantExpr::getBitCast(C, DestTy);
      84             : 
      85       33156 :     Result <<= BitShift;
      86       66312 :     Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
      87             :   }
      88             : 
      89             :   return nullptr;
      90             : }
      91             : 
      92             : /// Constant fold bitcast, symbolically evaluating it with DataLayout.
      93             : /// This always returns a non-null constant, but it may be a
      94             : /// ConstantExpr if unfoldable.
      95     1762497 : Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
      96             :   // Catch the obvious splat cases.
      97     1762497 :   if (C->isNullValue() && !DestTy->isX86_MMXTy())
      98         328 :     return Constant::getNullValue(DestTy);
      99     1763707 :   if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
     100             :       !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
     101        1536 :     return Constant::getAllOnesValue(DestTy);
     102             : 
     103     1760633 :   if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
     104             :     // Handle a vector->scalar integer/fp cast.
     105        5817 :     if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
     106        5583 :       unsigned NumSrcElts = VTy->getNumElements();
     107        5583 :       Type *SrcEltTy = VTy->getElementType();
     108             : 
      109             :       // If the vector is a vector of floating point, convert it to a vector of int
     110             :       // to simplify things.
     111             :       if (SrcEltTy->isFloatingPointTy()) {
     112        1232 :         unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
     113             :         Type *SrcIVTy =
     114        1232 :           VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
     115             :         // Ask IR to do the conversion now that #elts line up.
     116        1232 :         C = ConstantExpr::getBitCast(C, SrcIVTy);
     117             :       }
     118             : 
     119        5583 :       APInt Result(DL.getTypeSizeInBits(DestTy), 0);
     120        5583 :       if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
     121             :                                                 SrcEltTy, NumSrcElts, DL))
     122             :         return CE;
     123             : 
     124        5582 :       if (isa<IntegerType>(DestTy))
     125        5570 :         return ConstantInt::get(DestTy, Result);
     126             : 
     127          12 :       APFloat FP(DestTy->getFltSemantics(), Result);
     128          12 :       return ConstantFP::get(DestTy->getContext(), FP);
     129             :     }
     130             :   }
     131             : 
     132             :   // The code below only handles casts to vectors currently.
     133             :   auto *DestVTy = dyn_cast<VectorType>(DestTy);
     134             :   if (!DestVTy)
     135     1754810 :     return ConstantExpr::getBitCast(C, DestTy);
     136             : 
     137             :   // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
     138             :   // vector so the code below can handle it uniformly.
     139         240 :   if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
     140          14 :     Constant *Ops = C; // don't take the address of C!
     141          14 :     return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
     142             :   }
     143             : 
     144             :   // If this is a bitcast from constant vector -> vector, fold it.
     145         226 :   if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
     146           9 :     return ConstantExpr::getBitCast(C, DestTy);
     147             : 
     148             :   // If the element types match, IR can fold it.
     149         217 :   unsigned NumDstElt = DestVTy->getNumElements();
     150             :   unsigned NumSrcElt = C->getType()->getVectorNumElements();
     151         217 :   if (NumDstElt == NumSrcElt)
     152          57 :     return ConstantExpr::getBitCast(C, DestTy);
     153             : 
     154         160 :   Type *SrcEltTy = C->getType()->getVectorElementType();
     155         160 :   Type *DstEltTy = DestVTy->getElementType();
     156             : 
     157             :   // Otherwise, we're changing the number of elements in a vector, which
     158             :   // requires endianness information to do the right thing.  For example,
     159             :   //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
     160             :   // folds to (little endian):
     161             :   //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
     162             :   // and to (big endian):
     163             :   //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>
     164             : 
      165             :   // First things first.  We only want to think about integers here, so if
      166             :   // we have something in FP form, recast it as an integer.
     167             :   if (DstEltTy->isFloatingPointTy()) {
      168             :     // Fold to a vector of integers with the same size as our FP type.
     169          14 :     unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
     170             :     Type *DestIVTy =
     171          14 :       VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
     172             :     // Recursively handle this integer conversion, if possible.
     173          14 :     C = FoldBitCast(C, DestIVTy, DL);
     174             : 
     175             :     // Finally, IR can handle this now that #elts line up.
     176          14 :     return ConstantExpr::getBitCast(C, DestTy);
     177             :   }
     178             : 
     179             :   // Okay, we know the destination is integer, if the input is FP, convert
     180             :   // it to integer first.
     181             :   if (SrcEltTy->isFloatingPointTy()) {
     182           5 :     unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
     183             :     Type *SrcIVTy =
     184           5 :       VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
     185             :     // Ask IR to do the conversion now that #elts line up.
     186           5 :     C = ConstantExpr::getBitCast(C, SrcIVTy);
     187             :     // If IR wasn't able to fold it, bail out.
     188           5 :     if (!isa<ConstantVector>(C) &&  // FIXME: Remove ConstantVector.
     189             :         !isa<ConstantDataVector>(C))
     190             :       return C;
     191             :   }
     192             : 
     193             :   // Now we know that the input and output vectors are both integer vectors
     194             :   // of the same size, and that their #elements is not the same.  Do the
     195             :   // conversion here, which depends on whether the input or output has
     196             :   // more elements.
     197         146 :   bool isLittleEndian = DL.isLittleEndian();
     198             : 
     199             :   SmallVector<Constant*, 32> Result;
     200         146 :   if (NumDstElt < NumSrcElt) {
     201             :     // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
     202          46 :     Constant *Zero = Constant::getNullValue(DstEltTy);
     203          46 :     unsigned Ratio = NumSrcElt/NumDstElt;
     204          46 :     unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
     205             :     unsigned SrcElt = 0;
     206         221 :     for (unsigned i = 0; i != NumDstElt; ++i) {
     207             :       // Build each element of the result.
     208         175 :       Constant *Elt = Zero;
     209         175 :       unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
     210         605 :       for (unsigned j = 0; j != Ratio; ++j) {
     211         430 :         Constant *Src = C->getAggregateElement(SrcElt++);
     212         430 :         if (Src && isa<UndefValue>(Src))
     213         136 :           Src = Constant::getNullValue(C->getType()->getVectorElementType());
     214             :         else
     215             :           Src = dyn_cast_or_null<ConstantInt>(Src);
     216         430 :         if (!Src)  // Reject constantexpr elements.
     217           0 :           return ConstantExpr::getBitCast(C, DestTy);
     218             : 
     219             :         // Zero extend the element to the right size.
     220         430 :         Src = ConstantExpr::getZExt(Src, Elt->getType());
     221             : 
     222             :         // Shift it to the right place, depending on endianness.
     223         430 :         Src = ConstantExpr::getShl(Src,
     224             :                                    ConstantInt::get(Src->getType(), ShiftAmt));
     225         430 :         ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
     226             : 
     227             :         // Mix it in.
     228         430 :         Elt = ConstantExpr::getOr(Elt, Src);
     229             :       }
     230         175 :       Result.push_back(Elt);
     231             :     }
     232          46 :     return ConstantVector::get(Result);
     233             :   }
     234             : 
     235             :   // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
     236         100 :   unsigned Ratio = NumDstElt/NumSrcElt;
     237         100 :   unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);
     238             : 
     239             :   // Loop over each source value, expanding into multiple results.
     240         382 :   for (unsigned i = 0; i != NumSrcElt; ++i) {
     241         284 :     auto *Element = C->getAggregateElement(i);
     242             : 
     243         284 :     if (!Element) // Reject constantexpr elements.
     244           0 :       return ConstantExpr::getBitCast(C, DestTy);
     245             : 
     246         284 :     if (isa<UndefValue>(Element)) {
      247             :       // Correctly propagate undef values.
     248          45 :       Result.append(Ratio, UndefValue::get(DstEltTy));
     249             :       continue;
     250             :     }
     251             : 
     252             :     auto *Src = dyn_cast<ConstantInt>(Element);
     253             :     if (!Src)
     254           2 :       return ConstantExpr::getBitCast(C, DestTy);
     255             : 
     256         237 :     unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
     257        1253 :     for (unsigned j = 0; j != Ratio; ++j) {
     258             :       // Shift the piece of the value into the right place, depending on
     259             :       // endianness.
     260        1016 :       Constant *Elt = ConstantExpr::getLShr(Src,
     261        2032 :                                   ConstantInt::get(Src->getType(), ShiftAmt));
     262        1016 :       ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
     263             : 
     264             :       // Truncate the element to an integer with the same pointer size and
      265             :       // convert the element back to a pointer using an inttoptr.
     266        1016 :       if (DstEltTy->isPointerTy()) {
     267           6 :         IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
     268           6 :         Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
     269           6 :         Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
     270           6 :         continue;
     271             :       }
     272             : 
     273             :       // Truncate and remember this piece.
     274        1010 :       Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
     275             :     }
     276             :   }
     277             : 
     278          98 :   return ConstantVector::get(Result);
     279             : }
     280             : 
     281             : } // end anonymous namespace
     282             : 
     283             : /// If this constant is a constant offset from a global, return the global and
     284             : /// the constant. Because of constantexprs, this function is recursive.
     285    23322978 : bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
     286             :                                       APInt &Offset, const DataLayout &DL) {
     287             :   // Trivial case, constant is the global.
     288    23322978 :   if ((GV = dyn_cast<GlobalValue>(C))) {
     289    11640964 :     unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
     290    11640964 :     Offset = APInt(BitWidth, 0);
     291    11640964 :     return true;
     292             :   }
     293             : 
     294             :   // Otherwise, if this isn't a constant expr, bail out.
     295             :   auto *CE = dyn_cast<ConstantExpr>(C);
     296             :   if (!CE) return false;
     297             : 
     298             :   // Look through ptr->int and ptr->ptr casts.
     299    11681115 :   if (CE->getOpcode() == Instruction::PtrToInt ||
     300             :       CE->getOpcode() == Instruction::BitCast)
     301      702615 :     return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL);
     302             : 
     303             :   // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
     304             :   auto *GEP = dyn_cast<GEPOperator>(CE);
     305             :   if (!GEP)
     306             :     return false;
     307             : 
     308    10978339 :   unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
     309             :   APInt TmpOffset(BitWidth, 0);
     310             : 
     311             :   // If the base isn't a global+constant, we aren't either.
     312    10978339 :   if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL))
     313             :     return false;
     314             : 
     315             :   // Otherwise, add any offset that our operands provide.
     316    10977479 :   if (!GEP->accumulateConstantOffset(DL, TmpOffset))
     317             :     return false;
     318             : 
     319    10977475 :   Offset = TmpOffset;
     320    10977475 :   return true;
     321             : }
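
The routine above is typically called by clients that want to reason about a constant address as "global + byte offset". A minimal caller-side sketch, assuming only the declaration from llvm/Analysis/ConstantFolding.h (the wrapper name getBaseAndOffset and its setup are illustrative, not part of this file):

    #include "llvm/ADT/APInt.h"
    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/IR/Constant.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/GlobalValue.h"

    // Sketch: if C is equivalent to "global + constant offset", return the
    // global and report the signed byte offset; otherwise return nullptr.
    static llvm::GlobalValue *getBaseAndOffset(llvm::Constant *C,
                                               const llvm::DataLayout &DL,
                                               int64_t &ByteOffset) {
      llvm::GlobalValue *GV = nullptr;
      llvm::APInt Offset;
      if (!llvm::IsConstantOffsetFromGlobal(C, GV, Offset, DL))
        return nullptr;
      ByteOffset = Offset.getSExtValue();
      return GV;
    }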
     322             : 
     323         629 : Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
     324             :                                          const DataLayout &DL) {
     325             :   do {
     326         926 :     Type *SrcTy = C->getType();
     327             : 
     328             :     // If the type sizes are the same and a cast is legal, just directly
     329             :     // cast the constant.
     330         926 :     if (DL.getTypeSizeInBits(DestTy) == DL.getTypeSizeInBits(SrcTy)) {
     331             :       Instruction::CastOps Cast = Instruction::BitCast;
     332             :       // If we are going from a pointer to int or vice versa, we spell the cast
     333             :       // differently.
     334         407 :       if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
     335             :         Cast = Instruction::IntToPtr;
     336         406 :       else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
     337             :         Cast = Instruction::PtrToInt;
     338             : 
     339         407 :       if (CastInst::castIsValid(Cast, C, DestTy))
     340         264 :         return ConstantExpr::getCast(Cast, C, DestTy);
     341             :     }
     342             : 
     343             :     // If this isn't an aggregate type, there is nothing we can do to drill down
     344             :     // and find a bitcastable constant.
     345             :     if (!SrcTy->isAggregateType())
     346             :       return nullptr;
     347             : 
     348             :     // We're simulating a load through a pointer that was bitcast to point to
     349             :     // a different type, so we can try to walk down through the initial
      350             :     // elements of an aggregate to see if some part of the aggregate is
     351             :     // castable to implement the "load" semantic model.
     352         299 :     C = C->getAggregateElement(0u);
     353         299 :   } while (C);
     354             : 
     355             :   return nullptr;
     356             : }
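
As a rough usage sketch of ConstantFoldLoadThroughBitcast, the helper below is hypothetical: Init is assumed to be some constant initializer (e.g. an i32 union member), and the code asks whether it can be reinterpreted as a float, as if it had been loaded through a bitcast pointer.

    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/IR/Constant.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Type.h"

    // Sketch: reinterpret the constant Init as a float without emitting a load.
    // Returns nullptr when the reinterpretation cannot be folded.
    static llvm::Constant *loadAsFloat(llvm::Constant *Init,
                                       const llvm::DataLayout &DL) {
      llvm::Type *FloatTy = llvm::Type::getFloatTy(Init->getContext());
      return llvm::ConstantFoldLoadThroughBitcast(Init, FloatTy, DL);
    }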
     357             : 
     358             : namespace {
     359             : 
      360             : /// Recursive helper to read bits out of a global. C is the constant being copied
     361             : /// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
     362             : /// results into and BytesLeft is the number of bytes left in
     363             : /// the CurPtr buffer. DL is the DataLayout.
     364        1789 : bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
     365             :                         unsigned BytesLeft, const DataLayout &DL) {
     366             :   assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
     367             :          "Out of range access");
     368             : 
     369             :   // If this element is zero or undefined, we can just return since *CurPtr is
     370             :   // zero initialized.
     371        1829 :   if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
     372             :     return true;
     373             : 
     374             :   if (auto *CI = dyn_cast<ConstantInt>(C)) {
     375        1446 :     if (CI->getBitWidth() > 64 ||
     376        1446 :         (CI->getBitWidth() & 7) != 0)
     377             :       return false;
     378             : 
     379             :     uint64_t Val = CI->getZExtValue();
     380        1446 :     unsigned IntBytes = unsigned(CI->getBitWidth()/8);
     381             : 
     382        3333 :     for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
     383        1887 :       int n = ByteOffset;
     384        1887 :       if (!DL.isLittleEndian())
     385         108 :         n = IntBytes - n - 1;
     386        1887 :       CurPtr[i] = (unsigned char)(Val >> (n * 8));
     387        1887 :       ++ByteOffset;
     388             :     }
     389             :     return true;
     390             :   }
     391             : 
     392             :   if (auto *CFP = dyn_cast<ConstantFP>(C)) {
     393          68 :     if (CFP->getType()->isDoubleTy()) {
     394           2 :       C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
     395           2 :       return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
     396             :     }
     397          32 :     if (CFP->getType()->isFloatTy()){
     398          32 :       C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
     399          32 :       return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
     400             :     }
     401           0 :     if (CFP->getType()->isHalfTy()){
     402           0 :       C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
     403           0 :       return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
     404             :     }
     405             :     return false;
     406             :   }
     407             : 
     408             :   if (auto *CS = dyn_cast<ConstantStruct>(C)) {
     409         103 :     const StructLayout *SL = DL.getStructLayout(CS->getType());
     410         103 :     unsigned Index = SL->getElementContainingOffset(ByteOffset);
     411             :     uint64_t CurEltOffset = SL->getElementOffset(Index);
     412         103 :     ByteOffset -= CurEltOffset;
     413             : 
     414             :     while (true) {
     415             :       // If the element access is to the element itself and not to tail padding,
     416             :       // read the bytes from the element.
     417         171 :       uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());
     418             : 
     419         342 :       if (ByteOffset < EltSize &&
     420         171 :           !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
     421             :                               BytesLeft, DL))
     422             :         return false;
     423             : 
     424         171 :       ++Index;
     425             : 
      426             :       // Check to see if we read from the last struct element; if so, we're done.
     427         171 :       if (Index == CS->getType()->getNumElements())
     428             :         return true;
     429             : 
     430             :       // If we read all of the bytes we needed from this element we're done.
     431             :       uint64_t NextEltOffset = SL->getElementOffset(Index);
     432             : 
     433         127 :       if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
     434             :         return true;
     435             : 
     436             :       // Move to the next element of the struct.
     437          68 :       CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
     438          68 :       BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
     439             :       ByteOffset = 0;
     440             :       CurEltOffset = NextEltOffset;
     441          68 :     }
     442             :     // not reached.
     443             :   }
     444             : 
     445         237 :   if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
     446             :       isa<ConstantDataSequential>(C)) {
     447         231 :     Type *EltTy = C->getType()->getSequentialElementType();
     448         231 :     uint64_t EltSize = DL.getTypeAllocSize(EltTy);
     449         231 :     uint64_t Index = ByteOffset / EltSize;
     450             :     uint64_t Offset = ByteOffset - Index * EltSize;
     451             :     uint64_t NumElts;
     452         231 :     if (auto *AT = dyn_cast<ArrayType>(C->getType()))
     453         231 :       NumElts = AT->getNumElements();
     454             :     else
     455             :       NumElts = C->getType()->getVectorNumElements();
     456             : 
     457        1294 :     for (; Index != NumElts; ++Index) {
     458        1292 :       if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
     459             :                               BytesLeft, DL))
     460             :         return false;
     461             : 
     462        1292 :       uint64_t BytesWritten = EltSize - Offset;
     463             :       assert(BytesWritten <= EltSize && "Not indexing into this element?");
     464        1292 :       if (BytesWritten >= BytesLeft)
     465             :         return true;
     466             : 
     467             :       Offset = 0;
     468        1063 :       BytesLeft -= BytesWritten;
     469        1063 :       CurPtr += BytesWritten;
     470             :     }
     471             :     return true;
     472             :   }
     473             : 
     474             :   if (auto *CE = dyn_cast<ConstantExpr>(C)) {
     475          12 :     if (CE->getOpcode() == Instruction::IntToPtr &&
     476           6 :         CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
     477             :       return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
     478           6 :                                 BytesLeft, DL);
     479             :     }
     480             :   }
     481             : 
     482             :   // Otherwise, unknown initializer type.
     483             :   return false;
     484             : }
     485             : 
     486    12356691 : Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
     487             :                                           const DataLayout &DL) {
     488    12356691 :   auto *PTy = cast<PointerType>(C->getType());
     489             :   auto *IntType = dyn_cast<IntegerType>(LoadTy);
     490             : 
     491             :   // If this isn't an integer load we can't fold it directly.
     492             :   if (!IntType) {
     493             :     unsigned AS = PTy->getAddressSpace();
     494             : 
     495             :     // If this is a float/double load, we can try folding it as an int32/64 load
     496             :     // and then bitcast the result.  This can be useful for union cases.  Note
      497             :     // that address spaces don't matter here since this isn't going to result in
     498             :     // an actual new load.
     499             :     Type *MapTy;
     500      715542 :     if (LoadTy->isHalfTy())
     501           0 :       MapTy = Type::getInt16Ty(C->getContext());
     502      715542 :     else if (LoadTy->isFloatTy())
     503         185 :       MapTy = Type::getInt32Ty(C->getContext());
     504      715357 :     else if (LoadTy->isDoubleTy())
     505          57 :       MapTy = Type::getInt64Ty(C->getContext());
     506      715300 :     else if (LoadTy->isVectorTy()) {
     507      689365 :       MapTy = PointerType::getIntNTy(C->getContext(),
     508             :                                      DL.getTypeAllocSizeInBits(LoadTy));
     509             :     } else
     510             :       return nullptr;
     511             : 
     512      689607 :     C = FoldBitCast(C, MapTy->getPointerTo(AS), DL);
     513      689607 :     if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL))
     514          29 :       return FoldBitCast(Res, LoadTy, DL);
     515             :     return nullptr;
     516             :   }
     517             : 
     518    11641149 :   unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
     519    11641149 :   if (BytesLoaded > 32 || BytesLoaded == 0)
     520             :     return nullptr;
     521             : 
     522             :   GlobalValue *GVal;
     523             :   APInt OffsetAI;
     524    11641112 :   if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL))
     525             :     return nullptr;
     526             : 
     527    11640128 :   auto *GV = dyn_cast<GlobalVariable>(GVal);
     528    11640451 :   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
     529         333 :       !GV->getInitializer()->getType()->isSized())
     530    11639795 :     return nullptr;
     531             : 
     532             :   int64_t Offset = OffsetAI.getSExtValue();
     533         333 :   int64_t InitializerSize = DL.getTypeAllocSize(GV->getInitializer()->getType());
     534             : 
     535             :   // If we're not accessing anything in this constant, the result is undefined.
     536         333 :   if (Offset + BytesLoaded <= 0)
     537           3 :     return UndefValue::get(IntType);
     538             : 
     539             :   // If we're not accessing anything in this constant, the result is undefined.
     540         330 :   if (Offset >= InitializerSize)
     541           4 :     return UndefValue::get(IntType);
     542             : 
     543         326 :   unsigned char RawBytes[32] = {0};
     544             :   unsigned char *CurPtr = RawBytes;
     545             :   unsigned BytesLeft = BytesLoaded;
     546             : 
     547             :   // If we're loading off the beginning of the global, some bytes may be valid.
     548         326 :   if (Offset < 0) {
     549           3 :     CurPtr += -Offset;
     550           3 :     BytesLeft += Offset;
     551             :     Offset = 0;
     552             :   }
     553             : 
     554         652 :   if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL))
     555             :     return nullptr;
     556             : 
     557             :   APInt ResultVal = APInt(IntType->getBitWidth(), 0);
     558         326 :   if (DL.isLittleEndian()) {
     559         294 :     ResultVal = RawBytes[BytesLoaded - 1];
     560        1839 :     for (unsigned i = 1; i != BytesLoaded; ++i) {
     561        1545 :       ResultVal <<= 8;
     562        1545 :       ResultVal |= RawBytes[BytesLoaded - 1 - i];
     563             :     }
     564             :   } else {
     565          32 :     ResultVal = RawBytes[0];
     566         112 :     for (unsigned i = 1; i != BytesLoaded; ++i) {
     567          80 :       ResultVal <<= 8;
     568          80 :       ResultVal |= RawBytes[i];
     569             :     }
     570             :   }
     571             : 
     572         326 :   return ConstantInt::get(IntType->getContext(), ResultVal);
     573             : }
     574             : 
     575      705419 : Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy,
     576             :                                              const DataLayout &DL) {
     577             :   auto *SrcPtr = CE->getOperand(0);
     578      705419 :   auto *SrcPtrTy = dyn_cast<PointerType>(SrcPtr->getType());
     579             :   if (!SrcPtrTy)
     580             :     return nullptr;
     581      705419 :   Type *SrcTy = SrcPtrTy->getPointerElementType();
     582             : 
     583      705419 :   Constant *C = ConstantFoldLoadFromConstPtr(SrcPtr, SrcTy, DL);
     584      705419 :   if (!C)
     585             :     return nullptr;
     586             : 
     587         450 :   return llvm::ConstantFoldLoadThroughBitcast(C, DestTy, DL);
     588             : }
     589             : 
     590             : } // end anonymous namespace
     591             : 
     592    12472269 : Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
     593             :                                              const DataLayout &DL) {
     594             :   // First, try the easy cases:
     595             :   if (auto *GV = dyn_cast<GlobalVariable>(C))
     596      798948 :     if (GV->isConstant() && GV->hasDefinitiveInitializer())
     597         375 :       return GV->getInitializer();
     598             : 
     599             :   if (auto *GA = dyn_cast<GlobalAlias>(C))
     600             :     if (GA->getAliasee() && !GA->isInterposable())
     601           8 :       return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL);
     602             : 
     603             :   // If the loaded value isn't a constant expr, we can't handle it.
     604             :   auto *CE = dyn_cast<ConstantExpr>(C);
     605             :   if (!CE)
     606             :     return nullptr;
     607             : 
     608    11669776 :   if (CE->getOpcode() == Instruction::GetElementPtr) {
     609             :     if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
     610    10963120 :       if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
     611        2515 :         if (Constant *V =
     612        2515 :              ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
     613             :           return V;
     614             :       }
     615             :     }
     616             :   }
     617             : 
     618    11667266 :   if (CE->getOpcode() == Instruction::BitCast)
     619      705419 :     if (Constant *LoadedC = ConstantFoldLoadThroughBitcastExpr(CE, Ty, DL))
     620             :       return LoadedC;
     621             : 
      622             :   // Instead of loading a constant C string, use the corresponding integer value
      623             :   // directly if the string length is small enough.
     624    11667179 :   StringRef Str;
     625    11667179 :   if (getConstantStringInfo(CE, Str) && !Str.empty()) {
     626             :     size_t StrLen = Str.size();
     627         251 :     unsigned NumBits = Ty->getPrimitiveSizeInBits();
     628             :     // Replace load with immediate integer if the result is an integer or fp
     629             :     // value.
     630         251 :     if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
     631             :         (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
     632             :       APInt StrVal(NumBits, 0);
     633             :       APInt SingleChar(NumBits, 0);
     634          89 :       if (DL.isLittleEndian()) {
     635         462 :         for (unsigned char C : reverse(Str.bytes())) {
     636         375 :           SingleChar = static_cast<uint64_t>(C);
     637         375 :           StrVal = (StrVal << 8) | SingleChar;
     638             :         }
     639             :       } else {
     640           8 :         for (unsigned char C : Str.bytes()) {
     641           6 :           SingleChar = static_cast<uint64_t>(C);
     642           6 :           StrVal = (StrVal << 8) | SingleChar;
     643             :         }
     644             :         // Append NULL at the end.
     645           2 :         SingleChar = 0;
     646           2 :         StrVal = (StrVal << 8) | SingleChar;
     647             :       }
     648             : 
     649          89 :       Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
     650             :       if (Ty->isFloatingPointTy())
     651           1 :         Res = ConstantExpr::getBitCast(Res, Ty);
     652             :       return Res;
     653             :     }
     654             :   }
     655             : 
     656             :   // If this load comes from anywhere in a constant global, and if the global
     657             :   // is all undef or zero, we know what it loads.
     658    11667090 :   if (auto *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
     659    11666004 :     if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
     660         398 :       if (GV->getInitializer()->isNullValue())
     661           5 :         return Constant::getNullValue(Ty);
     662         393 :       if (isa<UndefValue>(GV->getInitializer()))
     663           1 :         return UndefValue::get(Ty);
     664             :     }
     665             :   }
     666             : 
     667             :   // Try hard to fold loads from bitcasted strange and non-type-safe things.
     668    11667084 :   return FoldReinterpretLoadFromConstPtr(CE, Ty, DL);
     669             : }
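
To make the overall flow of ConstantFoldLoadFromConstPtr concrete, here is a self-contained sketch under stated assumptions (the global @g, its initializer, and the helper name foldSecondElement are invented for illustration): it folds a hypothetical load of the second element of a constant array without creating any IR instructions.

    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/GlobalVariable.h"
    #include "llvm/IR/Module.h"
    #include "llvm/IR/Type.h"

    // Sketch: assuming module M contains
    //   @g = internal constant [2 x i32] [i32 7, i32 9]
    // this should fold to i32 9 via the constant-GEP path handled above.
    static llvm::Constant *foldSecondElement(llvm::Module &M) {
      llvm::GlobalVariable *G = M.getGlobalVariable("g", /*AllowInternal=*/true);
      if (!G || !G->hasDefinitiveInitializer())
        return nullptr;
      llvm::Type *I32 = llvm::Type::getInt32Ty(M.getContext());
      llvm::Constant *Idxs[] = {llvm::ConstantInt::get(I32, 0),
                                llvm::ConstantInt::get(I32, 1)};
      llvm::Constant *GEP =
          llvm::ConstantExpr::getGetElementPtr(G->getValueType(), G, Idxs);
      return llvm::ConstantFoldLoadFromConstPtr(GEP, I32, M.getDataLayout());
    }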
     670             : 
     671             : namespace {
     672             : 
     673    11171842 : Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout &DL) {
     674    11171842 :   if (LI->isVolatile()) return nullptr;
     675             : 
     676             :   if (auto *C = dyn_cast<Constant>(LI->getOperand(0)))
     677    11081450 :     return ConstantFoldLoadFromConstPtr(C, LI->getType(), DL);
     678             : 
     679             :   return nullptr;
     680             : }
     681             : 
     682             : /// One of Op0/Op1 is a constant expression.
     683             : /// Attempt to symbolically evaluate the result of a binary operator merging
      684             : /// these together, making use of the provided DataLayout (for example, to
      685             : /// fold a pointer difference over the same global into a constant).
     686       10167 : Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
     687             :                                     const DataLayout &DL) {
     688             :   // SROA
     689             : 
     690             :   // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
     691             :   // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
     692             :   // bits.
     693             : 
     694       10167 :   if (Opc == Instruction::And) {
     695        8361 :     KnownBits Known0 = computeKnownBits(Op0, DL);
     696        8361 :     KnownBits Known1 = computeKnownBits(Op1, DL);
     697        4647 :     if ((Known1.One | Known0.Zero).isAllOnesValue()) {
     698             :       // All the bits of Op0 that the 'and' could be masking are already zero.
     699         933 :       return Op0;
     700             :     }
     701        4646 :     if ((Known0.One | Known1.Zero).isAllOnesValue()) {
     702             :       // All the bits of Op1 that the 'and' could be masking are already zero.
     703             :       return Op1;
     704             :     }
     705             : 
     706             :     Known0.Zero |= Known1.Zero;
     707             :     Known0.One &= Known1.One;
     708        4646 :     if (Known0.isConstant())
     709         932 :       return ConstantInt::get(Op0->getType(), Known0.getConstant());
     710             :   }
     711             : 
     712             :   // If the constant expr is something like &A[123] - &A[4].f, fold this into a
     713             :   // constant.  This happens frequently when iterating over a global array.
     714        9234 :   if (Opc == Instruction::Sub) {
     715             :     GlobalValue *GV1, *GV2;
     716             :     APInt Offs1, Offs2;
     717             : 
     718         353 :     if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
     719         288 :       if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
     720         269 :         unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());
     721             : 
     722             :         // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
      723             :         // PtrToInt may change the bitwidth so we have to convert to the right size
     724             :         // first.
     725         538 :         return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
     726         538 :                                                 Offs2.zextOrTrunc(OpSize));
     727             :       }
     728             :   }
     729             : 
     730             :   return nullptr;
     731             : }
     732             : 
     733             : /// If array indices are not pointer-sized integers, explicitly cast them so
      734             : /// that they aren't implicitly cast by the getelementptr.
     735    16630660 : Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
     736             :                          Type *ResultTy, Optional<unsigned> InRangeIndex,
     737             :                          const DataLayout &DL, const TargetLibraryInfo *TLI) {
     738    16630660 :   Type *IntPtrTy = DL.getIntPtrType(ResultTy);
     739             :   Type *IntPtrScalarTy = IntPtrTy->getScalarType();
     740             : 
     741             :   bool Any = false;
     742             :   SmallVector<Constant*, 32> NewIdxs;
     743    49936098 :   for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
     744    16674778 :     if ((i == 1 ||
     745    16674778 :          !isa<StructType>(GetElementPtrInst::getIndexedType(
     746    66529304 :              SrcElemTy, Ops.slice(1, i - 1)))) &&
     747    66447732 :         Ops[i]->getType()->getScalarType() != IntPtrScalarTy) {
     748             :       Any = true;
     749             :       Type *NewType = Ops[i]->getType()->isVectorTy()
     750      483250 :                           ? IntPtrTy
     751             :                           : IntPtrTy->getScalarType();
     752      483250 :       NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
     753             :                                                                       true,
     754             :                                                                       NewType,
     755             :                                                                       true),
     756             :                                               Ops[i], NewType));
     757             :     } else
     758    65644376 :       NewIdxs.push_back(Ops[i]);
     759             :   }
     760             : 
     761    16630660 :   if (!Any)
     762             :     return nullptr;
     763             : 
     764      242454 :   Constant *C = ConstantExpr::getGetElementPtr(
     765             :       SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
     766      242454 :   if (Constant *Folded = ConstantFoldConstant(C, DL, TLI))
     767             :     C = Folded;
     768             : 
     769             :   return C;
     770             : }
     771             : 
     772             : /// Strip the pointer casts, but preserve the address space information.
     773    16389220 : Constant* StripPtrCastKeepAS(Constant* Ptr, Type *&ElemTy) {
     774             :   assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
     775    16389220 :   auto *OldPtrTy = cast<PointerType>(Ptr->getType());
     776             :   Ptr = Ptr->stripPointerCasts();
     777    16389220 :   auto *NewPtrTy = cast<PointerType>(Ptr->getType());
     778             : 
     779    32778440 :   ElemTy = NewPtrTy->getPointerElementType();
     780             : 
     781             :   // Preserve the address space number of the pointer.
     782    16389220 :   if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
     783          44 :     NewPtrTy = ElemTy->getPointerTo(OldPtrTy->getAddressSpace());
     784          44 :     Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
     785             :   }
     786    16389220 :   return Ptr;
     787             : }
     788             : 
     789             : /// If we can symbolically evaluate the GEP constant expression, do so.
     790    16630660 : Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
     791             :                                   ArrayRef<Constant *> Ops,
     792             :                                   const DataLayout &DL,
     793             :                                   const TargetLibraryInfo *TLI) {
     794             :   const GEPOperator *InnermostGEP = GEP;
     795             :   bool InBounds = GEP->isInBounds();
     796             : 
     797    16630660 :   Type *SrcElemTy = GEP->getSourceElementType();
     798    16630660 :   Type *ResElemTy = GEP->getResultElementType();
     799    16630660 :   Type *ResTy = GEP->getType();
     800    16630660 :   if (!SrcElemTy->isSized())
     801             :     return nullptr;
     802             : 
     803    16630660 :   if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
     804    16630660 :                                    GEP->getInRangeIndex(), DL, TLI))
     805             :     return C;
     806             : 
     807    16388206 :   Constant *Ptr = Ops[0];
     808    32776412 :   if (!Ptr->getType()->isPointerTy())
     809             :     return nullptr;
     810             : 
     811    16388204 :   Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());
     812             : 
     813             :   // If this is a constant expr gep that is effectively computing an
     814             :   // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
     815    49207493 :   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
     816    98458041 :       if (!isa<ConstantInt>(Ops[i])) {
     817             : 
     818             :         // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
     819             :         // "inttoptr (sub (ptrtoint Ptr), V)"
     820          58 :         if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
     821          14 :           auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
     822             :           assert((!CE || CE->getType() == IntPtrTy) &&
     823             :                  "CastGEPIndices didn't canonicalize index types!");
     824          19 :           if (CE && CE->getOpcode() == Instruction::Sub &&
     825           9 :               CE->getOperand(0)->isNullValue()) {
     826           9 :             Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
     827           9 :             Res = ConstantExpr::getSub(Res, CE->getOperand(1));
     828           9 :             Res = ConstantExpr::getIntToPtr(Res, ResTy);
     829           9 :             if (auto *FoldedRes = ConstantFoldConstant(Res, DL, TLI))
     830             :               Res = FoldedRes;
     831           9 :             return Res;
     832             :           }
     833             :         }
     834          49 :         return nullptr;
     835             :       }
     836             : 
     837    16388146 :   unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
     838             :   APInt Offset =
     839             :       APInt(BitWidth,
     840    16388146 :             DL.getIndexedOffsetInType(
     841             :                 SrcElemTy,
     842    16388146 :                 makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
     843    16388146 :   Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
     844             : 
     845             :   // If this is a GEP of a GEP, fold it all into a single GEP.
     846             :   while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
     847             :     InnermostGEP = GEP;
     848             :     InBounds &= GEP->isInBounds();
     849             : 
     850        2148 :     SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());
     851             : 
      852             :     // Do not try to incorporate the sub-GEP if some index is not a number.
     853             :     bool AllConstantInt = true;
     854        3269 :     for (Value *NestedOp : NestedOps)
     855        2195 :       if (!isa<ConstantInt>(NestedOp)) {
     856             :         AllConstantInt = false;
     857             :         break;
     858             :       }
     859        1074 :     if (!AllConstantInt)
     860             :       break;
     861             : 
     862             :     Ptr = cast<Constant>(GEP->getOperand(0));
     863        1074 :     SrcElemTy = GEP->getSourceElementType();
     864        2148 :     Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
     865        1074 :     Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
     866             :   }
     867             : 
     868             :   // If the base value for this address is a literal integer value, fold the
      869             :   // getelementptr to the resulting integer value cast to the pointer type.
     870             :   APInt BasePtr(BitWidth, 0);
     871             :   if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
     872        2995 :     if (CE->getOpcode() == Instruction::IntToPtr) {
     873             :       if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
     874         146 :         BasePtr = Base->getValue().zextOrTrunc(BitWidth);
     875             :     }
     876             :   }
     877             : 
     878    16388146 :   auto *PTy = cast<PointerType>(Ptr->getType());
     879    16389654 :   if ((Ptr->isNullValue() || BasePtr != 0) &&
     880        1508 :       !DL.isNonIntegralPointerType(PTy)) {
     881        3012 :     Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
     882        1506 :     return ConstantExpr::getIntToPtr(C, ResTy);
     883             :   }
     884             : 
     885             :   // Otherwise form a regular getelementptr. Recompute the indices so that
     886             :   // we eliminate over-indexing of the notional static type array bounds.
     887             :   // This makes it easy to determine if the getelementptr is "inbounds".
     888             :   // Also, this helps GlobalOpt do SROA on GlobalVariables.
     889             :   Type *Ty = PTy;
     890             :   SmallVector<Constant *, 32> NewIdxs;
     891             : 
     892             :   do {
     893    32819689 :     if (!Ty->isStructTy()) {
     894    32739965 :       if (Ty->isPointerTy()) {
     895             :         // The only pointer indexing we'll do is on the first index of the GEP.
     896    16386715 :         if (!NewIdxs.empty())
     897             :           break;
     898             : 
     899    16386640 :         Ty = SrcElemTy;
     900             : 
     901             :         // Only handle pointers to sized types, not pointers to functions.
     902    16386640 :         if (!Ty->isSized())
     903           1 :           return nullptr;
     904             :       } else if (auto *ATy = dyn_cast<SequentialType>(Ty)) {
     905    16352002 :         Ty = ATy->getElementType();
     906             :       } else {
     907             :         // We've reached some non-indexable type.
     908             :         break;
     909             :       }
     910             : 
     911             :       // Determine which element of the array the offset points into.
     912    32738641 :       APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty));
     913    32738641 :       if (ElemSize == 0) {
     914             :         // The element size is 0. This may be [0 x Ty]*, so just use a zero
     915             :         // index for this level and proceed to the next level to see if it can
     916             :         // accommodate the offset.
     917         186 :         NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
     918             :       } else {
     919             :         // The element size is non-zero divide the offset by the element
      920             :         // The element size is non-zero; divide the offset by the element
      921             :         // size (rounding down) to compute the index at this level.
     922    32738455 :         APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow);
     923    32738455 :         if (Overflow)
     924             :           break;
     925    32738455 :         Offset -= NewIdx * ElemSize;
     926    32738455 :         NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
     927             :       }
     928             :     } else {
     929             :       auto *STy = cast<StructType>(Ty);
     930             :       // If we end up with an offset that isn't valid for this struct type, we
     931             :       // can't re-form this GEP in a regular form, so bail out. The pointer
     932             :       // operand likely went through casts that are necessary to make the GEP
     933             :       // sensible.
     934       79724 :       const StructLayout &SL = *DL.getStructLayout(STy);
     935       79724 :       if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes()))
     936             :         break;
     937             : 
     938             :       // Determine which field of the struct the offset points into. The
     939             :       // getZExtValue is fine as we've already ensured that the offset is
     940             :       // within the range representable by the StructLayout API.
     941       79719 :       unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
     942       79719 :       NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
     943             :                                          ElIdx));
     944       79719 :       Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
     945       79719 :       Ty = STy->getTypeAtIndex(ElIdx);
     946             :     }
     947    32818360 :   } while (Ty != ResElemTy);
     948             : 
     949             :   // If we haven't used up the entire offset by descending the static
     950             :   // type, then the offset is pointing into the middle of an indivisible
     951             :   // member, so we can't simplify it.
     952    16386639 :   if (Offset != 0)
     953             :     return nullptr;
     954             : 
     955             :   // Preserve the inrange index from the innermost GEP if possible. We must
     956             :   // have calculated the same indices up to and including the inrange index.
     957             :   Optional<unsigned> InRangeIndex;
     958    16386511 :   if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
     959       31940 :     if (SrcElemTy == InnermostGEP->getSourceElementType() &&
     960       31940 :         NewIdxs.size() > *LastIRIndex) {
     961             :       InRangeIndex = LastIRIndex;
     962       95719 :       for (unsigned I = 0; I <= *LastIRIndex; ++I)
     963      191487 :         if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
     964             :           return nullptr;
     965             :     }
     966             : 
     967             :   // Create a GEP.
     968    32772922 :   Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
     969             :                                                InBounds, InRangeIndex);
     970             :   assert(C->getType()->getPointerElementType() == Ty &&
     971             :          "Computed GetElementPtr has unexpected type!");
     972             : 
     973             :   // If we ended up indexing a member with a type that doesn't match
     974             :   // the type of what the original indices indexed, add a cast.
     975    16386461 :   if (Ty != ResElemTy)
     976        1198 :     C = FoldBitCast(C, ResTy, DL);
     977             : 
     978             :   return C;
     979             : }
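// --- Added illustrative sketch (not part of the instrumented source above) ---
// The loop above rebuilds GEP indices from a raw byte offset: at an array
// level it divides by the element's allocation size and keeps the remainder,
// and at a struct level it picks the field containing the remaining offset.
// The standalone snippet below performs the same arithmetic for an assumed
// layout of [4 x { i32, i32 }] with an 8-byte struct; the sizes and the
// starting offset are illustrative assumptions, not data from this report.
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  const uint64_t StructSize = 8;           // assumed alloc size of { i32, i32 }
  const uint64_t FieldOffsets[2] = {0, 4}; // assumed field offsets
  uint64_t Offset = 20;                    // byte offset into the array
  std::vector<uint64_t> Idxs;

  // Array level: index = offset / element size; carry the remainder down.
  Idxs.push_back(Offset / StructSize);     // -> 2
  Offset %= StructSize;                    // -> 4

  // Struct level: choose the field whose range contains the leftover offset.
  unsigned Field = (Offset >= FieldOffsets[1]) ? 1u : 0u;
  Idxs.push_back(Field);                   // -> 1
  Offset -= FieldOffsets[Field];           // -> 0, offset fully consumed

  for (uint64_t I : Idxs)
    std::cout << I << ' ';
  std::cout << "(leftover offset " << Offset << ")\n";
  return 0;
}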
     980             : 
     981             : /// Attempt to constant fold an instruction with the
     982             : /// specified opcode and operands.  If successful, the constant result is
      983             : /// returned; if not, null is returned.  Note that this function can fail when
     984             : /// attempting to fold instructions like loads and stores, which have no
     985             : /// constant expression form.
     986    22728454 : Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
     987             :                                        ArrayRef<Constant *> Ops,
     988             :                                        const DataLayout &DL,
     989             :                                        const TargetLibraryInfo *TLI) {
     990    22728454 :   Type *DestTy = InstOrCE->getType();
     991             : 
     992             :   // Handle easy binops first.
     993    22728454 :   if (Instruction::isBinaryOp(Opcode))
     994       29669 :     return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);
     995             : 
     996    22698785 :   if (Instruction::isCast(Opcode))
     997     1056063 :     return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);
     998             : 
     999             :   if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    1000    16630660 :     if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
    1001             :       return C;
    1002             : 
    1003         460 :     return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
    1004             :                                           Ops.slice(1), GEP->isInBounds(),
    1005             :                                           GEP->getInRangeIndex());
    1006             :   }
    1007             : 
    1008             :   if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    1009         337 :     return CE->getWithOperands(Ops);
    1010             : 
    1011     5011725 :   switch (Opcode) {
    1012             :   default: return nullptr;
    1013             :   case Instruction::ICmp:
    1014             :   case Instruction::FCmp: llvm_unreachable("Invalid for compares");
    1015       73622 :   case Instruction::Call:
    1016       73622 :     if (auto *F = dyn_cast<Function>(Ops.back())) {
    1017             :       ImmutableCallSite CS(cast<CallInst>(InstOrCE));
    1018       73524 :       if (canConstantFoldCallTo(CS, F))
    1019         779 :         return ConstantFoldCall(CS, F, Ops.slice(0, Ops.size() - 1), TLI);
    1020             :     }
    1021             :     return nullptr;
    1022         958 :   case Instruction::Select:
    1023         958 :     return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
    1024          47 :   case Instruction::ExtractElement:
    1025          47 :     return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
    1026         119 :   case Instruction::InsertElement:
    1027         119 :     return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
    1028          31 :   case Instruction::ShuffleVector:
    1029          31 :     return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
    1030             :   }
    1031             : }
    1032             : 
    1033             : } // end anonymous namespace
    1034             : 
    1035             : //===----------------------------------------------------------------------===//
    1036             : // Constant Folding public APIs
    1037             : //===----------------------------------------------------------------------===//
    1038             : 
    1039             : namespace {
    1040             : 
    1041             : Constant *
    1042    20328073 : ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
    1043             :                          const TargetLibraryInfo *TLI,
    1044             :                          SmallDenseMap<Constant *, Constant *> &FoldedOps) {
    1045    20328073 :   if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    1046             :     return nullptr;
    1047             : 
    1048             :   SmallVector<Constant *, 8> Ops;
    1049    86431316 :   for (const Use &NewU : C->operands()) {
    1050    51032504 :     auto *NewC = cast<Constant>(&NewU);
    1051             :     // Recursively fold the ConstantExpr's operands. If we have already folded
    1052             :     // a ConstantExpr, we don't have to process it again.
    1053    51032504 :     if (isa<ConstantVector>(NewC) || isa<ConstantExpr>(NewC)) {
    1054      117250 :       auto It = FoldedOps.find(NewC);
    1055      117250 :       if (It == FoldedOps.end()) {
    1056      111867 :         if (auto *FoldedC =
    1057      111867 :                 ConstantFoldConstantImpl(NewC, DL, TLI, FoldedOps)) {
    1058      111867 :           FoldedOps.insert({NewC, FoldedC});
    1059      111867 :           NewC = FoldedC;
    1060             :         } else {
    1061           0 :           FoldedOps.insert({NewC, NewC});
    1062             :         }
    1063             :       } else {
    1064        5383 :         NewC = It->second;
    1065             :       }
    1066             :     }
    1067    51032504 :     Ops.push_back(NewC);
    1068             :   }
    1069             : 
    1070             :   if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    1071    17695344 :     if (CE->isCompare())
    1072        2089 :       return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
    1073        2089 :                                              DL, TLI);
    1074             : 
    1075    17693255 :     return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
    1076             :   }
    1077             : 
    1078             :   assert(isa<ConstantVector>(C));
    1079        4062 :   return ConstantVector::get(Ops);
    1080             : }
    1081             : 
    1082             : } // end anonymous namespace
    1083             : 
    1084    40961770 : Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
    1085             :                                         const TargetLibraryInfo *TLI) {
    1086             :   // Handle PHI nodes quickly here...
    1087             :   if (auto *PN = dyn_cast<PHINode>(I)) {
    1088             :     Constant *CommonValue = nullptr;
    1089             : 
    1090             :     SmallDenseMap<Constant *, Constant *> FoldedOps;
    1091     2265867 :     for (Value *Incoming : PN->incoming_values()) {
    1092             :       // If the incoming value is undef then skip it.  Note that while we could
     1093             :       // skip the value if it is equal to the phi node itself, we choose not to,
    1094             :       // because that would break the rule that constant folding only applies if
    1095             :       // all operands are constants.
    1096     2265674 :       if (isa<UndefValue>(Incoming))
    1097             :         continue;
    1098             :       // If the incoming value is not a constant, then give up.
    1099             :       auto *C = dyn_cast<Constant>(Incoming);
    1100             :       if (!C)
    1101             :         return nullptr;
    1102             :       // Fold the PHI's operands.
    1103     2093799 :       if (auto *FoldedC = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps))
    1104             :         C = FoldedC;
    1105             :       // If the incoming value is a different constant to
    1106             :       // the one we saw previously, then give up.
    1107     2093799 :       if (CommonValue && C != CommonValue)
    1108             :         return nullptr;
    1109             :       CommonValue = C;
    1110             :     }
    1111             : 
    1112             :     // If we reach here, all incoming values are the same constant or undef.
    1113         193 :     return CommonValue ? CommonValue : UndefValue::get(PN->getType());
    1114             :   }
    1115             : 
     1116             :   // Scan the operand list, checking to see if they are all constants; if so,
    1117             :   // hand off to ConstantFoldInstOperandsImpl.
    1118    79638286 :   if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    1119             :     return nullptr;
    1120             : 
    1121             :   SmallDenseMap<Constant *, Constant *> FoldedOps;
    1122             :   SmallVector<Constant *, 8> Ops;
    1123    26782161 :   for (const Use &OpU : I->operands()) {
    1124    13195495 :     auto *Op = cast<Constant>(&OpU);
    1125             :     // Fold the Instruction's operands.
    1126    13195495 :     if (auto *FoldedOp = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps))
    1127    11304769 :       Op = FoldedOp;
    1128             : 
    1129    13195495 :     Ops.push_back(Op);
    1130             :   }
    1131             : 
    1132             :   if (const auto *CI = dyn_cast<CmpInst>(I))
    1133        2096 :     return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
    1134        1048 :                                            DL, TLI);
    1135             : 
    1136             :   if (const auto *LI = dyn_cast<LoadInst>(I))
    1137    11171842 :     return ConstantFoldLoadInst(LI, DL);
    1138             : 
    1139             :   if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
    1140          31 :     return ConstantExpr::getInsertValue(
    1141             :                                 cast<Constant>(IVI->getAggregateOperand()),
    1142             :                                 cast<Constant>(IVI->getInsertedValueOperand()),
    1143          31 :                                 IVI->getIndices());
    1144             :   }
    1145             : 
    1146             :   if (auto *EVI = dyn_cast<ExtractValueInst>(I)) {
    1147          55 :     return ConstantExpr::getExtractValue(
    1148             :                                     cast<Constant>(EVI->getAggregateOperand()),
    1149          55 :                                     EVI->getIndices());
    1150             :   }
    1151             : 
    1152     2413690 :   return ConstantFoldInstOperands(I, Ops, DL, TLI);
    1153             : }
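// --- Added usage sketch (hedged; assumes an LLVM build of roughly this
// vintage is linked, and the module name and value names are made up) ---
// ConstantFoldInstruction is handed a free-standing add whose operands are
// already constants; with the module's default DataLayout it should fold to
// the constant i32 5.
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("fold-demo", Ctx);
  Type *I32 = Type::getInt32Ty(Ctx);
  Constant *A = ConstantInt::get(I32, 2);
  Constant *B = ConstantInt::get(I32, 3);

  // Create the instruction directly; it is never inserted into a basic block.
  Instruction *Add = BinaryOperator::Create(Instruction::Add, A, B, "sum");
  if (Constant *Folded = ConstantFoldInstruction(Add, M.getDataLayout(), nullptr))
    Folded->print(outs());               // expected: i32 5
  Add->deleteValue();
  return 0;
}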
    1154             : 
    1155     4926912 : Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
    1156             :                                      const TargetLibraryInfo *TLI) {
    1157             :   SmallDenseMap<Constant *, Constant *> FoldedOps;
    1158     4926912 :   return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
    1159             : }
    1160             : 
    1161     5035199 : Constant *llvm::ConstantFoldInstOperands(Instruction *I,
    1162             :                                          ArrayRef<Constant *> Ops,
    1163             :                                          const DataLayout &DL,
    1164             :                                          const TargetLibraryInfo *TLI) {
    1165     5035199 :   return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
    1166             : }
    1167             : 
    1168       70958 : Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
    1169             :                                                 Constant *Ops0, Constant *Ops1,
    1170             :                                                 const DataLayout &DL,
    1171             :                                                 const TargetLibraryInfo *TLI) {
    1172             :   // fold: icmp (inttoptr x), null         -> icmp x, 0
    1173             :   // fold: icmp null, (inttoptr x)         -> icmp 0, x
    1174             :   // fold: icmp (ptrtoint x), 0            -> icmp x, null
    1175             :   // fold: icmp 0, (ptrtoint x)            -> icmp null, x
    1176             :   // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
    1177             :   // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
    1178             :   //
     1179             :   // FIXME: The following comment is out of date and the DataLayout is here now.
    1180             :   // ConstantExpr::getCompare cannot do this, because it doesn't have DL
    1181             :   // around to know if bit truncation is happening.
    1182             :   if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    1183        7487 :     if (Ops1->isNullValue()) {
    1184        6549 :       if (CE0->getOpcode() == Instruction::IntToPtr) {
    1185         165 :         Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
    1186             :         // Convert the integer value to the right size to ensure we get the
    1187             :         // proper extension or truncation.
    1188         165 :         Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
    1189             :                                                    IntPtrTy, false);
    1190         165 :         Constant *Null = Constant::getNullValue(C->getType());
    1191         165 :         return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
    1192             :       }
    1193             : 
     1194             :       // Only do this transformation if the int is the same size as a pointer; otherwise
    1195             :       // there is a truncation or extension that we aren't modeling.
    1196        6384 :       if (CE0->getOpcode() == Instruction::PtrToInt) {
    1197          60 :         Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
    1198          60 :         if (CE0->getType() == IntPtrTy) {
    1199             :           Constant *C = CE0->getOperand(0);
    1200          18 :           Constant *Null = Constant::getNullValue(C->getType());
    1201          18 :           return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
    1202             :         }
    1203             :       }
    1204             :     }
    1205             : 
    1206             :     if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
    1207         768 :       if (CE0->getOpcode() == CE1->getOpcode()) {
    1208         469 :         if (CE0->getOpcode() == Instruction::IntToPtr) {
    1209          12 :           Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
    1210             : 
    1211             :           // Convert the integer value to the right size to ensure we get the
    1212             :           // proper extension or truncation.
    1213          12 :           Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
    1214             :                                                       IntPtrTy, false);
    1215          12 :           Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
    1216             :                                                       IntPtrTy, false);
    1217          12 :           return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
    1218             :         }
    1219             : 
     1220             :         // Only do this transformation if the int is the same size as a pointer; otherwise
    1221             :         // there is a truncation or extension that we aren't modeling.
    1222         457 :         if (CE0->getOpcode() == Instruction::PtrToInt) {
    1223          27 :           Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
    1224          27 :           if (CE0->getType() == IntPtrTy &&
    1225          27 :               CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
    1226             :             return ConstantFoldCompareInstOperands(
    1227             :                 Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
    1228             :           }
    1229             :         }
    1230             :       }
    1231             :     }
    1232             : 
    1233             :     // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    1234             :     // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    1235        6547 :     if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
    1236        7303 :         CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
    1237          20 :       Constant *LHS = ConstantFoldCompareInstOperands(
    1238             :           Predicate, CE0->getOperand(0), Ops1, DL, TLI);
    1239          20 :       Constant *RHS = ConstantFoldCompareInstOperands(
    1240             :           Predicate, CE0->getOperand(1), Ops1, DL, TLI);
    1241             :       unsigned OpC =
    1242          20 :         Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
    1243          20 :       return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
    1244             :     }
    1245       63780 :   } else if (isa<ConstantExpr>(Ops1)) {
    1246             :     // If RHS is a constant expression, but the left side isn't, swap the
    1247             :     // operands and try again.
    1248         105 :     Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate);
    1249         105 :     return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
    1250             :   }
    1251             : 
    1252       70938 :   return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
    1253             : }
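// --- Added usage sketch (hedged; names and the chosen constant are
// assumptions for illustration) ---
// This exercises the "icmp (inttoptr x), null -> icmp x, 0" rewrite listed in
// the comment at the top of the function: with the module's default 64-bit
// pointers, comparing inttoptr(i64 42) against null should fold to i1 false.
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("icmp-demo", Ctx);
  const DataLayout &DL = M.getDataLayout();

  Type *I64 = Type::getInt64Ty(Ctx);
  PointerType *I8Ptr = Type::getInt8PtrTy(Ctx);
  Constant *P = ConstantExpr::getIntToPtr(ConstantInt::get(I64, 42), I8Ptr);
  Constant *Null = ConstantPointerNull::get(I8Ptr);

  if (Constant *R = ConstantFoldCompareInstOperands(CmpInst::ICMP_EQ, P, Null,
                                                    DL, /*TLI=*/nullptr))
    R->print(outs());                    // expected: i1 false
  return 0;
}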
    1254             : 
    1255      197671 : Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
    1256             :                                              Constant *RHS,
    1257             :                                              const DataLayout &DL) {
    1258             :   assert(Instruction::isBinaryOp(Opcode));
    1259      197671 :   if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    1260       10167 :     if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
    1261             :       return C;
    1262             : 
    1263      196469 :   return ConstantExpr::get(Opcode, LHS, RHS);
    1264             : }
    1265             : 
    1266     1098493 : Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
    1267             :                                         Type *DestTy, const DataLayout &DL) {
    1268             :   assert(Instruction::isCast(Opcode));
    1269     1098493 :   switch (Opcode) {
    1270           0 :   default:
    1271           0 :     llvm_unreachable("Missing case");
    1272             :   case Instruction::PtrToInt:
     1273             :     // If the input is an inttoptr, eliminate the pair.  This requires knowing
    1274             :     // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    1275             :     if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    1276        5351 :       if (CE->getOpcode() == Instruction::IntToPtr) {
    1277             :         Constant *Input = CE->getOperand(0);
    1278          69 :         unsigned InWidth = Input->getType()->getScalarSizeInBits();
    1279          69 :         unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
    1280          69 :         if (PtrWidth < InWidth) {
    1281             :           Constant *Mask =
    1282           2 :             ConstantInt::get(CE->getContext(),
    1283           2 :                              APInt::getLowBitsSet(InWidth, PtrWidth));
    1284           2 :           Input = ConstantExpr::getAnd(Input, Mask);
    1285             :         }
    1286             :         // Do a zext or trunc to get to the dest size.
    1287          69 :         return ConstantExpr::getIntegerCast(Input, DestTy, false);
    1288             :       }
    1289             :     }
    1290        9391 :     return ConstantExpr::getCast(Opcode, C, DestTy);
    1291             :   case Instruction::IntToPtr:
    1292             :     // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    1293             :     // the int size is >= the ptr size and the address spaces are the same.
    1294             :     // This requires knowing the width of a pointer, so it can't be done in
    1295             :     // ConstantExpr::getCast.
    1296             :     if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    1297        4051 :       if (CE->getOpcode() == Instruction::PtrToInt) {
    1298             :         Constant *SrcPtr = CE->getOperand(0);
    1299        1063 :         unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
    1300        1063 :         unsigned MidIntSize = CE->getType()->getScalarSizeInBits();
    1301             : 
    1302        1063 :         if (MidIntSize >= SrcPtrSize) {
    1303        1061 :           unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
    1304        1061 :           if (SrcAS == DestTy->getPointerAddressSpace())
    1305        1057 :             return FoldBitCast(CE->getOperand(0), DestTy, DL);
    1306             :         }
    1307             :       }
    1308             :     }
    1309             : 
    1310        5891 :     return ConstantExpr::getCast(Opcode, C, DestTy);
    1311       11541 :   case Instruction::Trunc:
    1312             :   case Instruction::ZExt:
    1313             :   case Instruction::SExt:
    1314             :   case Instruction::FPTrunc:
    1315             :   case Instruction::FPExt:
    1316             :   case Instruction::UIToFP:
    1317             :   case Instruction::SIToFP:
    1318             :   case Instruction::FPToUI:
    1319             :   case Instruction::FPToSI:
    1320             :   case Instruction::AddrSpaceCast:
    1321       11541 :       return ConstantExpr::getCast(Opcode, C, DestTy);
    1322     1070544 :   case Instruction::BitCast:
    1323     1070544 :     return FoldBitCast(C, DestTy, DL);
    1324             :   }
    1325             : }
    1326             : 
    1327        2732 : Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
    1328             :                                                        ConstantExpr *CE) {
    1329        2732 :   if (!CE->getOperand(1)->isNullValue())
    1330             :     return nullptr;  // Do not allow stepping over the value!
    1331             : 
    1332             :   // Loop over all of the operands, tracking down which value we are
    1333             :   // addressing.
    1334        8057 :   for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    1335        5332 :     C = C->getAggregateElement(CE->getOperand(i));
    1336        5332 :     if (!C)
    1337             :       return nullptr;
    1338             :   }
    1339             :   return C;
    1340             : }
    1341             : 
    1342             : Constant *
    1343           0 : llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
    1344             :                                         ArrayRef<Constant *> Indices) {
    1345             :   // Loop over all of the operands, tracking down which value we are
    1346             :   // addressing.
    1347           0 :   for (Constant *Index : Indices) {
    1348           0 :     C = C->getAggregateElement(Index);
    1349           0 :     if (!C)
    1350             :       return nullptr;
    1351             :   }
    1352             :   return C;
    1353             : }
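// --- Added illustrative sketch (assumes LLVM core libraries are available;
// the array contents are made up) ---
// Both helpers above peel one index at a time with getAggregateElement; the
// snippet below shows a single such step on a constant data array.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
using namespace llvm;

int main() {
  LLVMContext Ctx;
  uint32_t Vals[] = {10, 20, 30};
  Constant *Arr = ConstantDataArray::get(Ctx, makeArrayRef(Vals, 3));

  // One step of the walk: index 1 selects the second element.
  if (Constant *Elt = Arr->getAggregateElement(1u))
    Elt->print(outs());                  // expected: i32 20
  return 0;
}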
    1354             : 
    1355             : //===----------------------------------------------------------------------===//
    1356             : //  Constant Folding for Calls
    1357             : //
    1358             : 
    1359     6365041 : bool llvm::canConstantFoldCallTo(ImmutableCallSite CS, const Function *F) {
    1360     6365041 :   if (CS.isNoBuiltin() || CS.isStrictFP())
    1361       82792 :     return false;
    1362     6282249 :   switch (F->getIntrinsicID()) {
    1363             :   case Intrinsic::fabs:
    1364             :   case Intrinsic::minnum:
    1365             :   case Intrinsic::maxnum:
    1366             :   case Intrinsic::log:
    1367             :   case Intrinsic::log2:
    1368             :   case Intrinsic::log10:
    1369             :   case Intrinsic::exp:
    1370             :   case Intrinsic::exp2:
    1371             :   case Intrinsic::floor:
    1372             :   case Intrinsic::ceil:
    1373             :   case Intrinsic::sqrt:
    1374             :   case Intrinsic::sin:
    1375             :   case Intrinsic::cos:
    1376             :   case Intrinsic::trunc:
    1377             :   case Intrinsic::rint:
    1378             :   case Intrinsic::nearbyint:
    1379             :   case Intrinsic::pow:
    1380             :   case Intrinsic::powi:
    1381             :   case Intrinsic::bswap:
    1382             :   case Intrinsic::ctpop:
    1383             :   case Intrinsic::ctlz:
    1384             :   case Intrinsic::cttz:
    1385             :   case Intrinsic::fshl:
    1386             :   case Intrinsic::fshr:
    1387             :   case Intrinsic::fma:
    1388             :   case Intrinsic::fmuladd:
    1389             :   case Intrinsic::copysign:
    1390             :   case Intrinsic::launder_invariant_group:
    1391             :   case Intrinsic::strip_invariant_group:
    1392             :   case Intrinsic::round:
    1393             :   case Intrinsic::masked_load:
    1394             :   case Intrinsic::sadd_with_overflow:
    1395             :   case Intrinsic::uadd_with_overflow:
    1396             :   case Intrinsic::ssub_with_overflow:
    1397             :   case Intrinsic::usub_with_overflow:
    1398             :   case Intrinsic::smul_with_overflow:
    1399             :   case Intrinsic::umul_with_overflow:
    1400             :   case Intrinsic::convert_from_fp16:
    1401             :   case Intrinsic::convert_to_fp16:
    1402             :   case Intrinsic::bitreverse:
    1403             :   case Intrinsic::x86_sse_cvtss2si:
    1404             :   case Intrinsic::x86_sse_cvtss2si64:
    1405             :   case Intrinsic::x86_sse_cvttss2si:
    1406             :   case Intrinsic::x86_sse_cvttss2si64:
    1407             :   case Intrinsic::x86_sse2_cvtsd2si:
    1408             :   case Intrinsic::x86_sse2_cvtsd2si64:
    1409             :   case Intrinsic::x86_sse2_cvttsd2si:
    1410             :   case Intrinsic::x86_sse2_cvttsd2si64:
    1411             :   case Intrinsic::x86_avx512_vcvtss2si32:
    1412             :   case Intrinsic::x86_avx512_vcvtss2si64:
    1413             :   case Intrinsic::x86_avx512_cvttss2si:
    1414             :   case Intrinsic::x86_avx512_cvttss2si64:
    1415             :   case Intrinsic::x86_avx512_vcvtsd2si32:
    1416             :   case Intrinsic::x86_avx512_vcvtsd2si64:
    1417             :   case Intrinsic::x86_avx512_cvttsd2si:
    1418             :   case Intrinsic::x86_avx512_cvttsd2si64:
    1419             :   case Intrinsic::x86_avx512_vcvtss2usi32:
    1420             :   case Intrinsic::x86_avx512_vcvtss2usi64:
    1421             :   case Intrinsic::x86_avx512_cvttss2usi:
    1422             :   case Intrinsic::x86_avx512_cvttss2usi64:
    1423             :   case Intrinsic::x86_avx512_vcvtsd2usi32:
    1424             :   case Intrinsic::x86_avx512_vcvtsd2usi64:
    1425             :   case Intrinsic::x86_avx512_cvttsd2usi:
    1426             :   case Intrinsic::x86_avx512_cvttsd2usi64:
    1427             :     return true;
    1428     2775310 :   default:
    1429     2775310 :     return false;
    1430             :   case Intrinsic::not_intrinsic: break;
    1431             :   }
    1432             : 
    1433     3472153 :   if (!F->hasName())
    1434             :     return false;
    1435     3472153 :   StringRef Name = F->getName();
    1436             : 
     1437             :   // In these cases, checking the length is required.  We don't want to return
     1438             :   // true for a name like "cos\0blah", which strcmp would consider equal to
     1439             :   // "cos" but which has length 8.
    1440     3472153 :   switch (Name[0]) {
    1441             :   default:
    1442             :     return false;
    1443             :   case 'a':
    1444             :     return Name == "acos" || Name == "asin" || Name == "atan" ||
    1445             :            Name == "atan2" || Name == "acosf" || Name == "asinf" ||
    1446             :            Name == "atanf" || Name == "atan2f";
    1447             :   case 'c':
    1448             :     return Name == "ceil" || Name == "cos" || Name == "cosh" ||
    1449             :            Name == "ceilf" || Name == "cosf" || Name == "coshf";
    1450             :   case 'e':
    1451             :     return Name == "exp" || Name == "exp2" || Name == "expf" || Name == "exp2f";
    1452             :   case 'f':
    1453             :     return Name == "fabs" || Name == "floor" || Name == "fmod" ||
    1454             :            Name == "fabsf" || Name == "floorf" || Name == "fmodf";
    1455             :   case 'l':
    1456             :     return Name == "log" || Name == "log10" || Name == "logf" ||
    1457             :            Name == "log10f";
    1458             :   case 'p':
    1459             :     return Name == "pow" || Name == "powf";
    1460             :   case 'r':
    1461             :     return Name == "round" || Name == "roundf";
    1462             :   case 's':
    1463             :     return Name == "sin" || Name == "sinh" || Name == "sqrt" ||
    1464             :            Name == "sinf" || Name == "sinhf" || Name == "sqrtf";
    1465             :   case 't':
    1466             :     return Name == "tan" || Name == "tanh" || Name == "tanf" || Name == "tanhf";
    1467             :   case '_':
    1468             : 
    1469             :     // Check for various function names that get used for the math functions
    1470             :     // when the header files are preprocessed with the macro
    1471             :     // __FINITE_MATH_ONLY__ enabled.
    1472             :     // The '12' here is the length of the shortest name that can match.
    1473             :     // We need to check the size before looking at Name[1] and Name[2]
    1474             :     // so we may as well check a limit that will eliminate mismatches.
    1475     3183815 :     if (Name.size() < 12 || Name[1] != '_')
    1476             :       return false;
    1477      537211 :     switch (Name[2]) {
    1478             :     default:
    1479             :       return false;
    1480             :     case 'a':
    1481             :       return Name == "__acos_finite" || Name == "__acosf_finite" ||
    1482             :              Name == "__asin_finite" || Name == "__asinf_finite" ||
    1483             :              Name == "__atan2_finite" || Name == "__atan2f_finite";
    1484             :     case 'c':
    1485             :       return Name == "__cosh_finite" || Name == "__coshf_finite";
    1486             :     case 'e':
    1487             :       return Name == "__exp_finite" || Name == "__expf_finite" ||
    1488             :              Name == "__exp2_finite" || Name == "__exp2f_finite";
    1489             :     case 'l':
    1490             :       return Name == "__log_finite" || Name == "__logf_finite" ||
    1491             :              Name == "__log10_finite" || Name == "__log10f_finite";
    1492             :     case 'p':
    1493             :       return Name == "__pow_finite" || Name == "__powf_finite";
    1494             :     case 's':
    1495             :       return Name == "__sinh_finite" || Name == "__sinhf_finite";
    1496             :     }
    1497             :   }
    1498             : }
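// --- Added sketch of the length-aware name comparison relied on above
// (StringRef's operator== compares lengths, unlike strcmp; the embedded-NUL
// name below is an illustrative assumption) ---
#include "llvm/ADT/StringRef.h"
#include <cstring>
#include <iostream>
using namespace llvm;

int main() {
  StringRef Odd("cos\0blah", 8);                 // length 8, embedded NUL
  std::cout << (Odd == "cos") << '\n';           // 0: lengths differ
  std::cout << (std::strcmp(Odd.data(), "cos") == 0) << '\n'; // 1: strcmp stops at NUL
  return 0;
}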
    1499             : 
    1500             : namespace {
    1501             : 
    1502         131 : Constant *GetConstantFoldFPValue(double V, Type *Ty) {
    1503         131 :   if (Ty->isHalfTy()) {
    1504           0 :     APFloat APF(V);
    1505             :     bool unused;
    1506           0 :     APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &unused);
    1507           0 :     return ConstantFP::get(Ty->getContext(), APF);
    1508             :   }
    1509         131 :   if (Ty->isFloatTy())
    1510         124 :     return ConstantFP::get(Ty->getContext(), APFloat((float)V));
    1511          69 :   if (Ty->isDoubleTy())
    1512         138 :     return ConstantFP::get(Ty->getContext(), APFloat(V));
    1513           0 :   llvm_unreachable("Can only constant fold half/float/double");
    1514             : }
    1515             : 
    1516             : /// Clear the floating-point exception state.
    1517             : inline void llvm_fenv_clearexcept() {
    1518             : #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
    1519         133 :   feclearexcept(FE_ALL_EXCEPT);
    1520             : #endif
    1521         133 :   errno = 0;
    1522             : }
    1523             : 
    1524             : /// Test if a floating-point exception was raised.
    1525         132 : inline bool llvm_fenv_testexcept() {
    1526         132 :   int errno_val = errno;
    1527         132 :   if (errno_val == ERANGE || errno_val == EDOM)
    1528             :     return true;
    1529             : #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
    1530         131 :   if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    1531           0 :     return true;
    1532             : #endif
    1533             :   return false;
    1534             : }
    1535             : 
    1536          95 : Constant *ConstantFoldFP(double (*NativeFP)(double), double V, Type *Ty) {
    1537             :   llvm_fenv_clearexcept();
    1538          95 :   V = NativeFP(V);
    1539          95 :   if (llvm_fenv_testexcept()) {
    1540             :     llvm_fenv_clearexcept();
    1541           1 :     return nullptr;
    1542             :   }
    1543             : 
    1544          94 :   return GetConstantFoldFPValue(V, Ty);
    1545             : }
    1546             : 
    1547          37 : Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), double V,
    1548             :                                double W, Type *Ty) {
    1549             :   llvm_fenv_clearexcept();
    1550          37 :   V = NativeFP(V, W);
    1551          37 :   if (llvm_fenv_testexcept()) {
    1552             :     llvm_fenv_clearexcept();
    1553           0 :     return nullptr;
    1554             :   }
    1555             : 
    1556          37 :   return GetConstantFoldFPValue(V, Ty);
    1557             : }
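// --- Added standalone sketch of the errno/FP-exception guard used by
// ConstantFoldFP and ConstantFoldBinaryFP above (assumes a hosted C++
// environment; the helper name checked_exp is made up for illustration) ---
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <iostream>

// Returns true and sets Out only if the host call neither set errno nor
// raised any floating-point exception other than "inexact".
static bool checked_exp(double X, double &Out) {
  std::feclearexcept(FE_ALL_EXCEPT);
  errno = 0;
  double R = std::exp(X);
  if (errno == ERANGE || errno == EDOM ||
      std::fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return false;
  Out = R;
  return true;
}

int main() {
  double V;
  if (checked_exp(1.0, V))
    std::cout << V << '\n';              // ~2.71828
  if (!checked_exp(1.0e6, V))            // exp(1e6) overflows a double; exact
    std::cout << "rejected\n";           // behavior depends on the host libm
  return 0;
}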
    1558             : 
    1559             : /// Attempt to fold an SSE floating point to integer conversion of a constant
    1560             : /// floating point. If roundTowardZero is false, the default IEEE rounding is
    1561             : /// used (toward nearest, ties to even). This matches the behavior of the
    1562             : /// non-truncating SSE instructions in the default rounding mode. The desired
    1563             : /// integer type Ty is used to select how many bits are available for the
     1564             : /// result. Returns null if the conversion cannot be performed; otherwise it
    1565             : /// returns the Constant value resulting from the conversion.
    1566         130 : Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
    1567             :                                       Type *Ty, bool IsSigned) {
    1568             :   // All of these conversion intrinsics form an integer of at most 64bits.
    1569             :   unsigned ResultWidth = Ty->getIntegerBitWidth();
    1570             :   assert(ResultWidth <= 64 &&
    1571             :          "Can only constant fold conversions to 64 and 32 bit ints");
    1572             : 
    1573             :   uint64_t UIntVal;
    1574         130 :   bool isExact = false;
    1575         130 :   APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero
    1576             :                                               : APFloat::rmNearestTiesToEven;
    1577             :   APFloat::opStatus status =
    1578         130 :       Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
    1579             :                            IsSigned, mode, &isExact);
    1580         130 :   if (status != APFloat::opOK &&
    1581         103 :       (!roundTowardZero || status != APFloat::opInexact))
    1582             :     return nullptr;
    1583          39 :   return ConstantInt::get(Ty, UIntVal, IsSigned);
    1584             : }
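// --- Added sketch of the two rounding behaviors handled above: the cvt
// intrinsics round to nearest (ties to even), the cvtt intrinsics truncate
// toward zero (assumes LLVM's ADT/Support libraries; values are illustrative) ---
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/ArrayRef.h"
#include <cstdint>
#include <iostream>
using namespace llvm;

static int64_t convertWith(const APFloat &V, APFloat::roundingMode RM) {
  uint64_t Bits = 0;
  bool IsExact = false;
  // Same call as above: a 64-bit signed conversion in the requested mode.
  V.convertToInteger(makeMutableArrayRef(Bits), 64, /*IsSigned=*/true, RM,
                     &IsExact);
  return static_cast<int64_t>(Bits);
}

int main() {
  APFloat A(2.5), B(-1.7);
  std::cout << convertWith(A, APFloat::rmNearestTiesToEven) << '\n'; // 2
  std::cout << convertWith(A, APFloat::rmTowardZero) << '\n';        // 2
  std::cout << convertWith(B, APFloat::rmNearestTiesToEven) << '\n'; // -2
  std::cout << convertWith(B, APFloat::rmTowardZero) << '\n';        // -1
  return 0;
}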
    1585             : 
    1586         355 : double getValueAsDouble(ConstantFP *Op) {
    1587         355 :   Type *Ty = Op->getType();
    1588             : 
    1589         355 :   if (Ty->isFloatTy())
    1590         210 :     return Op->getValueAPF().convertToFloat();
    1591             : 
    1592         145 :   if (Ty->isDoubleTy())
    1593         145 :     return Op->getValueAPF().convertToDouble();
    1594             : 
    1595             :   bool unused;
    1596             :   APFloat APF = Op->getValueAPF();
    1597           0 :   APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
    1598           0 :   return APF.convertToDouble();
    1599             : }
    1600             : 
    1601        1015 : Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
    1602             :                                  ArrayRef<Constant *> Operands,
    1603             :                                  const TargetLibraryInfo *TLI,
    1604             :                                  ImmutableCallSite CS) {
    1605        1015 :   if (Operands.size() == 1) {
    1606         680 :     if (isa<UndefValue>(Operands[0])) {
    1607             :       // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN
    1608          10 :       if (IntrinsicID == Intrinsic::cos)
    1609           2 :         return Constant::getNullValue(Ty);
    1610          16 :       if (IntrinsicID == Intrinsic::bswap ||
    1611           8 :           IntrinsicID == Intrinsic::bitreverse ||
    1612           8 :           IntrinsicID == Intrinsic::launder_invariant_group ||
    1613             :           IntrinsicID == Intrinsic::strip_invariant_group)
    1614             :         return Operands[0];
    1615             :     }
    1616             : 
    1617         331 :     if (isa<ConstantPointerNull>(Operands[0])) {
    1618             :       // launder(null) == null == strip(null) iff in addrspace 0
    1619          38 :       if (IntrinsicID == Intrinsic::launder_invariant_group ||
    1620          19 :           IntrinsicID == Intrinsic::strip_invariant_group) {
    1621             :         // If instruction is not yet put in a basic block (e.g. when cloning
     1622             :         // If the instruction has not yet been put in a basic block (e.g. when
     1623             :         // cloning a function during inlining), the CS caller may not be
     1624             :         // available, so check CS's parent BB before querying CS.getCaller.
    1625          36 :         if (Caller &&
    1626          18 :             !NullPointerIsDefined(
    1627             :                 Caller, Operands[0]->getType()->getPointerAddressSpace())) {
    1628           2 :           return Operands[0];
    1629             :         }
    1630          17 :         return nullptr;
    1631             :       }
    1632             :     }
    1633             : 
    1634             :     if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
    1635         227 :       if (IntrinsicID == Intrinsic::convert_to_fp16) {
    1636             :         APFloat Val(Op->getValueAPF());
    1637             : 
    1638           5 :         bool lost = false;
    1639           5 :         Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
    1640             : 
    1641          10 :         return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
    1642             :       }
    1643             : 
    1644         222 :       if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
    1645             :         return nullptr;
    1646             : 
    1647         222 :       if (IntrinsicID == Intrinsic::round) {
    1648             :         APFloat V = Op->getValueAPF();
    1649          19 :         V.roundToIntegral(APFloat::rmNearestTiesToAway);
    1650          19 :         return ConstantFP::get(Ty->getContext(), V);
    1651             :       }
    1652             : 
    1653         203 :       if (IntrinsicID == Intrinsic::floor) {
    1654             :         APFloat V = Op->getValueAPF();
    1655          11 :         V.roundToIntegral(APFloat::rmTowardNegative);
    1656          11 :         return ConstantFP::get(Ty->getContext(), V);
    1657             :       }
    1658             : 
    1659         192 :       if (IntrinsicID == Intrinsic::ceil) {
    1660             :         APFloat V = Op->getValueAPF();
    1661          19 :         V.roundToIntegral(APFloat::rmTowardPositive);
    1662          19 :         return ConstantFP::get(Ty->getContext(), V);
    1663             :       }
    1664             : 
    1665         173 :       if (IntrinsicID == Intrinsic::trunc) {
    1666             :         APFloat V = Op->getValueAPF();
    1667           9 :         V.roundToIntegral(APFloat::rmTowardZero);
    1668           9 :         return ConstantFP::get(Ty->getContext(), V);
    1669             :       }
    1670             : 
    1671         164 :       if (IntrinsicID == Intrinsic::rint) {
    1672             :         APFloat V = Op->getValueAPF();
    1673           2 :         V.roundToIntegral(APFloat::rmNearestTiesToEven);
    1674           2 :         return ConstantFP::get(Ty->getContext(), V);
    1675             :       }
    1676             : 
    1677         162 :       if (IntrinsicID == Intrinsic::nearbyint) {
    1678             :         APFloat V = Op->getValueAPF();
    1679           9 :         V.roundToIntegral(APFloat::rmNearestTiesToEven);
    1680           9 :         return ConstantFP::get(Ty->getContext(), V);
    1681             :       }
    1682             : 
     1683             :       /// We only fold functions with finite arguments. Folding NaN or inf would
     1684             :       /// likely be rejected by the exception check anyway, and some host libms
     1685             :       /// have known bugs in how they raise exceptions.
    1686         153 :       if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
    1687             :         return nullptr;
    1688             : 
     1689             :       /// Currently APFloat versions of these functions do not exist, so we use
     1690             :       /// the host's native double versions.  Float versions are not called
     1691             :       /// directly, but for all of these functions (float)(f((double)arg)) ==
     1692             :       /// f(arg) holds.  Long double is not supported yet.
    1693         139 :       double V = getValueAsDouble(Op);
    1694             : 
    1695         139 :       switch (IntrinsicID) {
    1696             :         default: break;
    1697          33 :         case Intrinsic::fabs:
    1698          33 :           return ConstantFoldFP(fabs, V, Ty);
    1699           0 :         case Intrinsic::log2:
    1700           0 :           return ConstantFoldFP(Log2, V, Ty);
    1701           0 :         case Intrinsic::log:
    1702           0 :           return ConstantFoldFP(log, V, Ty);
    1703           0 :         case Intrinsic::log10:
    1704           0 :           return ConstantFoldFP(log10, V, Ty);
    1705           0 :         case Intrinsic::exp:
    1706           0 :           return ConstantFoldFP(exp, V, Ty);
    1707           0 :         case Intrinsic::exp2:
    1708           0 :           return ConstantFoldFP(exp2, V, Ty);
    1709           1 :         case Intrinsic::sin:
    1710           1 :           return ConstantFoldFP(sin, V, Ty);
    1711           1 :         case Intrinsic::cos:
    1712           1 :           return ConstantFoldFP(cos, V, Ty);
    1713           0 :         case Intrinsic::sqrt:
    1714           0 :           return ConstantFoldFP(sqrt, V, Ty);
    1715             :       }
    1716             : 
    1717         104 :       if (!TLI)
    1718             :         return nullptr;
    1719             : 
    1720             :       char NameKeyChar = Name[0];
    1721         102 :       if (Name[0] == '_' && Name.size() > 2 && Name[1] == '_')
    1722             :         NameKeyChar = Name[2];
    1723             : 
    1724         102 :       switch (NameKeyChar) {
    1725             :       case 'a':
    1726           6 :         if ((Name == "acos" && TLI->has(LibFunc_acos)) ||
    1727           6 :             (Name == "acosf" && TLI->has(LibFunc_acosf)) ||
    1728           2 :             (Name == "__acos_finite" && TLI->has(LibFunc_acos_finite)) ||
    1729           2 :             (Name == "__acosf_finite" && TLI->has(LibFunc_acosf_finite)))
    1730           6 :           return ConstantFoldFP(acos, V, Ty);
    1731           4 :         else if ((Name == "asin" && TLI->has(LibFunc_asin)) ||
    1732           4 :                  (Name == "asinf" && TLI->has(LibFunc_asinf)) ||
    1733           2 :                  (Name == "__asin_finite" && TLI->has(LibFunc_asin_finite)) ||
    1734           2 :                  (Name == "__asinf_finite" && TLI->has(LibFunc_asinf_finite)))
    1735           4 :           return ConstantFoldFP(asin, V, Ty);
    1736           4 :         else if ((Name == "atan" && TLI->has(LibFunc_atan)) ||
    1737           4 :                  (Name == "atanf" && TLI->has(LibFunc_atanf)))
    1738           2 :           return ConstantFoldFP(atan, V, Ty);
    1739             :         break;
    1740             :       case 'c':
    1741           4 :         if ((Name == "ceil" && TLI->has(LibFunc_ceil)) ||
    1742           4 :             (Name == "ceilf" && TLI->has(LibFunc_ceilf)))
    1743           2 :           return ConstantFoldFP(ceil, V, Ty);
    1744           6 :         else if ((Name == "cos" && TLI->has(LibFunc_cos)) ||
    1745           4 :                  (Name == "cosf" && TLI->has(LibFunc_cosf)))
    1746           3 :           return ConstantFoldFP(cos, V, Ty);
    1747           4 :         else if ((Name == "cosh" && TLI->has(LibFunc_cosh)) ||
    1748           4 :                  (Name == "coshf" && TLI->has(LibFunc_coshf)) ||
    1749           2 :                  (Name == "__cosh_finite" && TLI->has(LibFunc_cosh_finite)) ||
    1750           2 :                  (Name == "__coshf_finite" && TLI->has(LibFunc_coshf_finite)))
    1751           4 :           return ConstantFoldFP(cosh, V, Ty);
    1752             :         break;
    1753             :       case 'e':
    1754           4 :         if ((Name == "exp" && TLI->has(LibFunc_exp)) ||
    1755           4 :             (Name == "expf" && TLI->has(LibFunc_expf)) ||
    1756           2 :             (Name == "__exp_finite" && TLI->has(LibFunc_exp_finite)) ||
    1757           2 :             (Name == "__expf_finite" && TLI->has(LibFunc_expf_finite)))
    1758           4 :           return ConstantFoldFP(exp, V, Ty);
    1759           8 :         if ((Name == "exp2" && TLI->has(LibFunc_exp2)) ||
    1760           4 :             (Name == "exp2f" && TLI->has(LibFunc_exp2f)) ||
    1761           2 :             (Name == "__exp2_finite" && TLI->has(LibFunc_exp2_finite)) ||
    1762           2 :             (Name == "__exp2f_finite" && TLI->has(LibFunc_exp2f_finite)))
    1763             :           // Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
    1764             :           // C99 library.
    1765           5 :           return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
    1766             :         break;
    1767             :       case 'f':
    1768           8 :         if ((Name == "fabs" && TLI->has(LibFunc_fabs)) ||
    1769           4 :             (Name == "fabsf" && TLI->has(LibFunc_fabsf)))
    1770           4 :           return ConstantFoldFP(fabs, V, Ty);
    1771           4 :         else if ((Name == "floor" && TLI->has(LibFunc_floor)) ||
    1772           4 :                  (Name == "floorf" && TLI->has(LibFunc_floorf)))
    1773           2 :           return ConstantFoldFP(floor, V, Ty);
    1774             :         break;
    1775             :       case 'l':
    1776           4 :         if ((Name == "log" && V > 0 && TLI->has(LibFunc_log)) ||
    1777           2 :             (Name == "logf" && V > 0 && TLI->has(LibFunc_logf)) ||
    1778           1 :             (Name == "__log_finite" && V > 0 &&
    1779           1 :               TLI->has(LibFunc_log_finite)) ||
    1780           1 :             (Name == "__logf_finite" && V > 0 &&
    1781           1 :               TLI->has(LibFunc_logf_finite)))
    1782           6 :           return ConstantFoldFP(log, V, Ty);
    1783           2 :         else if ((Name == "log10" && V > 0 && TLI->has(LibFunc_log10)) ||
    1784           2 :                  (Name == "log10f" && V > 0 && TLI->has(LibFunc_log10f)) ||
    1785           1 :                  (Name == "__log10_finite" && V > 0 &&
    1786           1 :                    TLI->has(LibFunc_log10_finite)) ||
    1787           1 :                  (Name == "__log10f_finite" && V > 0 &&
    1788           1 :                    TLI->has(LibFunc_log10f_finite)))
    1789           4 :           return ConstantFoldFP(log10, V, Ty);
    1790             :         break;
    1791             :       case 'r':
    1792           4 :         if ((Name == "round" && TLI->has(LibFunc_round)) ||
    1793           4 :             (Name == "roundf" && TLI->has(LibFunc_roundf)))
    1794           2 :           return ConstantFoldFP(round, V, Ty);
    1795             :         break;
    1796             :       case 's':
    1797           8 :         if ((Name == "sin" && TLI->has(LibFunc_sin)) ||
    1798           4 :             (Name == "sinf" && TLI->has(LibFunc_sinf)))
    1799           4 :           return ConstantFoldFP(sin, V, Ty);
    1800           4 :         else if ((Name == "sinh" && TLI->has(LibFunc_sinh)) ||
    1801           4 :                  (Name == "sinhf" && TLI->has(LibFunc_sinhf)) ||
    1802           2 :                  (Name == "__sinh_finite" && TLI->has(LibFunc_sinh_finite)) ||
    1803           2 :                  (Name == "__sinhf_finite" && TLI->has(LibFunc_sinhf_finite)))
    1804           4 :           return ConstantFoldFP(sinh, V, Ty);
    1805           5 :         else if ((Name == "sqrt" && V >= 0 && TLI->has(LibFunc_sqrt)) ||
    1806           2 :                  (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc_sqrtf)))
    1807           5 :           return ConstantFoldFP(sqrt, V, Ty);
    1808             :         break;
    1809             :       case 't':
    1810           4 :         if ((Name == "tan" && TLI->has(LibFunc_tan)) ||
    1811           4 :             (Name == "tanf" && TLI->has(LibFunc_tanf)))
    1812           2 :           return ConstantFoldFP(tan, V, Ty);
    1813           4 :         else if ((Name == "tanh" && TLI->has(LibFunc_tanh)) ||
    1814           4 :                  (Name == "tanhf" && TLI->has(LibFunc_tanhf)))
    1815           2 :           return ConstantFoldFP(tanh, V, Ty);
    1816             :         break;
    1817             :       default:
    1818             :         break;
    1819             :       }
    1820             :       return nullptr;
    1821             :     }
    1822             : 
    1823             :     if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
    1824          36 :       switch (IntrinsicID) {
    1825             :       case Intrinsic::bswap:
    1826           8 :         return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
    1827             :       case Intrinsic::ctpop:
    1828           7 :         return ConstantInt::get(Ty, Op->getValue().countPopulation());
    1829             :       case Intrinsic::bitreverse:
    1830          30 :         return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
    1831             :       case Intrinsic::convert_from_fp16: {
    1832          10 :         APFloat Val(APFloat::IEEEhalf(), Op->getValue());
    1833             : 
    1834          10 :         bool lost = false;
    1835          10 :         APFloat::opStatus status = Val.convert(
    1836             :             Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);
    1837             : 
    1838             :         // Conversion is always precise.
    1839             :         (void)status;
    1840             :         assert(status == APFloat::opOK && !lost &&
    1841             :                "Precision lost during fp16 constfolding");
    1842             : 
    1843          10 :         return ConstantFP::get(Ty->getContext(), Val);
    1844             :       }
    1845             :       default:
    1846             :         return nullptr;
    1847             :       }
    1848             :     }
    1849             : 
     1850             :     // Support ConstantVector in case we have an undef at the top level.
    1851          49 :     if (isa<ConstantVector>(Operands[0]) ||
    1852             :         isa<ConstantDataVector>(Operands[0])) {
    1853             :       auto *Op = cast<Constant>(Operands[0]);
    1854             :       switch (IntrinsicID) {
    1855             :       default: break;
    1856          20 :       case Intrinsic::x86_sse_cvtss2si:
    1857             :       case Intrinsic::x86_sse_cvtss2si64:
    1858             :       case Intrinsic::x86_sse2_cvtsd2si:
    1859             :       case Intrinsic::x86_sse2_cvtsd2si64:
    1860             :         if (ConstantFP *FPOp =
    1861          20 :                 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
    1862          20 :           return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
    1863             :                                              /*roundTowardZero=*/false, Ty,
    1864          20 :                                              /*IsSigned*/true);
    1865             :         break;
    1866          20 :       case Intrinsic::x86_sse_cvttss2si:
    1867             :       case Intrinsic::x86_sse_cvttss2si64:
    1868             :       case Intrinsic::x86_sse2_cvttsd2si:
    1869             :       case Intrinsic::x86_sse2_cvttsd2si64:
    1870             :         if (ConstantFP *FPOp =
    1871          20 :                 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
    1872          20 :           return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
    1873             :                                              /*roundTowardZero=*/true, Ty,
    1874          20 :                                              /*IsSigned*/true);
    1875             :         break;
    1876             :       }
    1877             :     }
    1878             : 
    1879           9 :     return nullptr;
    1880             :   }
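
The ConstantInt branch above folds bswap, ctpop and bitreverse purely through APInt. A minimal sketch of those bit-level operations (assumes only LLVM's APInt header; the input value is illustrative):

    #include "llvm/ADT/APInt.h"
    #include <cassert>
    using llvm::APInt;

    int main() {
      APInt V(32, 0x12345678u);
      assert(V.byteSwap() == APInt(32, 0x78563412u));     // llvm.bswap
      assert(V.countPopulation() == 13);                  // llvm.ctpop
      assert(V.reverseBits() == APInt(32, 0x1E6A2C48u));  // llvm.bitreverse
      return 0;
    }
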
    1881             : 
    1882         675 :   if (Operands.size() == 2) {
    1883         611 :     if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    1884         112 :       if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
    1885             :         return nullptr;
    1886         112 :       double Op1V = getValueAsDouble(Op1);
    1887             : 
    1888         112 :       if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
    1889         102 :         if (Op2->getType() != Op1->getType())
    1890             :           return nullptr;
    1891             : 
    1892         102 :         double Op2V = getValueAsDouble(Op2);
    1893         102 :         if (IntrinsicID == Intrinsic::pow) {
    1894           2 :           return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
    1895             :         }
    1896         100 :         if (IntrinsicID == Intrinsic::copysign) {
    1897             :           APFloat V1 = Op1->getValueAPF();
    1898             :           const APFloat &V2 = Op2->getValueAPF();
    1899           6 :           V1.copySign(V2);
    1900           6 :           return ConstantFP::get(Ty->getContext(), V1);
    1901             :         }
    1902             : 
    1903          94 :         if (IntrinsicID == Intrinsic::minnum) {
    1904             :           const APFloat &C1 = Op1->getValueAPF();
    1905             :           const APFloat &C2 = Op2->getValueAPF();
    1906          62 :           return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
    1907             :         }
    1908             : 
    1909          63 :         if (IntrinsicID == Intrinsic::maxnum) {
    1910             :           const APFloat &C1 = Op1->getValueAPF();
    1911             :           const APFloat &C2 = Op2->getValueAPF();
    1912          56 :           return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
    1913             :         }
    1914             : 
    1915          35 :         if (!TLI)
    1916             :           return nullptr;
    1917          22 :         if ((Name == "pow" && TLI->has(LibFunc_pow)) ||
    1918          22 :             (Name == "powf" && TLI->has(LibFunc_powf)) ||
    1919           2 :             (Name == "__pow_finite" && TLI->has(LibFunc_pow_finite)) ||
    1920           2 :             (Name == "__powf_finite" && TLI->has(LibFunc_powf_finite)))
    1921          22 :           return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
    1922           4 :         if ((Name == "fmod" && TLI->has(LibFunc_fmod)) ||
    1923           6 :             (Name == "fmodf" && TLI->has(LibFunc_fmodf)))
    1924           3 :           return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
    1925           4 :         if ((Name == "atan2" && TLI->has(LibFunc_atan2)) ||
    1926           4 :             (Name == "atan2f" && TLI->has(LibFunc_atan2f)) ||
    1927           2 :             (Name == "__atan2_finite" && TLI->has(LibFunc_atan2_finite)) ||
    1928           2 :             (Name == "__atan2f_finite" && TLI->has(LibFunc_atan2f_finite)))
    1929           4 :           return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
    1930             :       } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
    1931           6 :         if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
    1932           0 :           return ConstantFP::get(Ty->getContext(),
    1933           0 :                                  APFloat((float)std::pow((float)Op1V,
    1934             :                                                  (int)Op2C->getZExtValue())));
    1935           6 :         if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
    1936           0 :           return ConstantFP::get(Ty->getContext(),
    1937           0 :                                  APFloat((float)std::pow((float)Op1V,
    1938             :                                                  (int)Op2C->getZExtValue())));
    1939           6 :         if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
    1940           6 :           return ConstantFP::get(Ty->getContext(),
    1941          18 :                                  APFloat((double)std::pow((double)Op1V,
    1942             :                                                    (int)Op2C->getZExtValue())));
    1943             :       }
    1944          10 :       return nullptr;
    1945             :     }
    1946             : 
    1947             :     if (auto *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
    1948         402 :       if (auto *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
    1949         402 :         switch (IntrinsicID) {
    1950             :         default: break;
    1951             :         case Intrinsic::sadd_with_overflow:
    1952             :         case Intrinsic::uadd_with_overflow:
    1953             :         case Intrinsic::ssub_with_overflow:
    1954             :         case Intrinsic::usub_with_overflow:
    1955             :         case Intrinsic::smul_with_overflow:
    1956             :         case Intrinsic::umul_with_overflow: {
    1957             :           APInt Res;
    1958             :           bool Overflow;
    1959             :           switch (IntrinsicID) {
    1960           0 :           default: llvm_unreachable("Invalid case");
    1961             :           case Intrinsic::sadd_with_overflow:
    1962           9 :             Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
    1963           9 :             break;
    1964             :           case Intrinsic::uadd_with_overflow:
    1965          10 :             Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow);
    1966          10 :             break;
    1967             :           case Intrinsic::ssub_with_overflow:
    1968           8 :             Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow);
    1969           8 :             break;
    1970             :           case Intrinsic::usub_with_overflow:
    1971           2 :             Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow);
    1972           2 :             break;
    1973             :           case Intrinsic::smul_with_overflow:
    1974           1 :             Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow);
    1975           1 :             break;
    1976             :           case Intrinsic::umul_with_overflow:
    1977           3 :             Res = Op1->getValue().umul_ov(Op2->getValue(), Overflow);
    1978           3 :             break;
    1979             :           }
    1980             :           Constant *Ops[] = {
    1981          33 :             ConstantInt::get(Ty->getContext(), Res),
    1982          33 :             ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
    1983          66 :           };
    1984          33 :           return ConstantStruct::get(cast<StructType>(Ty), Ops);
    1985             :         }
    1986             :         case Intrinsic::cttz:
    1987         293 :           if (Op2->isOne() && Op1->isZero()) // cttz(0, 1) is undef.
    1988          69 :             return UndefValue::get(Ty);
    1989          96 :           return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros());
    1990             :         case Intrinsic::ctlz:
    1991         362 :           if (Op2->isOne() && Op1->isZero()) // ctlz(0, 1) is undef.
    1992          74 :             return UndefValue::get(Ty);
    1993         130 :           return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
    1994             :         }
    1995             :       }
    1996             : 
    1997             :       return nullptr;
    1998             :     }
    1999             : 
     2000             :     // Support ConstantVector in case we have an Undef element at the top level.
    2001           7 :     if ((isa<ConstantVector>(Operands[0]) ||
    2002          90 :          isa<ConstantDataVector>(Operands[0])) &&
    2003             :         // Check for default rounding mode.
    2004             :         // FIXME: Support other rounding modes?
    2005          97 :         isa<ConstantInt>(Operands[1]) &&
    2006          90 :         cast<ConstantInt>(Operands[1])->getValue() == 4) {
    2007             :       auto *Op = cast<Constant>(Operands[0]);
    2008          90 :       switch (IntrinsicID) {
    2009             :       default: break;
    2010          24 :       case Intrinsic::x86_avx512_vcvtss2si32:
    2011             :       case Intrinsic::x86_avx512_vcvtss2si64:
    2012             :       case Intrinsic::x86_avx512_vcvtsd2si32:
    2013             :       case Intrinsic::x86_avx512_vcvtsd2si64:
    2014             :         if (ConstantFP *FPOp =
    2015          24 :                 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
    2016          24 :           return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
    2017             :                                              /*roundTowardZero=*/false, Ty,
    2018          24 :                                              /*IsSigned*/true);
    2019             :         break;
    2020          26 :       case Intrinsic::x86_avx512_vcvtss2usi32:
    2021             :       case Intrinsic::x86_avx512_vcvtss2usi64:
    2022             :       case Intrinsic::x86_avx512_vcvtsd2usi32:
    2023             :       case Intrinsic::x86_avx512_vcvtsd2usi64:
    2024             :         if (ConstantFP *FPOp =
    2025          26 :                 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
    2026          26 :           return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
    2027             :                                              /*roundTowardZero=*/false, Ty,
    2028          26 :                                              /*IsSigned*/false);
    2029             :         break;
    2030          20 :       case Intrinsic::x86_avx512_cvttss2si:
    2031             :       case Intrinsic::x86_avx512_cvttss2si64:
    2032             :       case Intrinsic::x86_avx512_cvttsd2si:
    2033             :       case Intrinsic::x86_avx512_cvttsd2si64:
    2034             :         if (ConstantFP *FPOp =
    2035          20 :                 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
    2036          20 :           return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
    2037             :                                              /*roundTowardZero=*/true, Ty,
    2038          20 :                                              /*IsSigned*/true);
    2039             :         break;
    2040          20 :       case Intrinsic::x86_avx512_cvttss2usi:
    2041             :       case Intrinsic::x86_avx512_cvttss2usi64:
    2042             :       case Intrinsic::x86_avx512_cvttsd2usi:
    2043             :       case Intrinsic::x86_avx512_cvttsd2usi64:
    2044             :         if (ConstantFP *FPOp =
    2045          20 :                 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
    2046          20 :           return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
    2047             :                                              /*roundTowardZero=*/true, Ty,
    2048          20 :                                              /*IsSigned*/false);
    2049             :         break;
    2050             :       }
    2051             :     }
    2052           7 :     return nullptr;
    2053             :   }
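
For the *.with.overflow intrinsics handled above, the fold computes the wrapped result and the overflow bit in one step via APInt's checked arithmetic, then packs them into the returned struct. A minimal sketch of that arithmetic (APInt only; i8 values chosen for illustration):

    #include "llvm/ADT/APInt.h"
    #include <cassert>
    using llvm::APInt;

    int main() {
      bool Overflow = false;
      // i8 sadd: 100 + 100 wraps to the bit pattern 0xC8 and reports
      // signed overflow.
      APInt Res = APInt(8, 100).sadd_ov(APInt(8, 100), Overflow);
      assert(Overflow && Res == APInt(8, 200));
      // i8 uadd: 200 still fits in an unsigned byte, so no overflow.
      Res = APInt(8, 100).uadd_ov(APInt(8, 100), Overflow);
      assert(!Overflow && Res == APInt(8, 200));
      return 0;
    }
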
    2054             : 
    2055          64 :   if (Operands.size() != 3)
    2056             :     return nullptr;
    2057             : 
    2058          47 :   if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    2059          22 :     if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
    2060          22 :       if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
    2061          22 :         switch (IntrinsicID) {
    2062             :         default: break;
    2063             :         case Intrinsic::fma:
    2064             :         case Intrinsic::fmuladd: {
    2065             :           APFloat V = Op1->getValueAPF();
    2066          22 :           APFloat::opStatus s = V.fusedMultiplyAdd(Op2->getValueAPF(),
    2067             :                                                    Op3->getValueAPF(),
    2068             :                                                    APFloat::rmNearestTiesToEven);
    2069          22 :           if (s != APFloat::opInvalidOp)
    2070          22 :             return ConstantFP::get(Ty->getContext(), V);
    2071             : 
    2072             :           return nullptr;
    2073             :         }
    2074             :         }
    2075             :       }
    2076             :     }
    2077             :   }
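
The fma/fmuladd case above folds through APFloat::fusedMultiplyAdd, which performs the multiply-add with a single rounding and is rejected only when the operation is invalid. A minimal sketch (assumes LLVM's APFloat header; the constants are illustrative):

    #include "llvm/ADT/APFloat.h"
    #include <cassert>
    using llvm::APFloat;

    int main() {
      // Single rounding step: 2.0 * 3.0 + 0.5 == 6.5, exactly representable.
      APFloat V(2.0);
      APFloat::opStatus S = V.fusedMultiplyAdd(
          APFloat(3.0), APFloat(0.5), APFloat::rmNearestTiesToEven);
      assert(S == APFloat::opOK && V.convertToDouble() == 6.5);
      return 0;
    }
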
    2078             : 
    2079          25 :   if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
    2080             :     auto *C0 = dyn_cast<ConstantInt>(Operands[0]);
    2081          16 :     auto *C1 = dyn_cast<ConstantInt>(Operands[1]);
    2082          16 :     auto *C2 = dyn_cast<ConstantInt>(Operands[2]);
    2083          16 :     if (!(C0 && C1 && C2))
    2084             :       return nullptr;
    2085             : 
    2086             :     // The shift amount is interpreted as modulo the bitwidth. If the shift
    2087             :     // amount is effectively 0, avoid UB due to oversized inverse shift below.
    2088             :     unsigned BitWidth = C0->getBitWidth();
    2089          16 :     unsigned ShAmt = C2->getValue().urem(BitWidth);
    2090             :     bool IsRight = IntrinsicID == Intrinsic::fshr;
    2091          16 :     if (!ShAmt)
    2092           6 :       return IsRight ? C1 : C0;
    2093             : 
    2094             :     // (X << ShlAmt) | (Y >> LshrAmt)
    2095             :     const APInt &X = C0->getValue();
    2096             :     const APInt &Y = C1->getValue();
    2097          12 :     unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
    2098          12 :     unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
    2099          36 :     return ConstantInt::get(Ty->getContext(), X.shl(ShlAmt) | Y.lshr(LshrAmt));
    2100             :   }
    2101             : 
    2102             :   return nullptr;
    2103             : }
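
The funnel-shift fold at the end of ConstantFoldScalarCall reduces the shift amount modulo the bit width, returns an operand unchanged for an effective shift of zero, and otherwise combines the two halves with complementary shifts. A minimal plain-C++ sketch of the same arithmetic for a 32-bit llvm.fshl (fshl32 is a hypothetical helper):

    #include <cassert>
    #include <cstdint>

    // fshl(X, Y, Sh): concatenate X:Y, shift left by Sh % 32, keep the high word.
    static uint32_t fshl32(uint32_t X, uint32_t Y, uint32_t Sh) {
      unsigned Amt = Sh % 32;
      if (Amt == 0)
        return X;                 // avoid the undefined 32-bit inverse shift
      return (X << Amt) | (Y >> (32 - Amt));
    }

    int main() {
      assert(fshl32(0x12345678u, 0x9ABCDEF0u, 8) == 0x3456789Au);
      assert(fshl32(0xDEADBEEFu, 0x0u, 0) == 0xDEADBEEFu);
      return 0;
    }
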
    2104             : 
    2105          23 : Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID,
    2106             :                                  VectorType *VTy, ArrayRef<Constant *> Operands,
    2107             :                                  const DataLayout &DL,
    2108             :                                  const TargetLibraryInfo *TLI,
    2109             :                                  ImmutableCallSite CS) {
    2110          23 :   SmallVector<Constant *, 4> Result(VTy->getNumElements());
    2111          23 :   SmallVector<Constant *, 4> Lane(Operands.size());
    2112          23 :   Type *Ty = VTy->getElementType();
    2113             : 
    2114          23 :   if (IntrinsicID == Intrinsic::masked_load) {
    2115           2 :     auto *SrcPtr = Operands[0];
    2116           2 :     auto *Mask = Operands[2];
    2117           2 :     auto *Passthru = Operands[3];
    2118             : 
    2119           2 :     Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, VTy, DL);
    2120             : 
    2121             :     SmallVector<Constant *, 32> NewElements;
    2122          18 :     for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
    2123          16 :       auto *MaskElt = Mask->getAggregateElement(I);
    2124          16 :       if (!MaskElt)
    2125             :         break;
    2126          16 :       auto *PassthruElt = Passthru->getAggregateElement(I);
    2127          16 :       auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
    2128          16 :       if (isa<UndefValue>(MaskElt)) {
    2129           0 :         if (PassthruElt)
    2130           0 :           NewElements.push_back(PassthruElt);
    2131           0 :         else if (VecElt)
    2132           0 :           NewElements.push_back(VecElt);
    2133             :         else
    2134           0 :           return nullptr;
    2135             :       }
    2136          16 :       if (MaskElt->isNullValue()) {
    2137           4 :         if (!PassthruElt)
    2138             :           return nullptr;
    2139           4 :         NewElements.push_back(PassthruElt);
    2140          12 :       } else if (MaskElt->isOneValue()) {
    2141          12 :         if (!VecElt)
    2142             :           return nullptr;
    2143          12 :         NewElements.push_back(VecElt);
    2144             :       } else {
    2145             :         return nullptr;
    2146             :       }
    2147             :     }
    2148           4 :     if (NewElements.size() != VTy->getNumElements())
    2149             :       return nullptr;
    2150           2 :     return ConstantVector::get(NewElements);
    2151             :   }
    2152             : 
    2153          73 :   for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
    2154             :     // Gather a column of constants.
    2155         160 :     for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
    2156             :       // These intrinsics use a scalar type for their second argument.
    2157         107 :       if (J == 1 &&
    2158          41 :           (IntrinsicID == Intrinsic::cttz || IntrinsicID == Intrinsic::ctlz ||
    2159             :            IntrinsicID == Intrinsic::powi)) {
    2160          20 :         Lane[J] = Operands[J];
    2161          20 :         continue;
    2162             :       }
    2163             : 
    2164         174 :       Constant *Agg = Operands[J]->getAggregateElement(I);
    2165          87 :       if (!Agg)
    2166             :         return nullptr;
    2167             : 
    2168          87 :       Lane[J] = Agg;
    2169             :     }
    2170             : 
    2171             :     // Use the regular scalar folding to simplify this column.
    2172          53 :     Constant *Folded = ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, CS);
    2173          53 :     if (!Folded)
    2174             :       return nullptr;
    2175         104 :     Result[I] = Folded;
    2176             :   }
    2177             : 
    2178          20 :   return ConstantVector::get(Result);
    2179             : }
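
ConstantFoldVectorCall folds element-wise intrinsics by gathering one constant per operand for each lane, folding that scalar column, and rebuilding the vector, giving up if any lane fails. A minimal plain-C++ sketch of that column-wise pattern (foldLanes and the int-based scalar folder are hypothetical stand-ins):

    #include <cstddef>
    #include <functional>
    #include <optional>
    #include <vector>

    // Hypothetical stand-in: fold a vector op lane by lane with a scalar
    // folder; a single unfoldable lane blocks the whole fold, as above.
    static std::optional<std::vector<int>>
    foldLanes(const std::vector<std::vector<int>> &Operands,
              const std::function<std::optional<int>(const std::vector<int> &)>
                  &FoldScalar) {
      std::size_t Lanes = Operands.front().size();
      std::vector<int> Result(Lanes);
      for (std::size_t I = 0; I != Lanes; ++I) {
        std::vector<int> Column;
        for (const auto &Op : Operands)    // gather a column of constants
          Column.push_back(Op[I]);
        std::optional<int> Folded = FoldScalar(Column);
        if (!Folded)
          return std::nullopt;
        Result[I] = *Folded;
      }
      return Result;
    }

    // Usage (illustrative): folding an element-wise "add" over <1, 2, 3>
    // and <10, 20, 30> yields <11, 22, 33>.
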
    2180             : 
    2181             : } // end anonymous namespace
    2182             : 
    2183             : Constant *
    2184         985 : llvm::ConstantFoldCall(ImmutableCallSite CS, Function *F,
    2185             :                        ArrayRef<Constant *> Operands,
    2186             :                        const TargetLibraryInfo *TLI) {
    2187         985 :   if (CS.isNoBuiltin() || CS.isStrictFP())
    2188           0 :     return nullptr;
    2189         985 :   if (!F->hasName())
    2190             :     return nullptr;
    2191         985 :   StringRef Name = F->getName();
    2192             : 
    2193             :   Type *Ty = F->getReturnType();
    2194             : 
    2195             :   if (auto *VTy = dyn_cast<VectorType>(Ty))
    2196          23 :     return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
    2197          23 :                                   F->getParent()->getDataLayout(), TLI, CS);
    2198             : 
    2199         962 :   return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI, CS);
    2200             : }
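
A sketch of how a caller might drive llvm::ConstantFoldCall for a call instruction whose arguments are all constants. The surrounding CallInst and TargetLibraryInfo are assumed to exist; tryFoldCall is a hypothetical wrapper, not an API of this file:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/Analysis/TargetLibraryInfo.h"
    #include "llvm/IR/CallSite.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/Support/Casting.h"
    using namespace llvm;

    // Assumes CI has a known callee and TLI is valid for the target.
    static Constant *tryFoldCall(CallInst *CI, const TargetLibraryInfo *TLI) {
      Function *F = CI->getCalledFunction();
      if (!F)
        return nullptr;
      SmallVector<Constant *, 4> Ops;
      for (Value *Arg : CI->arg_operands()) {
        auto *C = dyn_cast<Constant>(Arg);
        if (!C)
          return nullptr;          // only fully-constant calls can fold
        Ops.push_back(C);
      }
      return ConstantFoldCall(ImmutableCallSite(CI), F, Ops, TLI);
    }
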
    2201             : 
    2202     4261064 : bool llvm::isMathLibCallNoop(CallSite CS, const TargetLibraryInfo *TLI) {
    2203             :   // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
    2204             :   // (and to some extent ConstantFoldScalarCall).
    2205     4261064 :   if (CS.isNoBuiltin() || CS.isStrictFP())
    2206       38849 :     return false;
    2207             :   Function *F = CS.getCalledFunction();
    2208             :   if (!F)
    2209             :     return false;
    2210             : 
    2211             :   LibFunc Func;
    2212     4038669 :   if (!TLI || !TLI->getLibFunc(*F, Func))
    2213     3942558 :     return false;
    2214             : 
    2215       96111 :   if (CS.getNumArgOperands() == 1) {
    2216       83372 :     if (ConstantFP *OpC = dyn_cast<ConstantFP>(CS.getArgOperand(0))) {
    2217             :       const APFloat &Op = OpC->getValueAPF();
    2218          13 :       switch (Func) {
    2219             :       case LibFunc_logl:
    2220             :       case LibFunc_log:
    2221             :       case LibFunc_logf:
    2222             :       case LibFunc_log2l:
    2223             :       case LibFunc_log2:
    2224             :       case LibFunc_log2f:
    2225             :       case LibFunc_log10l:
    2226             :       case LibFunc_log10:
    2227             :       case LibFunc_log10f:
    2228           3 :         return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
    2229             : 
    2230           2 :       case LibFunc_expl:
    2231             :       case LibFunc_exp:
    2232             :       case LibFunc_expf:
    2233             :         // FIXME: These boundaries are slightly conservative.
    2234           4 :         if (OpC->getType()->isDoubleTy())
    2235           6 :           return Op.compare(APFloat(-745.0)) != APFloat::cmpLessThan &&
    2236           5 :                  Op.compare(APFloat(709.0)) != APFloat::cmpGreaterThan;
    2237           0 :         if (OpC->getType()->isFloatTy())
    2238           0 :           return Op.compare(APFloat(-103.0f)) != APFloat::cmpLessThan &&
    2239           0 :                  Op.compare(APFloat(88.0f)) != APFloat::cmpGreaterThan;
    2240             :         break;
    2241             : 
    2242           0 :       case LibFunc_exp2l:
    2243             :       case LibFunc_exp2:
    2244             :       case LibFunc_exp2f:
    2245             :         // FIXME: These boundaries are slightly conservative.
    2246           0 :         if (OpC->getType()->isDoubleTy())
    2247           0 :           return Op.compare(APFloat(-1074.0)) != APFloat::cmpLessThan &&
    2248           0 :                  Op.compare(APFloat(1023.0)) != APFloat::cmpGreaterThan;
    2249           0 :         if (OpC->getType()->isFloatTy())
    2250           0 :           return Op.compare(APFloat(-149.0f)) != APFloat::cmpLessThan &&
    2251           0 :                  Op.compare(APFloat(127.0f)) != APFloat::cmpGreaterThan;
    2252             :         break;
    2253             : 
    2254             :       case LibFunc_sinl:
    2255             :       case LibFunc_sin:
    2256             :       case LibFunc_sinf:
    2257             :       case LibFunc_cosl:
    2258             :       case LibFunc_cos:
    2259             :       case LibFunc_cosf:
    2260           3 :         return !Op.isInfinity();
    2261             : 
    2262           0 :       case LibFunc_tanl:
    2263             :       case LibFunc_tan:
    2264             :       case LibFunc_tanf: {
    2265             :         // FIXME: Stop using the host math library.
    2266             :         // FIXME: The computation isn't done in the right precision.
    2267           0 :         Type *Ty = OpC->getType();
    2268           0 :         if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
    2269           0 :           double OpV = getValueAsDouble(OpC);
    2270           0 :           return ConstantFoldFP(tan, OpV, Ty) != nullptr;
    2271             :         }
    2272             :         break;
    2273             :       }
    2274             : 
    2275           3 :       case LibFunc_asinl:
    2276             :       case LibFunc_asin:
    2277             :       case LibFunc_asinf:
    2278             :       case LibFunc_acosl:
    2279             :       case LibFunc_acos:
    2280             :       case LibFunc_acosf:
    2281           6 :         return Op.compare(APFloat(Op.getSemantics(), "-1")) !=
    2282           6 :                    APFloat::cmpLessThan &&
    2283           8 :                Op.compare(APFloat(Op.getSemantics(), "1")) !=
    2284             :                    APFloat::cmpGreaterThan;
    2285             : 
    2286           0 :       case LibFunc_sinh:
    2287             :       case LibFunc_cosh:
    2288             :       case LibFunc_sinhf:
    2289             :       case LibFunc_coshf:
    2290             :       case LibFunc_sinhl:
    2291             :       case LibFunc_coshl:
    2292             :         // FIXME: These boundaries are slightly conservative.
    2293           0 :         if (OpC->getType()->isDoubleTy())
    2294           0 :           return Op.compare(APFloat(-710.0)) != APFloat::cmpLessThan &&
    2295           0 :                  Op.compare(APFloat(710.0)) != APFloat::cmpGreaterThan;
    2296           0 :         if (OpC->getType()->isFloatTy())
    2297           0 :           return Op.compare(APFloat(-89.0f)) != APFloat::cmpLessThan &&
    2298           0 :                  Op.compare(APFloat(89.0f)) != APFloat::cmpGreaterThan;
    2299             :         break;
    2300             : 
    2301             :       case LibFunc_sqrtl:
    2302             :       case LibFunc_sqrt:
    2303             :       case LibFunc_sqrtf:
    2304           0 :         return Op.isNaN() || Op.isZero() || !Op.isNegative();
    2305             : 
    2306             :       // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
    2307             :       // maybe others?
    2308             :       default:
    2309             :         break;
    2310             :       }
    2311             :     }
    2312             :   }
    2313             : 
    2314       96100 :   if (CS.getNumArgOperands() == 2) {
    2315        5644 :     ConstantFP *Op0C = dyn_cast<ConstantFP>(CS.getArgOperand(0));
    2316        5644 :     ConstantFP *Op1C = dyn_cast<ConstantFP>(CS.getArgOperand(1));
    2317        5644 :     if (Op0C && Op1C) {
    2318             :       const APFloat &Op0 = Op0C->getValueAPF();
    2319             :       const APFloat &Op1 = Op1C->getValueAPF();
    2320             : 
    2321           3 :       switch (Func) {
    2322           1 :       case LibFunc_powl:
    2323             :       case LibFunc_pow:
    2324             :       case LibFunc_powf: {
    2325             :         // FIXME: Stop using the host math library.
    2326             :         // FIXME: The computation isn't done in the right precision.
    2327           1 :         Type *Ty = Op0C->getType();
    2328           1 :         if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
    2329           1 :           if (Ty == Op1C->getType()) {
    2330           1 :             double Op0V = getValueAsDouble(Op0C);
    2331           1 :             double Op1V = getValueAsDouble(Op1C);
    2332           1 :             return ConstantFoldBinaryFP(pow, Op0V, Op1V, Ty) != nullptr;
    2333             :           }
    2334             :         }
    2335             :         break;
    2336             :       }
    2337             : 
    2338             :       case LibFunc_fmodl:
    2339             :       case LibFunc_fmod:
    2340             :       case LibFunc_fmodf:
    2341           4 :         return Op0.isNaN() || Op1.isNaN() ||
    2342           0 :                (!Op0.isInfinity() && !Op1.isZero());
    2343             : 
    2344             :       default:
    2345             :         break;
    2346             :       }
    2347             :     }
    2348             :   }
    2349             : 
    2350             :   return false;
    2351             : }
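
A plain-C++ sketch of the domain tests behind a few of the cases above (standard library only; these helpers are illustrative, not part of LLVM): a math libcall is a no-op when its constant argument cannot raise a floating-point error.

    #include <cmath>

    // log/log2/log10 raise no error when the argument is NaN or is nonzero
    // with a clear sign bit.
    static bool logIsNoop(double Op) {
      return std::isnan(Op) || (Op != 0.0 && !std::signbit(Op));
    }

    // sin/cos only fail on infinite arguments.
    static bool sinCosIsNoop(double Op) { return !std::isinf(Op); }

    // asin/acos are defined on [-1, 1]; NaN passes both checks, matching the
    // APFloat::compare-based test in the source.
    static bool asinAcosIsNoop(double Op) {
      return !(Op < -1.0) && !(Op > 1.0);
    }
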

Generated by: LCOV version 1.13