// InstCombineCasts.cpp -- LLVM mainline (doxygen source listing; the original
// page-navigation text here was not valid C++ and has been made a comment).
00001 //===- InstCombineCasts.cpp -----------------------------------------------===//
00002 //
00003 //                     The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 //
00010 // This file implements the visit functions for cast operations.
00011 //
00012 //===----------------------------------------------------------------------===//
00013 
00014 #include "InstCombineInternal.h"
00015 #include "llvm/Analysis/ConstantFolding.h"
00016 #include "llvm/IR/DataLayout.h"
00017 #include "llvm/IR/PatternMatch.h"
00018 #include "llvm/Analysis/TargetLibraryInfo.h"
00019 using namespace llvm;
00020 using namespace PatternMatch;
00021 
00022 #define DEBUG_TYPE "instcombine"
00023 
00024 /// Analyze 'Val', seeing if it is a simple linear expression.
00025 /// If so, decompose it, returning some value X, such that Val is
00026 /// X*Scale+Offset.
00027 ///
00028 static Value *decomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
00029                                         uint64_t &Offset) {
00030   if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
00031     Offset = CI->getZExtValue();
00032     Scale  = 0;
00033     return ConstantInt::get(Val->getType(), 0);
00034   }
00035 
00036   if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
00037     // Cannot look past anything that might overflow.
00038     OverflowingBinaryOperator *OBI = dyn_cast<OverflowingBinaryOperator>(Val);
00039     if (OBI && !OBI->hasNoUnsignedWrap() && !OBI->hasNoSignedWrap()) {
00040       Scale = 1;
00041       Offset = 0;
00042       return Val;
00043     }
00044 
00045     if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
00046       if (I->getOpcode() == Instruction::Shl) {
00047         // This is a value scaled by '1 << the shift amt'.
00048         Scale = UINT64_C(1) << RHS->getZExtValue();
00049         Offset = 0;
00050         return I->getOperand(0);
00051       }
00052 
00053       if (I->getOpcode() == Instruction::Mul) {
00054         // This value is scaled by 'RHS'.
00055         Scale = RHS->getZExtValue();
00056         Offset = 0;
00057         return I->getOperand(0);
00058       }
00059 
00060       if (I->getOpcode() == Instruction::Add) {
00061         // We have X+C.  Check to see if we really have (X*C2)+C1,
00062         // where C1 is divisible by C2.
00063         unsigned SubScale;
00064         Value *SubVal =
00065           decomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
00066         Offset += RHS->getZExtValue();
00067         Scale = SubScale;
00068         return SubVal;
00069       }
00070     }
00071   }
00072 
00073   // Otherwise, we can't look past this.
00074   Scale = 1;
00075   Offset = 0;
00076   return Val;
00077 }
00078 
/// If we find a cast of an allocation instruction, try to eliminate the cast by
/// moving the type information into the alloc.
Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
                                                   AllocaInst &AI) {
  PointerType *PTy = cast<PointerType>(CI.getType());

  // New instructions are inserted at the alloca, not at the cast, so that the
  // replacement alloca dominates every existing user of the old one.
  BuilderTy AllocaBuilder(*Builder);
  AllocaBuilder.SetInsertPoint(&AI);

  // Get the type really allocated and the type casted to.
  Type *AllocElTy = AI.getAllocatedType();
  Type *CastElTy = PTy->getElementType();
  if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;

  // Don't promote if the cast-to type has a weaker ABI alignment guarantee
  // than what was originally allocated.
  unsigned AllocElTyAlign = DL.getABITypeAlignment(AllocElTy);
  unsigned CastElTyAlign = DL.getABITypeAlignment(CastElTy);
  if (CastElTyAlign < AllocElTyAlign) return nullptr;

  // If the allocation has multiple uses, only promote it if we are strictly
  // increasing the alignment of the resultant allocation.  If we keep it the
  // same, we open the door to infinite loops of various kinds.
  if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr;

  uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy);
  uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy);
  if (CastElTySize == 0 || AllocElTySize == 0) return nullptr;

  // If the allocation has multiple uses, only promote it if we're not
  // shrinking the amount of memory being allocated.
  uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy);
  uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy);
  if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr;

  // See if we can satisfy the modulus by pulling a scale out of the array
  // size argument.
  unsigned ArraySizeScale;
  uint64_t ArrayOffset;
  Value *NumElements = // See if the array size is a decomposable linear expr.
    decomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);

  // If we can now satisfy the modulus, by using a non-1 scale, we really can
  // do the xform.
  if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
      (AllocElTySize*ArrayOffset   ) % CastElTySize != 0) return nullptr;

  // Compute the element count of the new alloca, expressed in units of the
  // cast-to element type: Amt = NumElements * Scale (constant Offset added
  // below).
  unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
  Value *Amt = nullptr;
  if (Scale == 1) {
    Amt = NumElements;
  } else {
    Amt = ConstantInt::get(AI.getArraySize()->getType(), Scale);
    // Insert before the alloca, not before the cast.
    Amt = AllocaBuilder.CreateMul(Amt, NumElements);
  }

  // Fold the constant part of the decomposed array size into the new count.
  if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
    Value *Off = ConstantInt::get(AI.getArraySize()->getType(),
                                  Offset, true);
    Amt = AllocaBuilder.CreateAdd(Amt, Off);
  }

  // Build the replacement alloca, carrying over alignment, name, and the
  // inalloca marker from the original.
  AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
  New->setAlignment(AI.getAlignment());
  New->takeName(&AI);
  New->setUsedWithInAlloca(AI.isUsedWithInAlloca());

  // If the allocation has multiple real uses, insert a cast and change all
  // things that used it to use the new cast.  This will also hack on CI, but it
  // will die soon.
  if (!AI.hasOneUse()) {
    // New is the allocation instruction, pointer typed. AI is the original
    // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
    Value *NewCast = AllocaBuilder.CreateBitCast(New, AI.getType(), "tmpcast");
    ReplaceInstUsesWith(AI, NewCast);
  }
  return ReplaceInstUsesWith(CI, New);
}
00156 
00157 /// Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns
00158 /// true for, actually insert the code to evaluate the expression.
00159 Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
00160                                              bool isSigned) {
00161   if (Constant *C = dyn_cast<Constant>(V)) {
00162     C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
00163     // If we got a constantexpr back, try to simplify it with DL info.
00164     if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
00165       C = ConstantFoldConstantExpression(CE, DL, TLI);
00166     return C;
00167   }
00168 
00169   // Otherwise, it must be an instruction.
00170   Instruction *I = cast<Instruction>(V);
00171   Instruction *Res = nullptr;
00172   unsigned Opc = I->getOpcode();
00173   switch (Opc) {
00174   case Instruction::Add:
00175   case Instruction::Sub:
00176   case Instruction::Mul:
00177   case Instruction::And:
00178   case Instruction::Or:
00179   case Instruction::Xor:
00180   case Instruction::AShr:
00181   case Instruction::LShr:
00182   case Instruction::Shl:
00183   case Instruction::UDiv:
00184   case Instruction::URem: {
00185     Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
00186     Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
00187     Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
00188     break;
00189   }
00190   case Instruction::Trunc:
00191   case Instruction::ZExt:
00192   case Instruction::SExt:
00193     // If the source type of the cast is the type we're trying for then we can
00194     // just return the source.  There's no need to insert it because it is not
00195     // new.
00196     if (I->getOperand(0)->getType() == Ty)
00197       return I->getOperand(0);
00198 
00199     // Otherwise, must be the same type of cast, so just reinsert a new one.
00200     // This also handles the case of zext(trunc(x)) -> zext(x).
00201     Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,
00202                                       Opc == Instruction::SExt);
00203     break;
00204   case Instruction::Select: {
00205     Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
00206     Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
00207     Res = SelectInst::Create(I->getOperand(0), True, False);
00208     break;
00209   }
00210   case Instruction::PHI: {
00211     PHINode *OPN = cast<PHINode>(I);
00212     PHINode *NPN = PHINode::Create(Ty, OPN->getNumIncomingValues());
00213     for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
00214       Value *V =
00215           EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
00216       NPN->addIncoming(V, OPN->getIncomingBlock(i));
00217     }
00218     Res = NPN;
00219     break;
00220   }
00221   default:
00222     // TODO: Can handle more cases here.
00223     llvm_unreachable("Unreachable!");
00224   }
00225 
00226   Res->takeName(I);
00227   return InsertNewInstWith(Res, *I);
00228 }
00229 
00230 
00231 /// This function is a wrapper around CastInst::isEliminableCastPair. It
00232 /// simply extracts arguments and returns what that function returns.
00233 static Instruction::CastOps
00234 isEliminableCastPair(const CastInst *CI, ///< First cast instruction
00235                      unsigned opcode,    ///< Opcode for the second cast
00236                      Type *DstTy,        ///< Target type for the second cast
00237                      const DataLayout &DL) {
00238   Type *SrcTy = CI->getOperand(0)->getType();   // A from above
00239   Type *MidTy = CI->getType();                  // B from above
00240 
00241   // Get the opcodes of the two Cast instructions
00242   Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
00243   Instruction::CastOps secondOp = Instruction::CastOps(opcode);
00244   Type *SrcIntPtrTy =
00245       SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr;
00246   Type *MidIntPtrTy =
00247       MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr;
00248   Type *DstIntPtrTy =
00249       DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr;
00250   unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
00251                                                 DstTy, SrcIntPtrTy, MidIntPtrTy,
00252                                                 DstIntPtrTy);
00253 
00254   // We don't want to form an inttoptr or ptrtoint that converts to an integer
00255   // type that differs from the pointer size.
00256   if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
00257       (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
00258     Res = 0;
00259 
00260   return Instruction::CastOps(Res);
00261 }
00262 
00263 /// Return true if the cast from "V to Ty" actually results in any code being
00264 /// generated and is interesting to optimize out.
00265 /// If the cast can be eliminated by some other simple transformation, we prefer
00266 /// to do the simplification first.
00267 bool InstCombiner::ShouldOptimizeCast(Instruction::CastOps opc, const Value *V,
00268                                       Type *Ty) {
00269   // Noop casts and casts of constants should be eliminated trivially.
00270   if (V->getType() == Ty || isa<Constant>(V)) return false;
00271 
00272   // If this is another cast that can be eliminated, we prefer to have it
00273   // eliminated.
00274   if (const CastInst *CI = dyn_cast<CastInst>(V))
00275     if (isEliminableCastPair(CI, opc, Ty, DL))
00276       return false;
00277 
00278   // If this is a vector sext from a compare, then we don't want to break the
00279   // idiom where each element of the extended vector is either zero or all ones.
00280   if (opc == Instruction::SExt && isa<CmpInst>(V) && Ty->isVectorTy())
00281     return false;
00282 
00283   return true;
00284 }
00285 
00286 
00287 /// @brief Implement the transforms common to all CastInst visitors.
00288 Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
00289   Value *Src = CI.getOperand(0);
00290 
00291   // Many cases of "cast of a cast" are eliminable. If it's eliminable we just
00292   // eliminate it now.
00293   if (CastInst *CSrc = dyn_cast<CastInst>(Src)) {   // A->B->C cast
00294     if (Instruction::CastOps opc =
00295             isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), DL)) {
00296       // The first cast (CSrc) is eliminable so we need to fix up or replace
00297       // the second cast (CI). CSrc will then have a good chance of being dead.
00298       return CastInst::Create(opc, CSrc->getOperand(0), CI.getType());
00299     }
00300   }
00301 
00302   // If we are casting a select then fold the cast into the select
00303   if (SelectInst *SI = dyn_cast<SelectInst>(Src))
00304     if (Instruction *NV = FoldOpIntoSelect(CI, SI))
00305       return NV;
00306 
00307   // If we are casting a PHI then fold the cast into the PHI
00308   if (isa<PHINode>(Src)) {
00309     // We don't do this if this would create a PHI node with an illegal type if
00310     // it is currently legal.
00311     if (!Src->getType()->isIntegerTy() || !CI.getType()->isIntegerTy() ||
00312         ShouldChangeType(CI.getType(), Src->getType()))
00313       if (Instruction *NV = FoldOpIntoPhi(CI))
00314         return NV;
00315   }
00316 
00317   return nullptr;
00318 }
00319 
/// Return true if we can evaluate the specified expression tree as type Ty
/// instead of its larger type, and arrive with the same value.
/// This is used by code that tries to eliminate truncates.
///
/// Ty will always be a type smaller than V.  We should return true if trunc(V)
/// can be computed by computing V in the smaller type.  If V is an instruction,
/// then trunc(inst(x,y)) can be computed as inst(trunc(x),trunc(y)), which only
/// makes sense if x and y can be efficiently truncated.
///
/// This function works on both vectors and scalars.
///
static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombiner &IC,
                                 Instruction *CxtI) {
  // We can always evaluate constants in another type.
  if (isa<Constant>(V))
    return true;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  Type *OrigTy = V->getType();

  // If this is an extension from the dest type, we can eliminate it, even if it
  // has multiple uses.
  if ((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
      I->getOperand(0)->getType() == Ty)
    return true;

  // We can't extend or shrink something that has multiple uses: doing so would
  // require duplicating the instruction in general, which isn't profitable.
  if (!I->hasOneUse()) return false;

  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // These operators can all arbitrarily be extended or truncated.
    return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
           canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);

  case Instruction::UDiv:
  case Instruction::URem: {
    // UDiv and URem can be truncated if all the truncated bits are zero.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    if (BitWidth < OrigBitWidth) {
      // Mask covers exactly the high bits that truncation would discard; both
      // operands must have those bits known zero for the narrow div/rem to
      // produce the same low bits.
      APInt Mask = APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth);
      if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, CxtI) &&
          IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, CxtI)) {
        return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
               canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
      }
    }
    break;
  }
  case Instruction::Shl:
    // If we are truncating the result of this SHL, and if it's a shift of a
    // constant amount, we can always perform a SHL in a smaller type.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint32_t BitWidth = Ty->getScalarSizeInBits();
      // The shift amount must still be in range for the narrow type.
      if (CI->getLimitedValue(BitWidth) < BitWidth)
        return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);
    }
    break;
  case Instruction::LShr:
    // If this is a truncate of a logical shr, we can truncate it to a smaller
    // lshr iff we know that the bits we would otherwise be shifting in are
    // already zeros.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
      uint32_t BitWidth = Ty->getScalarSizeInBits();
      // High bits of the shifted value must be known zero, and the shift
      // amount must be valid in the narrow type.
      if (IC.MaskedValueIsZero(I->getOperand(0),
            APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth), 0, CxtI) &&
          CI->getLimitedValue(BitWidth) < BitWidth) {
        return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);
      }
    }
    break;
  case Instruction::Trunc:
    // trunc(trunc(x)) -> trunc(x)
    return true;
  case Instruction::ZExt:
  case Instruction::SExt:
    // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
    // trunc(ext(x)) -> trunc(x) if the source type is larger than the new dest
    return true;
  case Instruction::Select: {
    // A select can be narrowed iff both arms can; the i1 condition is
    // unaffected by the truncation.
    SelectInst *SI = cast<SelectInst>(I);
    return canEvaluateTruncated(SI->getTrueValue(), Ty, IC, CxtI) &&
           canEvaluateTruncated(SI->getFalseValue(), Ty, IC, CxtI);
  }
  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateTruncated(IncValue, Ty, IC, CxtI))
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}
00432 
00433 /// Given a vector that is bitcast to an integer, optionally logically
00434 /// right-shifted, and truncated, convert it to an extractelement.
00435 /// Example (big endian):
00436 ///   trunc (lshr (bitcast <4 x i32> %X to i128), 32) to i32
00437 ///   --->
00438 ///   extractelement <4 x i32> %X, 1
00439 static Instruction *foldVecTruncToExtElt(TruncInst &Trunc, InstCombiner &IC,
00440                                          const DataLayout &DL) {
00441   Value *TruncOp = Trunc.getOperand(0);
00442   Type *DestType = Trunc.getType();
00443   if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType))
00444     return nullptr;
00445 
00446   Value *VecInput = nullptr;
00447   ConstantInt *ShiftVal = nullptr;
00448   if (!match(TruncOp, m_CombineOr(m_BitCast(m_Value(VecInput)),
00449                                   m_LShr(m_BitCast(m_Value(VecInput)),
00450                                          m_ConstantInt(ShiftVal)))) ||
00451       !isa<VectorType>(VecInput->getType()))
00452     return nullptr;
00453 
00454   VectorType *VecType = cast<VectorType>(VecInput->getType());
00455   unsigned VecWidth = VecType->getPrimitiveSizeInBits();
00456   unsigned DestWidth = DestType->getPrimitiveSizeInBits();
00457   unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;
00458 
00459   if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
00460     return nullptr;
00461 
00462   // If the element type of the vector doesn't match the result type,
00463   // bitcast it to a vector type that we can extract from.
00464   unsigned NumVecElts = VecWidth / DestWidth;
00465   if (VecType->getElementType() != DestType) {
00466     VecType = VectorType::get(DestType, NumVecElts);
00467     VecInput = IC.Builder->CreateBitCast(VecInput, VecType, "bc");
00468   }
00469 
00470   unsigned Elt = ShiftAmount / DestWidth;
00471   if (DL.isBigEndian())
00472     Elt = NumVecElts - 1 - Elt;
00473 
00474   return ExtractElementInst::Create(VecInput, IC.Builder->getInt32(Elt));
00475 }
00476 
/// Visit a trunc instruction: apply common cast transforms, then a series of
/// trunc-specific rewrites (narrowing the whole expression tree, i1
/// canonicalization, lshr/zext and lshr/sext folds, and-mask narrowing, and
/// vector-extract recognition).
Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
  if (Instruction *Result = commonCastTransforms(CI))
    return Result;

  // Test if the trunc is the user of a select which is part of a
  // minimum or maximum operation. If so, don't do any more simplification.
  // Even simplifying demanded bits can break the canonical form of a
  // min/max.
  Value *LHS, *RHS;
  if (SelectInst *SI = dyn_cast<SelectInst>(CI.getOperand(0)))
    if (matchSelectPattern(SI, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

  // See if we can simplify any instructions used by the input whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(CI))
    return &CI;

  Value *Src = CI.getOperand(0);
  Type *DestTy = CI.getType(), *SrcTy = Src->getType();

  // Attempt to truncate the entire input expression tree to the destination
  // type.   Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
  if ((DestTy->isVectorTy() || ShouldChangeType(SrcTy, DestTy)) &&
      canEvaluateTruncated(Src, DestTy, *this, &CI)) {

    // If this cast is a truncate, evaluting in a different type always
    // eliminates the cast, so it is always a win.
    DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
          " to avoid cast: " << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);
    return ReplaceInstUsesWith(CI, Res);
  }

  // Canonicalize trunc x to i1 -> (icmp ne (and x, 1), 0), likewise for vector.
  if (DestTy->getScalarSizeInBits() == 1) {
    Constant *One = ConstantInt::get(SrcTy, 1);
    Src = Builder->CreateAnd(Src, One);
    Value *Zero = Constant::getNullValue(Src->getType());
    return new ICmpInst(ICmpInst::ICMP_NE, Src, Zero);
  }

  // Transform trunc(lshr (zext A), Cst) to eliminate one type conversion.
  Value *A = nullptr; ConstantInt *Cst = nullptr;
  if (Src->hasOneUse() &&
      match(Src, m_LShr(m_ZExt(m_Value(A)), m_ConstantInt(Cst)))) {
    // We have three types to worry about here, the type of A, the source of
    // the truncate (MidSize), and the destination of the truncate. We know that
    // ASize < MidSize   and MidSize > ResultSize, but don't know the relation
    // between ASize and ResultSize.
    unsigned ASize = A->getType()->getPrimitiveSizeInBits();

    // If the shift amount is larger than the size of A, then the result is
    // known to be zero because all the input bits got shifted out.
    if (Cst->getZExtValue() >= ASize)
      return ReplaceInstUsesWith(CI, Constant::getNullValue(DestTy));

    // Since we're doing an lshr and a zero extend, and know that the shift
    // amount is smaller than ASize, it is always safe to do the shift in A's
    // type, then zero extend or truncate to the result.
    Value *Shift = Builder->CreateLShr(A, Cst->getZExtValue());
    Shift->takeName(Src);
    return CastInst::CreateIntegerCast(Shift, DestTy, false);
  }

  // Transform trunc(lshr (sext A), Cst) to ashr A, Cst to eliminate type
  // conversion.
  // It works because bits coming from sign extension have the same value as
  // the sign bit of the original value; performing ashr instead of lshr
  // generates bits of the same value as the sign bit.
  if (Src->hasOneUse() &&
      match(Src, m_LShr(m_SExt(m_Value(A)), m_ConstantInt(Cst))) &&
      cast<Instruction>(Src)->getOperand(0)->hasOneUse()) {
    const unsigned ASize = A->getType()->getPrimitiveSizeInBits();
    // This optimization can be only performed when zero bits generated by
    // the original lshr aren't pulled into the value after truncation, so we
    // can only shift by values smaller than the size of destination type (in
    // bits).
    if (Cst->getValue().ult(ASize)) {
      Value *Shift = Builder->CreateAShr(A, Cst->getZExtValue());
      Shift->takeName(Src);
      return CastInst::CreateIntegerCast(Shift, CI.getType(), true);
    }
  }

  // Transform "trunc (and X, cst)" -> "and (trunc X), cst" so long as the dest
  // type isn't non-native.
  if (Src->hasOneUse() && isa<IntegerType>(SrcTy) &&
      ShouldChangeType(SrcTy, DestTy) &&
      match(Src, m_And(m_Value(A), m_ConstantInt(Cst)))) {
    Value *NewTrunc = Builder->CreateTrunc(A, DestTy, A->getName() + ".tr");
    return BinaryOperator::CreateAnd(NewTrunc,
                                     ConstantExpr::getTrunc(Cst, DestTy));
  }

  // Finally, try to recognize a truncated (shifted) bitcast of a vector as a
  // plain extractelement.
  if (Instruction *I = foldVecTruncToExtElt(CI, *this, DL))
    return I;

  return nullptr;
}
00580 
/// Transform (zext icmp) to bitwise / integer operations in order to eliminate
/// the icmp.
///
/// If DoXform is false the function only tests whether the transform applies
/// and, when it does, returns the icmp without changing anything; when true,
/// it actually performs the rewrite.
Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
                                             bool DoXform) {
  // If we are just checking for a icmp eq of a single bit and zext'ing it
  // to an integer, then shift the bit to the appropriate place and then
  // cast to integer to avoid the comparison.
  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
    const APInt &Op1CV = Op1C->getValue();

    // zext (x <s  0) to i32 --> x>>u31      true if signbit set.
    // zext (x >s -1) to i32 --> (x>>u31)^1  true if signbit clear.
    if ((ICI->getPredicate() == ICmpInst::ICMP_SLT && Op1CV == 0) ||
        (ICI->getPredicate() == ICmpInst::ICMP_SGT && Op1CV.isAllOnesValue())) {
      if (!DoXform) return ICI;

      // Logical shift moves the sign bit down to bit 0.
      Value *In = ICI->getOperand(0);
      Value *Sh = ConstantInt::get(In->getType(),
                                   In->getType()->getScalarSizeInBits() - 1);
      In = Builder->CreateLShr(In, Sh, In->getName() + ".lobit");
      if (In->getType() != CI.getType())
        In = Builder->CreateIntCast(In, CI.getType(), false/*ZExt*/);

      // For the >s -1 form, the result is the complement of the sign bit.
      if (ICI->getPredicate() == ICmpInst::ICMP_SGT) {
        Constant *One = ConstantInt::get(In->getType(), 1);
        In = Builder->CreateXor(In, One, In->getName() + ".not");
      }

      return ReplaceInstUsesWith(CI, In);
    }

    // zext (X == 0) to i32 --> X^1      iff X has only the low bit set.
    // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    // zext (X == 1) to i32 --> X        iff X has only the low bit set.
    // zext (X == 2) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 0) to i32 --> X        iff X has only the low bit set.
    // zext (X != 0) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 1) to i32 --> X^1      iff X has only the low bit set.
    // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    if ((Op1CV == 0 || Op1CV.isPowerOf2()) &&
        // This only works for EQ and NE
        ICI->isEquality()) {
      // If Op1C some other power of two, convert:
      uint32_t BitWidth = Op1C->getType()->getBitWidth();
      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
      computeKnownBits(ICI->getOperand(0), KnownZero, KnownOne, 0, &CI);

      // ~KnownZero is the set of bits that could possibly be one; a power of
      // two here means X has exactly one possibly-set bit.
      APInt KnownZeroMask(~KnownZero);
      if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
        if (!DoXform) return ICI;

        bool isNE = ICI->getPredicate() == ICmpInst::ICMP_NE;
        if (Op1CV != 0 && (Op1CV != KnownZeroMask)) {
          // (X&4) == 2 --> false
          // (X&4) != 2 --> true
          Constant *Res = ConstantInt::get(Type::getInt1Ty(CI.getContext()),
                                           isNE);
          Res = ConstantExpr::getZExt(Res, CI.getType());
          return ReplaceInstUsesWith(CI, Res);
        }

        uint32_t ShAmt = KnownZeroMask.logBase2();
        Value *In = ICI->getOperand(0);
        if (ShAmt) {
          // Perform a logical shr by shiftamt.
          // Insert the shift to put the result in the low bit.
          In = Builder->CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
                                   In->getName() + ".lobit");
        }

        if ((Op1CV != 0) == isNE) { // Toggle the low bit.
          Constant *One = ConstantInt::get(In->getType(), 1);
          In = Builder->CreateXor(In, One);
        }

        if (CI.getType() == In->getType())
          return ReplaceInstUsesWith(CI, In);
        return CastInst::CreateIntegerCast(In, CI.getType(), false/*ZExt*/);
      }
    }
  }

  // icmp ne A, B is equal to xor A, B when A and B only really have one bit.
  // It is also profitable to transform icmp eq into not(xor(A, B)) because that
  // may lead to additional simplifications.
  if (ICI->isEquality() && CI.getType() == ICI->getOperand(0)->getType()) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(CI.getType())) {
      uint32_t BitWidth = ITy->getBitWidth();
      Value *LHS = ICI->getOperand(0);
      Value *RHS = ICI->getOperand(1);

      APInt KnownZeroLHS(BitWidth, 0), KnownOneLHS(BitWidth, 0);
      APInt KnownZeroRHS(BitWidth, 0), KnownOneRHS(BitWidth, 0);
      computeKnownBits(LHS, KnownZeroLHS, KnownOneLHS, 0, &CI);
      computeKnownBits(RHS, KnownZeroRHS, KnownOneRHS, 0, &CI);

      // The transform needs both sides to agree on every bit except one
      // unknown bit, which then fully decides the comparison.
      if (KnownZeroLHS == KnownZeroRHS && KnownOneLHS == KnownOneRHS) {
        APInt KnownBits = KnownZeroLHS | KnownOneLHS;
        APInt UnknownBit = ~KnownBits;
        if (UnknownBit.countPopulation() == 1) {
          if (!DoXform) return ICI;

          Value *Result = Builder->CreateXor(LHS, RHS);

          // Mask off any bits that are set and won't be shifted away.
          if (KnownOneLHS.uge(UnknownBit))
            Result = Builder->CreateAnd(Result,
                                        ConstantInt::get(ITy, UnknownBit));

          // Shift the bit we're testing down to the lsb.
          Result = Builder->CreateLShr(
               Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));

          if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
            Result = Builder->CreateXor(Result, ConstantInt::get(ITy, 1));
          Result->takeName(ICI);
          return ReplaceInstUsesWith(CI, Result);
        }
      }
    }
  }

  return nullptr;
}
00705 
00706 /// Determine if the specified value can be computed in the specified wider type
00707 /// and produce the same low bits. If not, return false.
00708 ///
00709 /// If this function returns true, it can also return a non-zero number of bits
00710 /// (in BitsToClear) which indicates that the value it computes is correct for
00711 /// the zero extend, but that the additional BitsToClear bits need to be zero'd
00712 /// out.  For example, to promote something like:
00713 ///
00714 ///   %B = trunc i64 %A to i32
00715 ///   %C = lshr i32 %B, 8
00716 ///   %E = zext i32 %C to i64
00717 ///
00718 /// CanEvaluateZExtd for the 'lshr' will return true, and BitsToClear will be
00719 /// set to 8 to indicate that the promoted value needs to have bits 24-31
00720 /// cleared in addition to bits 32-63.  Since an 'and' will be generated to
00721 /// clear the top bits anyway, doing this has no extra cost.
00722 ///
00723 /// This function works on both vectors and scalars.
static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear,
                             InstCombiner &IC, Instruction *CxtI) {
  // Default: the promoted value needs no extra masking.
  BitsToClear = 0;
  // Constants can always be re-materialized at the wider width.
  if (isa<Constant>(V))
    return true;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // If the input is a truncate from the destination type, we can trivially
  // eliminate it.
  if (isa<TruncInst>(I) && I->getOperand(0)->getType() == Ty)
    return true;

  // We can't extend or shrink something that has multiple uses: doing so would
  // require duplicating the instruction in general, which isn't profitable.
  if (!I->hasOneUse()) return false;

  unsigned Opc = I->getOpcode(), Tmp;
  switch (Opc) {
  case Instruction::ZExt:  // zext(zext(x)) -> zext(x).
  case Instruction::SExt:  // zext(sext(x)) -> sext(x).
  case Instruction::Trunc: // zext(trunc(x)) -> trunc(x) or zext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    // Both operands must themselves be promotable.  Note: the left operand's
    // BitsToClear lands directly in the caller's out-parameter; the right
    // operand's count goes into Tmp.
    if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI))
      return false;
    // These can all be promoted if neither operand has 'bits to clear'.
    if (BitsToClear == 0 && Tmp == 0)
      return true;

    // If the operation is an AND/OR/XOR and the bits to clear are zero in the
    // other side, BitsToClear is ok.
    if (Tmp == 0 &&
        (Opc == Instruction::And || Opc == Instruction::Or ||
         Opc == Instruction::Xor)) {
      // We use MaskedValueIsZero here for generality, but the case we care
      // about the most is constant RHS.
      unsigned VSize = V->getType()->getScalarSizeInBits();
      if (IC.MaskedValueIsZero(I->getOperand(1),
                               APInt::getHighBitsSet(VSize, BitsToClear),
                               0, CxtI))
        return true;
    }

    // Otherwise, we don't know how to analyze this BitsToClear case yet.
    return false;

  case Instruction::Shl:
    // We can promote shl(x, cst) if we can promote x.  Since shl overwrites the
    // upper bits we can reduce BitsToClear by the shift amount.
    if (ConstantInt *Amt = dyn_cast<ConstantInt>(I->getOperand(1))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      uint64_t ShiftAmt = Amt->getZExtValue();
      // Shifting left fills the low end with zeros, so fewer high bits of the
      // operand survive; clamp at zero rather than underflowing.
      BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
      return true;
    }
    return false;
  case Instruction::LShr:
    // We can promote lshr(x, cst) if we can promote x.  This requires the
    // ultimate 'and' to clear out the high zero bits we're clearing out though.
    if (ConstantInt *Amt = dyn_cast<ConstantInt>(I->getOperand(1))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      // A right shift moves more garbage into the high bits; cap the count at
      // the source width.
      BitsToClear += Amt->getZExtValue();
      if (BitsToClear > V->getType()->getScalarSizeInBits())
        BitsToClear = V->getType()->getScalarSizeInBits();
      return true;
    }
    // Cannot promote variable LSHR.
    return false;
  case Instruction::Select:
    if (!canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(2), Ty, BitsToClear, IC, CxtI) ||
        // TODO: If important, we could handle the case when the BitsToClear are
        // known zero in the disagreeing side.
        Tmp != BitsToClear)
      return false;
    return true;

  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    if (!canEvaluateZExtd(PN->getIncomingValue(0), Ty, BitsToClear, IC, CxtI))
      return false;
    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!canEvaluateZExtd(PN->getIncomingValue(i), Ty, Tmp, IC, CxtI) ||
          // TODO: If important, we could handle the case when the BitsToClear
          // are known zero in the disagreeing input.
          Tmp != BitsToClear)
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    return false;
  }
}
00831 
/// Simplify a zext instruction: try to evaluate the source expression directly
/// in the destination type, fold zext-of-trunc into masks, and turn
/// zext-of-icmp into bit manipulation.  Returns a replacement instruction or
/// null if no transform applies.
Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
  // If this zero extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this zext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  // If one of the common cast transforms will work, do it.
  if (Instruction *Result = commonCastTransforms(CI))
    return Result;

  // See if we can simplify any instructions used by the input whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(CI))
    return &CI;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // Attempt to extend the entire input expression tree to the destination
  // type.   Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
  unsigned BitsToClear;
  if ((DestTy->isVectorTy() || ShouldChangeType(SrcTy, DestTy)) &&
      canEvaluateZExtd(Src, DestTy, BitsToClear, *this, &CI)) {
    assert(BitsToClear < SrcTy->getScalarSizeInBits() &&
           "Unreasonable BitsToClear");

    // Okay, we can transform this!  Insert the new expression now.
    DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
          " to avoid zero extend: " << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);

    // canEvaluateZExtd told us the top BitsToClear bits of the re-evaluated
    // expression are garbage; only SrcBitsKept low bits are meaningful.
    uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits()-BitsToClear;
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with zeros, just replace this
    // cast with the result.
    if (MaskedValueIsZero(Res,
                          APInt::getHighBitsSet(DestBitSize,
                                                DestBitSize-SrcBitsKept),
                             0, &CI))
      return ReplaceInstUsesWith(CI, Res);

    // We need to emit an AND to clear the high bits.
    Constant *C = ConstantInt::get(Res->getType(),
                               APInt::getLowBitsSet(DestBitSize, SrcBitsKept));
    return BinaryOperator::CreateAnd(Res, C);
  }

  // If this is a TRUNC followed by a ZEXT then we are dealing with integral
  // types and if the sizes are just right we can convert this into a logical
  // 'and' which will be much cheaper than the pair of casts.
  if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) {   // A->B->C cast
    // TODO: Subsume this into EvaluateInDifferentType.

    // Get the sizes of the types involved.  We know that the intermediate type
    // will be smaller than A or C, but don't know the relation between A and C.
    Value *A = CSrc->getOperand(0);
    unsigned SrcSize = A->getType()->getScalarSizeInBits();
    unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
    unsigned DstSize = CI.getType()->getScalarSizeInBits();
    // If we're actually extending zero bits, then if
    // SrcSize <  DstSize: zext(a & mask)
    // SrcSize == DstSize: a & mask
    // SrcSize  > DstSize: trunc(a) & mask
    if (SrcSize < DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
      Value *And = Builder->CreateAnd(A, AndConst, CSrc->getName()+".mask");
      return new ZExtInst(And, CI.getType());
    }

    if (SrcSize == DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
                                                           AndValue));
    }
    if (SrcSize > DstSize) {
      Value *Trunc = Builder->CreateTrunc(A, CI.getType());
      APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
      return BinaryOperator::CreateAnd(Trunc,
                                       ConstantInt::get(Trunc->getType(),
                                                        AndValue));
    }
  }

  // zext of a comparison: turn the icmp itself into bit arithmetic.
  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
    return transformZExtICmp(ICI, CI);

  BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src);
  if (SrcI && SrcI->getOpcode() == Instruction::Or) {
    // zext (or icmp, icmp) --> or (zext icmp), (zext icmp) if at least one
    // of the (zext icmp) will be transformed.
    ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0));
    ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1));
    if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() &&
        (transformZExtICmp(LHS, CI, false) ||
         transformZExtICmp(RHS, CI, false))) {
      Value *LCast = Builder->CreateZExt(LHS, CI.getType(), LHS->getName());
      Value *RCast = Builder->CreateZExt(RHS, CI.getType(), RHS->getName());
      return BinaryOperator::Create(Instruction::Or, LCast, RCast);
    }
  }

  // zext(trunc(X) & C) -> (X & zext(C)).
  Constant *C;
  Value *X;
  if (SrcI &&
      match(SrcI, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Constant(C)))) &&
      X->getType() == CI.getType())
    return BinaryOperator::CreateAnd(X, ConstantExpr::getZExt(C, CI.getType()));

  // zext((trunc(X) & C) ^ C) -> ((X & zext(C)) ^ zext(C)).
  Value *And;
  if (SrcI && match(SrcI, m_OneUse(m_Xor(m_Value(And), m_Constant(C)))) &&
      match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) &&
      X->getType() == CI.getType()) {
    Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
    return BinaryOperator::CreateXor(Builder->CreateAnd(X, ZC), ZC);
  }

  // zext (xor i1 X, true) to i32  --> xor (zext i1 X to i32), 1
  // Skipped when X is a single-use compare: transformZExtICmp handles that
  // shape better.
  if (SrcI && SrcI->hasOneUse() &&
      SrcI->getType()->getScalarType()->isIntegerTy(1) &&
      match(SrcI, m_Not(m_Value(X))) && (!X->hasOneUse() || !isa<CmpInst>(X))) {
    Value *New = Builder->CreateZExt(X, CI.getType());
    return BinaryOperator::CreateXor(New, ConstantInt::get(CI.getType(), 1));
  }

  return nullptr;
}
00965 
00966 /// Transform (sext icmp) to bitwise / integer operations to eliminate the icmp.
Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
  Value *Op0 = ICI->getOperand(0), *Op1 = ICI->getOperand(1);
  ICmpInst::Predicate Pred = ICI->getPredicate();

  // Don't bother if Op1 isn't of vector or integer type.
  if (!Op1->getType()->isIntOrIntVectorTy())
    return nullptr;

  if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
    // (x <s  0) ? -1 : 0 -> ashr x, 31        -> all ones if negative
    // (x >s -1) ? -1 : 0 -> not (ashr x, 31)  -> all ones if positive
    if ((Pred == ICmpInst::ICMP_SLT && Op1C->isNullValue()) ||
        (Pred == ICmpInst::ICMP_SGT && Op1C->isAllOnesValue())) {

      // Arithmetic shift by width-1 smears the sign bit across the value.
      Value *Sh = ConstantInt::get(Op0->getType(),
                                   Op0->getType()->getScalarSizeInBits()-1);
      Value *In = Builder->CreateAShr(Op0, Sh, Op0->getName()+".lobit");
      if (In->getType() != CI.getType())
        In = Builder->CreateIntCast(In, CI.getType(), true/*SExt*/);

      if (Pred == ICmpInst::ICMP_SGT)
        In = Builder->CreateNot(In, In->getName()+".not");
      return ReplaceInstUsesWith(CI, In);
    }
  }

  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
    // If we know that only one bit of the LHS of the icmp can be set and we
    // have an equality comparison with zero or a power of 2, we can transform
    // the icmp and sext into bitwise/integer operations.
    if (ICI->hasOneUse() &&
        ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
      unsigned BitWidth = Op1C->getType()->getBitWidth();
      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
      computeKnownBits(Op0, KnownZero, KnownOne, 0, &CI);

      // ~KnownZero is the set of bits that could possibly be set; a power of
      // two here means exactly one bit of Op0 is not known to be zero.
      APInt KnownZeroMask(~KnownZero);
      if (KnownZeroMask.isPowerOf2()) {
        Value *In = ICI->getOperand(0);

        // If the icmp tests for a known zero bit we can constant fold it.
        if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
          Value *V = Pred == ICmpInst::ICMP_NE ?
                       ConstantInt::getAllOnesValue(CI.getType()) :
                       ConstantInt::getNullValue(CI.getType());
          return ReplaceInstUsesWith(CI, V);
        }

        if (!Op1C->isZero() == (Pred == ICmpInst::ICMP_NE)) {
          // sext ((x & 2^n) == 0)   -> (x >> n) - 1
          // sext ((x & 2^n) != 2^n) -> (x >> n) - 1
          unsigned ShiftAmt = KnownZeroMask.countTrailingZeros();
          // Perform a right shift to place the desired bit in the LSB.
          if (ShiftAmt)
            In = Builder->CreateLShr(In,
                                     ConstantInt::get(In->getType(), ShiftAmt));

          // At this point "In" is either 1 or 0. Subtract 1 to turn
          // {1, 0} -> {0, -1}.
          In = Builder->CreateAdd(In,
                                  ConstantInt::getAllOnesValue(In->getType()),
                                  "sext");
        } else {
          // sext ((x & 2^n) != 0)   -> (x << bitwidth-n) a>> bitwidth-1
          // sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1
          unsigned ShiftAmt = KnownZeroMask.countLeadingZeros();
          // Perform a left shift to place the desired bit in the MSB.
          if (ShiftAmt)
            In = Builder->CreateShl(In,
                                    ConstantInt::get(In->getType(), ShiftAmt));

          // Distribute the bit over the whole bit width.
          In = Builder->CreateAShr(In, ConstantInt::get(In->getType(),
                                                        BitWidth - 1), "sext");
        }

        if (CI.getType() == In->getType())
          return ReplaceInstUsesWith(CI, In);
        return CastInst::CreateIntegerCast(In, CI.getType(), true/*SExt*/);
      }
    }
  }

  return nullptr;
}
01052 
01053 /// Return true if we can take the specified value and return it as type Ty
01054 /// without inserting any new casts and without changing the value of the common
/// low bits.  This is used by code that tries to promote integer operations to
/// a wider type when doing so will allow us to eliminate the extension.
01057 ///
01058 /// This function works on both vectors and scalars.
01059 ///
01060 static bool canEvaluateSExtd(Value *V, Type *Ty) {
01061   assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
01062          "Can't sign extend type to a smaller type");
01063   // If this is a constant, it can be trivially promoted.
01064   if (isa<Constant>(V))
01065     return true;
01066 
01067   Instruction *I = dyn_cast<Instruction>(V);
01068   if (!I) return false;
01069 
01070   // If this is a truncate from the dest type, we can trivially eliminate it.
01071   if (isa<TruncInst>(I) && I->getOperand(0)->getType() == Ty)
01072     return true;
01073 
01074   // We can't extend or shrink something that has multiple uses: doing so would
01075   // require duplicating the instruction in general, which isn't profitable.
01076   if (!I->hasOneUse()) return false;
01077 
01078   switch (I->getOpcode()) {
01079   case Instruction::SExt:  // sext(sext(x)) -> sext(x)
01080   case Instruction::ZExt:  // sext(zext(x)) -> zext(x)
01081   case Instruction::Trunc: // sext(trunc(x)) -> trunc(x) or sext(x)
01082     return true;
01083   case Instruction::And:
01084   case Instruction::Or:
01085   case Instruction::Xor:
01086   case Instruction::Add:
01087   case Instruction::Sub:
01088   case Instruction::Mul:
01089     // These operators can all arbitrarily be extended if their inputs can.
01090     return canEvaluateSExtd(I->getOperand(0), Ty) &&
01091            canEvaluateSExtd(I->getOperand(1), Ty);
01092 
01093   //case Instruction::Shl:   TODO
01094   //case Instruction::LShr:  TODO
01095 
01096   case Instruction::Select:
01097     return canEvaluateSExtd(I->getOperand(1), Ty) &&
01098            canEvaluateSExtd(I->getOperand(2), Ty);
01099 
01100   case Instruction::PHI: {
01101     // We can change a phi if we can change all operands.  Note that we never
01102     // get into trouble with cyclic PHIs here because we only consider
01103     // instructions with a single use.
01104     PHINode *PN = cast<PHINode>(I);
01105     for (Value *IncValue : PN->incoming_values())
01106       if (!canEvaluateSExtd(IncValue, Ty)) return false;
01107     return true;
01108   }
01109   default:
01110     // TODO: Can handle more cases here.
01111     break;
01112   }
01113 
01114   return false;
01115 }
01116 
/// Simplify a sext instruction: prefer zext when the sign bit is known zero,
/// try to evaluate the source directly in the wider type, and fold common
/// sext-of-trunc and sext-of-icmp shapes.  Returns a replacement instruction
/// or null if no transform applies.
Instruction *InstCombiner::visitSExt(SExtInst &CI) {
  // If this sign extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this sext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  // See if we can simplify any instructions used by the input whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(CI))
    return &CI;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // If we know that the value being extended is positive, we can use a zext
  // instead (zext is generally cheaper and easier to reason about).
  bool KnownZero, KnownOne;
  ComputeSignBit(Src, KnownZero, KnownOne, 0, &CI);
  if (KnownZero) {
    Value *ZExt = Builder->CreateZExt(Src, DestTy);
    return ReplaceInstUsesWith(CI, ZExt);
  }

  // Attempt to extend the entire input expression tree to the destination
  // type.   Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
  if ((DestTy->isVectorTy() || ShouldChangeType(SrcTy, DestTy)) &&
      canEvaluateSExtd(Src, DestTy)) {
    // Okay, we can transform this!  Insert the new expression now.
    DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
          " to avoid sign extend: " << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, true);
    assert(Res->getType() == DestTy);

    uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with sign bit, just replace this
    // cast with the result.
    if (ComputeNumSignBits(Res, 0, &CI) > DestBitSize - SrcBitSize)
      return ReplaceInstUsesWith(CI, Res);

    // We need to emit a shl + ashr to do the sign extend.
    Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
    return BinaryOperator::CreateAShr(Builder->CreateShl(Res, ShAmt, "sext"),
                                      ShAmt);
  }

  // If this input is a trunc from our destination, then turn sext(trunc(x))
  // into shifts.
  if (TruncInst *TI = dyn_cast<TruncInst>(Src))
    if (TI->hasOneUse() && TI->getOperand(0)->getType() == DestTy) {
      uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
      uint32_t DestBitSize = DestTy->getScalarSizeInBits();

      // We need to emit a shl + ashr to do the sign extend.
      Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
      Value *Res = Builder->CreateShl(TI->getOperand(0), ShAmt, "sext");
      return BinaryOperator::CreateAShr(Res, ShAmt);
    }

  // sext of a comparison: turn the icmp itself into bit arithmetic.
  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
    return transformSExtICmp(ICI, CI);

  // If the input is a shl/ashr pair of a same constant, then this is a sign
  // extension from a smaller value.  If we could trust arbitrary bitwidth
  // integers, we could turn this into a truncate to the smaller bit and then
  // use a sext for the whole extension.  Since we don't, look deeper and check
  // for a truncate.  If the source and dest are the same type, eliminate the
  // trunc and extend and just do shifts.  For example, turn:
  //   %a = trunc i32 %i to i8
  //   %b = shl i8 %a, 6
  //   %c = ashr i8 %b, 6
  //   %d = sext i8 %c to i32
  // into:
  //   %a = shl i32 %i, 30
  //   %d = ashr i32 %a, 30
  Value *A = nullptr;
  // TODO: Eventually this could be subsumed by EvaluateInDifferentType.
  ConstantInt *BA = nullptr, *CA = nullptr;
  if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_ConstantInt(BA)),
                        m_ConstantInt(CA))) &&
      BA == CA && A->getType() == CI.getType()) {
    unsigned MidSize = Src->getType()->getScalarSizeInBits();
    unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
    // Widen the shift amount to account for the extra bits of the dest type.
    unsigned ShAmt = CA->getZExtValue()+SrcDstSize-MidSize;
    Constant *ShAmtV = ConstantInt::get(CI.getType(), ShAmt);
    A = Builder->CreateShl(A, ShAmtV, CI.getName());
    return BinaryOperator::CreateAShr(A, ShAmtV);
  }

  return nullptr;
}
01214 
01215 
01216 /// Return a Constant* for the specified floating-point constant if it fits
01217 /// in the specified FP type without changing its value.
01218 static Constant *fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
01219   bool losesInfo;
01220   APFloat F = CFP->getValueAPF();
01221   (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
01222   if (!losesInfo)
01223     return ConstantFP::get(CFP->getContext(), F);
01224   return nullptr;
01225 }
01226 
01227 /// If this is a floating-point extension instruction, look
01228 /// through it until we get the source value.
01229 static Value *lookThroughFPExtensions(Value *V) {
01230   if (Instruction *I = dyn_cast<Instruction>(V))
01231     if (I->getOpcode() == Instruction::FPExt)
01232       return lookThroughFPExtensions(I->getOperand(0));
01233 
01234   // If this value is a constant, return the constant in the smallest FP type
01235   // that can accurately represent it.  This allows us to turn
01236   // (float)((double)X+2.0) into x+2.0f.
01237   if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
01238     if (CFP->getType() == Type::getPPC_FP128Ty(V->getContext()))
01239       return V;  // No constant folding of this.
01240     // See if the value can be truncated to half and then reextended.
01241     if (Value *V = fitsInFPType(CFP, APFloat::IEEEhalf))
01242       return V;
01243     // See if the value can be truncated to float and then reextended.
01244     if (Value *V = fitsInFPType(CFP, APFloat::IEEEsingle))
01245       return V;
01246     if (CFP->getType()->isDoubleTy())
01247       return V;  // Won't shrink.
01248     if (Value *V = fitsInFPType(CFP, APFloat::IEEEdouble))
01249       return V;
01250     // Don't try to shrink to various long double types.
01251   }
01252 
01253   return V;
01254 }
01255 
01256 Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
01257   if (Instruction *I = commonCastTransforms(CI))
01258     return I;
01259   // If we have fptrunc(OpI (fpextend x), (fpextend y)), we would like to
01260   // simplify this expression to avoid one or more of the trunc/extend
01261   // operations if we can do so without changing the numerical results.
01262   //
01263   // The exact manner in which the widths of the operands interact to limit
01264   // what we can and cannot do safely varies from operation to operation, and
01265   // is explained below in the various case statements.
01266   BinaryOperator *OpI = dyn_cast<BinaryOperator>(CI.getOperand(0));
01267   if (OpI && OpI->hasOneUse()) {
01268     Value *LHSOrig = lookThroughFPExtensions(OpI->getOperand(0));
01269     Value *RHSOrig = lookThroughFPExtensions(OpI->getOperand(1));
01270     unsigned OpWidth = OpI->getType()->getFPMantissaWidth();
01271     unsigned LHSWidth = LHSOrig->getType()->getFPMantissaWidth();
01272     unsigned RHSWidth = RHSOrig->getType()->getFPMantissaWidth();
01273     unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
01274     unsigned DstWidth = CI.getType()->getFPMantissaWidth();
01275     switch (OpI->getOpcode()) {
01276       default: break;
01277       case Instruction::FAdd:
01278       case Instruction::FSub:
01279         // For addition and subtraction, the infinitely precise result can
01280         // essentially be arbitrarily wide; proving that double rounding
01281         // will not occur because the result of OpI is exact (as we will for
01282         // FMul, for example) is hopeless.  However, we *can* nonetheless
01283         // frequently know that double rounding cannot occur (or that it is
01284         // innocuous) by taking advantage of the specific structure of
01285         // infinitely-precise results that admit double rounding.
01286         //
01287         // Specifically, if OpWidth >= 2*DstWdith+1 and DstWidth is sufficient
01288         // to represent both sources, we can guarantee that the double
01289         // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
01290         // "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
01291         // for proof of this fact).
01292         //
01293         // Note: Figueroa does not consider the case where DstFormat !=
01294         // SrcFormat.  It's possible (likely even!) that this analysis
01295         // could be tightened for those cases, but they are rare (the main
01296         // case of interest here is (float)((double)float + float)).
01297         if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
01298           if (LHSOrig->getType() != CI.getType())
01299             LHSOrig = Builder->CreateFPExt(LHSOrig, CI.getType());
01300           if (RHSOrig->getType() != CI.getType())
01301             RHSOrig = Builder->CreateFPExt(RHSOrig, CI.getType());
01302           Instruction *RI =
01303             BinaryOperator::Create(OpI->getOpcode(), LHSOrig, RHSOrig);
01304           RI->copyFastMathFlags(OpI);
01305           return RI;
01306         }
01307         break;
01308       case Instruction::FMul:
01309         // For multiplication, the infinitely precise result has at most
01310         // LHSWidth + RHSWidth significant bits; if OpWidth is sufficient
01311         // that such a value can be exactly represented, then no double
01312         // rounding can possibly occur; we can safely perform the operation
01313         // in the destination format if it can represent both sources.
01314         if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
01315           if (LHSOrig->getType() != CI.getType())
01316             LHSOrig = Builder->CreateFPExt(LHSOrig, CI.getType());
01317           if (RHSOrig->getType() != CI.getType())
01318             RHSOrig = Builder->CreateFPExt(RHSOrig, CI.getType());
01319           Instruction *RI =
01320             BinaryOperator::CreateFMul(LHSOrig, RHSOrig);
01321           RI->copyFastMathFlags(OpI);
01322           return RI;
01323         }
01324         break;
01325       case Instruction::FDiv:
        // For division, we again use the bound from Figueroa's
        // dissertation.  It is entirely possible that this bound can be
01328         // tightened in the unbalanced operand case by an analysis based on
01329         // the diophantine rational approximation bound, but the well-known
01330         // condition used here is a good conservative first pass.
01331         // TODO: Tighten bound via rigorous analysis of the unbalanced case.
01332         if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
01333           if (LHSOrig->getType() != CI.getType())
01334             LHSOrig = Builder->CreateFPExt(LHSOrig, CI.getType());
01335           if (RHSOrig->getType() != CI.getType())
01336             RHSOrig = Builder->CreateFPExt(RHSOrig, CI.getType());
01337           Instruction *RI =
01338             BinaryOperator::CreateFDiv(LHSOrig, RHSOrig);
01339           RI->copyFastMathFlags(OpI);
01340           return RI;
01341         }
01342         break;
01343       case Instruction::FRem:
01344         // Remainder is straightforward.  Remainder is always exact, so the
01345         // type of OpI doesn't enter into things at all.  We simply evaluate
01346         // in whichever source type is larger, then convert to the
01347         // destination type.
01348         if (SrcWidth == OpWidth)
01349           break;
01350         if (LHSWidth < SrcWidth)
01351           LHSOrig = Builder->CreateFPExt(LHSOrig, RHSOrig->getType());
01352         else if (RHSWidth <= SrcWidth)
01353           RHSOrig = Builder->CreateFPExt(RHSOrig, LHSOrig->getType());
01354         if (LHSOrig != OpI->getOperand(0) || RHSOrig != OpI->getOperand(1)) {
01355           Value *ExactResult = Builder->CreateFRem(LHSOrig, RHSOrig);
01356           if (Instruction *RI = dyn_cast<Instruction>(ExactResult))
01357             RI->copyFastMathFlags(OpI);
01358           return CastInst::CreateFPCast(ExactResult, CI.getType());
01359         }
01360     }
01361 
01362     // (fptrunc (fneg x)) -> (fneg (fptrunc x))
01363     if (BinaryOperator::isFNeg(OpI)) {
01364       Value *InnerTrunc = Builder->CreateFPTrunc(OpI->getOperand(1),
01365                                                  CI.getType());
01366       Instruction *RI = BinaryOperator::CreateFNeg(InnerTrunc);
01367       RI->copyFastMathFlags(OpI);
01368       return RI;
01369     }
01370   }
01371 
01372   // (fptrunc (select cond, R1, Cst)) -->
01373   // (select cond, (fptrunc R1), (fptrunc Cst))
01374   //
01375   //  - but only if this isn't part of a min/max operation, else we'll
01376   // ruin min/max canonical form which is to have the select and
01377   // compare's operands be of the same type with no casts to look through.
01378   Value *LHS, *RHS;
01379   SelectInst *SI = dyn_cast<SelectInst>(CI.getOperand(0));
01380   if (SI &&
01381       (isa<ConstantFP>(SI->getOperand(1)) ||
01382        isa<ConstantFP>(SI->getOperand(2))) &&
01383       matchSelectPattern(SI, LHS, RHS).Flavor == SPF_UNKNOWN) {
01384     Value *LHSTrunc = Builder->CreateFPTrunc(SI->getOperand(1),
01385                                              CI.getType());
01386     Value *RHSTrunc = Builder->CreateFPTrunc(SI->getOperand(2),
01387                                              CI.getType());
01388     return SelectInst::Create(SI->getOperand(0), LHSTrunc, RHSTrunc);
01389   }
01390 
01391   IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI.getOperand(0));
01392   if (II) {
01393     switch (II->getIntrinsicID()) {
01394       default: break;
01395       case Intrinsic::fabs: {
01396         // (fptrunc (fabs x)) -> (fabs (fptrunc x))
01397         Value *InnerTrunc = Builder->CreateFPTrunc(II->getArgOperand(0),
01398                                                    CI.getType());
01399         Type *IntrinsicType[] = { CI.getType() };
01400         Function *Overload = Intrinsic::getDeclaration(
01401             CI.getModule(), II->getIntrinsicID(), IntrinsicType);
01402 
01403         Value *Args[] = { InnerTrunc };
01404         return CallInst::Create(Overload, Args, II->getName());
01405       }
01406     }
01407   }
01408 
01409   return nullptr;
01410 }
01411 
Instruction *InstCombiner::visitFPExt(CastInst &CI) {
  // No fpext-specific folds here; rely on the generic cast transforms
  // (constant folding, eliminating casts of casts, etc.).
  return commonCastTransforms(CI);
}
01415 
01416 // fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X)
01417 // This is safe if the intermediate type has enough bits in its mantissa to
01418 // accurately represent all values of X.  For example, this won't work with
01419 // i64 -> float -> i64.
01420 Instruction *InstCombiner::FoldItoFPtoI(Instruction &FI) {
01421   if (!isa<UIToFPInst>(FI.getOperand(0)) && !isa<SIToFPInst>(FI.getOperand(0)))
01422     return nullptr;
01423   Instruction *OpI = cast<Instruction>(FI.getOperand(0));
01424 
01425   Value *SrcI = OpI->getOperand(0);
01426   Type *FITy = FI.getType();
01427   Type *OpITy = OpI->getType();
01428   Type *SrcTy = SrcI->getType();
01429   bool IsInputSigned = isa<SIToFPInst>(OpI);
01430   bool IsOutputSigned = isa<FPToSIInst>(FI);
01431 
01432   // We can safely assume the conversion won't overflow the output range,
01433   // because (for example) (uint8_t)18293.f is undefined behavior.
01434 
01435   // Since we can assume the conversion won't overflow, our decision as to
01436   // whether the input will fit in the float should depend on the minimum
01437   // of the input range and output range.
01438 
01439   // This means this is also safe for a signed input and unsigned output, since
01440   // a negative input would lead to undefined behavior.
01441   int InputSize = (int)SrcTy->getScalarSizeInBits() - IsInputSigned;
01442   int OutputSize = (int)FITy->getScalarSizeInBits() - IsOutputSigned;
01443   int ActualSize = std::min(InputSize, OutputSize);
01444 
01445   if (ActualSize <= OpITy->getFPMantissaWidth()) {
01446     if (FITy->getScalarSizeInBits() > SrcTy->getScalarSizeInBits()) {
01447       if (IsInputSigned && IsOutputSigned)
01448         return new SExtInst(SrcI, FITy);
01449       return new ZExtInst(SrcI, FITy);
01450     }
01451     if (FITy->getScalarSizeInBits() < SrcTy->getScalarSizeInBits())
01452       return new TruncInst(SrcI, FITy);
01453     if (SrcTy == FITy)
01454       return ReplaceInstUsesWith(FI, SrcI);
01455     return new BitCastInst(SrcI, FITy);
01456   }
01457   return nullptr;
01458 }
01459 
01460 Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
01461   Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
01462   if (!OpI)
01463     return commonCastTransforms(FI);
01464 
01465   if (Instruction *I = FoldItoFPtoI(FI))
01466     return I;
01467 
01468   return commonCastTransforms(FI);
01469 }
01470 
01471 Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
01472   Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
01473   if (!OpI)
01474     return commonCastTransforms(FI);
01475 
01476   if (Instruction *I = FoldItoFPtoI(FI))
01477     return I;
01478 
01479   return commonCastTransforms(FI);
01480 }
01481 
Instruction *InstCombiner::visitUIToFP(CastInst &CI) {
  // No uitofp-specific folds here; rely on the generic cast transforms.
  return commonCastTransforms(CI);
}
01485 
Instruction *InstCombiner::visitSIToFP(CastInst &CI) {
  // No sitofp-specific folds here; rely on the generic cast transforms.
  return commonCastTransforms(CI);
}
01489 
01490 Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
01491   // If the source integer type is not the intptr_t type for this target, do a
01492   // trunc or zext to the intptr_t type, then inttoptr of it.  This allows the
01493   // cast to be exposed to other transforms.
01494   unsigned AS = CI.getAddressSpace();
01495   if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
01496       DL.getPointerSizeInBits(AS)) {
01497     Type *Ty = DL.getIntPtrType(CI.getContext(), AS);
01498     if (CI.getType()->isVectorTy()) // Handle vectors of pointers.
01499       Ty = VectorType::get(Ty, CI.getType()->getVectorNumElements());
01500 
01501     Value *P = Builder->CreateZExtOrTrunc(CI.getOperand(0), Ty);
01502     return new IntToPtrInst(P, CI.getType());
01503   }
01504 
01505   if (Instruction *I = commonCastTransforms(CI))
01506     return I;
01507 
01508   return nullptr;
01509 }
01510 
01511 /// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
01512 Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
01513   Value *Src = CI.getOperand(0);
01514 
01515   if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
01516     // If casting the result of a getelementptr instruction with no offset, turn
01517     // this into a cast of the original pointer!
01518     if (GEP->hasAllZeroIndices() &&
01519         // If CI is an addrspacecast and GEP changes the poiner type, merging
01520         // GEP into CI would undo canonicalizing addrspacecast with different
01521         // pointer types, causing infinite loops.
01522         (!isa<AddrSpaceCastInst>(CI) ||
01523           GEP->getType() == GEP->getPointerOperand()->getType())) {
01524       // Changing the cast operand is usually not a good idea but it is safe
01525       // here because the pointer operand is being replaced with another
01526       // pointer operand so the opcode doesn't need to change.
01527       Worklist.Add(GEP);
01528       CI.setOperand(0, GEP->getOperand(0));
01529       return &CI;
01530     }
01531   }
01532 
01533   return commonCastTransforms(CI);
01534 }
01535 
01536 Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
01537   // If the destination integer type is not the intptr_t type for this target,
01538   // do a ptrtoint to intptr_t then do a trunc or zext.  This allows the cast
01539   // to be exposed to other transforms.
01540 
01541   Type *Ty = CI.getType();
01542   unsigned AS = CI.getPointerAddressSpace();
01543 
01544   if (Ty->getScalarSizeInBits() == DL.getPointerSizeInBits(AS))
01545     return commonPointerCastTransforms(CI);
01546 
01547   Type *PtrTy = DL.getIntPtrType(CI.getContext(), AS);
01548   if (Ty->isVectorTy()) // Handle vectors of pointers.
01549     PtrTy = VectorType::get(PtrTy, Ty->getVectorNumElements());
01550 
01551   Value *P = Builder->CreatePtrToInt(CI.getOperand(0), PtrTy);
01552   return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false);
01553 }
01554 
01555 /// This input value (which is known to have vector type) is being zero extended
01556 /// or truncated to the specified vector type.
01557 /// Try to replace it with a shuffle (and vector/vector bitcast) if possible.
01558 ///
01559 /// The source and destination vector types may have different element types.
01560 static Instruction *optimizeVectorResize(Value *InVal, VectorType *DestTy,
01561                                          InstCombiner &IC) {
01562   // We can only do this optimization if the output is a multiple of the input
01563   // element size, or the input is a multiple of the output element size.
01564   // Convert the input type to have the same element type as the output.
01565   VectorType *SrcTy = cast<VectorType>(InVal->getType());
01566 
01567   if (SrcTy->getElementType() != DestTy->getElementType()) {
01568     // The input types don't need to be identical, but for now they must be the
01569     // same size.  There is no specific reason we couldn't handle things like
01570     // <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't gotten
01571     // there yet.
01572     if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
01573         DestTy->getElementType()->getPrimitiveSizeInBits())
01574       return nullptr;
01575 
01576     SrcTy = VectorType::get(DestTy->getElementType(), SrcTy->getNumElements());
01577     InVal = IC.Builder->CreateBitCast(InVal, SrcTy);
01578   }
01579 
01580   // Now that the element types match, get the shuffle mask and RHS of the
01581   // shuffle to use, which depends on whether we're increasing or decreasing the
01582   // size of the input.
01583   SmallVector<uint32_t, 16> ShuffleMask;
01584   Value *V2;
01585 
01586   if (SrcTy->getNumElements() > DestTy->getNumElements()) {
01587     // If we're shrinking the number of elements, just shuffle in the low
01588     // elements from the input and use undef as the second shuffle input.
01589     V2 = UndefValue::get(SrcTy);
01590     for (unsigned i = 0, e = DestTy->getNumElements(); i != e; ++i)
01591       ShuffleMask.push_back(i);
01592 
01593   } else {
01594     // If we're increasing the number of elements, shuffle in all of the
01595     // elements from InVal and fill the rest of the result elements with zeros
01596     // from a constant zero.
01597     V2 = Constant::getNullValue(SrcTy);
01598     unsigned SrcElts = SrcTy->getNumElements();
01599     for (unsigned i = 0, e = SrcElts; i != e; ++i)
01600       ShuffleMask.push_back(i);
01601 
01602     // The excess elements reference the first element of the zero input.
01603     for (unsigned i = 0, e = DestTy->getNumElements()-SrcElts; i != e; ++i)
01604       ShuffleMask.push_back(SrcElts);
01605   }
01606 
01607   return new ShuffleVectorInst(InVal, V2,
01608                                ConstantDataVector::get(V2->getContext(),
01609                                                        ShuffleMask));
01610 }
01611 
01612 static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) {
01613   return Value % Ty->getPrimitiveSizeInBits() == 0;
01614 }
01615 
01616 static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) {
01617   return Value / Ty->getPrimitiveSizeInBits();
01618 }
01619 
/// V is a value which is inserted into a vector of VecEltTy.
/// Look through the value to see if we can decompose it into
/// insertions into the vector.  See the example in the comment for
/// OptimizeIntegerToVectorInsertions for the pattern this handles.
/// The type of V is always a non-zero multiple of VecEltTy's size.
/// Shift is the number of bits between the lsb of V and the lsb of
/// the vector.
///
/// This returns false if the pattern can't be matched or true if it can,
/// filling in Elements with the elements found here.
static bool collectInsertionElements(Value *V, unsigned Shift,
                                     SmallVectorImpl<Value *> &Elements,
                                     Type *VecEltTy, bool isBigEndian) {
  assert(isMultipleOfTypeSize(Shift, VecEltTy) &&
         "Shift should be a multiple of the element type size");

  // Undef values never contribute useful bits to the result.
  if (isa<UndefValue>(V)) return true;

  // If we got down to a value of the right type, we win, try inserting into the
  // right element.
  if (V->getType() == VecEltTy) {
    // Inserting null doesn't actually insert any elements.
    if (Constant *C = dyn_cast<Constant>(V))
      if (C->isNullValue())
        return true;

    // Map the bit offset to an element slot; big-endian targets number the
    // elements from the opposite end of the vector.
    unsigned ElementIndex = getTypeSizeIndex(Shift, VecEltTy);
    if (isBigEndian)
      ElementIndex = Elements.size() - ElementIndex - 1;

    // Fail if multiple elements are inserted into this slot.
    if (Elements[ElementIndex])
      return false;

    Elements[ElementIndex] = V;
    return true;
  }

  if (Constant *C = dyn_cast<Constant>(V)) {
    // Figure out the # elements this provides, and bitcast it or slice it up
    // as required.
    unsigned NumElts = getTypeSizeIndex(C->getType()->getPrimitiveSizeInBits(),
                                        VecEltTy);
    // If the constant is the size of a vector element, we just need to bitcast
    // it to the right type so it gets properly inserted.
    if (NumElts == 1)
      return collectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
                                      Shift, Elements, VecEltTy, isBigEndian);

    // Okay, this is a constant that covers multiple elements.  Slice it up into
    // pieces and insert each element-sized piece into the vector.
    if (!isa<IntegerType>(C->getType()))
      C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(),
                                       C->getType()->getPrimitiveSizeInBits()));
    unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
    Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);

    for (unsigned i = 0; i != NumElts; ++i) {
      // Extract bits [ShiftI, ShiftI+ElementSize) of the constant via
      // lshr+trunc, then recurse on the resulting element-sized piece.
      unsigned ShiftI = Shift+i*ElementSize;
      Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
                                                                  ShiftI));
      Piece = ConstantExpr::getTrunc(Piece, ElementIntTy);
      if (!collectInsertionElements(Piece, ShiftI, Elements, VecEltTy,
                                    isBigEndian))
        return false;
    }
    return true;
  }

  // Only look through single-use instructions: tearing apart a value that has
  // other users would duplicate work without removing the original.
  if (!V->hasOneUse()) return false;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;
  switch (I->getOpcode()) {
  default: return false; // Unhandled case.
  case Instruction::BitCast:
    // A bitcast moves no bits; look straight through it.
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian);
  case Instruction::ZExt:
    // A zext contributes only its low bits; they must still cover a whole
    // number of elements for the decomposition to hold.
    if (!isMultipleOfTypeSize(
                          I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
                              VecEltTy))
      return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian);
  case Instruction::Or:
    // 'or' merges two partial values; both sides must decompose cleanly.
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian) &&
           collectInsertionElements(I->getOperand(1), Shift, Elements, VecEltTy,
                                    isBigEndian);
  case Instruction::Shl: {
    // Must be shifting by a constant that is a multiple of the element size.
    ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
    if (!CI) return false;
    Shift += CI->getZExtValue();
    if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian);
  }

  }
}
01723 
01724 
01725 /// If the input is an 'or' instruction, we may be doing shifts and ors to
01726 /// assemble the elements of the vector manually.
01727 /// Try to rip the code out and replace it with insertelements.  This is to
01728 /// optimize code like this:
01729 ///
01730 ///    %tmp37 = bitcast float %inc to i32
01731 ///    %tmp38 = zext i32 %tmp37 to i64
01732 ///    %tmp31 = bitcast float %inc5 to i32
01733 ///    %tmp32 = zext i32 %tmp31 to i64
01734 ///    %tmp33 = shl i64 %tmp32, 32
01735 ///    %ins35 = or i64 %tmp33, %tmp38
01736 ///    %tmp43 = bitcast i64 %ins35 to <2 x float>
01737 ///
01738 /// Into two insertelements that do "buildvector{%inc, %inc5}".
01739 static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
01740                                                 InstCombiner &IC) {
01741   VectorType *DestVecTy = cast<VectorType>(CI.getType());
01742   Value *IntInput = CI.getOperand(0);
01743 
01744   SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
01745   if (!collectInsertionElements(IntInput, 0, Elements,
01746                                 DestVecTy->getElementType(),
01747                                 IC.getDataLayout().isBigEndian()))
01748     return nullptr;
01749 
01750   // If we succeeded, we know that all of the element are specified by Elements
01751   // or are zero if Elements has a null entry.  Recast this as a set of
01752   // insertions.
01753   Value *Result = Constant::getNullValue(CI.getType());
01754   for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
01755     if (!Elements[i]) continue;  // Unset element.
01756 
01757     Result = IC.Builder->CreateInsertElement(Result, Elements[i],
01758                                              IC.Builder->getInt32(i));
01759   }
01760 
01761   return Result;
01762 }
01763 
01764 /// Canonicalize scalar bitcasts of extracted elements into a bitcast of the
01765 /// vector followed by extract element. The backend tends to handle bitcasts of
01766 /// vectors better than bitcasts of scalars because vector registers are
01767 /// usually not type-specific like scalar integer or scalar floating-point.
01768 static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
01769                                               InstCombiner &IC,
01770                                               const DataLayout &DL) {
01771   // TODO: Create and use a pattern matcher for ExtractElementInst.
01772   auto *ExtElt = dyn_cast<ExtractElementInst>(BitCast.getOperand(0));
01773   if (!ExtElt || !ExtElt->hasOneUse())
01774     return nullptr;
01775 
01776   // The bitcast must be to a vectorizable type, otherwise we can't make a new
01777   // type to extract from.
01778   Type *DestType = BitCast.getType();
01779   if (!VectorType::isValidElementType(DestType))
01780     return nullptr;
01781 
01782   unsigned NumElts = ExtElt->getVectorOperandType()->getNumElements();
01783   auto *NewVecType = VectorType::get(DestType, NumElts);
01784   auto *NewBC = IC.Builder->CreateBitCast(ExtElt->getVectorOperand(),
01785                                           NewVecType, "bc");
01786   return ExtractElementInst::Create(NewBC, ExtElt->getIndexOperand());
01787 }
01788 
Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
  // If the operands are integer typed then apply the integer transforms,
  // otherwise just apply the common ones.
  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType();
  Type *DestTy = CI.getType();

  // Get rid of casts from one type to the same type. These are useless and can
  // be replaced by the operand.
  if (DestTy == Src->getType())
    return ReplaceInstUsesWith(CI, Src);

  if (PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
    PointerType *SrcPTy = cast<PointerType>(SrcTy);
    Type *DstElTy = DstPTy->getElementType();
    Type *SrcElTy = SrcPTy->getElementType();

    // If we are casting an alloca to a pointer to a type of the same
    // size, rewrite the allocation instruction to allocate the "right" type.
    // There is no need to modify malloc calls because it is their bitcast that
    // needs to be cleaned up.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
      if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
        return V;

    // If the source and destination are pointers, and this cast is equivalent
    // to a getelementptr X, 0, 0, 0...  turn it into the appropriate gep.
    // This can enhance SROA and other transforms that want type-safe pointers.
    // Walk down through leading struct/array members (index 0) of the source
    // element type, counting how many zero indices the gep would need.
    unsigned NumZeros = 0;
    while (SrcElTy != DstElTy &&
           isa<CompositeType>(SrcElTy) && !SrcElTy->isPointerTy() &&
           SrcElTy->getNumContainedTypes() /* not "{}" */) {
      SrcElTy = cast<CompositeType>(SrcElTy)->getTypeAtIndex(0U);
      ++NumZeros;
    }

    // If we found a path from the src to dest, create the getelementptr now.
    if (SrcElTy == DstElTy) {
      SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder->getInt32(0));
      return GetElementPtrInst::CreateInBounds(Src, Idxs);
    }
  }

  if (VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
    // bitcast of scalar to <1 x Ty>: build the vector with an insertelement
    // so the scalar cast can be simplified independently.
    if (DestVTy->getNumElements() == 1 && !SrcTy->isVectorTy()) {
      Value *Elem = Builder->CreateBitCast(Src, DestVTy->getElementType());
      return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
      // FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
    }

    if (isa<IntegerType>(SrcTy)) {
      // If this is a cast from an integer to vector, check to see if the input
      // is a trunc or zext of a bitcast from vector.  If so, we can replace all
      // the casts with a shuffle and (potentially) a bitcast.
      if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) {
        CastInst *SrcCast = cast<CastInst>(Src);
        if (BitCastInst *BCIn = dyn_cast<BitCastInst>(SrcCast->getOperand(0)))
          if (isa<VectorType>(BCIn->getOperand(0)->getType()))
            if (Instruction *I = optimizeVectorResize(BCIn->getOperand(0),
                                               cast<VectorType>(DestTy), *this))
              return I;
      }

      // If the input is an 'or' instruction, we may be doing shifts and ors to
      // assemble the elements of the vector manually.  Try to rip the code out
      // and replace it with insertelements.
      if (Value *V = optimizeIntegerToVectorInsertions(CI, *this))
        return ReplaceInstUsesWith(CI, V);
    }
  }

  if (VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
    if (SrcVTy->getNumElements() == 1) {
      // If our destination is not a vector, then make this a straight
      // scalar-scalar cast.
      if (!DestTy->isVectorTy()) {
        Value *Elem =
          Builder->CreateExtractElement(Src,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
        return CastInst::Create(Instruction::BitCast, Elem, DestTy);
      }

      // Otherwise, see if our source is an insert. If so, then use the scalar
      // component directly.
      if (InsertElementInst *IEI =
            dyn_cast<InsertElementInst>(CI.getOperand(0)))
        return CastInst::Create(Instruction::BitCast, IEI->getOperand(1),
                                DestTy);
    }
  }

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(Src)) {
    // Okay, we have (bitcast (shuffle ..)).  Check to see if this is
    // a bitcast to a vector with the same # elts.
    if (SVI->hasOneUse() && DestTy->isVectorTy() &&
        DestTy->getVectorNumElements() == SVI->getType()->getNumElements() &&
        SVI->getType()->getNumElements() ==
        SVI->getOperand(0)->getType()->getVectorNumElements()) {
      BitCastInst *Tmp;
      // If either of the operands is a cast from CI.getType(), then
      // evaluating the shuffle in the casted destination's type will allow
      // us to eliminate at least one cast.
      if (((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(0))) &&
           Tmp->getOperand(0)->getType() == DestTy) ||
          ((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(1))) &&
           Tmp->getOperand(0)->getType() == DestTy)) {
        Value *LHS = Builder->CreateBitCast(SVI->getOperand(0), DestTy);
        Value *RHS = Builder->CreateBitCast(SVI->getOperand(1), DestTy);
        // Return a new shuffle vector.  Use the same element ID's, as we
        // know the vector types match #elts.
        return new ShuffleVectorInst(LHS, RHS, SVI->getOperand(2));
      }
    }
  }

  // Prefer a vector bitcast + extractelement over a scalar bitcast of an
  // extracted element; backends handle vector bitcasts better.
  if (Instruction *I = canonicalizeBitCastExtElt(CI, *this, DL))
    return I;

  // Finally, fall back to the pointer-cast or generic cast transforms.
  if (SrcTy->isPointerTy())
    return commonPointerCastTransforms(CI);
  return commonCastTransforms(CI);
}
01912 
01913 Instruction *InstCombiner::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
01914   // If the destination pointer element type is not the same as the source's
01915   // first do a bitcast to the destination type, and then the addrspacecast.
01916   // This allows the cast to be exposed to other transforms.
01917   Value *Src = CI.getOperand(0);
01918   PointerType *SrcTy = cast<PointerType>(Src->getType()->getScalarType());
01919   PointerType *DestTy = cast<PointerType>(CI.getType()->getScalarType());
01920 
01921   Type *DestElemTy = DestTy->getElementType();
01922   if (SrcTy->getElementType() != DestElemTy) {
01923     Type *MidTy = PointerType::get(DestElemTy, SrcTy->getAddressSpace());
01924     if (VectorType *VT = dyn_cast<VectorType>(CI.getType())) {
01925       // Handle vectors of pointers.
01926       MidTy = VectorType::get(MidTy, VT->getNumElements());
01927     }
01928 
01929     Value *NewBitCast = Builder->CreateBitCast(Src, MidTy);
01930     return new AddrSpaceCastInst(NewBitCast, CI.getType());
01931   }
01932 
01933   return commonPointerCastTransforms(CI);
01934 }