LLVM API Documentation

SelectionDAG.cpp
00001 //===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
00002 //
00003 //                     The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 //
00010 // This implements the SelectionDAG class.
00011 //
00012 //===----------------------------------------------------------------------===//
00013 
00014 #include "llvm/CodeGen/SelectionDAG.h"
00015 #include "SDNodeDbgValue.h"
00016 #include "llvm/ADT/SetVector.h"
00017 #include "llvm/ADT/SmallPtrSet.h"
00018 #include "llvm/ADT/SmallSet.h"
00019 #include "llvm/ADT/SmallVector.h"
00020 #include "llvm/ADT/StringExtras.h"
00021 #include "llvm/Analysis/ValueTracking.h"
00022 #include "llvm/CodeGen/MachineBasicBlock.h"
00023 #include "llvm/CodeGen/MachineConstantPool.h"
00024 #include "llvm/CodeGen/MachineFrameInfo.h"
00025 #include "llvm/CodeGen/MachineModuleInfo.h"
00026 #include "llvm/IR/CallingConv.h"
00027 #include "llvm/IR/Constants.h"
00028 #include "llvm/IR/DataLayout.h"
00029 #include "llvm/IR/DebugInfo.h"
00030 #include "llvm/IR/DerivedTypes.h"
00031 #include "llvm/IR/Function.h"
00032 #include "llvm/IR/GlobalAlias.h"
00033 #include "llvm/IR/GlobalVariable.h"
00034 #include "llvm/IR/Intrinsics.h"
00035 #include "llvm/Support/CommandLine.h"
00036 #include "llvm/Support/Debug.h"
00037 #include "llvm/Support/ErrorHandling.h"
00038 #include "llvm/Support/ManagedStatic.h"
00039 #include "llvm/Support/MathExtras.h"
00040 #include "llvm/Support/Mutex.h"
00041 #include "llvm/Support/raw_ostream.h"
00042 #include "llvm/Target/TargetInstrInfo.h"
00043 #include "llvm/Target/TargetIntrinsicInfo.h"
00044 #include "llvm/Target/TargetLowering.h"
00045 #include "llvm/Target/TargetMachine.h"
00046 #include "llvm/Target/TargetOptions.h"
00047 #include "llvm/Target/TargetRegisterInfo.h"
00048 #include "llvm/Target/TargetSelectionDAGInfo.h"
00049 #include <algorithm>
00050 #include <cmath>
00051 using namespace llvm;
00052 
00053 /// makeVTList - Return an instance of the SDVTList struct initialized with the
00054 /// specified members.
00055 static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
00056   SDVTList Res = {VTs, NumVTs};
00057   return Res;
00058 }
00059 
00060 // Default null implementations of the callbacks.
00061 void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
00062 void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
00063 
00064 //===----------------------------------------------------------------------===//
00065 //                              ConstantFPSDNode Class
00066 //===----------------------------------------------------------------------===//
00067 
00068 /// isExactlyValue - We don't rely on operator== working on double values, as
00069 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
00070 /// As such, this method can be used to do an exact bit-for-bit comparison of
00071 /// two floating point values.
00072 bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
00073   return getValueAPF().bitwiseIsEqual(V);
00074 }
00075 
00076 bool ConstantFPSDNode::isValueValidForType(EVT VT,
00077                                            const APFloat& Val) {
00078   assert(VT.isFloatingPoint() && "Can only convert between FP types");
00079 
00080   // convert modifies in place, so make a copy.
00081   APFloat Val2 = APFloat(Val);
00082   bool losesInfo;
00083   (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
00084                       APFloat::rmNearestTiesToEven,
00085                       &losesInfo);
00086   return !losesInfo;
00087 }
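// For illustration: isValueValidForType(MVT::f32, APFloat(1.0E+50)) is false,
// since converting that value to single precision overflows and sets
// losesInfo, while isValueValidForType(MVT::f32, APFloat(0.5)) is true because
// 0.5 is exactly representable in every IEEE format.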
00088 
00089 //===----------------------------------------------------------------------===//
00090 //                              ISD Namespace
00091 //===----------------------------------------------------------------------===//
00092 
00093 /// isBuildVectorAllOnes - Return true if the specified node is a
00094 /// BUILD_VECTOR where all of the elements are ~0 or undef.
00095 bool ISD::isBuildVectorAllOnes(const SDNode *N) {
00096   // Look through a bit convert.
00097   if (N->getOpcode() == ISD::BITCAST)
00098     N = N->getOperand(0).getNode();
00099 
00100   if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
00101 
00102   unsigned i = 0, e = N->getNumOperands();
00103 
00104   // Skip over all of the undef values.
00105   while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
00106     ++i;
00107 
00108   // Do not accept an all-undef vector.
00109   if (i == e) return false;
00110 
00111   // Do not accept build_vectors that aren't all constants or which have non-~0
00112   // elements. We have to be a bit careful here, as the type of the constant
00113   // may not be the same as the type of the vector elements due to type
00114   // legalization (the elements are promoted to a legal type for the target and
00115   // a vector of a type may be legal when the base element type is not).
00116   // We only want to check enough bits to cover the vector elements, because
00117   // we care if the resultant vector is all ones, not whether the individual
00118   // constants are.
00119   SDValue NotZero = N->getOperand(i);
00120   unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
00121   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
00122     if (CN->getAPIntValue().countTrailingOnes() < EltSize)
00123       return false;
00124   } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
00125     if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
00126       return false;
00127   } else
00128     return false;
00129 
00130   // Okay, we have at least one ~0 value, check to see if the rest match or are
00131   // undefs. Even with the above element type twiddling, this should be OK, as
00132   // the same type legalization should have applied to all the elements.
00133   for (++i; i != e; ++i)
00134     if (N->getOperand(i) != NotZero &&
00135         N->getOperand(i).getOpcode() != ISD::UNDEF)
00136       return false;
00137   return true;
00138 }
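// Worked example of the element-size check above: for a build vector of i8
// elements whose constant operands were promoted to i32 during type
// legalization, an operand of 0x000000FF still counts as "all ones" because it
// has at least EltSize (8) trailing one bits, even though the i32 value itself
// is not ~0.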
00139 
00140 
00141 /// isBuildVectorAllZeros - Return true if the specified node is a
00142 /// BUILD_VECTOR where all of the elements are 0 or undef.
00143 bool ISD::isBuildVectorAllZeros(const SDNode *N) {
00144   // Look through a bit convert.
00145   if (N->getOpcode() == ISD::BITCAST)
00146     N = N->getOperand(0).getNode();
00147 
00148   if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
00149 
00150   unsigned i = 0, e = N->getNumOperands();
00151 
00152   // Skip over all of the undef values.
00153   while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
00154     ++i;
00155 
00156   // Do not accept an all-undef vector.
00157   if (i == e) return false;
00158 
00159   // Do not accept build_vectors that aren't all constants or which have non-0
00160   // elements.
00161   SDValue Zero = N->getOperand(i);
00162   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
00163     if (!CN->isNullValue())
00164       return false;
00165   } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
00166     if (!CFPN->getValueAPF().isPosZero())
00167       return false;
00168   } else
00169     return false;
00170 
00171   // Okay, we have at least one 0 value, check to see if the rest match or are
00172   // undefs.
00173   for (++i; i != e; ++i)
00174     if (N->getOperand(i) != Zero &&
00175         N->getOperand(i).getOpcode() != ISD::UNDEF)
00176       return false;
00177   return true;
00178 }
00179 
00180 /// \brief Return true if the specified node is a BUILD_VECTOR node of
00181 /// all ConstantSDNodes or undefs.
00182 bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
00183   if (N->getOpcode() != ISD::BUILD_VECTOR)
00184     return false;
00185 
00186   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
00187     SDValue Op = N->getOperand(i);
00188     if (Op.getOpcode() == ISD::UNDEF)
00189       continue;
00190     if (!isa<ConstantSDNode>(Op))
00191       return false;
00192   }
00193   return true;
00194 }
00195 
00196 /// isScalarToVector - Return true if the specified node is a
00197 /// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
00198 /// element is not an undef.
00199 bool ISD::isScalarToVector(const SDNode *N) {
00200   if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
00201     return true;
00202 
00203   if (N->getOpcode() != ISD::BUILD_VECTOR)
00204     return false;
00205   if (N->getOperand(0).getOpcode() == ISD::UNDEF)
00206     return false;
00207   unsigned NumElems = N->getNumOperands();
00208   if (NumElems == 1)
00209     return false;
00210   for (unsigned i = 1; i < NumElems; ++i) {
00211     SDValue V = N->getOperand(i);
00212     if (V.getOpcode() != ISD::UNDEF)
00213       return false;
00214   }
00215   return true;
00216 }
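// For example, (BUILD_VECTOR %x, undef, undef, undef) is accepted here, while
// (BUILD_VECTOR %x, %y, undef, undef) is not, and a single-operand
// BUILD_VECTOR is rejected regardless of its operand.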
00217 
00218 /// allOperandsUndef - Return true if the node has at least one operand
00219 /// and all operands of the specified node are ISD::UNDEF.
00220 bool ISD::allOperandsUndef(const SDNode *N) {
00221   // Return false if the node has no operands.
00222   // This is "logically inconsistent" with the definition of "all" but
00223   // is probably the desired behavior.
00224   if (N->getNumOperands() == 0)
00225     return false;
00226 
00227   for (unsigned i = 0, e = N->getNumOperands(); i != e ; ++i)
00228     if (N->getOperand(i).getOpcode() != ISD::UNDEF)
00229       return false;
00230 
00231   return true;
00232 }
00233 
00234 ISD::NodeType ISD::getExtForLoadExtType(ISD::LoadExtType ExtType) {
00235   switch (ExtType) {
00236   case ISD::EXTLOAD:
00237     return ISD::ANY_EXTEND;
00238   case ISD::SEXTLOAD:
00239     return ISD::SIGN_EXTEND;
00240   case ISD::ZEXTLOAD:
00241     return ISD::ZERO_EXTEND;
00242   default:
00243     break;
00244   }
00245 
00246   llvm_unreachable("Invalid LoadExtType");
00247 }
00248 
00249 /// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
00250 /// when given the operation for (X op Y).
00251 ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
00252   // To perform this operation, we just need to swap the L and G bits of the
00253   // operation.
00254   unsigned OldL = (Operation >> 2) & 1;
00255   unsigned OldG = (Operation >> 1) & 1;
00256   return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
00257                        (OldL << 1) |       // New G bit
00258                        (OldG << 2));       // New L bit.
00259 }
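// Worked example: for SETLT the L bit is set and the G bit is clear; swapping
// the two bits yields SETGT, i.e. (X < Y) becomes (Y > X).  Likewise SETULE
// maps to SETUGE, while SETEQ and SETNE, which treat both bits symmetrically,
// are unchanged.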
00260 
00261 /// getSetCCInverse - Return the operation corresponding to !(X op Y), where
00262 /// 'op' is a valid SetCC operation.
00263 ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
00264   unsigned Operation = Op;
00265   if (isInteger)
00266     Operation ^= 7;   // Flip L, G, E bits, but not U.
00267   else
00268     Operation ^= 15;  // Flip all of the condition bits.
00269 
00270   if (Operation > ISD::SETTRUE2)
00271     Operation &= ~8;  // Don't let N and U bits get set.
00272 
00273   return ISD::CondCode(Operation);
00274 }
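// Worked example: for an integer compare, SETLT ^ 7 flips the L, G and E bits
// and yields SETGE, i.e. !(X < Y) == (X >= Y).  For a floating point compare
// the U bit is flipped as well, so SETOLT becomes SETUGE (the inverse of an
// ordered less-than must also be true on NaN inputs).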
00275 
00276 
00277 /// isSignedOp - For an integer comparison, return 1 if the comparison is a
00278 /// signed operation and 2 if it is an unsigned comparison.  Return zero
00279 /// if the operation does not depend on the sign of the input (setne and seteq).
00280 static int isSignedOp(ISD::CondCode Opcode) {
00281   switch (Opcode) {
00282   default: llvm_unreachable("Illegal integer setcc operation!");
00283   case ISD::SETEQ:
00284   case ISD::SETNE: return 0;
00285   case ISD::SETLT:
00286   case ISD::SETLE:
00287   case ISD::SETGT:
00288   case ISD::SETGE: return 1;
00289   case ISD::SETULT:
00290   case ISD::SETULE:
00291   case ISD::SETUGT:
00292   case ISD::SETUGE: return 2;
00293   }
00294 }
00295 
00296 /// getSetCCOrOperation - Return the result of a logical OR between different
00297 /// comparisons of identical values: ((X op1 Y) | (X op2 Y)).  This function
00298 /// returns SETCC_INVALID if it is not possible to represent the resultant
00299 /// comparison.
00300 ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
00301                                        bool isInteger) {
00302   if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
00303     // Cannot fold a signed integer setcc with an unsigned integer setcc.
00304     return ISD::SETCC_INVALID;
00305 
00306   unsigned Op = Op1 | Op2;  // Combine all of the condition bits.
00307 
00308   // If the N and U bits get set then the resultant comparison DOES suddenly
00309   // care about orderedness, and is true when ordered.
00310   if (Op > ISD::SETTRUE2)
00311     Op &= ~16;     // Clear the U bit if the N bit is set.
00312 
00313   // Canonicalize illegal integer setcc's.
00314   if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
00315     Op = ISD::SETNE;
00316 
00317   return ISD::CondCode(Op);
00318 }
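// Worked example: SETGT | SETLT ORs the G and L bits together and yields
// SETNE, i.e. (X > Y) || (X < Y) == (X != Y) for integers.  SETUGT | SETULT
// produces SETUNE, which the canonicalization step above turns back into
// SETNE when the operands are integers.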
00319 
00320 /// getSetCCAndOperation - Return the result of a logical AND between different
00321 /// comparisons of identical values: ((X op1 Y) & (X op2 Y)).  This
00322 /// function returns zero if it is not possible to represent the resultant
00323 /// comparison.
00324 ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
00325                                         bool isInteger) {
00326   if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
00327     // Cannot fold a signed setcc with an unsigned setcc.
00328     return ISD::SETCC_INVALID;
00329 
00330   // Combine all of the condition bits.
00331   ISD::CondCode Result = ISD::CondCode(Op1 & Op2);
00332 
00333   // Canonicalize illegal integer setcc's.
00334   if (isInteger) {
00335     switch (Result) {
00336     default: break;
00337     case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
00338     case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
00339     case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
00340     case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
00341     case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
00342     }
00343   }
00344 
00345   return Result;
00346 }
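// Worked example: SETUGE & SETULE keeps only the bits common to both (U and
// E), giving SETUEQ, which the integer canonicalization above rewrites to
// SETEQ: (X >= Y) && (X <= Y) == (X == Y).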
00347 
00348 //===----------------------------------------------------------------------===//
00349 //                           SDNode Profile Support
00350 //===----------------------------------------------------------------------===//
00351 
00352 /// AddNodeIDOpcode - Add the node opcode to the NodeID data.
00353 ///
00354 static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)  {
00355   ID.AddInteger(OpC);
00356 }
00357 
00358 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
00359 /// solely with their pointer.
00360 static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
00361   ID.AddPointer(VTList.VTs);
00362 }
00363 
00364 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
00365 ///
00366 static void AddNodeIDOperands(FoldingSetNodeID &ID,
00367                               const SDValue *Ops, unsigned NumOps) {
00368   for (; NumOps; --NumOps, ++Ops) {
00369     ID.AddPointer(Ops->getNode());
00370     ID.AddInteger(Ops->getResNo());
00371   }
00372 }
00373 
00374 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
00375 ///
00376 static void AddNodeIDOperands(FoldingSetNodeID &ID,
00377                               const SDUse *Ops, unsigned NumOps) {
00378   for (; NumOps; --NumOps, ++Ops) {
00379     ID.AddPointer(Ops->getNode());
00380     ID.AddInteger(Ops->getResNo());
00381   }
00382 }
00383 
00384 static void AddNodeIDNode(FoldingSetNodeID &ID,
00385                           unsigned short OpC, SDVTList VTList,
00386                           const SDValue *OpList, unsigned N) {
00387   AddNodeIDOpcode(ID, OpC);
00388   AddNodeIDValueTypes(ID, VTList);
00389   AddNodeIDOperands(ID, OpList, N);
00390 }
00391 
00392 /// AddNodeIDCustom - If this is an SDNode with special info, add this info to
00393 /// the NodeID data.
00394 static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
00395   switch (N->getOpcode()) {
00396   case ISD::TargetExternalSymbol:
00397   case ISD::ExternalSymbol:
00398     llvm_unreachable("Should only be used on nodes with operands");
00399   default: break;  // Normal nodes don't need extra info.
00400   case ISD::TargetConstant:
00401   case ISD::Constant: {
00402     const ConstantSDNode *C = cast<ConstantSDNode>(N);
00403     ID.AddPointer(C->getConstantIntValue());
00404     ID.AddBoolean(C->isOpaque());
00405     break;
00406   }
00407   case ISD::TargetConstantFP:
00408   case ISD::ConstantFP: {
00409     ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
00410     break;
00411   }
00412   case ISD::TargetGlobalAddress:
00413   case ISD::GlobalAddress:
00414   case ISD::TargetGlobalTLSAddress:
00415   case ISD::GlobalTLSAddress: {
00416     const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
00417     ID.AddPointer(GA->getGlobal());
00418     ID.AddInteger(GA->getOffset());
00419     ID.AddInteger(GA->getTargetFlags());
00420     ID.AddInteger(GA->getAddressSpace());
00421     break;
00422   }
00423   case ISD::BasicBlock:
00424     ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
00425     break;
00426   case ISD::Register:
00427     ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
00428     break;
00429   case ISD::RegisterMask:
00430     ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
00431     break;
00432   case ISD::SRCVALUE:
00433     ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
00434     break;
00435   case ISD::FrameIndex:
00436   case ISD::TargetFrameIndex:
00437     ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
00438     break;
00439   case ISD::JumpTable:
00440   case ISD::TargetJumpTable:
00441     ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
00442     ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
00443     break;
00444   case ISD::ConstantPool:
00445   case ISD::TargetConstantPool: {
00446     const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
00447     ID.AddInteger(CP->getAlignment());
00448     ID.AddInteger(CP->getOffset());
00449     if (CP->isMachineConstantPoolEntry())
00450       CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
00451     else
00452       ID.AddPointer(CP->getConstVal());
00453     ID.AddInteger(CP->getTargetFlags());
00454     break;
00455   }
00456   case ISD::TargetIndex: {
00457     const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
00458     ID.AddInteger(TI->getIndex());
00459     ID.AddInteger(TI->getOffset());
00460     ID.AddInteger(TI->getTargetFlags());
00461     break;
00462   }
00463   case ISD::LOAD: {
00464     const LoadSDNode *LD = cast<LoadSDNode>(N);
00465     ID.AddInteger(LD->getMemoryVT().getRawBits());
00466     ID.AddInteger(LD->getRawSubclassData());
00467     ID.AddInteger(LD->getPointerInfo().getAddrSpace());
00468     break;
00469   }
00470   case ISD::STORE: {
00471     const StoreSDNode *ST = cast<StoreSDNode>(N);
00472     ID.AddInteger(ST->getMemoryVT().getRawBits());
00473     ID.AddInteger(ST->getRawSubclassData());
00474     ID.AddInteger(ST->getPointerInfo().getAddrSpace());
00475     break;
00476   }
00477   case ISD::ATOMIC_CMP_SWAP:
00478   case ISD::ATOMIC_SWAP:
00479   case ISD::ATOMIC_LOAD_ADD:
00480   case ISD::ATOMIC_LOAD_SUB:
00481   case ISD::ATOMIC_LOAD_AND:
00482   case ISD::ATOMIC_LOAD_OR:
00483   case ISD::ATOMIC_LOAD_XOR:
00484   case ISD::ATOMIC_LOAD_NAND:
00485   case ISD::ATOMIC_LOAD_MIN:
00486   case ISD::ATOMIC_LOAD_MAX:
00487   case ISD::ATOMIC_LOAD_UMIN:
00488   case ISD::ATOMIC_LOAD_UMAX:
00489   case ISD::ATOMIC_LOAD:
00490   case ISD::ATOMIC_STORE: {
00491     const AtomicSDNode *AT = cast<AtomicSDNode>(N);
00492     ID.AddInteger(AT->getMemoryVT().getRawBits());
00493     ID.AddInteger(AT->getRawSubclassData());
00494     ID.AddInteger(AT->getPointerInfo().getAddrSpace());
00495     break;
00496   }
00497   case ISD::PREFETCH: {
00498     const MemSDNode *PF = cast<MemSDNode>(N);
00499     ID.AddInteger(PF->getPointerInfo().getAddrSpace());
00500     break;
00501   }
00502   case ISD::VECTOR_SHUFFLE: {
00503     const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
00504     for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
00505          i != e; ++i)
00506       ID.AddInteger(SVN->getMaskElt(i));
00507     break;
00508   }
00509   case ISD::TargetBlockAddress:
00510   case ISD::BlockAddress: {
00511     const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
00512     ID.AddPointer(BA->getBlockAddress());
00513     ID.AddInteger(BA->getOffset());
00514     ID.AddInteger(BA->getTargetFlags());
00515     break;
00516   }
00517   } // end switch (N->getOpcode())
00518 
00519   // Target specific memory nodes could also have address spaces to check.
00520   if (N->isTargetMemoryOpcode())
00521     ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
00522 }
00523 
00524 /// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
00525 /// data.
00526 static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
00527   AddNodeIDOpcode(ID, N->getOpcode());
00528   // Add the return value info.
00529   AddNodeIDValueTypes(ID, N->getVTList());
00530   // Add the operand info.
00531   AddNodeIDOperands(ID, N->op_begin(), N->getNumOperands());
00532 
00533   // Handle SDNode leaves with special info.
00534   AddNodeIDCustom(ID, N);
00535 }
00536 
00537 /// encodeMemSDNodeFlags - Generic routine for computing a value for use in
00538 /// the CSE map that carries volatility, temporalness, indexing mode, and
00539 /// extension/truncation information.
00540 ///
00541 static inline unsigned
00542 encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
00543                      bool isNonTemporal, bool isInvariant) {
00544   assert((ConvType & 3) == ConvType &&
00545          "ConvType may not require more than 2 bits!");
00546   assert((AM & 7) == AM &&
00547          "AM may not require more than 3 bits!");
00548   return ConvType |
00549          (AM << 2) |
00550          (isVolatile << 5) |
00551          (isNonTemporal << 6) |
00552          (isInvariant << 7);
00553 }
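// For illustration, the resulting bitfield is laid out as:
//   bits 0-1  extension/truncation type (ConvType)
//   bits 2-4  indexing mode (AM)
//   bit  5    volatile
//   bit  6    non-temporal
//   bit  7    invariant
// so a volatile, unindexed, non-extending load encodes as just bit 5 (0x20),
// assuming both ConvType and AM are zero for that case.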
00554 
00555 //===----------------------------------------------------------------------===//
00556 //                              SelectionDAG Class
00557 //===----------------------------------------------------------------------===//
00558 
00559 /// doNotCSE - Return true if CSE should not be performed for this node.
00560 static bool doNotCSE(SDNode *N) {
00561   if (N->getValueType(0) == MVT::Glue)
00562     return true; // Never CSE anything that produces a flag.
00563 
00564   switch (N->getOpcode()) {
00565   default: break;
00566   case ISD::HANDLENODE:
00567   case ISD::EH_LABEL:
00568     return true;   // Never CSE these nodes.
00569   }
00570 
00571   // Check that remaining values produced are not flags.
00572   for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
00573     if (N->getValueType(i) == MVT::Glue)
00574       return true; // Never CSE anything that produces a flag.
00575 
00576   return false;
00577 }
00578 
00579 /// RemoveDeadNodes - This method deletes all unreachable nodes in the
00580 /// SelectionDAG.
00581 void SelectionDAG::RemoveDeadNodes() {
00582   // Create a dummy node (which is not added to allnodes), that adds a reference
00583   // to the root node, preventing it from being deleted.
00584   HandleSDNode Dummy(getRoot());
00585 
00586   SmallVector<SDNode*, 128> DeadNodes;
00587 
00588   // Add all obviously-dead nodes to the DeadNodes worklist.
00589   for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
00590     if (I->use_empty())
00591       DeadNodes.push_back(I);
00592 
00593   RemoveDeadNodes(DeadNodes);
00594 
00595   // If the root changed (e.g. it was a dead load), update the root.
00596   setRoot(Dummy.getValue());
00597 }
00598 
00599 /// RemoveDeadNodes - This method deletes the unreachable nodes in the
00600 /// given list, and any nodes that become unreachable as a result.
00601 void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {
00602 
00603   // Process the worklist, deleting the nodes and adding their uses to the
00604   // worklist.
00605   while (!DeadNodes.empty()) {
00606     SDNode *N = DeadNodes.pop_back_val();
00607 
00608     for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
00609       DUL->NodeDeleted(N, nullptr);
00610 
00611     // Take the node out of the appropriate CSE map.
00612     RemoveNodeFromCSEMaps(N);
00613 
00614     // Next, brutally remove the operand list.  This is safe to do, as there are
00615     // no cycles in the graph.
00616     for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
00617       SDUse &Use = *I++;
00618       SDNode *Operand = Use.getNode();
00619       Use.set(SDValue());
00620 
00621       // Now that we removed this operand, see if there are no uses of it left.
00622       if (Operand->use_empty())
00623         DeadNodes.push_back(Operand);
00624     }
00625 
00626     DeallocateNode(N);
00627   }
00628 }
00629 
00630 void SelectionDAG::RemoveDeadNode(SDNode *N){
00631   SmallVector<SDNode*, 16> DeadNodes(1, N);
00632 
00633   // Create a dummy node that adds a reference to the root node, preventing
00634   // it from being deleted.  (This matters if the root is an operand of the
00635   // dead node.)
00636   HandleSDNode Dummy(getRoot());
00637 
00638   RemoveDeadNodes(DeadNodes);
00639 }
00640 
00641 void SelectionDAG::DeleteNode(SDNode *N) {
00642   // First take this out of the appropriate CSE map.
00643   RemoveNodeFromCSEMaps(N);
00644 
00645   // Finally, remove uses due to operands of this node, remove from the
00646   // AllNodes list, and delete the node.
00647   DeleteNodeNotInCSEMaps(N);
00648 }
00649 
00650 void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
00651   assert(N != AllNodes.begin() && "Cannot delete the entry node!");
00652   assert(N->use_empty() && "Cannot delete a node that is not dead!");
00653 
00654   // Drop all of the operands and decrement used node's use counts.
00655   N->DropOperands();
00656 
00657   DeallocateNode(N);
00658 }
00659 
00660 void SelectionDAG::DeallocateNode(SDNode *N) {
00661   if (N->OperandsNeedDelete)
00662     delete[] N->OperandList;
00663 
00664   // Set the opcode to DELETED_NODE to help catch bugs when node
00665   // memory is reallocated.
00666   N->NodeType = ISD::DELETED_NODE;
00667 
00668   NodeAllocator.Deallocate(AllNodes.remove(N));
00669 
00670   // If any of the SDDbgValue nodes refer to this SDNode, invalidate them.
00671   ArrayRef<SDDbgValue*> DbgVals = DbgInfo->getSDDbgValues(N);
00672   for (unsigned i = 0, e = DbgVals.size(); i != e; ++i)
00673     DbgVals[i]->setIsInvalidated();
00674 }
00675 
00676 /// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
00677 /// corresponds to it.  This is useful when we're about to delete or repurpose
00678 /// the node.  We don't want future requests for structurally identical nodes
00679 /// to return N anymore.
00680 bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
00681   bool Erased = false;
00682   switch (N->getOpcode()) {
00683   case ISD::HANDLENODE: return false;  // noop.
00684   case ISD::CONDCODE:
00685     assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
00686            "Cond code doesn't exist!");
00687     Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
00688     CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
00689     break;
00690   case ISD::ExternalSymbol:
00691     Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
00692     break;
00693   case ISD::TargetExternalSymbol: {
00694     ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
00695     Erased = TargetExternalSymbols.erase(
00696                std::pair<std::string,unsigned char>(ESN->getSymbol(),
00697                                                     ESN->getTargetFlags()));
00698     break;
00699   }
00700   case ISD::VALUETYPE: {
00701     EVT VT = cast<VTSDNode>(N)->getVT();
00702     if (VT.isExtended()) {
00703       Erased = ExtendedValueTypeNodes.erase(VT);
00704     } else {
00705       Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
00706       ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
00707     }
00708     break;
00709   }
00710   default:
00711     // Remove it from the CSE Map.
00712     assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
00713     assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
00714     Erased = CSEMap.RemoveNode(N);
00715     break;
00716   }
00717 #ifndef NDEBUG
00718   // Verify that the node was actually in one of the CSE maps, unless it has a
00719   // flag result (which cannot be CSE'd) or is one of the special cases that are
00720   // not subject to CSE.
00721   if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
00722       !N->isMachineOpcode() && !doNotCSE(N)) {
00723     N->dump(this);
00724     dbgs() << "\n";
00725     llvm_unreachable("Node is not in map!");
00726   }
00727 #endif
00728   return Erased;
00729 }
00730 
00731 /// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
00732 /// maps and modified in place. Add it back to the CSE maps, unless an identical
00733 /// node already exists, in which case transfer all its users to the existing
00734 /// node. This transfer can potentially trigger recursive merging.
00735 ///
00736 void
00737 SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
00738   // For node types that aren't CSE'd, just act as if no identical node
00739   // already exists.
00740   if (!doNotCSE(N)) {
00741     SDNode *Existing = CSEMap.GetOrInsertNode(N);
00742     if (Existing != N) {
00743       // If there was already an existing matching node, use ReplaceAllUsesWith
00744       // to replace the dead one with the existing one.  This can cause
00745       // recursive merging of other unrelated nodes down the line.
00746       ReplaceAllUsesWith(N, Existing);
00747 
00748       // N is now dead. Inform the listeners and delete it.
00749       for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
00750         DUL->NodeDeleted(N, Existing);
00751       DeleteNodeNotInCSEMaps(N);
00752       return;
00753     }
00754   }
00755 
00756   // If the node doesn't already exist, we updated it.  Inform listeners.
00757   for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
00758     DUL->NodeUpdated(N);
00759 }
00760 
00761 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
00762 /// were replaced with those specified.  If this node is never memoized,
00763 /// return null, otherwise return a pointer to the slot it would take.  If a
00764 /// node already exists with these operands, the slot will be non-null.
00765 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
00766                                            void *&InsertPos) {
00767   if (doNotCSE(N))
00768     return nullptr;
00769 
00770   SDValue Ops[] = { Op };
00771   FoldingSetNodeID ID;
00772   AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 1);
00773   AddNodeIDCustom(ID, N);
00774   SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
00775   return Node;
00776 }
00777 
00778 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
00779 /// were replaced with those specified.  If this node is never memoized,
00780 /// return null, otherwise return a pointer to the slot it would take.  If a
00781 /// node already exists with these operands, the slot will be non-null.
00782 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
00783                                            SDValue Op1, SDValue Op2,
00784                                            void *&InsertPos) {
00785   if (doNotCSE(N))
00786     return nullptr;
00787 
00788   SDValue Ops[] = { Op1, Op2 };
00789   FoldingSetNodeID ID;
00790   AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 2);
00791   AddNodeIDCustom(ID, N);
00792   SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
00793   return Node;
00794 }
00795 
00796 
00797 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
00798 /// were replaced with those specified.  If this node is never memoized,
00799 /// return null, otherwise return a pointer to the slot it would take.  If a
00800 /// node already exists with these operands, the slot will be non-null.
00801 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
00802                                            const SDValue *Ops,unsigned NumOps,
00803                                            void *&InsertPos) {
00804   if (doNotCSE(N))
00805     return nullptr;
00806 
00807   FoldingSetNodeID ID;
00808   AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, NumOps);
00809   AddNodeIDCustom(ID, N);
00810   SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
00811   return Node;
00812 }
00813 
00814 #ifndef NDEBUG
00815 /// VerifyNodeCommon - Sanity check the given node.  Aborts if it is invalid.
00816 static void VerifyNodeCommon(SDNode *N) {
00817   switch (N->getOpcode()) {
00818   default:
00819     break;
00820   case ISD::BUILD_PAIR: {
00821     EVT VT = N->getValueType(0);
00822     assert(N->getNumValues() == 1 && "Too many results!");
00823     assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
00824            "Wrong return type!");
00825     assert(N->getNumOperands() == 2 && "Wrong number of operands!");
00826     assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
00827            "Mismatched operand types!");
00828     assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
00829            "Wrong operand type!");
00830     assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
00831            "Wrong return type size");
00832     break;
00833   }
00834   case ISD::BUILD_VECTOR: {
00835     assert(N->getNumValues() == 1 && "Too many results!");
00836     assert(N->getValueType(0).isVector() && "Wrong return type!");
00837     assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
00838            "Wrong number of operands!");
00839     EVT EltVT = N->getValueType(0).getVectorElementType();
00840     for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
00841       assert((I->getValueType() == EltVT ||
00842              (EltVT.isInteger() && I->getValueType().isInteger() &&
00843               EltVT.bitsLE(I->getValueType()))) &&
00844             "Wrong operand type!");
00845       assert(I->getValueType() == N->getOperand(0).getValueType() &&
00846              "Operands must all have the same type");
00847     }
00848     break;
00849   }
00850   }
00851 }
00852 
00853 /// VerifySDNode - Sanity check the given SDNode.  Aborts if it is invalid.
00854 static void VerifySDNode(SDNode *N) {
00855   // The SDNode allocators cannot be used to allocate nodes with fields that are
00856   // not present in an SDNode!
00857   assert(!isa<MemSDNode>(N) && "Bad MemSDNode!");
00858   assert(!isa<ShuffleVectorSDNode>(N) && "Bad ShuffleVectorSDNode!");
00859   assert(!isa<ConstantSDNode>(N) && "Bad ConstantSDNode!");
00860   assert(!isa<ConstantFPSDNode>(N) && "Bad ConstantFPSDNode!");
00861   assert(!isa<GlobalAddressSDNode>(N) && "Bad GlobalAddressSDNode!");
00862   assert(!isa<FrameIndexSDNode>(N) && "Bad FrameIndexSDNode!");
00863   assert(!isa<JumpTableSDNode>(N) && "Bad JumpTableSDNode!");
00864   assert(!isa<ConstantPoolSDNode>(N) && "Bad ConstantPoolSDNode!");
00865   assert(!isa<BasicBlockSDNode>(N) && "Bad BasicBlockSDNode!");
00866   assert(!isa<SrcValueSDNode>(N) && "Bad SrcValueSDNode!");
00867   assert(!isa<MDNodeSDNode>(N) && "Bad MDNodeSDNode!");
00868   assert(!isa<RegisterSDNode>(N) && "Bad RegisterSDNode!");
00869   assert(!isa<BlockAddressSDNode>(N) && "Bad BlockAddressSDNode!");
00870   assert(!isa<EHLabelSDNode>(N) && "Bad EHLabelSDNode!");
00871   assert(!isa<ExternalSymbolSDNode>(N) && "Bad ExternalSymbolSDNode!");
00872   assert(!isa<CondCodeSDNode>(N) && "Bad CondCodeSDNode!");
00873   assert(!isa<CvtRndSatSDNode>(N) && "Bad CvtRndSatSDNode!");
00874   assert(!isa<VTSDNode>(N) && "Bad VTSDNode!");
00875   assert(!isa<MachineSDNode>(N) && "Bad MachineSDNode!");
00876 
00877   VerifyNodeCommon(N);
00878 }
00879 
00880 /// VerifyMachineNode - Sanity check the given MachineNode.  Aborts if it is
00881 /// invalid.
00882 static void VerifyMachineNode(SDNode *N) {
00883   // The MachineNode allocators cannot be used to allocate nodes with fields
00884   // that are not present in a MachineNode!
00885   // Currently there are no such nodes.
00886 
00887   VerifyNodeCommon(N);
00888 }
00889 #endif // NDEBUG
00890 
00891 /// getEVTAlignment - Compute the default alignment value for the
00892 /// given type.
00893 ///
00894 unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
00895   Type *Ty = VT == MVT::iPTR ?
00896                    PointerType::get(Type::getInt8Ty(*getContext()), 0) :
00897                    VT.getTypeForEVT(*getContext());
00898 
00899   return TM.getTargetLowering()->getDataLayout()->getABITypeAlignment(Ty);
00900 }
00901 
00902 // EntryNode could meaningfully have debug info if we can find it...
00903 SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
00904   : TM(tm), TSI(*tm.getSelectionDAGInfo()), TLI(nullptr), OptLevel(OL),
00905     EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
00906     Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
00907     UpdateListeners(nullptr) {
00908   AllNodes.push_back(&EntryNode);
00909   DbgInfo = new SDDbgInfo();
00910 }
00911 
00912 void SelectionDAG::init(MachineFunction &mf, const TargetLowering *tli) {
00913   MF = &mf;
00914   TLI = tli;
00915   Context = &mf.getFunction()->getContext();
00916 }
00917 
00918 SelectionDAG::~SelectionDAG() {
00919   assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
00920   allnodes_clear();
00921   delete DbgInfo;
00922 }
00923 
00924 void SelectionDAG::allnodes_clear() {
00925   assert(&*AllNodes.begin() == &EntryNode);
00926   AllNodes.remove(AllNodes.begin());
00927   while (!AllNodes.empty())
00928     DeallocateNode(AllNodes.begin());
00929 }
00930 
00931 void SelectionDAG::clear() {
00932   allnodes_clear();
00933   OperandAllocator.Reset();
00934   CSEMap.clear();
00935 
00936   ExtendedValueTypeNodes.clear();
00937   ExternalSymbols.clear();
00938   TargetExternalSymbols.clear();
00939   std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
00940             static_cast<CondCodeSDNode*>(nullptr));
00941   std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
00942             static_cast<SDNode*>(nullptr));
00943 
00944   EntryNode.UseList = nullptr;
00945   AllNodes.push_back(&EntryNode);
00946   Root = getEntryNode();
00947   DbgInfo->clear();
00948 }
00949 
00950 SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
00951   return VT.bitsGT(Op.getValueType()) ?
00952     getNode(ISD::ANY_EXTEND, DL, VT, Op) :
00953     getNode(ISD::TRUNCATE, DL, VT, Op);
00954 }
00955 
00956 SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
00957   return VT.bitsGT(Op.getValueType()) ?
00958     getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
00959     getNode(ISD::TRUNCATE, DL, VT, Op);
00960 }
00961 
00962 SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
00963   return VT.bitsGT(Op.getValueType()) ?
00964     getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
00965     getNode(ISD::TRUNCATE, DL, VT, Op);
00966 }
00967 
00968 SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, SDLoc DL, EVT VT) {
00969   assert(!VT.isVector() &&
00970          "getZeroExtendInReg should use the vector element type instead of "
00971          "the vector type!");
00972   if (Op.getValueType() == VT) return Op;
00973   unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
00974   APInt Imm = APInt::getLowBitsSet(BitWidth,
00975                                    VT.getSizeInBits());
00976   return getNode(ISD::AND, DL, Op.getValueType(), Op,
00977                  getConstant(Imm, Op.getValueType()));
00978 }
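// For example, zero-extending the low 8 bits of an i32 value in-register is
// emitted as (and Op, 0x000000FF): the mask keeps the low VT.getSizeInBits()
// bits of the operand and clears the rest.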
00979 
00980 /// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
00981 ///
00982 SDValue SelectionDAG::getNOT(SDLoc DL, SDValue Val, EVT VT) {
00983   EVT EltVT = VT.getScalarType();
00984   SDValue NegOne =
00985     getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
00986   return getNode(ISD::XOR, DL, VT, Val, NegOne);
00987 }
00988 
00989 SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT, bool isO) {
00990   EVT EltVT = VT.getScalarType();
00991   assert((EltVT.getSizeInBits() >= 64 ||
00992          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
00993          "getConstant with a uint64_t value that doesn't fit in the type!");
00994   return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT, isO);
00995 }
00996 
00997 SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT, bool isO)
00998 {
00999   return getConstant(*ConstantInt::get(*Context, Val), VT, isT, isO);
01000 }
01001 
01002 SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT,
01003                                   bool isO) {
01004   assert(VT.isInteger() && "Cannot create FP integer constant!");
01005 
01006   EVT EltVT = VT.getScalarType();
01007   const ConstantInt *Elt = &Val;
01008 
01009   const TargetLowering *TLI = TM.getTargetLowering();
01010 
01011   // In some cases the vector type is legal but the element type is illegal and
01012   // needs to be promoted, for example v8i8 on ARM.  In this case, promote the
01013   // inserted value (the type does not need to match the vector element type).
01014   // Any extra bits introduced will be truncated away.
01015   if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
01016       TargetLowering::TypePromoteInteger) {
01017    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
01018    APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
01019    Elt = ConstantInt::get(*getContext(), NewVal);
01020   }
01021   // In other cases the element type is illegal and needs to be expanded, for
01022   // example v2i64 on MIPS32. In this case, find the nearest legal type, split
01023   // the value into n parts and use a vector type with n-times the elements.
01024   // Then bitcast to the type requested.
01025   // Legalizing constants too early makes the DAGCombiner's job harder so we
01026   // only legalize if the DAG tells us we must produce legal types.
01027   else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
01028            TLI->getTypeAction(*getContext(), EltVT) ==
01029            TargetLowering::TypeExpandInteger) {
01030     APInt NewVal = Elt->getValue();
01031     EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
01032     unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
01033     unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
01034     EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
01035 
01036     // Check the temporary vector is the correct size. If this fails then
01037     // getTypeToTransformTo() probably returned a type whose size (in bits)
01038     // isn't a power-of-2 factor of the requested type size.
01039     assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
01040 
01041     SmallVector<SDValue, 2> EltParts;
01042     for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
01043       EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
01044                                            .trunc(ViaEltSizeInBits),
01045                                      ViaEltVT, isT, isO));
01046     }
01047 
01048     // EltParts is currently in little endian order. If we actually want
01049     // big-endian order then reverse it now.
01050     if (TLI->isBigEndian())
01051       std::reverse(EltParts.begin(), EltParts.end());
01052 
01053     // The elements must be reversed when the element order is different
01054     // to the endianness of the elements (because the BITCAST is itself a
01055     // vector shuffle in this situation). However, we do not need any code to
01056     // perform this reversal because getConstant() is producing a vector
01057     // splat.
01058     // This situation occurs in MIPS MSA.
01059 
01060     SmallVector<SDValue, 8> Ops;
01061     for (unsigned i = 0; i < VT.getVectorNumElements(); ++i)
01062       Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());
01063 
01064     SDValue Result = getNode(ISD::BITCAST, SDLoc(), VT,
01065                              getNode(ISD::BUILD_VECTOR, SDLoc(), ViaVecVT,
01066                                      &Ops[0], Ops.size()));
01067     return Result;
01068   }
01069 
01070   assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
01071          "APInt size does not match type size!");
01072   unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
01073   FoldingSetNodeID ID;
01074   AddNodeIDNode(ID, Opc, getVTList(EltVT), nullptr, 0);
01075   ID.AddPointer(Elt);
01076   ID.AddBoolean(isO);
01077   void *IP = nullptr;
01078   SDNode *N = nullptr;
01079   if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
01080     if (!VT.isVector())
01081       return SDValue(N, 0);
01082 
01083   if (!N) {
01084     N = new (NodeAllocator) ConstantSDNode(isT, isO, Elt, EltVT);
01085     CSEMap.InsertNode(N, IP);
01086     AllNodes.push_back(N);
01087   }
01088 
01089   SDValue Result(N, 0);
01090   if (VT.isVector()) {
01091     SmallVector<SDValue, 8> Ops;
01092     Ops.assign(VT.getVectorNumElements(), Result);
01093     Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, &Ops[0], Ops.size());
01094   }
01095   return Result;
01096 }
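// Worked example of the expansion path above: once the DAG is required to
// produce legal types, a v2i64 splat of 0x0000000100000002 on a 32-bit target
// that must expand i64 first builds the i32 parts {0x00000002, 0x00000001}
// (low part first), repeats them once per requested element to form a v4i32
// BUILD_VECTOR, and finally bitcasts the result back to v2i64.  On a
// big-endian target the two parts are swapped before the BUILD_VECTOR is
// formed.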
01097 
01098 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
01099   return getConstant(Val, TM.getTargetLowering()->getPointerTy(), isTarget);
01100 }
01101 
01102 
01103 SDValue SelectionDAG::getConstantFP(const APFloat& V, EVT VT, bool isTarget) {
01104   return getConstantFP(*ConstantFP::get(*getContext(), V), VT, isTarget);
01105 }
01106 
01107 SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
01108   assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
01109 
01110   EVT EltVT = VT.getScalarType();
01111 
01112   // Do the map lookup using the actual bit pattern for the floating point
01113   // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
01114   // we don't have issues with SNANs.
01115   unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
01116   FoldingSetNodeID ID;
01117   AddNodeIDNode(ID, Opc, getVTList(EltVT), nullptr, 0);
01118   ID.AddPointer(&V);
01119   void *IP = nullptr;
01120   SDNode *N = nullptr;
01121   if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
01122     if (!VT.isVector())
01123       return SDValue(N, 0);
01124 
01125   if (!N) {
01126     N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, EltVT);
01127     CSEMap.InsertNode(N, IP);
01128     AllNodes.push_back(N);
01129   }
01130 
01131   SDValue Result(N, 0);
01132   if (VT.isVector()) {
01133     SmallVector<SDValue, 8> Ops;
01134     Ops.assign(VT.getVectorNumElements(), Result);
01135     // FIXME SDLoc info might be appropriate here
01136     Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, &Ops[0], Ops.size());
01137   }
01138   return Result;
01139 }
01140 
01141 SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
01142   EVT EltVT = VT.getScalarType();
01143   if (EltVT==MVT::f32)
01144     return getConstantFP(APFloat((float)Val), VT, isTarget);
01145   else if (EltVT==MVT::f64)
01146     return getConstantFP(APFloat(Val), VT, isTarget);
01147   else if (EltVT==MVT::f80 || EltVT==MVT::f128 || EltVT==MVT::ppcf128 ||
01148            EltVT==MVT::f16) {
01149     bool ignored;
01150     APFloat apf = APFloat(Val);
01151     apf.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
01152                 &ignored);
01153     return getConstantFP(apf, VT, isTarget);
01154   } else
01155     llvm_unreachable("Unsupported type in getConstantFP");
01156 }
01157 
01158 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
01159                                        EVT VT, int64_t Offset,
01160                                        bool isTargetGA,
01161                                        unsigned char TargetFlags) {
01162   assert((TargetFlags == 0 || isTargetGA) &&
01163          "Cannot set target flags on target-independent globals");
01164   const TargetLowering *TLI = TM.getTargetLowering();
01165 
01166   // Truncate (with sign-extension) the offset value to the pointer size.
01167   unsigned BitWidth = TLI->getPointerTypeSizeInBits(GV->getType());
01168   if (BitWidth < 64)
01169     Offset = SignExtend64(Offset, BitWidth);
01170 
01171   const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
01172   if (!GVar) {
01173     // If GV is an alias then use the aliasee for determining thread-localness.
01174     if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
01175       GVar = dyn_cast_or_null<GlobalVariable>(GA->getAliasedGlobal());
01176   }
01177 
01178   unsigned Opc;
01179   if (GVar && GVar->isThreadLocal())
01180     Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
01181   else
01182     Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
01183 
01184   FoldingSetNodeID ID;
01185   AddNodeIDNode(ID, Opc, getVTList(VT), nullptr, 0);
01186   ID.AddPointer(GV);
01187   ID.AddInteger(Offset);
01188   ID.AddInteger(TargetFlags);
01189   ID.AddInteger(GV->getType()->getAddressSpace());
01190   void *IP = nullptr;
01191   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
01192     return SDValue(E, 0);
01193 
01194   SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL.getIROrder(),
01195                                                       DL.getDebugLoc(), GV, VT,
01196                                                       Offset, TargetFlags);
01197   CSEMap.InsertNode(N, IP);
01198   AllNodes.push_back(N);
01199   return SDValue(N, 0);
01200 }
01201 
01202 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
01203   unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
01204   FoldingSetNodeID ID;
01205   AddNodeIDNode(ID, Opc, getVTList(VT), nullptr, 0);
01206   ID.AddInteger(FI);
01207   void *IP = nullptr;
01208   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
01209     return SDValue(E, 0);
01210 
01211   SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
01212   CSEMap.InsertNode(N, IP);
01213   AllNodes.push_back(N);
01214   return SDValue(N, 0);
01215 }
01216 
01217 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
01218                                    unsigned char TargetFlags) {
01219   assert((TargetFlags == 0 || isTarget) &&
01220          "Cannot set target flags on target-independent jump tables");
01221   unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
01222   FoldingSetNodeID ID;
01223   AddNodeIDNode(ID, Opc, getVTList(VT), nullptr, 0);
01224   ID.AddInteger(JTI);
01225   ID.AddInteger(TargetFlags);
01226   void *IP = nullptr;
01227   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
01228     return SDValue(E, 0);
01229 
01230   SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
01231                                                   TargetFlags);
01232   CSEMap.InsertNode(N, IP);
01233   AllNodes.push_back(N);
01234   return SDValue(N, 0);
01235 }
01236 
01237 SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
01238                                       unsigned Alignment, int Offset,
01239                                       bool isTarget,
01240                                       unsigned char TargetFlags) {
01241   assert((TargetFlags == 0 || isTarget) &&
01242          "Cannot set target flags on target-independent globals");
01243   if (Alignment == 0)
01244     Alignment =
01245     TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
01246   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
01247   FoldingSetNodeID ID;
01248   AddNodeIDNode(ID, Opc, getVTList(VT), nullptr, 0);
01249   ID.AddInteger(Alignment);
01250   ID.AddInteger(Offset);
01251   ID.AddPointer(C);
01252   ID.AddInteger(TargetFlags);
01253   void *IP = nullptr;
01254   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
01255     return SDValue(E, 0);
01256 
01257   SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
01258                                                      Alignment, TargetFlags);
01259   CSEMap.InsertNode(N, IP);
01260   AllNodes.push_back(N);
01261   return SDValue(N, 0);
01262 }
01263 
01264 
01265 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
01266                                       unsigned Alignment, int Offset,
01267                                       bool isTarget,
01268                                       unsigned char TargetFlags) {
01269   assert((TargetFlags == 0 || isTarget) &&
01270          "Cannot set target flags on target-independent globals");
01271   if (Alignment == 0)
01272     Alignment =
01273     TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
01274   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
01275   FoldingSetNodeID ID;
01276   AddNodeIDNode(ID, Opc, getVTList(VT), nullptr, 0);
01277   ID.AddInteger(Alignment);
01278   ID.AddInteger(Offset);
01279   C->addSelectionDAGCSEId(ID);
01280   ID.AddInteger(TargetFlags);
01281   void *IP = nullptr;
01282   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
01283     return SDValue(E, 0);
01284 
01285   SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
01286                                                      Alignment, TargetFlags);
01287   CSEMap.InsertNode(N, IP);
01288   AllNodes.push_back(N);
01289   return SDValue(N, 0);
01290 }
01291 
01292 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
01293                                      unsigned char TargetFlags) {
01294   FoldingSetNodeID ID;
01295   AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), nullptr, 0);
01296   ID.AddInteger(Index);
01297   ID.AddInteger(Offset);
01298   ID.AddInteger(TargetFlags);
01299   void *IP = nullptr;
01300   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
01301     return SDValue(E, 0);
01302 
01303   SDNode *N = new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset,
01304                                                     TargetFlags);
01305   CSEMap.InsertNode(N, IP);
01306   AllNodes.push_back(N);
01307   return SDValue(N, 0);
01308 }
01309 
01310 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
01311   FoldingSetNodeID ID;
01312   AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), nullptr, 0);
01313   ID.AddPointer(MBB);
01314   void *IP = nullptr;
01315   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
01316     return SDValue(E, 0);
01317 
01318   SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
01319   CSEMap.InsertNode(N, IP);
01320   AllNodes.push_back(N);
01321   return SDValue(N, 0);
01322 }
01323 
01324 SDValue SelectionDAG::getValueType(EVT VT) {
01325   if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
01326       ValueTypeNodes.size())
01327     ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
01328 
01329   SDNode *&N = VT.isExtended() ?
01330     ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
01331 
01332   if (N) return SDValue(N, 0);
01333   N = new (NodeAllocator) VTSDNode(VT);
01334   AllNodes.push_back(N);
01335   return SDValue(N, 0);
01336 }
01337 
01338 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
01339   SDNode *&N = ExternalSymbols[Sym];
01340   if (N) return SDValue(N, 0);
01341   N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
01342   AllNodes.push_back(N);
01343   return SDValue(N, 0);
01344 }
01345 
01346 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
01347                                               unsigned char TargetFlags) {
01348   SDNode *&N =
01349     TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
01350                                                                TargetFlags)];
01351   if (N) return SDValue(N, 0);
01352   N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
01353   AllNodes.push_back(N);
01354   return SDValue(N, 0);
01355 }
01356 
01357 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
01358   if ((unsigned)Cond >= CondCodeNodes.size())
01359     CondCodeNodes.resize(Cond+1);
01360 
01361   if (!CondCodeNodes[Cond]) {
01362     CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
01363     CondCodeNodes[Cond] = N;
01364     AllNodes.push_back(N);
01365   }
01366 
01367   return SDValue(CondCodeNodes[Cond], 0);
01368 }
01369 
01370 // commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
01371 // the shuffle mask M that point at N1 to point at N2, and all indices that
01372 // point at N2 to point at N1.
01373 static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
01374   std::swap(N1, N2);
01375   int NElts = M.size();
01376   for (int i = 0; i != NElts; ++i) {
01377     if (M[i] >= NElts)
01378       M[i] -= NElts;
01379     else if (M[i] >= 0)
01380       M[i] += NElts;
01381   }
01382 }
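
// As a minimal illustration of the remapping above (the 4-element mask below
// is hypothetical): an index in [0, NElts) selects from N1 and an index in
// [NElts, 2*NElts) selects from N2, so commuting the operands moves every
// in-range lane reference across that boundary while leaving -1 (undef) alone.
//
//   SmallVector<int, 8> M = {0, 5, -1, 2};  // N1[0], N2[1], undef, N1[2]
//   commuteShuffle(N1, N2, M);              // swaps N1 and N2
//   // M is now {4, 1, -1, 6}               // same lanes, operands exchanged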
01383 
01384 SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
01385                                        SDValue N2, const int *Mask) {
01386   assert(VT == N1.getValueType() && VT == N2.getValueType() &&
01387          "Invalid VECTOR_SHUFFLE");
01388 
01389   // Canonicalize shuffle undef, undef -> undef
01390   if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
01391     return getUNDEF(VT);
01392 
01393   // Validate that all indices in Mask are within the range of the elements
01394   // input to the shuffle.
01395   unsigned NElts = VT.getVectorNumElements();
01396   SmallVector<int, 8> MaskVec;
01397   for (unsigned i = 0; i != NElts; ++i) {
01398     assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
01399     MaskVec.push_back(Mask[i]);
01400   }
01401 
01402   // Canonicalize shuffle v, v -> v, undef
01403   if (N1 == N2) {
01404     N2 = getUNDEF(VT);
01405     for (unsigned i = 0; i != NElts; ++i)
01406       if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
01407   }
01408 
01409   // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
01410   if (N1.getOpcode() == ISD::UNDEF)
01411     commuteShuffle(N1, N2, MaskVec);
01412 
01413   // Canonicalize: if all indices reference the lhs -> shuffle lhs, undef
01414   // Canonicalize: if all indices reference the rhs -> shuffle rhs, undef
01415   bool AllLHS = true, AllRHS = true;
01416   bool N2Undef = N2.getOpcode() == ISD::UNDEF;
01417   for (unsigned i = 0; i != NElts; ++i) {
01418     if (MaskVec[i] >= (int)NElts) {
01419       if (N2Undef)
01420         MaskVec[i] = -1;
01421       else
01422         AllLHS = false;
01423     } else if (MaskVec[i] >= 0) {
01424       AllRHS = false;
01425     }
01426   }
01427   if (AllLHS && AllRHS)
01428     return getUNDEF(VT);
01429   if (AllLHS && !N2Undef)
01430     N2 = getUNDEF(VT);
01431   if (AllRHS) {
01432     N1 = getUNDEF(VT);
01433     commuteShuffle(N1, N2, MaskVec);
01434   }
01435 
01436   // If this is an identity shuffle, just return the (canonicalized) lhs operand.
01437   bool Identity = true;
01438   for (unsigned i = 0; i != NElts; ++i) {
01439     if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
01440   }
01441   if (Identity && NElts)
01442     return N1;
01443 
01444   FoldingSetNodeID ID;
01445   SDValue Ops[2] = { N1, N2 };
01446   AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops, 2);
01447   for (unsigned i = 0; i != NElts; ++i)
01448     ID.AddInteger(MaskVec[i]);
01449 
01450   void* IP = nullptr;
01451   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
01452     return SDValue(E, 0);
01453 
01454   // Allocate the mask array for the node out of the BumpPtrAllocator, since
01455   // SDNode doesn't have access to it.  This memory will be "leaked" when
01456   // the node is deallocated, but recovered when the NodeAllocator is released.
01457   int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
01458   memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));
01459 
01460   ShuffleVectorSDNode *N =
01461     new (NodeAllocator) ShuffleVectorSDNode(VT, dl.getIROrder(),
01462                                             dl.getDebugLoc(), N1, N2,
01463                                             MaskAlloc);
01464   CSEMap.InsertNode(N, IP);
01465   AllNodes.push_back(N);
01466   return SDValue(N, 0);
01467 }
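
// A minimal usage sketch of the canonicalizations above (DAG, dl and V are
// assumed to be an in-scope SelectionDAG, debug location and v4i32 value; the
// mask is hypothetical).  Because both inputs are the same node, the
// "shuffle v, v -> v, undef" rule rewrites indices 4..7 down into 0..3:
//
//   int Mask[4] = {0, 4, 1, 5};
//   SDValue S = DAG.getVectorShuffle(MVT::v4i32, dl, V, V, Mask);
//   // Built as if it were a shuffle of (V, undef) with mask {0, 0, 1, 1}.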
01468 
01469 SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl,
01470                                        SDValue Val, SDValue DTy,
01471                                        SDValue STy, SDValue Rnd, SDValue Sat,
01472                                        ISD::CvtCode Code) {
01473   // If the src and dest types are the same and the conversion is between
01474   // integer types of the same sign or two floats, no conversion is necessary.
01475   if (DTy == STy &&
01476       (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
01477     return Val;
01478 
01479   FoldingSetNodeID ID;
01480   SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
01481   AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), &Ops[0], 5);
01482   void* IP = nullptr;
01483   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
01484     return SDValue(E, 0);
01485 
01486   CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl.getIROrder(),
01487                                                            dl.getDebugLoc(),
01488                                                            Ops, 5, Code);
01489   CSEMap.InsertNode(N, IP);
01490   AllNodes.push_back(N);
01491   return SDValue(N, 0);
01492 }
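
// For example (Val, Ty, Rnd and Sat are hypothetical in-scope operands): a
// signed-to-signed CONVERT_RNDSAT whose source and destination type operands
// are the same node is a no-op and simply returns Val, per the check above.
//
//   SDValue Same = DAG.getConvertRndSat(VT, dl, Val, Ty, Ty, Rnd, Sat,
//                                       ISD::CVT_SS);   // Same == Val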
01493 
01494 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
01495   FoldingSetNodeID ID;
01496   AddNodeIDNode(ID, ISD::Register, getVTList(VT), nullptr, 0);
01497   ID.AddInteger(RegNo);
01498   void *IP = nullptr;
01499   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
01500     return SDValue(E, 0);
01501 
01502   SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT);
01503   CSEMap.InsertNode(N, IP);
01504   AllNodes.push_back(N);
01505   return SDValue(N, 0);
01506 }
01507 
01508 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
01509   FoldingSetNodeID ID;
01510   AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), nullptr, 0);
01511   ID.AddPointer(RegMask);
01512   void *IP = nullptr;
01513   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
01514     return SDValue(E, 0);
01515 
01516   SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask);
01517   CSEMap.InsertNode(N, IP);
01518   AllNodes.push_back(N);
01519   return SDValue(N, 0);
01520 }
01521 
01522 SDValue SelectionDAG::getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label) {
01523   FoldingSetNodeID ID;
01524   SDValue Ops[] = { Root };
01525   AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), &Ops[0], 1);
01526   ID.AddPointer(Label);
01527   void *IP = nullptr;
01528   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
01529     return SDValue(E, 0);
01530 
01531   SDNode *N = new (NodeAllocator) EHLabelSDNode(dl.getIROrder(),
01532                                                 dl.getDebugLoc(), Root, Label);
01533   CSEMap.InsertNode(N, IP);
01534   AllNodes.push_back(N);
01535   return SDValue(N, 0);
01536 }
01537 
01538 
01539 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
01540                                       int64_t Offset,
01541                                       bool isTarget,
01542                                       unsigned char TargetFlags) {
01543   unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
01544 
01545   FoldingSetNodeID ID;
01546   AddNodeIDNode(ID, Opc, getVTList(VT), nullptr, 0);
01547   ID.AddPointer(BA);
01548   ID.AddInteger(Offset);
01549   ID.AddInteger(TargetFlags);
01550   void *IP = nullptr;
01551   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
01552     return SDValue(E, 0);
01553 
01554   SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset,
01555                                                      TargetFlags);
01556   CSEMap.InsertNode(N, IP);
01557   AllNodes.push_back(N);
01558   return SDValue(N, 0);
01559 }
01560 
01561 SDValue SelectionDAG::getSrcValue(const Value *V) {
01562   assert((!V || V->getType()->isPointerTy()) &&
01563          "SrcValue is not a pointer?");
01564 
01565   FoldingSetNodeID ID;
01566   AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), nullptr, 0);
01567   ID.AddPointer(V);
01568 
01569   void *IP = nullptr;
01570   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
01571     return SDValue(E, 0);
01572 
01573   SDNode *N = new (NodeAllocator) SrcValueSDNode(V);
01574   CSEMap.InsertNode(N, IP);
01575   AllNodes.push_back(N);
01576   return SDValue(N, 0);
01577 }
01578 
01579 /// getMDNode - Return an MDNodeSDNode which holds an MDNode.
01580 SDValue SelectionDAG::getMDNode(const MDNode *MD) {
01581   FoldingSetNodeID ID;
01582   AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), nullptr, 0);
01583   ID.AddPointer(MD);
01584 
01585   void *IP = nullptr;
01586   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
01587     return SDValue(E, 0);
01588 
01589   SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
01590   CSEMap.InsertNode(N, IP);
01591   AllNodes.push_back(N);
01592   return SDValue(N, 0);
01593 }
01594 
01595 /// getAddrSpaceCast - Return an AddrSpaceCastSDNode.
01596 SDValue SelectionDAG::getAddrSpaceCast(SDLoc dl, EVT VT, SDValue Ptr,
01597                                        unsigned SrcAS, unsigned DestAS) {
01598   SDValue Ops[] = {Ptr};
01599   FoldingSetNodeID ID;
01600   AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), &Ops[0], 1);
01601   ID.AddInteger(SrcAS);
01602   ID.AddInteger(DestAS);
01603 
01604   void *IP = nullptr;
01605   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
01606     return SDValue(E, 0);
01607 
01608   SDNode *N = new (NodeAllocator) AddrSpaceCastSDNode(dl.getIROrder(),
01609                                                       dl.getDebugLoc(),
01610                                                       VT, Ptr, SrcAS, DestAS);
01611   CSEMap.InsertNode(N, IP);
01612   AllNodes.push_back(N);
01613   return SDValue(N, 0);
01614 }
01615 
01616 /// getShiftAmountOperand - Return the specified value cast to
01617 /// the target's desired shift amount type.
01618 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
01619   EVT OpTy = Op.getValueType();
01620   EVT ShTy = TM.getTargetLowering()->getShiftAmountTy(LHSTy);
01621   if (OpTy == ShTy || OpTy.isVector()) return Op;
01622 
01623   ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ?  ISD::TRUNCATE : ISD::ZERO_EXTEND;
01624   return getNode(Opcode, SDLoc(Op), ShTy, Op);
01625 }
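
// A minimal sketch of the intent (Amt is a hypothetical i8 shift amount, the
// shifted value is an i32, and the target's shift-amount type for i32 is
// assumed to be i32 as well): a too-narrow amount is zero-extended to the
// shift type, while an amount wider than the shift type would be truncated.
//
//   SDValue FixedAmt = DAG.getShiftAmountOperand(MVT::i32, Amt);
//   // FixedAmt is (zero_extend Amt to i32), or Amt itself if the types match.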
01626 
01627 /// CreateStackTemporary - Create a stack temporary, suitable for holding the
01628 /// specified value type.
01629 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
01630   MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
01631   unsigned ByteSize = VT.getStoreSize();
01632   Type *Ty = VT.getTypeForEVT(*getContext());
01633   const TargetLowering *TLI = TM.getTargetLowering();
01634   unsigned StackAlign =
01635   std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty), minAlign);
01636 
01637   int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
01638   return getFrameIndex(FrameIdx, TLI->getPointerTy());
01639 }
01640 
01641 /// CreateStackTemporary - Create a stack temporary suitable for holding
01642 /// either of the specified value types.
01643 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
01644   unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
01645                             VT2.getStoreSizeInBits())/8;
01646   Type *Ty1 = VT1.getTypeForEVT(*getContext());
01647   Type *Ty2 = VT2.getTypeForEVT(*getContext());
01648   const TargetLowering *TLI = TM.getTargetLowering();
01649   const DataLayout *TD = TLI->getDataLayout();
01650   unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
01651                             TD->getPrefTypeAlignment(Ty2));
01652 
01653   MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
01654   int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
01655   return getFrameIndex(FrameIdx, TLI->getPointerTy());
01656 }
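
// For example (assuming a data layout where f64 prefers 8-byte alignment and
// i32 prefers 4-byte alignment): a temporary able to hold either an f64 or an
// i32 needs max(64, 32) / 8 = 8 bytes at alignment max(8, 4) = 8.
//
//   SDValue Slot = DAG.CreateStackTemporary(MVT::f64, MVT::i32);
//   // Slot is a FrameIndex node for an 8-byte, 8-byte-aligned stack object.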
01657 
01658 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
01659                                 SDValue N2, ISD::CondCode Cond, SDLoc dl) {
01660   // These setcc operations always fold.
01661   switch (Cond) {
01662   default: break;
01663   case ISD::SETFALSE:
01664   case ISD::SETFALSE2: return getConstant(0, VT);
01665   case ISD::SETTRUE:
01666   case ISD::SETTRUE2: {
01667     const TargetLowering *TLI = TM.getTargetLowering();
01668     TargetLowering::BooleanContent Cnt = TLI->getBooleanContents(VT.isVector());
01669     return getConstant(
01670         Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, VT);
01671   }
01672 
01673   case ISD::SETOEQ:
01674   case ISD::SETOGT:
01675   case ISD::SETOGE:
01676   case ISD::SETOLT:
01677   case ISD::SETOLE:
01678   case ISD::SETONE:
01679   case ISD::SETO:
01680   case ISD::SETUO:
01681   case ISD::SETUEQ:
01682   case ISD::SETUNE:
01683     assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
01684     break;
01685   }
01686 
01687   if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) {
01688     const APInt &C2 = N2C->getAPIntValue();
01689     if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
01690       const APInt &C1 = N1C->getAPIntValue();
01691 
01692       switch (Cond) {
01693       default: llvm_unreachable("Unknown integer setcc!");
01694       case ISD::SETEQ:  return getConstant(C1 == C2, VT);
01695       case ISD::SETNE:  return getConstant(C1 != C2, VT);
01696       case ISD::SETULT: return getConstant(C1.ult(C2), VT);
01697       case ISD::SETUGT: return getConstant(C1.ugt(C2), VT);
01698       case ISD::SETULE: return getConstant(C1.ule(C2), VT);
01699       case ISD::SETUGE: return getConstant(C1.uge(C2), VT);
01700       case ISD::SETLT:  return getConstant(C1.slt(C2), VT);
01701       case ISD::SETGT:  return getConstant(C1.sgt(C2), VT);
01702       case ISD::SETLE:  return getConstant(C1.sle(C2), VT);
01703       case ISD::SETGE:  return getConstant(C1.sge(C2), VT);
01704       }
01705     }
01706   }
01707   if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
01708     if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
01709       APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
01710       switch (Cond) {
01711       default: break;
01712       case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
01713                           return getUNDEF(VT);
01714                         // fall through
01715       case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, VT);
01716       case ISD::SETNE:  if (R==APFloat::cmpUnordered)
01717                           return getUNDEF(VT);
01718                         // fall through
01719       case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
01720                                            R==APFloat::cmpLessThan, VT);
01721       case ISD::SETLT:  if (R==APFloat::cmpUnordered)
01722                           return getUNDEF(VT);
01723                         // fall through
01724       case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, VT);
01725       case ISD::SETGT:  if (R==APFloat::cmpUnordered)
01726                           return getUNDEF(VT);
01727                         // fall through
01728       case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, VT);
01729       case ISD::SETLE:  if (R==APFloat::cmpUnordered)
01730                           return getUNDEF(VT);
01731                         // fall through
01732       case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
01733                                            R==APFloat::cmpEqual, VT);
01734       case ISD::SETGE:  if (R==APFloat::cmpUnordered)
01735                           return getUNDEF(VT);
01736                         // fall through
01737       case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
01738                                            R==APFloat::cmpEqual, VT);
01739       case ISD::SETO:   return getConstant(R!=APFloat::cmpUnordered, VT);
01740       case ISD::SETUO:  return getConstant(R==APFloat::cmpUnordered, VT);
01741       case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
01742                                            R==APFloat::cmpEqual, VT);
01743       case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, VT);
01744       case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
01745                                            R==APFloat::cmpLessThan, VT);
01746       case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
01747                                            R==APFloat::cmpUnordered, VT);
01748       case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, VT);
01749       case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, VT);
01750       }
01751     } else {
01752       // Ensure that the constant occurs on the RHS.
01753       ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
01754       MVT CompVT = N1.getValueType().getSimpleVT();
01755       if (!TM.getTargetLowering()->isCondCodeLegal(SwappedCond, CompVT))
01756         return SDValue();
01757 
01758       return getSetCC(dl, VT, N2, N1, SwappedCond);
01759     }
01760   }
01761 
01762   // Could not fold it.
01763   return SDValue();
01764 }
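
// A couple of illustrative folds (dl, X and Y are assumed to be in scope, and
// the constants are hypothetical):
//
//   // setcc (i32 7), (i32 9), setult  -->  constant 1, since 7 u< 9
//   SDValue A = DAG.FoldSetCC(MVT::i1, DAG.getConstant(7, MVT::i32),
//                             DAG.getConstant(9, MVT::i32), ISD::SETULT, dl);
//   // setcc X, Y, setfalse            -->  constant 0, regardless of X and Y
//   SDValue B = DAG.FoldSetCC(MVT::i1, X, Y, ISD::SETFALSE, dl);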
01765 
01766 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero.  We
01767 /// use this predicate to simplify operations downstream.
01768 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
01769   // This predicate is not safe for vector operations.
01770   if (Op.getValueType().isVector())
01771     return false;
01772 
01773   unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
01774   return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
01775 }
01776 
01777 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We use
01778 /// this predicate to simplify operations downstream.  Mask is known to be zero
01779 /// for bits that V cannot have.
01780 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
01781                                      unsigned Depth) const {
01782   APInt KnownZero, KnownOne;
01783   ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
01784   assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
01785   return (KnownZero & Mask) == Mask;
01786 }
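
// A small worked example (the value and masks are hypothetical): if Op is
// known to be (X & 0xFF00) for an i32 X, ComputeMaskedBits reports bits 0-7
// and 16-31 as KnownZero, so any query mask confined to those bits succeeds.
//
//   DAG.MaskedValueIsZero(Op, APInt(32, 0x00FF));  // true:  all masked bits zero
//   DAG.MaskedValueIsZero(Op, APInt(32, 0x0180));  // false: bit 8 may be set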
01787 
01788 /// ComputeMaskedBits - Determine which bits of Op are known to be either
01789 /// zero or one and return them in the KnownZero/KnownOne bitsets.  The
01790 /// recursion is cut off at a fixed depth to keep the analysis cheap.
01792 void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
01793                                      APInt &KnownOne, unsigned Depth) const {
01794   const TargetLowering *TLI = TM.getTargetLowering();
01795   unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
01796 
01797   KnownZero = KnownOne = APInt(BitWidth, 0);   // Don't know anything.
01798   if (Depth == 6)
01799     return;  // Limit search depth.
01800 
01801   APInt KnownZero2, KnownOne2;
01802 
01803   switch (Op.getOpcode()) {
01804   case ISD::Constant:
01805     // We know all of the bits for a constant!
01806     KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
01807     KnownZero = ~KnownOne;
01808     return;
01809   case ISD::AND:
01810     // If either the LHS or the RHS are Zero, the result is zero.
01811     ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
01812     ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
01813     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
01814     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
01815 
01816     // Output known-1 bits are only known if set in both the LHS & RHS.
01817     KnownOne &= KnownOne2;
01818     // Output bits are known to be zero if they are zero in either the LHS or RHS.
01819     KnownZero |= KnownZero2;
01820     return;
01821   case ISD::OR:
01822     ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
01823     ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
01824     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
01825     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
01826 
01827     // Output known-0 bits are only known if clear in both the LHS & RHS.
01828     KnownZero &= KnownZero2;
01829     // Output bits are known to be one if they are one in either the LHS or RHS.
01830     KnownOne |= KnownOne2;
01831     return;
01832   case ISD::XOR: {
01833     ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
01834     ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
01835     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
01836     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
01837 
01838     // Output known-0 bits are known if clear or set in both the LHS & RHS.
01839     APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
01840     // Output bits are known to be one if they are one in exactly one of LHS, RHS.
01841     KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
01842     KnownZero = KnownZeroOut;
01843     return;
01844   }
01845   case ISD::MUL: {
01846     ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
01847     ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
01848     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
01849     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
01850 
01851     // If low bits are zero in either operand, output low known-0 bits.
01852     // Also compute a conservative estimate for the high known-0 bits.
01853     // More trickiness is possible, but this is sufficient for the
01854     // interesting case of alignment computation.
01855     KnownOne.clearAllBits();
01856     unsigned TrailZ = KnownZero.countTrailingOnes() +
01857                       KnownZero2.countTrailingOnes();
01858     unsigned LeadZ =  std::max(KnownZero.countLeadingOnes() +
01859                                KnownZero2.countLeadingOnes(),
01860                                BitWidth) - BitWidth;
01861 
01862     TrailZ = std::min(TrailZ, BitWidth);
01863     LeadZ = std::min(LeadZ, BitWidth);
01864     KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
01865                 APInt::getHighBitsSet(BitWidth, LeadZ);
01866     return;
01867   }
01868   case ISD::UDIV: {
01869     // For the purposes of computing leading zeros we can conservatively
01870     // treat a udiv as a logical right shift by the power of 2 known to
01871     // be less than the denominator.
01872     ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
01873     unsigned LeadZ = KnownZero2.countLeadingOnes();
01874 
01875     KnownOne2.clearAllBits();
01876     KnownZero2.clearAllBits();
01877     ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
01878     unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
01879     if (RHSUnknownLeadingOnes != BitWidth)
01880       LeadZ = std::min(BitWidth,
01881                        LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
01882 
01883     KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
01884     return;
01885   }
01886   case ISD::SELECT:
01887     ComputeMaskedBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
01888     ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
01889     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
01890     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
01891 
01892     // Only known if known in both the LHS and RHS.
01893     KnownOne &= KnownOne2;
01894     KnownZero &= KnownZero2;
01895     return;
01896   case ISD::SELECT_CC:
01897     ComputeMaskedBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
01898     ComputeMaskedBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
01899     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
01900     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
01901 
01902     // Only known if known in both the LHS and RHS.
01903     KnownOne &= KnownOne2;
01904     KnownZero &= KnownZero2;
01905     return;
01906   case ISD::SADDO:
01907   case ISD::UADDO:
01908   case ISD::SSUBO:
01909   case ISD::USUBO:
01910   case ISD::SMULO:
01911   case ISD::UMULO:
01912     if (Op.getResNo() != 1)
01913       return;
01914     // The boolean result conforms to getBooleanContents.  Fall through.
01915   case ISD::SETCC:
01916     // If we know the result of a setcc has the top bits zero, use this info.
01917     if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
01918         TargetLowering::ZeroOrOneBooleanContent && BitWidth > 1)
01919       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
01920     return;
01921   case ISD::SHL:
01922     // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
01923     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
01924       unsigned ShAmt = SA->getZExtValue();
01925 
01926       // If the shift count is an invalid immediate, don't do anything.
01927       if (ShAmt >= BitWidth)
01928         return;
01929 
01930       ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
01931       assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
01932       KnownZero <<= ShAmt;
01933       KnownOne  <<= ShAmt;
01934       // low bits known zero.
01935       KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
01936     }
01937     return;
01938   case ISD::SRL:
01939     // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
01940     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
01941       unsigned ShAmt = SA->getZExtValue();
01942 
01943       // If the shift count is an invalid immediate, don't do anything.
01944       if (ShAmt >= BitWidth)
01945         return;
01946 
01947       ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
01948       assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
01949       KnownZero = KnownZero.lshr(ShAmt);
01950       KnownOne  = KnownOne.lshr(ShAmt);
01951 
01952       APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
01953       KnownZero |= HighBits;  // High bits known zero.
01954     }
01955     return;
01956   case ISD::SRA:
01957     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
01958       unsigned ShAmt = SA->getZExtValue();
01959 
01960       // If the shift count is an invalid immediate, don't do anything.
01961       if (ShAmt >= BitWidth)
01962         return;
01963 
01964       // If any of the demanded bits are produced by the sign extension, we also
01965       // demand the input sign bit.
01966       APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
01967 
01968       ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
01969       assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
01970       KnownZero = KnownZero.lshr(ShAmt);
01971       KnownOne  = KnownOne.lshr(ShAmt);
01972 
01973       // Handle the sign bits.
01974       APInt SignBit = APInt::getSignBit(BitWidth);
01975       SignBit = SignBit.lshr(ShAmt);  // Adjust to where it is now in the mask.
01976 
01977       if (KnownZero.intersects(SignBit)) {
01978         KnownZero |= HighBits;  // New bits are known zero.
01979       } else if (KnownOne.intersects(SignBit)) {
01980         KnownOne  |= HighBits;  // New bits are known one.
01981       }
01982     }
01983     return;
01984   case ISD::SIGN_EXTEND_INREG: {
01985     EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
01986     unsigned EBits = EVT.getScalarType().getSizeInBits();
01987 
01988     // Sign extension.  Compute the demanded bits in the result that are not
01989     // present in the input.
01990     APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
01991 
01992     APInt InSignBit = APInt::getSignBit(EBits);
01993     APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
01994 
01995     // If the sign extended bits are demanded, we know that the sign
01996     // bit is demanded.
01997     InSignBit = InSignBit.zext(BitWidth);
01998     if (NewBits.getBoolValue())
01999       InputDemandedBits |= InSignBit;
02000 
02001     ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
02002     KnownOne &= InputDemandedBits;
02003     KnownZero &= InputDemandedBits;
02004     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
02005 
02006     // If the sign bit of the input is known set or clear, then we know the
02007     // top bits of the result.
02008     if (KnownZero.intersects(InSignBit)) {         // Input sign bit known clear
02009       KnownZero |= NewBits;
02010       KnownOne  &= ~NewBits;
02011     } else if (KnownOne.intersects(InSignBit)) {   // Input sign bit known set
02012       KnownOne  |= NewBits;
02013       KnownZero &= ~NewBits;
02014     } else {                              // Input sign bit unknown
02015       KnownZero &= ~NewBits;
02016       KnownOne  &= ~NewBits;
02017     }
02018     return;
02019   }
02020   case ISD::CTTZ:
02021   case ISD::CTTZ_ZERO_UNDEF:
02022   case ISD::CTLZ:
02023   case ISD::CTLZ_ZERO_UNDEF:
02024   case ISD::CTPOP: {
02025     unsigned LowBits = Log2_32(BitWidth)+1;
02026     KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
02027     KnownOne.clearAllBits();
02028     return;
02029   }
02030   case ISD::LOAD: {
02031     LoadSDNode *LD = cast<LoadSDNode>(Op);
02032     // If this is a ZEXTLoad and we are looking at the loaded value.
02033     if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
02034       EVT VT = LD->getMemoryVT();
02035       unsigned MemBits = VT.getScalarType().getSizeInBits();
02036       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
02037     } else if (const MDNode *Ranges = LD->getRanges()) {
02038       computeMaskedBitsLoad(*Ranges, KnownZero);
02039     }
02040     return;
02041   }
02042   case ISD::ZERO_EXTEND: {
02043     EVT InVT = Op.getOperand(0).getValueType();
02044     unsigned InBits = InVT.getScalarType().getSizeInBits();
02045     APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
02046     KnownZero = KnownZero.trunc(InBits);
02047     KnownOne = KnownOne.trunc(InBits);
02048     ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
02049     KnownZero = KnownZero.zext(BitWidth);
02050     KnownOne = KnownOne.zext(BitWidth);
02051     KnownZero |= NewBits;
02052     return;
02053   }
02054   case ISD::SIGN_EXTEND: {
02055     EVT InVT = Op.getOperand(0).getValueType();
02056     unsigned InBits = InVT.getScalarType().getSizeInBits();
02057     APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
02058 
02059     KnownZero = KnownZero.trunc(InBits);
02060     KnownOne = KnownOne.trunc(InBits);
02061     ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
02062 
02063     // Note if the sign bit is known to be zero or one.
02064     bool SignBitKnownZero = KnownZero.isNegative();
02065     bool SignBitKnownOne  = KnownOne.isNegative();
02066     assert(!(SignBitKnownZero && SignBitKnownOne) &&
02067            "Sign bit can't be known to be both zero and one!");
02068 
02069     KnownZero = KnownZero.zext(BitWidth);
02070     KnownOne = KnownOne.zext(BitWidth);
02071 
02072     // If the sign bit is known zero or one, the top bits match.
02073     if (SignBitKnownZero)
02074       KnownZero |= NewBits;
02075     else if (SignBitKnownOne)
02076       KnownOne  |= NewBits;
02077     return;
02078   }
02079   case ISD::ANY_EXTEND: {
02080     EVT InVT = Op.getOperand(0).getValueType();
02081     unsigned InBits = InVT.getScalarType().getSizeInBits();
02082     KnownZero = KnownZero.trunc(InBits);
02083     KnownOne = KnownOne.trunc(InBits);
02084     ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
02085     KnownZero = KnownZero.zext(BitWidth);
02086     KnownOne = KnownOne.zext(BitWidth);
02087     return;
02088   }
02089   case ISD::TRUNCATE: {
02090     EVT InVT = Op.getOperand(0).getValueType();
02091     unsigned InBits = InVT.getScalarType().getSizeInBits();
02092     KnownZero = KnownZero.zext(InBits);
02093     KnownOne = KnownOne.zext(InBits);
02094     ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
02095     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
02096     KnownZero = KnownZero.trunc(BitWidth);
02097     KnownOne = KnownOne.trunc(BitWidth);
02098     break;
02099   }
02100   case ISD::AssertZext: {
02101     EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
02102     APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
02103     ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
02104     KnownZero |= (~InMask);
02105     KnownOne  &= (~KnownZero);
02106     return;
02107   }
02108   case ISD::FGETSIGN:
02109     // All bits are zero except the low bit.
02110     KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
02111     return;
02112 
02113   case ISD::SUB: {
02114     if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
02115       // We know that the top bits of C-X are clear if X contains fewer bits
02116       // than C (i.e. no wrap-around can happen).  For example, 20-X is
02117       // positive if we can prove that X is >= 0 and < 16.
02118       if (CLHS->getAPIntValue().isNonNegative()) {
02119         unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
02120         // NLZ can't be BitWidth, because C is non-negative so C+1 is nonzero.
02121         APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
02122         ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
02123 
02124         // If all of the MaskV bits are known to be zero, then we know the
02125         // output top bits are zero, because we now know that the output is
02126         // from [0-C].
02127         if ((KnownZero2 & MaskV) == MaskV) {
02128           unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
02129           // Top bits known zero.
02130           KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
02131         }
02132       }
02133     }
02134   }
02135   // fall through
02136   case ISD::ADD:
02137   case ISD::ADDE: {
02138     // The output's low known-0 bits are the low bits known to be clear in
02139     // both the LHS and the RHS.  For example, 8+(X<<3) is known to have the
02140     // low 3 bits clear.
02141     ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
02142     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
02143     unsigned KnownZeroOut = KnownZero2.countTrailingOnes();
02144 
02145     ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
02146     assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
02147     KnownZeroOut = std::min(KnownZeroOut,
02148                             KnownZero2.countTrailingOnes());
02149 
02150     if (Op.getOpcode() == ISD::ADD) {
02151       KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
02152       return;
02153     }
02154 
02155     // With ADDE, a carry bit may be added in, so we can only use this
02156     // information if we know (at least) that the low two bits are clear.  We
02157     // then return to the caller that the low bit is unknown but that other bits
02158     // are known zero.
02159     if (KnownZeroOut >= 2) // ADDE
02160       KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut);
02161     return;
02162   }
02163   case ISD::SREM:
02164     if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
02165       const APInt &RA = Rem->getAPIntValue().abs();
02166       if (RA.isPowerOf2()) {
02167         APInt LowBits = RA - 1;
02168         ComputeMaskedBits(Op.getOperand(0), KnownZero2,KnownOne2,Depth+1);
02169 
02170         // The low bits of the first operand are unchanged by the srem.
02171         KnownZero = KnownZero2 & LowBits;
02172         KnownOne = KnownOne2 & LowBits;
02173 
02174         // If the first operand is non-negative or has all low bits zero, then
02175         // the upper bits are all zero.
02176         if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
02177           KnownZero |= ~LowBits;
02178 
02179         // If the first operand is negative and not all low bits are zero, then
02180         // the upper bits are all one.
02181         if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
02182           KnownOne |= ~LowBits;
02183         assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
02184       }
02185     }
02186     return;
02187   case ISD::UREM: {
02188     if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
02189       const APInt &RA = Rem->getAPIntValue();
02190       if (RA.isPowerOf2()) {
02191         APInt LowBits = (RA - 1);
02192         KnownZero |= ~LowBits;
02193         ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne,Depth+1);
02194         assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
02195         break;
02196       }
02197     }
02198 
02199     // Since the result is less than or equal to either operand, any leading
02200     // zero bits in either operand must also exist in the result.
02201     ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
02202     ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
02203 
02204     uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
02205                                 KnownZero2.countLeadingOnes());
02206     KnownOne.clearAllBits();
02207     KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
02208     return;
02209   }
02210   case ISD::FrameIndex:
02211   case ISD::TargetFrameIndex:
02212     if (unsigned Align = InferPtrAlignment(Op)) {
02213       // The low bits are known zero if the pointer is aligned.
02214       KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
02215       return;
02216     }
02217     break;
02218 
02219   default:
02220     if (Op.getOpcode() < ISD::BUILTIN_OP_END)
02221       break;
02222     // Fallthrough
02223   case ISD::INTRINSIC_WO_CHAIN:
02224   case ISD::INTRINSIC_W_CHAIN:
02225   case ISD::INTRINSIC_VOID:
02226     // Allow the target to implement this method for its nodes.
02227     TLI->computeMaskedBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
02228     return;
02229   }
02230 }
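
// A small worked example for the ISD::SHL case above (type, value and shift
// amount are hypothetical): shifting an i8 value left by 4 moves whatever was
// known about the source up and makes the vacated low bits known zero.
//
//   APInt KnownZero(8, 0), KnownOne(8, 0);
//   // Op = (shl X, 4), with nothing known about X:
//   DAG.ComputeMaskedBits(Op, KnownZero, KnownOne, 0);
//   // KnownOne  == 00000000
//   // KnownZero == 00001111   (the four low bits are provably zero)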
02231 
02232 /// ComputeNumSignBits - Return the number of times the sign bit of the
02233 /// register is replicated into the other bits.  We know that at least 1 bit
02234 /// is always equal to the sign bit (itself), but other cases can give us
02235 /// information.  For example, immediately after an "SRA X, 2", we know that
02236 /// the top 3 bits are all equal to each other, so we return 3.
02237 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
02238   const TargetLowering *TLI = TM.getTargetLowering();
02239   EVT VT = Op.getValueType();
02240   assert(VT.isInteger() && "Invalid VT!");
02241   unsigned VTBits = VT.getScalarType().getSizeInBits();
02242   unsigned Tmp, Tmp2;
02243   unsigned FirstAnswer = 1;
02244 
02245   if (Depth == 6)
02246     return 1;  // Limit search depth.
02247 
02248   switch (Op.getOpcode()) {
02249   default: break;
02250   case ISD::AssertSext:
02251     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
02252     return VTBits-Tmp+1;
02253   case ISD::AssertZext:
02254     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
02255     return VTBits-Tmp;
02256 
02257   case ISD::Constant: {
02258     const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
02259     return Val.getNumSignBits();
02260   }
02261 
02262   case ISD::SIGN_EXTEND:
02263     Tmp =
02264         VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
02265     return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
02266 
02267   case ISD::SIGN_EXTEND_INREG:
02268     // Max of the input and what this extends.
02269     Tmp =
02270       cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits();
02271     Tmp = VTBits-Tmp+1;
02272 
02273     Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
02274     return std::max(Tmp, Tmp2);
02275 
02276   case ISD::SRA:
02277     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
02278     // SRA X, C   -> adds C sign bits.
02279     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
02280       Tmp += C->getZExtValue();
02281       if (Tmp > VTBits) Tmp = VTBits;
02282     }
02283     return Tmp;
02284   case ISD::SHL:
02285     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
02286       // shl destroys sign bits.
02287       Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
02288       if (C->getZExtValue() >= VTBits ||      // Bad shift.
02289           C->getZExtValue() >= Tmp) break;    // Shifted all sign bits out.
02290       return Tmp - C->getZExtValue();
02291     }
02292     break;
02293   case ISD::AND:
02294   case ISD::OR:
02295   case ISD::XOR:    // NOT is handled here.
02296     // Logical binary ops preserve the number of sign bits at the worst.
02297     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
02298     if (Tmp != 1) {
02299       Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
02300       FirstAnswer = std::min(Tmp, Tmp2);
02301       // We computed what we know about the sign bits as our first
02302       // answer. Now proceed to the generic code that uses
02303       // ComputeMaskedBits, and pick whichever answer is better.
02304     }
02305     break;
02306 
02307   case ISD::SELECT:
02308     Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
02309     if (Tmp == 1) return 1;  // Early out.
02310     Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
02311     return std::min(Tmp, Tmp2);
02312 
02313   case ISD::SADDO:
02314   case ISD::UADDO:
02315   case ISD::SSUBO:
02316   case ISD::USUBO:
02317   case ISD::SMULO:
02318   case ISD::UMULO:
02319     if (Op.getResNo() != 1)
02320       break;
02321     // The boolean result conforms to getBooleanContents.  Fall through.
02322   case ISD::SETCC:
02323     // If setcc returns 0/-1, all bits are sign bits.
02324     if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
02325         TargetLowering::ZeroOrNegativeOneBooleanContent)
02326       return VTBits;
02327     break;
02328   case ISD::ROTL:
02329   case ISD::ROTR:
02330     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
02331       unsigned RotAmt = C->getZExtValue() & (VTBits-1);
02332 
02333       // Handle rotate right by N like a rotate left by 32-N.
02334       if (Op.getOpcode() == ISD::ROTR)
02335         RotAmt = (VTBits-RotAmt) & (VTBits-1);
02336 
02337       // If we aren't rotating out all of the known-in sign bits, return the
02338       // number that are left.  This handles rotl(sext(x), 1) for example.
02339       Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
02340       if (Tmp > RotAmt+1) return Tmp-RotAmt;
02341     }
02342     break;
02343   case ISD::ADD:
02344     // Add can have at most one carry bit.  Thus we know that the output
02345     // is, at worst, one more bit than the inputs.
02346     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
02347     if (Tmp == 1) return 1;  // Early out.
02348 
02349     // Special case decrementing a value (ADD X, -1):
02350     if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
02351       if (CRHS->isAllOnesValue()) {
02352         APInt KnownZero, KnownOne;
02353         ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
02354 
02355         // If the input is known to be 0 or 1, the output is 0/-1, which is all
02356         // sign bits set.
02357         if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
02358           return VTBits;
02359 
02360         // If we are subtracting one from a positive number, there is no carry
02361         // out of the result.
02362         if (KnownZero.isNegative())
02363           return Tmp;
02364       }
02365 
02366     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
02367     if (Tmp2 == 1) return 1;
02368     return std::min(Tmp, Tmp2)-1;
02369 
02370   case ISD::SUB:
02371     Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
02372     if (Tmp2 == 1) return 1;
02373 
02374     // Handle NEG.
02375     if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
02376       if (CLHS->isNullValue()) {
02377         APInt KnownZero, KnownOne;
02378         ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
02379         // If the input is known to be 0 or 1, the output is 0/-1, which is all
02380         // sign bits set.
02381         if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
02382           return VTBits;
02383 
02384         // If the input is known to be positive (the sign bit is known clear),
02385         // the output of the NEG has the same number of sign bits as the input.
02386         if (KnownZero.isNegative())
02387           return Tmp2;
02388 
02389         // Otherwise, we treat this like a SUB.
02390       }
02391 
02392     // Sub can have at most one carry bit.  Thus we know that the output
02393     // is, at worst, one more bit than the inputs.
02394     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
02395     if (Tmp == 1) return 1;  // Early out.
02396     return std::min(Tmp, Tmp2)-1;
02397   case ISD::TRUNCATE:
02398     // FIXME: it's tricky to do anything useful for this, but it is an important
02399     // case for targets like X86.
02400     break;
02401   }
02402 
02403   // If we are looking at the loaded value of the SDNode.
02404   if (Op.getResNo() == 0) {
02405     // Handle LOADX separately here. EXTLOAD case will fallthrough.
02406     if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
02407       unsigned ExtType = LD->getExtensionType();
02408       switch (ExtType) {
02409         default: break;
02410         case ISD::SEXTLOAD:    // '17' bits known
02411           Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
02412           return VTBits-Tmp+1;
02413         case ISD::ZEXTLOAD:    // '16' bits known
02414           Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
02415           return VTBits-Tmp;
02416       }
02417     }
02418   }
02419 
02420   // Allow the target to implement this method for its nodes.
02421   if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
02422       Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
02423       Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
02424       Op.getOpcode() == ISD::INTRINSIC_VOID) {
02425     unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, *this, Depth);
02426     if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
02427   }
02428 
02429   // Finally, if we can prove that the top bits of the result are 0's or 1's,
02430   // use this information.
02431   APInt KnownZero, KnownOne;
02432   ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
02433 
02434   APInt Mask;
02435   if (KnownZero.isNegative()) {        // sign bit is 0
02436     Mask = KnownZero;
02437   } else if (KnownOne.isNegative()) {  // sign bit is 1;
02438     Mask = KnownOne;
02439   } else {
02440     // Nothing known.
02441     return FirstAnswer;
02442   }
02443 
02444   // Okay, we know that the sign bit in Mask is set.  Use CLZ to determine
02445   // the number of identical bits in the top of the input value.
02446   Mask = ~Mask;
02447   Mask <<= Mask.getBitWidth()-VTBits;
02448   // Return # leading zeros.  We use 'min' here in case the value was zero
02449   // before shifting.  We don't want to return '64' for an i32 "0".
02450   return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
02451 }
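
// A small worked example (the type and shift amount are hypothetical),
// matching the SRA note in the comment above: for an i32 value
// Y = (sra X, 2), the ISD::SRA case adds the shift amount to whatever is
// known about X, so even when X contributes only its own sign bit:
//
//   unsigned N = DAG.ComputeNumSignBits(Y);   // N == 3
//
// i.e. the top three bits of Y are all copies of its sign bit.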
02452 
02453 /// isBaseWithConstantOffset - Return true if the specified operand is an
02454 /// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
02455 /// ISD::OR with a ConstantSDNode that is guaranteed to have the same
02456 /// semantics as an ADD.  This handles the equivalence:
02457 ///     X|Cst == X+Cst iff X&Cst == 0.
02458 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
02459   if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
02460       !isa<ConstantSDNode>(Op.getOperand(1)))
02461     return false;
02462 
02463   if (Op.getOpcode() == ISD::OR &&
02464       !MaskedValueIsZero(Op.getOperand(0),
02465                      cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
02466     return false;
02467 
02468   return true;
02469 }
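
// For example (Base is a hypothetical node): if Base is known to have its low
// 4 bits clear (say it is (shl X, 4)), then (or Base, 7) sets only bits that
// are provably zero in Base, so it is accepted here as equivalent to
// (add Base, 7).  By contrast, (or Base, 16) is rejected: bit 4 of Base is not
// known to be zero, so the OR may not behave like an ADD.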
02470 
02471 
02472 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
02473   // If we're told that NaNs won't happen, assume they won't.
02474   if (getTarget().Options.NoNaNsFPMath)
02475     return true;
02476 
02477   // If the value is a constant, we can obviously see if it is a NaN or not.
02478   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
02479     return !C->getValueAPF().isNaN();
02480 
02481   // TODO: Recognize more cases here.
02482 
02483   return false;
02484 }
02485 
02486 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
02487   // If the value is a constant, we can obviously see if it is a zero or not.
02488   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
02489     return !C->isZero();
02490 
02491   // TODO: Recognize more cases here.
02492   switch (Op.getOpcode()) {
02493   default: break;
02494   case ISD::OR:
02495     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
02496       return !C->isNullValue();
02497     break;
02498   }
02499 
02500   return false;
02501 }
02502 
02503 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
02504   // Check the obvious case.
02505   if (A == B) return true;
02506 
02507   // For negative and positive zero.
02508   if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
02509     if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
02510       if (CA->isZero() && CB->isZero()) return true;
02511 
02512   // Otherwise they may not be equal.
02513   return false;
02514 }
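
// Illustrative behaviour (the constants are hypothetical): two distinct
// ConstantFP nodes holding +0.0 and -0.0 compare equal here even though their
// bit patterns differ, while other unequal constants do not.
//
//   DAG.isEqualTo(DAG.getConstantFP(+0.0, MVT::f64),
//                 DAG.getConstantFP(-0.0, MVT::f64));   // true
//   DAG.isEqualTo(DAG.getConstantFP(1.0, MVT::f64),
//                 DAG.getConstantFP(2.0, MVT::f64));    // false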
02515 
02516 /// getNode - Gets or creates the specified node.
02517 ///
02518 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT) {
02519   FoldingSetNodeID ID;
02520   AddNodeIDNode(ID, Opcode, getVTList(VT), nullptr, 0);
02521   void *IP = nullptr;
02522   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
02523     return SDValue(E, 0);
02524 
02525   SDNode *N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(),
02526                                          DL.getDebugLoc(), getVTList(VT));
02527   CSEMap.InsertNode(N, IP);
02528 
02529   AllNodes.push_back(N);
02530 #ifndef NDEBUG
02531   VerifySDNode(N);
02532 #endif
02533   return SDValue(N, 0);
02534 }
02535 
02536 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
02537                               EVT VT, SDValue Operand) {
02538   // Constant fold unary operations with an integer constant operand. Even
02539   // opaque constants will be folded, because the folding of unary operations
02540   // doesn't create new constants with different values. Nevertheless, the
02541   // opaque flag is preserved during folding to prevent future folding with
02542   // other constants.
02543   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
02544     const APInt &Val = C->getAPIntValue();
02545     switch (Opcode) {
02546     default: break;
02547     case ISD::SIGN_EXTEND:
02548       return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), VT,
02549                          C->isTargetOpcode(), C->isOpaque());
02550     case ISD::ANY_EXTEND:
02551     case ISD::ZERO_EXTEND:
02552     case ISD::TRUNCATE:
02553       return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT,
02554                          C->isTargetOpcode(), C->isOpaque());
02555     case ISD::UINT_TO_FP:
02556     case ISD::SINT_TO_FP: {
02557       APFloat apf(EVTToAPFloatSemantics(VT),
02558                   APInt::getNullValue(VT.getSizeInBits()));
02559       (void)apf.convertFromAPInt(Val,
02560                                  Opcode==ISD::SINT_TO_FP,
02561                                  APFloat::rmNearestTiesToEven);
02562       return getConstantFP(apf, VT);
02563     }
02564     case ISD::BITCAST:
02565       if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
02566         return getConstantFP(APFloat(APFloat::IEEEsingle, Val), VT);
02567       else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
02568         return getConstantFP(APFloat(APFloat::IEEEdouble, Val), VT);
02569       break;
02570     case ISD::BSWAP:
02571       return getConstant(Val.byteSwap(), VT, C->isTargetOpcode(),
02572                          C->isOpaque());
02573     case ISD::CTPOP:
02574       return getConstant(Val.countPopulation(), VT, C->isTargetOpcode(),
02575                          C->isOpaque());
02576     case ISD::CTLZ:
02577     case ISD::CTLZ_ZERO_UNDEF:
02578       return getConstant(Val.countLeadingZeros(), VT, C->isTargetOpcode(),
02579                          C->isOpaque());
02580     case ISD::CTTZ:
02581     case ISD::CTTZ_ZERO_UNDEF:
02582       return getConstant(Val.countTrailingZeros(), VT, C->isTargetOpcode(),
02583                          C->isOpaque());
02584     }
02585   }
02586 
02587   // Constant fold unary operations with a floating point constant operand.
02588   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) {
02589     APFloat V = C->getValueAPF();    // make copy
02590     switch (Opcode) {
02591     case ISD::FNEG:
02592       V.changeSign();
02593       return getConstantFP(V, VT);
02594     case ISD::FABS:
02595       V.clearSign();
02596       return getConstantFP(V, VT);
02597     case ISD::FCEIL: {
02598       APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
02599       if (fs == APFloat::opOK || fs == APFloat::opInexact)
02600         return getConstantFP(V, VT);
02601       break;
02602     }
02603     case ISD::FTRUNC: {
02604       APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
02605       if (fs == APFloat::opOK || fs == APFloat::opInexact)
02606         return getConstantFP(V, VT);
02607       break;
02608     }
02609     case ISD::FFLOOR: {
02610       APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
02611       if (fs == APFloat::opOK || fs == APFloat::opInexact)
02612         return getConstantFP(V, VT);
02613       break;
02614     }
02615     case ISD::FP_EXTEND: {
02616       bool ignored;
02617       // This can return overflow, underflow, or inexact; we don't care.
02618       // FIXME need to be more flexible about rounding mode.
02619       (void)V.convert(EVTToAPFloatSemantics(VT),
02620                       APFloat::rmNearestTiesToEven, &ignored);
02621       return getConstantFP(V, VT);
02622     }
02623     case ISD::FP_TO_SINT:
02624     case ISD::FP_TO_UINT: {
02625       integerPart x[2];
02626       bool ignored;
02627       assert(integerPartWidth >= 64);
02628       // FIXME need to be more flexible about rounding mode.
02629       APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
02630                             Opcode==ISD::FP_TO_SINT,
02631                             APFloat::rmTowardZero, &ignored);
02632       if (s==APFloat::opInvalidOp)     // inexact is OK, in fact usual
02633         break;
02634       APInt api(VT.getSizeInBits(), x);
02635       return getConstant(api, VT);
02636     }
02637     case ISD::BITCAST:
02638       if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
02639         return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
02640       else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
02641         return getConstant(V.bitcastToAPInt().getZExtValue(), VT);
02642       break;
02643     }
02644   }
02645 
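  // Illustrative sketch (not part of the original listing), assuming 'DAG' is
  // this SelectionDAG and 'DL' is an SDLoc: unary FP nodes over a
  // ConstantFPSDNode are evaluated directly by the block above, e.g.
  //   SDValue C   = DAG.getConstantFP(-2.5, MVT::f64);
  //   SDValue Neg = DAG.getNode(ISD::FNEG, DL, MVT::f64, C);
  //   // Neg holds the ConstantFP +2.5; no FNEG node is built.
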
02646   unsigned OpOpcode = Operand.getNode()->getOpcode();
02647   switch (Opcode) {
02648   case ISD::TokenFactor:
02649   case ISD::MERGE_VALUES:
02650   case ISD::CONCAT_VECTORS:
02651     return Operand;         // Factor, merge or concat of one node?  No need.
02652   case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
02653   case ISD::FP_EXTEND:
02654     assert(VT.isFloatingPoint() &&
02655            Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
02656     if (Operand.getValueType() == VT) return Operand;  // noop conversion.
02657     assert((!VT.isVector() ||
02658             VT.getVectorNumElements() ==
02659             Operand.getValueType().getVectorNumElements()) &&
02660            "Vector element count mismatch!");
02661     if (Operand.getOpcode() == ISD::UNDEF)
02662       return getUNDEF(VT);
02663     break;
02664   case ISD::SIGN_EXTEND:
02665     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
02666            "Invalid SIGN_EXTEND!");
02667     if (Operand.getValueType() == VT) return Operand;   // noop extension
02668     assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
02669            "Invalid sext node, dst < src!");
02670     assert((!VT.isVector() ||
02671             VT.getVectorNumElements() ==
02672             Operand.getValueType().getVectorNumElements()) &&
02673            "Vector element count mismatch!");
02674     if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
02675       return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
02676     else if (OpOpcode == ISD::UNDEF)
02677       // sext(undef) = 0, because the top bits will all be the same.
02678       return getConstant(0, VT);
02679     break;
02680   case ISD::ZERO_EXTEND:
02681     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
02682            "Invalid ZERO_EXTEND!");
02683     if (Operand.getValueType() == VT) return Operand;   // noop extension
02684     assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
02685            "Invalid zext node, dst < src!");
02686     assert((!VT.isVector() ||
02687             VT.getVectorNumElements() ==
02688             Operand.getValueType().getVectorNumElements()) &&
02689            "Vector element count mismatch!");
02690     if (OpOpcode == ISD::ZERO_EXTEND)   // (zext (zext x)) -> (zext x)
02691       return getNode(ISD::ZERO_EXTEND, DL, VT,
02692                      Operand.getNode()->getOperand(0));
02693     else if (OpOpcode == ISD::UNDEF)
02694       // zext(undef) = 0, because the top bits will be zero.
02695       return getConstant(0, VT);
02696     break;
02697   case ISD::ANY_EXTEND:
02698     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
02699            "Invalid ANY_EXTEND!");
02700     if (Operand.getValueType() == VT) return Operand;   // noop extension
02701     assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
02702            "Invalid anyext node, dst < src!");
02703     assert((!VT.isVector() ||
02704             VT.getVectorNumElements() ==
02705             Operand.getValueType().getVectorNumElements()) &&
02706            "Vector element count mismatch!");
02707 
02708     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
02709         OpOpcode == ISD::ANY_EXTEND)
02710       // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
02711       return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
02712     else if (OpOpcode == ISD::UNDEF)
02713       return getUNDEF(VT);
02714 
02715     // (ext (trunc x)) -> x
02716     if (OpOpcode == ISD::TRUNCATE) {
02717       SDValue OpOp = Operand.getNode()->getOperand(0);
02718       if (OpOp.getValueType() == VT)
02719         return OpOp;
02720     }
02721     break;
02722   case ISD::TRUNCATE:
02723     assert(VT.isInteger() && Operand.getValueType().isInteger() &&
02724            "Invalid TRUNCATE!");
02725     if (Operand.getValueType() == VT) return Operand;   // noop truncate
02726     assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) &&
02727            "Invalid truncate node, src < dst!");
02728     assert((!VT.isVector() ||
02729             VT.getVectorNumElements() ==
02730             Operand.getValueType().getVectorNumElements()) &&
02731            "Vector element count mismatch!");
02732     if (OpOpcode == ISD::TRUNCATE)
02733       return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
02734     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
02735         OpOpcode == ISD::ANY_EXTEND) {
02736       // If the source is smaller than the dest, we still need an extend.
02737       if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
02738             .bitsLT(VT.getScalarType()))
02739         return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
02740       if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
02741         return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
02742       return Operand.getNode()->getOperand(0);
02743     }
02744     if (OpOpcode == ISD::UNDEF)
02745       return getUNDEF(VT);
02746     break;
02747   case ISD::BITCAST:
02748     // Basic sanity checking.
02749     assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
02750            && "Cannot BITCAST between types of different sizes!");
02751     if (VT == Operand.getValueType()) return Operand;  // noop conversion.
02752     if (OpOpcode == ISD::BITCAST)  // bitconv(bitconv(x)) -> bitconv(x)
02753       return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
02754     if (OpOpcode == ISD::UNDEF)
02755       return getUNDEF(VT);
02756     break;
02757   case ISD::SCALAR_TO_VECTOR:
02758     assert(VT.isVector() && !Operand.getValueType().isVector() &&
02759            (VT.getVectorElementType() == Operand.getValueType() ||
02760             (VT.getVectorElementType().isInteger() &&
02761              Operand.getValueType().isInteger() &&
02762              VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
02763            "Illegal SCALAR_TO_VECTOR node!");
02764     if (OpOpcode == ISD::UNDEF)
02765       return getUNDEF(VT);
02766     // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
02767     if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
02768         isa<ConstantSDNode>(Operand.getOperand(1)) &&
02769         Operand.getConstantOperandVal(1) == 0 &&
02770         Operand.getOperand(0).getValueType() == VT)
02771       return Operand.getOperand(0);
02772     break;
02773   case ISD::FNEG:
02774     // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
02775     if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
02776       return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
02777                      Operand.getNode()->getOperand(0));
02778     if (OpOpcode == ISD::FNEG)  // --X -> X
02779       return Operand.getNode()->getOperand(0);
02780     break;
02781   case ISD::FABS:
02782     if (OpOpcode == ISD::FNEG)  // abs(-X) -> abs(X)
02783       return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
02784     break;
02785   }
02786 
02787   SDNode *N;
02788   SDVTList VTs = getVTList(VT);
02789   if (VT != MVT::Glue) { // Don't CSE flag producing nodes
02790     FoldingSetNodeID ID;
02791     SDValue Ops[1] = { Operand };
02792     AddNodeIDNode(ID, Opcode, VTs, Ops, 1);
02793     void *IP = nullptr;
02794     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
02795       return SDValue(E, 0);
02796 
02797     N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
02798                                         DL.getDebugLoc(), VTs, Operand);
02799     CSEMap.InsertNode(N, IP);
02800   } else {
02801     N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
02802                                         DL.getDebugLoc(), VTs, Operand);
02803   }
02804 
02805   AllNodes.push_back(N);
02806 #ifndef NDEBUG
02807   VerifySDNode(N);
02808 #endif
02809   return SDValue(N, 0);
02810 }
02811 
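// Illustrative sketch (not part of the original listing), assuming 'DAG' is a
// SelectionDAG, 'DL' an SDLoc, and 'X' an i8 SDValue: besides constant folding,
// the unary getNode above collapses redundant cast chains, e.g.
//   SDValue Ext16 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i16, X);
//   SDValue Ext32 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Ext16);
//   // Ext32 is built as (zext i32 X) directly: (zext (zext x)) -> (zext x).
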
02812 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, EVT VT,
02813                                              SDNode *Cst1, SDNode *Cst2) {
02814   // If the opcode is a target-specific ISD node, there's nothing we can
02815   // do here and the operand rules may not line up with the below, so
02816   // bail early.
02817   if (Opcode >= ISD::BUILTIN_OP_END)
02818     return SDValue();
02819 
02820   SmallVector<std::pair<ConstantSDNode *, ConstantSDNode *>, 4> Inputs;
02821   SmallVector<SDValue, 4> Outputs;
02822   EVT SVT = VT.getScalarType();
02823 
02824   ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1);
02825   ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2);
02826   if (Scalar1 && Scalar2 && (Scalar1->isOpaque() || Scalar2->isOpaque()))
02827     return SDValue();
02828 
02829   if (Scalar1 && Scalar2)
02830     // Scalar instruction.
02831     Inputs.push_back(std::make_pair(Scalar1, Scalar2));
02832   else {
02833     // For vectors, extract each constant element into Inputs so we can
02834     // constant fold them individually.
02835     BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
02836     BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
02837     if (!BV1 || !BV2)
02838       return SDValue();
02839 
02840     assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
02841 
02842     for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
02843       ConstantSDNode *V1 = dyn_cast<ConstantSDNode>(BV1->getOperand(I));
02844       ConstantSDNode *V2 = dyn_cast<ConstantSDNode>(BV2->getOperand(I));
02845       if (!V1 || !V2) // Not a constant, bail.
02846         return SDValue();
02847 
02848       if (V1->isOpaque() || V2->isOpaque())
02849         return SDValue();
02850 
02851       // Avoid BUILD_VECTOR nodes that perform implicit truncation.
02852       // FIXME: This is valid and could be handled by truncating the APInts.
02853       if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
02854         return SDValue();
02855 
02856       Inputs.push_back(std::make_pair(V1, V2));
02857     }
02858   }
02859 
02860   // We have a number of constant values; constant fold them element by element.
02861   for (unsigned I = 0, E = Inputs.size(); I != E; ++I) {
02862     const APInt &C1 = Inputs[I].first->getAPIntValue();
02863     const APInt &C2 = Inputs[I].second->getAPIntValue();
02864 
02865     switch (Opcode) {
02866     case ISD::ADD:
02867       Outputs.push_back(getConstant(C1 + C2, SVT));
02868       break;
02869     case ISD::SUB:
02870       Outputs.push_back(getConstant(C1 - C2, SVT));
02871       break;
02872     case ISD::MUL:
02873       Outputs.push_back(getConstant(C1 * C2, SVT));
02874       break;
02875     case ISD::UDIV:
02876       if (!C2.getBoolValue())
02877         return SDValue();
02878       Outputs.push_back(getConstant(C1.udiv(C2), SVT));
02879       break;
02880     case ISD::UREM:
02881       if (!C2.getBoolValue())
02882         return SDValue();
02883       Outputs.push_back(getConstant(C1.urem(C2), SVT));
02884       break;
02885     case ISD::SDIV:
02886       if (!C2.getBoolValue())
02887         return SDValue();
02888       Outputs.push_back(getConstant(C1.sdiv(C2), SVT));
02889       break;
02890     case ISD::SREM:
02891       if (!C2.getBoolValue())
02892         return SDValue();
02893       Outputs.push_back(getConstant(C1.srem(C2), SVT));
02894       break;
02895     case ISD::AND:
02896       Outputs.push_back(getConstant(C1 & C2, SVT));
02897       break;
02898     case ISD::OR:
02899       Outputs.push_back(getConstant(C1 | C2, SVT));
02900       break;
02901     case ISD::XOR:
02902       Outputs.push_back(getConstant(C1 ^ C2, SVT));
02903       break;
02904     case ISD::SHL:
02905       Outputs.push_back(getConstant(C1 << C2, SVT));
02906       break;
02907     case ISD::SRL:
02908       Outputs.push_back(getConstant(C1.lshr(C2), SVT));
02909       break;
02910     case ISD::SRA:
02911       Outputs.push_back(getConstant(C1.ashr(C2), SVT));
02912       break;
02913     case ISD::ROTL:
02914       Outputs.push_back(getConstant(C1.rotl(C2), SVT));
02915       break;
02916     case ISD::ROTR:
02917       Outputs.push_back(getConstant(C1.rotr(C2), SVT));
02918       break;
02919     default:
02920       return SDValue();
02921     }
02922   }
02923 
02924   // Handle the scalar case first.
02925   if (Scalar1 && Scalar2)
02926     return Outputs.back();
02927 
02928   // Otherwise build a big vector out of the scalar elements we generated.
02929   return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs.data(),
02930                  Outputs.size());
02931 }
02932 
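// Illustrative example (not part of the original listing), assuming both
// operands are v2i32 BUILD_VECTORs of plain (non-opaque) ConstantSDNodes: for
// ISD::ADD with lanes <1, 2> and <10, 20>, the loop above folds lane by lane
// and returns a new BUILD_VECTOR holding <11, 22>. If any lane is not such a
// constant, an empty SDValue is returned and no folding takes place.
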
02933 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
02934                               SDValue N2) {
02935   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
02936   ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
02937   switch (Opcode) {
02938   default: break;
02939   case ISD::TokenFactor:
02940     assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
02941            N2.getValueType() == MVT::Other && "Invalid token factor!");
02942     // Fold trivial token factors.
02943     if (N1.getOpcode() == ISD::EntryToken) return N2;
02944     if (N2.getOpcode() == ISD::EntryToken) return N1;
02945     if (N1 == N2) return N1;
02946     break;
02947   case ISD::CONCAT_VECTORS:
02948     // Concat of UNDEFs is UNDEF.
02949     if (N1.getOpcode() == ISD::UNDEF &&
02950         N2.getOpcode() == ISD::UNDEF)
02951       return getUNDEF(VT);
02952 
02953     // A CONCAT_VECTORS node whose operands are all BUILD_VECTORs can be
02954     // simplified to one big BUILD_VECTOR.
02955     if (N1.getOpcode() == ISD::BUILD_VECTOR &&
02956         N2.getOpcode() == ISD::BUILD_VECTOR) {
02957       SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
02958                                     N1.getNode()->op_end());
02959       Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
02960       return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
02961     }
02962     break;
02963   case ISD::AND:
02964     assert(VT.isInteger() && "This operator does not apply to FP types!");
02965     assert(N1.getValueType() == N2.getValueType() &&
02966            N1.getValueType() == VT && "Binary operator types must match!");
02967     // (X & 0) -> 0.  This commonly occurs when legalizing i64 values, so it's
02968     // worth handling here.
02969     if (N2C && N2C->isNullValue())
02970       return N2;
02971     if (N2C && N2C->isAllOnesValue())  // X & -1 -> X
02972       return N1;
02973     break;
02974   case ISD::OR:
02975   case ISD::XOR:
02976   case ISD::ADD:
02977   case ISD::SUB:
02978     assert(VT.isInteger() && "This operator does not apply to FP types!");
02979     assert(N1.getValueType() == N2.getValueType() &&
02980            N1.getValueType() == VT && "Binary operator types must match!");
02981     // (X ^|+- 0) -> X.  This commonly occurs when legalizing i64 values, so
02982     // it's worth handling here.
02983     if (N2C && N2C->isNullValue())
02984       return N1;
02985     break;
02986   case ISD::UDIV:
02987   case ISD::UREM:
02988   case ISD::MULHU:
02989   case ISD::MULHS:
02990   case ISD::MUL:
02991   case ISD::SDIV:
02992   case ISD::SREM:
02993     assert(VT.isInteger() && "This operator does not apply to FP types!");
02994     assert(N1.getValueType() == N2.getValueType() &&
02995            N1.getValueType() == VT && "Binary operator types must match!");
02996     break;
02997   case ISD::FADD:
02998   case ISD::FSUB:
02999   case ISD::FMUL:
03000   case ISD::FDIV:
03001   case ISD::FREM:
03002     if (getTarget().Options.UnsafeFPMath) {
03003       if (Opcode == ISD::FADD) {
03004         // 0+x --> x
03005         if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
03006           if (CFP->getValueAPF().isZero())
03007             return N2;
03008         // x+0 --> x
03009         if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
03010           if (CFP->getValueAPF().isZero())
03011             return N1;
03012       } else if (Opcode == ISD::FSUB) {
03013         // x-0 --> x
03014         if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
03015           if (CFP->getValueAPF().isZero())
03016             return N1;
03017       } else if (Opcode == ISD::FMUL) {
03018         ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1);
03019         SDValue V = N2;
03020 
03021         // If the first operand isn't the constant, try the second
03022         if (!CFP) {
03023           CFP = dyn_cast<ConstantFPSDNode>(N2);
03024           V = N1;
03025         }
03026 
03027         if (CFP) {
03028           // 0*x --> 0
03029           if (CFP->isZero())
03030             return SDValue(CFP,0);
03031           // 1*x --> x
03032           if (CFP->isExactlyValue(1.0))
03033             return V;
03034         }
03035       }
03036     }
03037     assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
03038     assert(N1.getValueType() == N2.getValueType() &&
03039            N1.getValueType() == VT && "Binary operator types must match!");
03040     break;
03041   case ISD::FCOPYSIGN:   // N1 and result must match.  N1/N2 need not match.
03042     assert(N1.getValueType() == VT &&
03043            N1.getValueType().isFloatingPoint() &&
03044            N2.getValueType().isFloatingPoint() &&
03045            "Invalid FCOPYSIGN!");
03046     break;
03047   case ISD::SHL:
03048   case ISD::SRA:
03049   case ISD::SRL:
03050   case ISD::ROTL:
03051   case ISD::ROTR:
03052     assert(VT == N1.getValueType() &&
03053            "Shift operator's return type must be the same as its first arg");
03054     assert(VT.isInteger() && N2.getValueType().isInteger() &&
03055            "Shifts only work on integers");
03056     assert((!VT.isVector() || VT == N2.getValueType()) &&
03057            "Vector shift amounts must have the same type as their first arg");
03058     // Verify that the shift amount VT is big enough to hold valid shift
03059     // amounts.  This catches things like trying to shift an i1024 value by an
03060     // i8, which is easy to fall into in generic code that uses
03061     // TLI.getShiftAmount().
03062     assert(N2.getValueType().getSizeInBits() >=
03063                    Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
03064            "Invalid use of small shift amount with oversized value!");
03065 
03066     // Always fold shifts of i1 values so the code generator doesn't need to
03067     // handle them.  Since we know the size of the shift has to be less than the
03068     // size of the value, the shift/rotate count is guaranteed to be zero.
03069     if (VT == MVT::i1)
03070       return N1;
03071     if (N2C && N2C->isNullValue())
03072       return N1;
03073     break;
03074   case ISD::FP_ROUND_INREG: {
03075     EVT EVT = cast<VTSDNode>(N2)->getVT();
03076     assert(VT == N1.getValueType() && "Not an inreg round!");
03077     assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
03078            "Cannot FP_ROUND_INREG integer types");
03079     assert(EVT.isVector() == VT.isVector() &&
03080            "FP_ROUND_INREG type should be vector iff the operand "
03081            "type is vector!");
03082     assert((!EVT.isVector() ||
03083             EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
03084            "Vector element counts must match in FP_ROUND_INREG");
03085     assert(EVT.bitsLE(VT) && "Not rounding down!");
03086     (void)EVT;
03087     if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
03088     break;
03089   }
03090   case ISD::FP_ROUND:
03091     assert(VT.isFloatingPoint() &&
03092            N1.getValueType().isFloatingPoint() &&
03093            VT.bitsLE(N1.getValueType()) &&
03094            isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
03095     if (N1.getValueType() == VT) return N1;  // noop conversion.
03096     break;
03097   case ISD::AssertSext:
03098   case ISD::AssertZext: {
03099     EVT EVT = cast<VTSDNode>(N2)->getVT();
03100     assert(VT == N1.getValueType() && "Not an inreg extend!");
03101     assert(VT.isInteger() && EVT.isInteger() &&
03102            "Cannot *_EXTEND_INREG FP types");
03103     assert(!EVT.isVector() &&
03104            "AssertSExt/AssertZExt type should be the vector element type "
03105            "rather than the vector type!");
03106     assert(EVT.bitsLE(VT) && "Not extending!");
03107     if (VT == EVT) return N1; // noop assertion.
03108     break;
03109   }
03110   case ISD::SIGN_EXTEND_INREG: {
03111     EVT EVT = cast<VTSDNode>(N2)->getVT();
03112     assert(VT == N1.getValueType() && "Not an inreg extend!");
03113     assert(VT.isInteger() && EVT.isInteger() &&
03114            "Cannot *_EXTEND_INREG FP types");
03115     assert(EVT.isVector() == VT.isVector() &&
03116            "SIGN_EXTEND_INREG type should be vector iff the operand "
03117            "type is vector!");
03118     assert((!EVT.isVector() ||
03119             EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
03120            "Vector element counts must match in SIGN_EXTEND_INREG");
03121     assert(EVT.bitsLE(VT) && "Not extending!");
03122     if (EVT == VT) return N1;  // Not actually extending
03123 
03124     if (N1C) {
03125       APInt Val = N1C->getAPIntValue();
03126       unsigned FromBits = EVT.getScalarType().getSizeInBits();
03127       Val <<= Val.getBitWidth()-FromBits;
03128       Val = Val.ashr(Val.getBitWidth()-FromBits);
03129       return getConstant(Val, VT);
03130     }
03131     break;
03132   }
03133   case ISD::EXTRACT_VECTOR_ELT:
03134     // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
03135     if (N1.getOpcode() == ISD::UNDEF)
03136       return getUNDEF(VT);
03137 
03138     // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
03139     // expanding copies of large vectors from registers.
03140     if (N2C &&
03141         N1.getOpcode() == ISD::CONCAT_VECTORS &&
03142         N1.getNumOperands() > 0) {
03143       unsigned Factor =
03144         N1.getOperand(0).getValueType().getVectorNumElements();
03145       return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
03146                      N1.getOperand(N2C->getZExtValue() / Factor),
03147                      getConstant(N2C->getZExtValue() % Factor,
03148                                  N2.getValueType()));
03149     }
03150 
03151     // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
03152     // expanding large vector constants.
03153     if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
03154       SDValue Elt = N1.getOperand(N2C->getZExtValue());
03155 
03156       if (VT != Elt.getValueType())
03157         // If the vector element type is not legal, the BUILD_VECTOR operands
03158         // are promoted and implicitly truncated, and the result implicitly
03159         // extended. Make that explicit here.
03160         Elt = getAnyExtOrTrunc(Elt, DL, VT);
03161 
03162       return Elt;
03163     }
03164 
03165     // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
03166     // operations are lowered to scalars.
03167     if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
03168       // If the indices are the same, return the inserted element; if the
03169       // indices are known to be different, extract the element from the
03170       // original vector.
03171       SDValue N1Op2 = N1.getOperand(2);
03172       ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2.getNode());
03173 
03174       if (N1Op2C && N2C) {
03175         if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
03176           if (VT == N1.getOperand(1).getValueType())
03177             return N1.getOperand(1);
03178           else
03179             return getSExtOrTrunc(N1.getOperand(1), DL, VT);
03180         }
03181 
03182         return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
03183       }
03184     }
03185     break;
03186   case ISD::EXTRACT_ELEMENT:
03187     assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
03188     assert(!N1.getValueType().isVector() && !VT.isVector() &&
03189            (N1.getValueType().isInteger() == VT.isInteger()) &&
03190            N1.getValueType() != VT &&
03191            "Wrong types for EXTRACT_ELEMENT!");
03192 
03193     // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
03194     // 64-bit integers into 32-bit parts.  Instead of building the extract of
03195     // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
03196     if (N1.getOpcode() == ISD::BUILD_PAIR)
03197       return N1.getOperand(N2C->getZExtValue());
03198 
03199     // EXTRACT_ELEMENT of a constant int is also very common.
03200     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
03201       unsigned ElementSize = VT.getSizeInBits();
03202       unsigned Shift = ElementSize * N2C->getZExtValue();
03203       APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
03204       return getConstant(ShiftedVal.trunc(ElementSize), VT);
03205     }
03206     break;
03207   case ISD::EXTRACT_SUBVECTOR: {
03208     SDValue Index = N2;
03209     if (VT.isSimple() && N1.getValueType().isSimple()) {
03210       assert(VT.isVector() && N1.getValueType().isVector() &&
03211              "Extract subvector VTs must be vectors!");
03212       assert(VT.getVectorElementType() ==
03213              N1.getValueType().getVectorElementType() &&
03214              "Extract subvector VTs must have the same element type!");
03215       assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
03216              "Extract subvector must be from larger vector to smaller vector!");
03217 
03218       if (isa<ConstantSDNode>(Index.getNode())) {
03219         assert((VT.getVectorNumElements() +
03220                 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
03221                 <= N1.getValueType().getVectorNumElements())
03222                && "Extract subvector overflow!");
03223       }
03224 
03225       // Trivial extraction.
03226       if (VT.getSimpleVT() == N1.getSimpleValueType())
03227         return N1;
03228     }
03229     break;
03230   }
03231   }
03232 
03233   // Perform trivial constant folding.
03234   SDValue SV = FoldConstantArithmetic(Opcode, VT, N1.getNode(), N2.getNode());
03235   if (SV.getNode()) return SV;
03236 
03237   // Canonicalize constant to RHS if commutative.
03238   if (N1C && !N2C && isCommutativeBinOp(Opcode)) {
03239     std::swap(N1C, N2C);
03240     std::swap(N1, N2);
03241   }
03242 
03243   // Constant fold FP operations.
03244   ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
03245   ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
03246   if (N1CFP) {
03247     if (!N2CFP && isCommutativeBinOp(Opcode)) {
03248       // Canonicalize constant to RHS if commutative.
03249       std::swap(N1CFP, N2CFP);
03250       std::swap(N1, N2);
03251     } else if (N2CFP) {
03252       APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
03253       APFloat::opStatus s;
03254       switch (Opcode) {
03255       case ISD::FADD:
03256         s = V1.add(V2, APFloat::rmNearestTiesToEven);
03257         if (s != APFloat::opInvalidOp)
03258           return getConstantFP(V1, VT);
03259         break;
03260       case ISD::FSUB:
03261         s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
03262         if (s!=APFloat::opInvalidOp)
03263           return getConstantFP(V1, VT);
03264         break;
03265       case ISD::FMUL:
03266         s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
03267         if (s!=APFloat::opInvalidOp)
03268           return getConstantFP(V1, VT);
03269         break;
03270       case ISD::FDIV:
03271         s = V1.divide(V2, APFloat::rmNearestTiesToEven);
03272         if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero)
03273           return getConstantFP(V1, VT);
03274         break;
03275       case ISD::FREM :
03276         s = V1.mod(V2, APFloat::rmNearestTiesToEven);
03277         if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero)
03278           return getConstantFP(V1, VT);
03279         break;
03280       case ISD::FCOPYSIGN:
03281         V1.copySign(V2);
03282         return getConstantFP(V1, VT);
03283       default: break;
03284       }
03285     }
03286 
03287     if (Opcode == ISD::FP_ROUND) {
03288       APFloat V = N1CFP->getValueAPF();    // make copy
03289       bool ignored;
03290       // This can return overflow, underflow, or inexact; we don't care.
03291       // FIXME need to be more flexible about rounding mode.
03292       (void)V.convert(EVTToAPFloatSemantics(VT),
03293                       APFloat::rmNearestTiesToEven, &ignored);
03294       return getConstantFP(V, VT);
03295     }
03296   }
03297 
03298   // Canonicalize an UNDEF to the RHS, even over a constant.
03299   if (N1.getOpcode() == ISD::UNDEF) {
03300     if (isCommutativeBinOp(Opcode)) {
03301       std::swap(N1, N2);
03302     } else {
03303       switch (Opcode) {
03304       case ISD::FP_ROUND_INREG:
03305       case ISD::SIGN_EXTEND_INREG:
03306       case ISD::SUB:
03307       case ISD::FSUB:
03308       case ISD::FDIV:
03309       case ISD::FREM:
03310       case ISD::SRA:
03311         return N1;     // fold op(undef, arg2) -> undef
03312       case ISD::UDIV:
03313       case ISD::SDIV:
03314       case ISD::UREM:
03315       case ISD::SREM:
03316       case ISD::SRL:
03317       case ISD::SHL:
03318         if (!VT.isVector())
03319           return getConstant(0, VT);    // fold op(undef, arg2) -> 0
03320         // For vectors, we can't easily build an all-zero vector, so just
03321         // return the other (non-undef) operand.
03322         return N2;
03323       }
03324     }
03325   }
03326 
03327   // Fold a bunch of operators when the RHS is undef.
03328   if (N2.getOpcode() == ISD::UNDEF) {
03329     switch (Opcode) {
03330     case ISD::XOR:
03331       if (N1.getOpcode() == ISD::UNDEF)
03332         // Handle undef ^ undef -> 0 special case. This is a common
03333         // idiom (misuse).
03334         return getConstant(0, VT);
03335       // fallthrough
03336     case ISD::ADD:
03337     case ISD::ADDC:
03338     case ISD::ADDE:
03339     case ISD::SUB:
03340     case ISD::UDIV:
03341     case ISD::SDIV:
03342     case ISD::UREM:
03343     case ISD::SREM:
03344       return N2;       // fold op(arg1, undef) -> undef
03345     case ISD::FADD:
03346     case ISD::FSUB:
03347     case ISD::FMUL:
03348     case ISD::FDIV:
03349     case ISD::FREM:
03350       if (getTarget().Options.UnsafeFPMath)
03351         return N2;
03352       break;
03353     case ISD::MUL:
03354     case ISD::AND:
03355     case ISD::SRL:
03356     case ISD::SHL:
03357       if (!VT.isVector())
03358         return getConstant(0, VT);  // fold op(arg1, undef) -> 0
03359       // For vectors, we can't easily build an all zero vector, just return
03360       // the LHS.
03361       return N1;
03362     case ISD::OR:
03363       if (!VT.isVector())
03364         return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
03365       // For vectors, we can't easily build an all one vector, just return
03366       // the LHS.
03367       return N1;
03368     case ISD::SRA:
03369       return N1;
03370     }
03371   }
03372 
03373   // Memoize this node if possible.
03374   SDNode *N;
03375   SDVTList VTs = getVTList(VT);
03376   if (VT != MVT::Glue) {
03377     SDValue Ops[] = { N1, N2 };
03378     FoldingSetNodeID ID;
03379     AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
03380     void *IP = nullptr;
03381     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
03382       return SDValue(E, 0);
03383 
03384     N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
03385                                          DL.getDebugLoc(), VTs, N1, N2);
03386     CSEMap.InsertNode(N, IP);
03387   } else {
03388     N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
03389                                          DL.getDebugLoc(), VTs, N1, N2);
03390   }
03391 
03392   AllNodes.push_back(N);
03393 #ifndef NDEBUG
03394   VerifySDNode(N);
03395 #endif
03396   return SDValue(N, 0);
03397 }
03398 
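// Illustrative sketch (not part of the original listing), assuming 'DAG' is a
// SelectionDAG, 'DL' an SDLoc, and 'X' an i32 SDValue: the binary getNode above
// applies algebraic identities before allocating a node, e.g.
//   SDValue Zero = DAG.getConstant(0, MVT::i32);
//   SDValue And  = DAG.getNode(ISD::AND, DL, MVT::i32, X, Zero); // -> Zero
//   SDValue Add  = DAG.getNode(ISD::ADD, DL, MVT::i32, X, Zero); // -> X
// and it canonicalizes a lone constant operand to the RHS for commutative ops.
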
03399 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
03400                               SDValue N1, SDValue N2, SDValue N3) {
03401   // Perform various simplifications.
03402   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
03403   switch (Opcode) {
03404   case ISD::FMA: {
03405     ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
03406     ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
03407     ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
03408     if (N1CFP && N2CFP && N3CFP) {
03409       APFloat  V1 = N1CFP->getValueAPF();
03410       const APFloat &V2 = N2CFP->getValueAPF();
03411       const APFloat &V3 = N3CFP->getValueAPF();
03412       APFloat::opStatus s =
03413         V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
03414       if (s != APFloat::opInvalidOp)
03415         return getConstantFP(V1, VT);
03416     }
03417     break;
03418   }
03419   case ISD::CONCAT_VECTORS:
03420     // A CONCAT_VECTORS node whose operands are all BUILD_VECTORs can be
03421     // simplified to one big BUILD_VECTOR.
03422     if (N1.getOpcode() == ISD::BUILD_VECTOR &&
03423         N2.getOpcode() == ISD::BUILD_VECTOR &&
03424         N3.getOpcode() == ISD::BUILD_VECTOR) {
03425       SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
03426                                     N1.getNode()->op_end());
03427       Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
03428       Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
03429       return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
03430     }
03431     break;
03432   case ISD::SETCC: {
03433     // Use FoldSetCC to simplify SETCC's.
03434     SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
03435     if (Simp.getNode()) return Simp;
03436     break;
03437   }
03438   case ISD::SELECT:
03439     if (N1C) {
03440      if (N1C->getZExtValue())
03441        return N2;             // select true, X, Y -> X
03442      return N3;             // select false, X, Y -> Y
03443     }
03444 
03445     if (N2 == N3) return N2;   // select C, X, X -> X
03446     break;
03447   case ISD::VECTOR_SHUFFLE:
03448     llvm_unreachable("should use getVectorShuffle constructor!");
03449   case ISD::INSERT_SUBVECTOR: {
03450     SDValue Index = N3;
03451     if (VT.isSimple() && N1.getValueType().isSimple()
03452         && N2.getValueType().isSimple()) {
03453       assert(VT.isVector() && N1.getValueType().isVector() &&
03454              N2.getValueType().isVector() &&
03455              "Insert subvector VTs must be vectors");
03456       assert(VT == N1.getValueType() &&
03457              "Dest and insert subvector source types must match!");
03458       assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
03459              "Insert subvector must be from smaller vector to larger vector!");
03460       if (isa<ConstantSDNode>(Index.getNode())) {
03461         assert((N2.getValueType().getVectorNumElements() +
03462                 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
03463                 <= VT.getVectorNumElements())
03464                && "Insert subvector overflow!");
03465       }
03466 
03467       // Trivial insertion.
03468       if (VT.getSimpleVT() == N2.getSimpleValueType())
03469         return N2;
03470     }
03471     break;
03472   }
03473   case ISD::BITCAST:
03474     // Fold bit_convert nodes from a type to themselves.
03475     if (N1.getValueType() == VT)
03476       return N1;
03477     break;
03478   }
03479 
03480   // Memoize node if it doesn't produce a flag.
03481   SDNode *N;
03482   SDVTList VTs = getVTList(VT);
03483   if (VT != MVT::Glue) {
03484     SDValue Ops[] = { N1, N2, N3 };
03485     FoldingSetNodeID ID;
03486     AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
03487     void *IP = nullptr;
03488     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
03489       return SDValue(E, 0);
03490 
03491     N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
03492                                           DL.getDebugLoc(), VTs, N1, N2, N3);
03493     CSEMap.InsertNode(N, IP);
03494   } else {
03495     N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
03496                                           DL.getDebugLoc(), VTs, N1, N2, N3);
03497   }
03498 
03499   AllNodes.push_back(N);
03500 #ifndef NDEBUG
03501   VerifySDNode(N);
03502 #endif
03503   return SDValue(N, 0);
03504 }
03505 
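// Illustrative sketch (not part of the original listing), assuming 'DAG' is a
// SelectionDAG, 'DL' an SDLoc, and 'X'/'Y' i32 SDValues: with a constant
// condition, the SELECT case above folds the node away entirely, e.g.
//   SDValue True = DAG.getConstant(1, MVT::i1);
//   SDValue Sel  = DAG.getNode(ISD::SELECT, DL, MVT::i32, True, X, Y);
//   // Sel is just X ("select true, X, Y -> X"); "select C, X, X" folds to X.
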
03506 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
03507                               SDValue N1, SDValue N2, SDValue N3,
03508                               SDValue N4) {
03509   SDValue Ops[] = { N1, N2, N3, N4 };
03510   return getNode(Opcode, DL, VT, Ops, 4);
03511 }
03512 
03513 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
03514                               SDValue N1, SDValue N2, SDValue N3,
03515                               SDValue N4, SDValue N5) {
03516   SDValue Ops[] = { N1, N2, N3, N4, N5 };
03517   return getNode(Opcode, DL, VT, Ops, 5);
03518 }
03519 
03520 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
03521 /// the incoming stack arguments to be loaded from the stack.
03522 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
03523   SmallVector<SDValue, 8> ArgChains;
03524 
03525   // Include the original chain at the beginning of the list. When this is
03526   // used by target LowerCall hooks, this helps legalize find the
03527   // CALLSEQ_BEGIN node.
03528   ArgChains.push_back(Chain);
03529 
03530   // Add a chain value for each stack argument.
03531   for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
03532        UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
03533     if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
03534       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
03535         if (FI->getIndex() < 0)
03536           ArgChains.push_back(SDValue(L, 1));
03537 
03538   // Build a tokenfactor for all the chains.
03539   return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
03540                  &ArgChains[0], ArgChains.size());
03541 }
03542 
03543 /// getMemsetValue - Vectorized representation of the memset value
03544 /// operand.
03545 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
03546                               SDLoc dl) {
03547   assert(Value.getOpcode() != ISD::UNDEF);
03548 
03549   unsigned NumBits = VT.getScalarType().getSizeInBits();
03550   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
03551     assert(C->getAPIntValue().getBitWidth() == 8);
03552     APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
03553     if (VT.isInteger())
03554       return DAG.getConstant(Val, VT);
03555     return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), VT);
03556   }
03557 
03558   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Value);
03559   if (NumBits > 8) {
03560     // Use a multiplication with 0x010101... to extend the input to the
03561     // required length.
03562     APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
03563     Value = DAG.getNode(ISD::MUL, dl, VT, Value, DAG.getConstant(Magic, VT));
03564   }
03565 
03566   return Value;
03567 }
03568 
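// Worked example (not part of the original listing): for a non-constant byte
// value and VT = i32, the code above zero-extends the byte and multiplies it by
// the 0x01010101 "magic" splat, so a runtime byte 0xAB becomes 0xABABABAB,
// i.e. the byte replicated into every byte position of the wider store type.
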
03569 /// getMemsetStringVal - Similar to getMemsetValue, except this is only used
03570 /// when a memcpy is turned into a memset because the source is a constant
03571 /// string ptr.
03572 static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
03573                                   const TargetLowering &TLI, StringRef Str) {
03574   // Handle vector with all elements zero.
03575   if (Str.empty()) {
03576     if (VT.isInteger())
03577       return DAG.getConstant(0, VT);
03578     else if (VT == MVT::f32 || VT == MVT::f64)
03579       return DAG.getConstantFP(0.0, VT);
03580     else if (VT.isVector()) {
03581       unsigned NumElts = VT.getVectorNumElements();
03582       MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
03583       return DAG.getNode(ISD::BITCAST, dl, VT,
03584                          DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
03585                                                              EltVT, NumElts)));
03586     } else
03587       llvm_unreachable("Expected type!");
03588   }
03589 
03590   assert(!VT.isVector() && "Can't handle vector type here!");
03591   unsigned NumVTBits = VT.getSizeInBits();
03592   unsigned NumVTBytes = NumVTBits / 8;
03593   unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
03594 
03595   APInt Val(NumVTBits, 0);
03596   if (TLI.isLittleEndian()) {
03597     for (unsigned i = 0; i != NumBytes; ++i)
03598       Val |= (uint64_t)(unsigned char)Str[i] << i*8;
03599   } else {
03600     for (unsigned i = 0; i != NumBytes; ++i)
03601       Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
03602   }
03603 
03604   // If the "cost" of materializing the integer immediate is less than the cost
03605   // of a load, then it is cost effective to turn the load into the immediate.
03606   Type *Ty = VT.getTypeForEVT(*DAG.getContext());
03607   if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
03608     return DAG.getConstant(Val, VT);
03609   return SDValue(nullptr, 0);
03610 }
03611 
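// Worked example (not part of the original listing): copying from the constant
// string "abcd" with VT = i32 on a little-endian target packs the bytes as
// 0x64636261 ('a' = 0x61 in the low byte); on a big-endian target the same
// bytes pack as 0x61626364. The immediate is used only if
// shouldConvertConstantLoadToIntImm reports it as cheaper than a load.
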
03612 /// getMemBasePlusOffset - Returns a node representing the given base pointer
03613 /// plus a constant byte offset, built as an ADD in the base's value type.
03614 static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SDLoc dl,
03615                                       SelectionDAG &DAG) {
03616   EVT VT = Base.getValueType();
03617   return DAG.getNode(ISD::ADD, dl,
03618                      VT, Base, DAG.getConstant(Offset, VT));
03619 }
03620 
03621 /// isMemSrcFromString - Returns true if memcpy source is a string constant.
03622 ///
03623 static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
03624   unsigned SrcDelta = 0;
03625   GlobalAddressSDNode *G = nullptr;
03626   if (Src.getOpcode() == ISD::GlobalAddress)
03627     G = cast<GlobalAddressSDNode>(Src);
03628   else if (Src.getOpcode() == ISD::ADD &&
03629            Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
03630            Src.getOperand(1).getOpcode() == ISD::Constant) {
03631     G = cast<GlobalAddressSDNode>(Src.getOperand(0));
03632     SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
03633   }
03634   if (!G)
03635     return false;
03636 
03637   return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
03638 }
03639 
03640 /// FindOptimalMemOpLowering - Determines the optimal series of memory ops
03641 /// to replace the memset / memcpy. Returns true if the number of memory ops
03642 /// is below the threshold. The types of the sequence of memory ops used to
03643 /// perform the memset / memcpy are returned by reference in MemOps.
03644 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
03645                                      unsigned Limit, uint64_t Size,
03646                                      unsigned DstAlign, unsigned SrcAlign,
03647                                      bool IsMemset,
03648                                      bool ZeroMemset,
03649                                      bool MemcpyStrSrc,
03650                                      bool AllowOverlap,
03651                                      SelectionDAG &DAG,
03652                                      const TargetLowering &TLI) {
03653   assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
03654          "Expecting memcpy / memset source to meet alignment requirement!");
03655   // If 'SrcAlign' is zero, that means the memory operation does not need to
03656   // load the value, i.e. memset or memcpy from constant string. Otherwise,
03657   // it's the inferred alignment of the source. 'DstAlign', on the other hand,
03658   // is the specified alignment of the memory operation. If it is zero, that
03659   // means it's possible to change the alignment of the destination.
03660   // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
03661   // not need to be loaded.
03662   EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
03663                                    IsMemset, ZeroMemset, MemcpyStrSrc,
03664                                    DAG.getMachineFunction());
03665 
03666   if (VT == MVT::Other) {
03667     unsigned AS = 0;
03668     if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment(AS) ||
03669         TLI.allowsUnalignedMemoryAccesses(VT, AS)) {
03670       VT = TLI.getPointerTy();
03671     } else {
03672       switch (DstAlign & 7) {
03673       case 0:  VT = MVT::i64; break;
03674       case 4:  VT = MVT::i32; break;
03675       case 2:  VT = MVT::i16; break;
03676       default: VT = MVT::i8;  break;
03677       }
03678     }
03679 
03680     MVT LVT = MVT::i64;
03681     while (!TLI.isTypeLegal(LVT))
03682       LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
03683     assert(LVT.isInteger());
03684 
03685     if (VT.bitsGT(LVT))
03686       VT = LVT;
03687   }
03688 
03689   unsigned NumMemOps = 0;
03690   while (Size != 0) {
03691     unsigned VTSize = VT.getSizeInBits() / 8;
03692     while (VTSize > Size) {
03693       // For now, only use non-vector loads / stores for the left-over pieces.
03694       EVT NewVT = VT;
03695       unsigned NewVTSize;
03696 
03697       bool Found = false;
03698       if (VT.isVector() || VT.isFloatingPoint()) {
03699         NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
03700         if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
03701             TLI.isSafeMemOpType(NewVT.getSimpleVT()))
03702           Found = true;
03703         else if (NewVT == MVT::i64 &&
03704                  TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
03705                  TLI.isSafeMemOpType(MVT::f64)) {
03706           // i64 is usually not legal on 32-bit targets, but f64 may be.
03707           NewVT = MVT::f64;
03708           Found = true;
03709         }
03710       }
03711 
03712       if (!Found) {
03713         do {
03714           NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
03715           if (NewVT == MVT::i8)
03716             break;
03717         } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
03718       }
03719       NewVTSize = NewVT.getSizeInBits() / 8;
03720 
03721       // If the new VT cannot cover all of the remaining bits, then consider
03722       // issuing an unaligned and overlapping load / store (or a pair of them).
03723       // FIXME: Only do this for 64-bit or wider accesses, since we don't have a
03724       // proper cost model for unaligned load / store.
03725       bool Fast;
03726       unsigned AS = 0;
03727       if (NumMemOps && AllowOverlap &&
03728           VTSize >= 8 && NewVTSize < Size &&
03729           TLI.allowsUnalignedMemoryAccesses(VT, AS, &Fast) && Fast)
03730         VTSize = Size;
03731       else {
03732         VT = NewVT;
03733         VTSize = NewVTSize;
03734       }
03735     }
03736 
03737     if (++NumMemOps > Limit)
03738       return false;
03739 
03740     MemOps.push_back(VT);
03741     Size -= VTSize;
03742   }
03743 
03744   return true;
03745 }
03746 
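// Worked example (not part of the original listing): lowering a 15-byte
// memset or memcpy on a target whose widest safe type here is i64 first emits
// an i64 op (8 bytes); the 7-byte remainder is then broken down as i32 + i16 +
// i8, giving MemOps = {i64, i32, i16, i8}. If AllowOverlap is set and unaligned
// i64 accesses are fast, the remainder can instead be covered by a second i64
// op that overlaps the first by one byte, giving MemOps = {i64, i64}.
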
03747 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
03748                                        SDValue Chain, SDValue Dst,
03749                                        SDValue Src, uint64_t Size,
03750                                        unsigned Align, bool isVol,
03751                                        bool AlwaysInline,
03752                                        MachinePointerInfo DstPtrInfo,
03753                                        MachinePointerInfo SrcPtrInfo) {
03754   // Turn a memcpy of undef to nop.
03755   if (Src.getOpcode() == ISD::UNDEF)
03756     return Chain;
03757 
03758   // Expand memcpy to a series of load and store ops if the size operand falls
03759   // below a certain threshold.
03760   // TODO: In the AlwaysInline case, if the size is big then generate a loop
03761   // rather than maybe a humongous number of loads and stores.
03762   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
03763   std::vector<EVT> MemOps;
03764   bool DstAlignCanChange = false;
03765   MachineFunction &MF = DAG.getMachineFunction();
03766   MachineFrameInfo *MFI = MF.getFrameInfo();
03767   bool OptSize =
03768     MF.getFunction()->getAttributes().
03769       hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
03770   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
03771   if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
03772     DstAlignCanChange = true;
03773   unsigned SrcAlign = DAG.InferPtrAlignment(Src);
03774   if (Align > SrcAlign)
03775     SrcAlign = Align;
03776   StringRef Str;
03777   bool CopyFromStr = isMemSrcFromString(Src, Str);
03778   bool isZeroStr = CopyFromStr && Str.empty();
03779   unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
03780 
03781   if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
03782                                 (DstAlignCanChange ? 0 : Align),
03783                                 (isZeroStr ? 0 : SrcAlign),
03784                                 false, false, CopyFromStr, true, DAG, TLI))
03785     return SDValue();
03786 
03787   if (DstAlignCanChange) {
03788     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
03789     unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
03790 
03791     // Don't promote to an alignment that would require dynamic stack
03792     // realignment.
03793     const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
03794     if (!TRI->needsStackRealignment(MF))
03795        while (NewAlign > Align &&
03796              TLI.getDataLayout()->exceedsNaturalStackAlignment(NewAlign))
03797           NewAlign /= 2;
03798 
03799     if (NewAlign > Align) {
03800       // Give the stack frame object a larger alignment if needed.
03801       if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
03802         MFI->setObjectAlignment(FI->getIndex(), NewAlign);
03803       Align = NewAlign;
03804     }
03805   }
03806 
03807   SmallVector<SDValue, 8> OutChains;
03808   unsigned NumMemOps = MemOps.size();
03809   uint64_t SrcOff = 0, DstOff = 0;
03810   for (unsigned i = 0; i != NumMemOps; ++i) {
03811     EVT VT = MemOps[i];
03812     unsigned VTSize = VT.getSizeInBits() / 8;
03813     SDValue Value, Store;
03814 
03815     if (VTSize > Size) {
03816       // Issuing an unaligned load / store pair that overlaps with the previous
03817       // pair; adjust the offsets accordingly.
03818       assert(i == NumMemOps-1 && i != 0);
03819       SrcOff -= VTSize - Size;
03820       DstOff -= VTSize - Size;
03821     }
03822 
03823     if (CopyFromStr &&
03824         (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
03825       // It's unlikely a store of a vector immediate can be done in a single
03826       // instruction. It would require a load from a constant pool first.
03827       // We only handle zero vectors here.
03828       // FIXME: Handle other cases where store of vector immediate is done in
03829       // a single instruction.
03830       Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
03831       if (Value.getNode())
03832         Store = DAG.getStore(Chain, dl, Value,
03833                              getMemBasePlusOffset(Dst, DstOff, dl, DAG),
03834                              DstPtrInfo.getWithOffset(DstOff), isVol,
03835                              false, Align);
03836     }
03837 
03838     if (!Store.getNode()) {
03839       // The type might not be legal for the target.  This should only happen
03840       // if the type is smaller than a legal type, as on PPC, so the right
03841       // thing to do is generate a LoadExt/StoreTrunc pair.  These simplify
03842       // to Load/Store if NVT==VT.
03843       // FIXME does the case above also need this?
03844       EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
03845       assert(NVT.bitsGE(VT));
03846       Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
03847                              getMemBasePlusOffset(Src, SrcOff, dl, DAG),
03848                              SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false,
03849                              MinAlign(SrcAlign, SrcOff));
03850       Store = DAG.getTruncStore(Chain, dl, Value,
03851                                 getMemBasePlusOffset(Dst, DstOff, dl, DAG),
03852                                 DstPtrInfo.getWithOffset(DstOff), VT, isVol,
03853                                 false, Align);
03854     }
03855     OutChains.push_back(Store);
03856     SrcOff += VTSize;
03857     DstOff += VTSize;
03858     Size -= VTSize;
03859   }
03860 
03861   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
03862                      &OutChains[0], OutChains.size());
03863 }
03864 
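// Illustrative example (not part of the original listing): a 16-byte,
// non-volatile memcpy on a 64-bit target typically becomes two i64 load/store
// pairs at offsets 0 and 8, chained together through a TokenFactor. When the
// source is a constant string, getMemsetStringVal allows the loads to be
// replaced by integer immediates instead.
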
03865 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
03866                                         SDValue Chain, SDValue Dst,
03867                                         SDValue Src, uint64_t Size,
03868                                         unsigned Align,  bool isVol,
03869                                         bool AlwaysInline,
03870                                         MachinePointerInfo DstPtrInfo,
03871                                         MachinePointerInfo SrcPtrInfo) {
03872   // Turn a memmove of undef to nop.
03873   if (Src.getOpcode() == ISD::UNDEF)
03874     return Chain;
03875 
03876   // Expand memmove to a series of load and store ops if the size operand falls
03877   // below a certain threshold.
03878   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
03879   std::vector<EVT> MemOps;
03880   bool DstAlignCanChange = false;
03881   MachineFunction &MF = DAG.getMachineFunction();
03882   MachineFrameInfo *MFI = MF.getFrameInfo();
03883   bool OptSize = MF.getFunction()->getAttributes().
03884     hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
03885   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
03886   if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
03887     DstAlignCanChange = true;
03888   unsigned SrcAlign = DAG.InferPtrAlignment(Src);
03889   if (Align > SrcAlign)
03890     SrcAlign = Align;
03891   unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
03892 
03893   if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
03894                                 (DstAlignCanChange ? 0 : Align), SrcAlign,
03895                                 false, false, false, false, DAG, TLI))
03896     return SDValue();
03897 
03898   if (DstAlignCanChange) {
03899     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
03900     unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
03901     if (NewAlign > Align) {
03902       // Give the stack frame object a larger alignment if needed.
03903       if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
03904         MFI->setObjectAlignment(FI->getIndex(), NewAlign);
03905       Align = NewAlign;
03906     }
03907   }
03908 
03909   uint64_t SrcOff = 0, DstOff = 0;
03910   SmallVector<SDValue, 8> LoadValues;
03911   SmallVector<SDValue, 8> LoadChains;
03912   SmallVector<SDValue, 8> OutChains;
03913   unsigned NumMemOps = MemOps.size();
03914   for (unsigned i = 0; i < NumMemOps; i++) {
03915     EVT VT = MemOps[i];
03916     unsigned VTSize = VT.getSizeInBits() / 8;
03917     SDValue Value;
03918 
03919     Value = DAG.getLoad(VT, dl, Chain,
03920                         getMemBasePlusOffset(Src, SrcOff, dl, DAG),
03921                         SrcPtrInfo.getWithOffset(SrcOff), isVol,
03922                         false, false, SrcAlign);
03923     LoadValues.push_back(Value);
03924     LoadChains.push_back(Value.getValue(1));
03925     SrcOff += VTSize;
03926   }
03927   Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
03928                       &LoadChains[0], LoadChains.size());
03929   OutChains.clear();
03930   for (unsigned i = 0; i < NumMemOps; i++) {
03931     EVT VT = MemOps[i];
03932     unsigned VTSize = VT.getSizeInBits() / 8;
03933     SDValue Store;
03934 
03935     Store = DAG.getStore(Chain, dl, LoadValues[i],
03936                          getMemBasePlusOffset(Dst, DstOff, dl, DAG),
03937                          DstPtrInfo.getWithOffset(DstOff), isVol, false, Align);
03938     OutChains.push_back(Store);
03939     DstOff += VTSize;
03940   }
03941 
03942   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
03943                      &OutChains[0], OutChains.size());
03944 }
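
// Editorial sketch (not part of SelectionDAG.cpp): the routine above issues
// every load before any store, which is what keeps the expansion correct for
// overlapping regions.  A minimal standalone C++ analogue of that two-phase
// pattern on plain bytes; the helper name is hypothetical.
#include <cstddef>
#include <vector>

static void memmoveByLoadThenStore(unsigned char *Dst,
                                   const unsigned char *Src,
                                   std::size_t Size) {
  std::vector<unsigned char> Loaded(Src, Src + Size); // phase 1: all loads
  for (std::size_t i = 0; i != Size; ++i)             // phase 2: all stores
    Dst[i] = Loaded[i];
}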
03945 
03946 /// \brief Lower a call to the 'memset' intrinsic into a series of store
03947 /// operations.
03948 ///
03949 /// \param DAG Selection DAG where lowered code is placed.
03950 /// \param dl Link to corresponding IR location.
03951 /// \param Chain Control flow dependency.
03952 /// \param Dst Pointer to destination memory location.
03953 /// \param Src Value of byte to write into the memory.
03954 /// \param Size Number of bytes to write.
03955 /// \param Align Alignment of the destination in bytes.
03956 /// \param isVol True if destination is volatile.
03957 /// \param DstPtrInfo IR information on the memory pointer.
03958 /// \returns New head in the control flow if lowering was successful; otherwise
03959 /// an empty SDValue.
03960 ///
03961 /// The function tries to replace the 'llvm.memset' intrinsic with several store
03962 /// operations and value calculation code. This is usually profitable for small
03963 /// memory sizes.
03964 static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl,
03965                                SDValue Chain, SDValue Dst,
03966                                SDValue Src, uint64_t Size,
03967                                unsigned Align, bool isVol,
03968                                MachinePointerInfo DstPtrInfo) {
03969   // Turn a memset of undef into a nop.
03970   if (Src.getOpcode() == ISD::UNDEF)
03971     return Chain;
03972 
03973   // Expand memset to a series of load/store ops if the size operand
03974   // falls below a certain threshold.
03975   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
03976   std::vector<EVT> MemOps;
03977   bool DstAlignCanChange = false;
03978   MachineFunction &MF = DAG.getMachineFunction();
03979   MachineFrameInfo *MFI = MF.getFrameInfo();
03980   bool OptSize = MF.getFunction()->getAttributes().
03981     hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
03982   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
03983   if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
03984     DstAlignCanChange = true;
03985   bool IsZeroVal =
03986     isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
03987   if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
03988                                 Size, (DstAlignCanChange ? 0 : Align), 0,
03989                                 true, IsZeroVal, false, true, DAG, TLI))
03990     return SDValue();
03991 
03992   if (DstAlignCanChange) {
03993     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
03994     unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
03995     if (NewAlign > Align) {
03996       // Give the stack frame object a larger alignment if needed.
03997       if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
03998         MFI->setObjectAlignment(FI->getIndex(), NewAlign);
03999       Align = NewAlign;
04000     }
04001   }
04002 
04003   SmallVector<SDValue, 8> OutChains;
04004   uint64_t DstOff = 0;
04005   unsigned NumMemOps = MemOps.size();
04006 
04007   // Find the largest store and generate the bit pattern for it.
04008   EVT LargestVT = MemOps[0];
04009   for (unsigned i = 1; i < NumMemOps; i++)
04010     if (MemOps[i].bitsGT(LargestVT))
04011       LargestVT = MemOps[i];
04012   SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
04013 
04014   for (unsigned i = 0; i < NumMemOps; i++) {
04015     EVT VT = MemOps[i];
04016     unsigned VTSize = VT.getSizeInBits() / 8;
04017     if (VTSize > Size) {
04018       // Issuing an unaligned store that overlaps with the previous store.
04019       // Adjust the offset accordingly.
04020       assert(i == NumMemOps-1 && i != 0);
04021       DstOff -= VTSize - Size;
04022     }
04023 
04024     // If this store is smaller than the largest store see whether we can get
04025     // the smaller value for free with a truncate.
04026     SDValue Value = MemSetValue;
04027     if (VT.bitsLT(LargestVT)) {
04028       if (!LargestVT.isVector() && !VT.isVector() &&
04029           TLI.isTruncateFree(LargestVT, VT))
04030         Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
04031       else
04032         Value = getMemsetValue(Src, VT, DAG, dl);
04033     }
04034     assert(Value.getValueType() == VT && "Value with wrong type.");
04035     SDValue Store = DAG.getStore(Chain, dl, Value,
04036                                  getMemBasePlusOffset(Dst, DstOff, dl, DAG),
04037                                  DstPtrInfo.getWithOffset(DstOff),
04038                                  isVol, false, Align);
04039     OutChains.push_back(Store);
04040     DstOff += VT.getSizeInBits() / 8;
04041     Size -= VTSize;
04042   }
04043 
04044   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
04045                      &OutChains[0], OutChains.size());
04046 }
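
// Editorial sketch (not part of SelectionDAG.cpp): the expansion above relies
// on getMemsetValue (defined earlier in this file) to broadcast the fill byte
// across the chosen store type.  A standalone C++ analogue of that byte
// replication, assuming an 8-bit fill value and an integer store width that
// is a multiple of 8 bits; the helper name is hypothetical.
#include <cstdint>

static uint64_t splatMemsetByte(uint8_t FillByte, unsigned StoreBits) {
  uint64_t Pattern = 0;
  for (unsigned i = 0; i != StoreBits; i += 8)
    Pattern |= uint64_t(FillByte) << i;  // e.g. 0xAB -> 0xABABABAB for i32
  return Pattern;
}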
04047 
04048 SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
04049                                 SDValue Src, SDValue Size,
04050                                 unsigned Align, bool isVol, bool AlwaysInline,
04051                                 MachinePointerInfo DstPtrInfo,
04052                                 MachinePointerInfo SrcPtrInfo) {
04053   assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
04054 
04055   // Check to see if we should lower the memcpy to loads and stores first.
04056   // For cases within the target-specified limits, this is the best choice.
04057   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
04058   if (ConstantSize) {
04059     // Memcpy with size zero? Just return the original chain.
04060     if (ConstantSize->isNullValue())
04061       return Chain;
04062 
04063     SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
04064                                              ConstantSize->getZExtValue(),Align,
04065                                 isVol, false, DstPtrInfo, SrcPtrInfo);
04066     if (Result.getNode())
04067       return Result;
04068   }
04069 
04070   // Then check to see if we should lower the memcpy with target-specific
04071   // code. If the target chooses to do this, this is the next best.
04072   SDValue Result =
04073     TSI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
04074                                 isVol, AlwaysInline,
04075                                 DstPtrInfo, SrcPtrInfo);
04076   if (Result.getNode())
04077     return Result;
04078 
04079   // If we really need inline code and the target declined to provide it,
04080   // use a (potentially long) sequence of loads and stores.
04081   if (AlwaysInline) {
04082     assert(ConstantSize && "AlwaysInline requires a constant size!");
04083     return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
04084                                    ConstantSize->getZExtValue(), Align, isVol,
04085                                    true, DstPtrInfo, SrcPtrInfo);
04086   }
04087 
04088   // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
04089   // memcpy is not guaranteed to be safe. libc memcpys aren't required to
04090   // respect volatile, so they may do things like read or write memory
04091   // beyond the given memory regions. But fixing this isn't easy, and most
04092   // people don't care.
04093 
04094   const TargetLowering *TLI = TM.getTargetLowering();
04095 
04096   // Emit a library call.
04097   TargetLowering::ArgListTy Args;
04098   TargetLowering::ArgListEntry Entry;
04099   Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
04100   Entry.Node = Dst; Args.push_back(Entry);
04101   Entry.Node = Src; Args.push_back(Entry);
04102   Entry.Node = Size; Args.push_back(Entry);
04103   // FIXME: pass in SDLoc
04104   TargetLowering::
04105   CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
04106                     false, false, false, false, 0,
04107                     TLI->getLibcallCallingConv(RTLIB::MEMCPY),
04108                     /*isTailCall=*/false,
04109                     /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
04110                     getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
04111                                       TLI->getPointerTy()),
04112                     Args, *this, dl);
04113   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
04114 
04115   return CallResult.second;
04116 }
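
// Editorial usage sketch (not part of SelectionDAG.cpp): how a caller that
// already holds the operands (for example while lowering a call to the
// llvm.memcpy intrinsic) might invoke getMemcpy, which then tries the three
// strategies above in order: inline loads/stores, target-specific code, and
// finally the libc call.  The helper name and its parameters are assumptions.
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDValue emitMemcpySketch(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
                                SDValue Dst, SDValue Src, SDValue Size,
                                MachinePointerInfo DstPI,
                                MachinePointerInfo SrcPI) {
  // Alignment 1 is the conservative choice; the routine may still pick wider
  // operations when FindOptimalMemOpLowering proves they are safe.
  return DAG.getMemcpy(Chain, dl, Dst, Src, Size, /*Align=*/1,
                       /*isVol=*/false, /*AlwaysInline=*/false, DstPI, SrcPI);
}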
04117 
04118 SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
04119                                  SDValue Src, SDValue Size,
04120                                  unsigned Align, bool isVol,
04121                                  MachinePointerInfo DstPtrInfo,
04122                                  MachinePointerInfo SrcPtrInfo) {
04123   assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
04124 
04125   // Check to see if we should lower the memmove to loads and stores first.
04126   // For cases within the target-specified limits, this is the best choice.
04127   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
04128   if (ConstantSize) {
04129     // Memmove with size zero? Just return the original chain.
04130     if (ConstantSize->isNullValue())
04131       return Chain;
04132 
04133     SDValue Result =
04134       getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
04135                                ConstantSize->getZExtValue(), Align, isVol,
04136                                false, DstPtrInfo, SrcPtrInfo);
04137     if (Result.getNode())
04138       return Result;
04139   }
04140 
04141   // Then check to see if we should lower the memmove with target-specific
04142   // code. If the target chooses to do this, this is the next best.
04143   SDValue Result =
04144     TSI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align, isVol,
04145                                  DstPtrInfo, SrcPtrInfo);
04146   if (Result.getNode())
04147     return Result;
04148 
04149   // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
04150   // not be safe.  See memcpy above for more details.
04151 
04152   const TargetLowering *TLI = TM.getTargetLowering();
04153 
04154   // Emit a library call.
04155   TargetLowering::ArgListTy Args;
04156   TargetLowering::ArgListEntry Entry;
04157   Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
04158   Entry.Node = Dst; Args.push_back(Entry);
04159   Entry.Node = Src; Args.push_back(Entry);
04160   Entry.Node = Size; Args.push_back(Entry);
04161   // FIXME: pass in SDLoc
04162   TargetLowering::
04163   CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
04164                     false, false, false, false, 0,
04165                     TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
04166                     /*isTailCall=*/false,
04167                     /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
04168                     getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
04169                                       TLI->getPointerTy()),
04170                     Args, *this, dl);
04171   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
04172 
04173   return CallResult.second;
04174 }
04175 
04176 SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
04177                                 SDValue Src, SDValue Size,
04178                                 unsigned Align, bool isVol,
04179                                 MachinePointerInfo DstPtrInfo) {
04180   assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
04181 
04182   // Check to see if we should lower the memset to stores first.
04183   // For cases within the target-specified limits, this is the best choice.
04184   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
04185   if (ConstantSize) {
04186     // Memset with size zero? Just return the original chain.
04187     if (ConstantSize->isNullValue())
04188       return Chain;
04189 
04190     SDValue Result =
04191       getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
04192                       Align, isVol, DstPtrInfo);
04193 
04194     if (Result.getNode())
04195       return Result;
04196   }
04197 
04198   // Then check to see if we should lower the memset with target-specific
04199   // code. If the target chooses to do this, this is the next best.
04200   SDValue Result =
04201     TSI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align, isVol,
04202                                 DstPtrInfo);
04203   if (Result.getNode())
04204     return Result;
04205 
04206   // Emit a library call.
04207   const TargetLowering *TLI = TM.getTargetLowering();
04208   Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(*getContext());
04209   TargetLowering::ArgListTy Args;
04210   TargetLowering::ArgListEntry Entry;
04211   Entry.Node = Dst; Entry.Ty = IntPtrTy;
04212   Args.push_back(Entry);
04213   // Extend or truncate the argument to be an i32 value for the call.
04214   if (Src.getValueType().bitsGT(MVT::i32))
04215     Src = getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
04216   else
04217     Src = getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
04218   Entry.Node = Src;
04219   Entry.Ty = Type::getInt32Ty(*getContext());
04220   Entry.isSExt = true;
04221   Args.push_back(Entry);
04222   Entry.Node = Size;
04223   Entry.Ty = IntPtrTy;
04224   Entry.isSExt = false;
04225   Args.push_back(Entry);
04226   // FIXME: pass in SDLoc
04227   TargetLowering::
04228   CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
04229                     false, false, false, false, 0,
04230                     TLI->getLibcallCallingConv(RTLIB::MEMSET),
04231                     /*isTailCall=*/false,
04232                     /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
04233                     getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
04234                                       TLI->getPointerTy()),
04235                     Args, *this, dl);
04236   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
04237 
04238   return CallResult.second;
04239 }
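
// Editorial usage sketch (not part of SelectionDAG.cpp): requesting a memset
// through the entry point above.  Val may be any integer-typed SDValue; the
// libcall fallback adapts it to the i32 that C's memset expects.  The helper
// name and its parameters are assumptions.
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDValue emitMemsetSketch(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
                                SDValue Dst, SDValue Val, SDValue Size,
                                MachinePointerInfo DstPI) {
  return DAG.getMemset(Chain, dl, Dst, Val, Size, /*Align=*/1,
                       /*isVol=*/false, DstPI);
}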
04240 
04241 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
04242                                 SDVTList VTList, SDValue *Ops, unsigned NumOps,
04243                                 MachineMemOperand *MMO,
04244                                 AtomicOrdering SuccessOrdering,
04245                                 AtomicOrdering FailureOrdering,
04246                                 SynchronizationScope SynchScope) {
04247   FoldingSetNodeID ID;
04248   ID.AddInteger(MemVT.getRawBits());
04249   AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
04250   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
04251   void* IP = nullptr;
04252   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
04253     cast<AtomicSDNode>(E)->refineAlignment(MMO);
04254     return SDValue(E, 0);
04255   }
04256 
04257   // Allocate the operands array for the node out of the BumpPtrAllocator, since
04258   // SDNode doesn't have access to it.  This memory will be "leaked" when
04259   // the node is deallocated, but recovered when the allocator is released.
04260   // If the number of operands is less than 5 we use AtomicSDNode's internal
04261   // storage.
04262   SDUse *DynOps = NumOps > 4 ? OperandAllocator.Allocate<SDUse>(NumOps) : nullptr;
04263 
04264   SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(),
04265                                                dl.getDebugLoc(), VTList, MemVT,
04266                                                Ops, DynOps, NumOps, MMO,
04267                                                SuccessOrdering, FailureOrdering,
04268                                                SynchScope);
04269   CSEMap.InsertNode(N, IP);
04270   AllNodes.push_back(N);
04271   return SDValue(N, 0);
04272 }
04273 
04274 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
04275                                 SDVTList VTList, SDValue *Ops, unsigned NumOps,
04276                                 MachineMemOperand *MMO,
04277                                 AtomicOrdering Ordering,
04278                                 SynchronizationScope SynchScope) {
04279   return getAtomic(Opcode, dl, MemVT, VTList, Ops, NumOps, MMO, Ordering,
04280                    Ordering, SynchScope);
04281 }
04282 
04283 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
04284                                 SDValue Chain, SDValue Ptr, SDValue Cmp,
04285                                 SDValue Swp, MachinePointerInfo PtrInfo,
04286                                 unsigned Alignment,
04287                                 AtomicOrdering SuccessOrdering,
04288                                 AtomicOrdering FailureOrdering,
04289                                 SynchronizationScope SynchScope) {
04290   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
04291     Alignment = getEVTAlignment(MemVT);
04292 
04293   MachineFunction &MF = getMachineFunction();
04294 
04295   // All atomics are load and store, except for ATOMIC_LOAD and ATOMIC_STORE.
04296   // For now, atomics are considered to be volatile always.
04297   // FIXME: Volatile isn't really correct; we should keep track of atomic
04298   // orderings in the memoperand.
04299   unsigned Flags = MachineMemOperand::MOVolatile;
04300   if (Opcode != ISD::ATOMIC_STORE)
04301     Flags |= MachineMemOperand::MOLoad;
04302   if (Opcode != ISD::ATOMIC_LOAD)
04303     Flags |= MachineMemOperand::MOStore;
04304 
04305   MachineMemOperand *MMO =
04306     MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
04307 
04308   return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Cmp, Swp, MMO,
04309                    SuccessOrdering, FailureOrdering, SynchScope);
04310 }
04311 
04312 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
04313                                 SDValue Chain,
04314                                 SDValue Ptr, SDValue Cmp,
04315                                 SDValue Swp, MachineMemOperand *MMO,
04316                                 AtomicOrdering SuccessOrdering,
04317                                 AtomicOrdering FailureOrdering,
04318                                 SynchronizationScope SynchScope) {
04319   assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op");
04320   assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
04321 
04322   EVT VT = Cmp.getValueType();
04323 
04324   SDVTList VTs = getVTList(VT, MVT::Other);
04325   SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
04326   return getAtomic(Opcode, dl, MemVT, VTs, Ops, 4, MMO, SuccessOrdering,
04327                    FailureOrdering, SynchScope);
04328 }
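
// Editorial usage sketch (not part of SelectionDAG.cpp): building an
// ISD::ATOMIC_CMP_SWAP node through the MachinePointerInfo overload above.
// Chain, Ptr, Cmp, Swp and PtrVal are assumed to come from the caller; Cmp
// and Swp must share a type, and alignment 0 lets getAtomic substitute the
// natural alignment of MemVT.  The helper name is an assumption.
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/Instructions.h"  // AtomicOrdering, SynchronizationScope
using namespace llvm;

static SDValue emitCmpSwapSketch(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
                                 SDValue Ptr, SDValue Cmp, SDValue Swp,
                                 const Value *PtrVal) {
  return DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, Cmp.getValueType(), Chain,
                       Ptr, Cmp, Swp, MachinePointerInfo(PtrVal),
                       /*Alignment=*/0, SequentiallyConsistent,
                       SequentiallyConsistent, CrossThread);
}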
04329 
04330 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
04331                                 SDValue Chain,
04332                                 SDValue Ptr, SDValue Val,
04333                                 const Value* PtrVal,
04334                                 unsigned Alignment,
04335                                 AtomicOrdering Ordering,
04336                                 SynchronizationScope SynchScope) {
04337   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
04338     Alignment = getEVTAlignment(MemVT);
04339 
04340   MachineFunction &MF = getMachineFunction();
04341   // An atomic store does not load. An atomic load does not store.
04342   // (An atomicrmw obviously both loads and stores.)
04343   // For now, atomics are considered to be volatile always, and they are
04344   // chained as such.
04345   // FIXME: Volatile isn't really correct; we should keep track of atomic
04346   // orderings in the memoperand.
04347   unsigned Flags = MachineMemOperand::MOVolatile;
04348   if (Opcode != ISD::ATOMIC_STORE)
04349     Flags |= MachineMemOperand::MOLoad;
04350   if (Opcode != ISD::ATOMIC_LOAD)
04351     Flags |= MachineMemOperand::MOStore;
04352 
04353   MachineMemOperand *MMO =
04354     MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
04355                             MemVT.getStoreSize(), Alignment);
04356 
04357   return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO,
04358                    Ordering, SynchScope);
04359 }
04360 
04361 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
04362                                 SDValue Chain,
04363                                 SDValue Ptr, SDValue Val,
04364                                 MachineMemOperand *MMO,
04365                                 AtomicOrdering Ordering,
04366                                 SynchronizationScope SynchScope) {
04367   assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
04368           Opcode == ISD::ATOMIC_LOAD_SUB ||
04369           Opcode == ISD::ATOMIC_LOAD_AND ||
04370           Opcode == ISD::ATOMIC_LOAD_OR ||
04371           Opcode == ISD::ATOMIC_LOAD_XOR ||
04372           Opcode == ISD::ATOMIC_LOAD_NAND ||
04373           Opcode == ISD::ATOMIC_LOAD_MIN ||
04374           Opcode == ISD::ATOMIC_LOAD_MAX ||
04375           Opcode == ISD::ATOMIC_LOAD_UMIN ||
04376           Opcode == ISD::ATOMIC_LOAD_UMAX ||
04377           Opcode == ISD::ATOMIC_SWAP ||
04378           Opcode == ISD::ATOMIC_STORE) &&
04379          "Invalid Atomic Op");
04380 
04381   EVT VT = Val.getValueType();
04382 
04383   SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
04384                                                getVTList(VT, MVT::Other);
04385   SDValue Ops[] = {Chain, Ptr, Val};
04386   return getAtomic(Opcode, dl, MemVT, VTs, Ops, 3, MMO, Ordering, SynchScope);
04387 }
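
// Editorial usage sketch (not part of SelectionDAG.cpp): an atomicrmw-style
// add built through the read-modify-write overload above.  Val supplies both
// the operand and the memory type; the node yields the old memory value plus
// an output chain.  The helper name and parameters are assumptions.
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/Instructions.h"  // AtomicOrdering, SynchronizationScope
using namespace llvm;

static SDValue emitAtomicAddSketch(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
                                   SDValue Ptr, SDValue Val,
                                   const Value *PtrVal) {
  return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, Val.getValueType(), Chain,
                       Ptr, Val, PtrVal, /*Alignment=*/0,
                       SequentiallyConsistent, CrossThread);
}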
04388 
04389 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
04390                                 EVT VT, SDValue Chain,
04391                                 SDValue Ptr,
04392                                 MachineMemOperand *MMO,
04393                                 AtomicOrdering Ordering,
04394                                 SynchronizationScope SynchScope) {
04395   assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
04396 
04397   SDVTList VTs = getVTList(VT, MVT::Other);
04398   SDValue Ops[] = {Chain, Ptr};
04399   return getAtomic(Opcode, dl, MemVT, VTs, Ops, 2, MMO, Ordering, SynchScope);
04400 }
04401 
04402 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
04403 SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps,
04404                                      SDLoc dl) {
04405   if (NumOps == 1)
04406     return Ops[0];
04407 
04408   SmallVector<EVT, 4> VTs;
04409   VTs.reserve(NumOps);
04410   for (unsigned i = 0; i < NumOps; ++i)
04411     VTs.push_back(Ops[i].getValueType());
04412   return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs),
04413                  Ops, NumOps);
04414 }
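
// Editorial usage sketch (not part of SelectionDAG.cpp): a common use of
// getMergeValues is packaging a computed value with its chain so custom
// lowering code can return a single SDValue.  Lo and OutChain are assumed to
// be produced earlier by the caller; the helper name is an assumption.
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDValue mergeValueAndChainSketch(SelectionDAG &DAG, SDLoc dl,
                                        SDValue Lo, SDValue OutChain) {
  SDValue Ops[] = { Lo, OutChain };
  return DAG.getMergeValues(Ops, 2, dl);
}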
04415 
04416 SDValue
04417 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl,
04418                                   const EVT *VTs, unsigned NumVTs,
04419                                   const SDValue *Ops, unsigned NumOps,
04420                                   EVT MemVT, MachinePointerInfo PtrInfo,
04421                                   unsigned Align, bool Vol,
04422                                   bool ReadMem, bool WriteMem) {
04423   return getMemIntrinsicNode(Opcode, dl, makeVTList(VTs, NumVTs), Ops, NumOps,
04424                              MemVT, PtrInfo, Align, Vol,
04425                              ReadMem, WriteMem);
04426 }
04427 
04428 SDValue
04429 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
04430                                   const SDValue *Ops, unsigned NumOps,
04431                                   EVT MemVT, MachinePointerInfo PtrInfo,
04432                                   unsigned Align, bool Vol,
04433                                   bool ReadMem, bool WriteMem) {
04434   if (Align == 0)  // Ensure that codegen never sees alignment 0
04435     Align = getEVTAlignment(MemVT);
04436 
04437   MachineFunction &MF = getMachineFunction();
04438   unsigned Flags = 0;
04439   if (WriteMem)
04440     Flags |= MachineMemOperand::MOStore;
04441   if (ReadMem)
04442     Flags |= MachineMemOperand::MOLoad;
04443   if (Vol)
04444     Flags |= MachineMemOperand::MOVolatile;
04445   MachineMemOperand *MMO =
04446     MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Align);
04447 
04448   return getMemIntrinsicNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
04449 }
04450 
04451 SDValue
04452 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
04453                                   const SDValue *Ops, unsigned NumOps,
04454                                   EVT MemVT, MachineMemOperand *MMO) {
04455   assert((Opcode == ISD::INTRINSIC_VOID ||
04456           Opcode == ISD::INTRINSIC_W_CHAIN ||
04457           Opcode == ISD::PREFETCH ||
04458           Opcode == ISD::LIFETIME_START ||
04459           Opcode == ISD::LIFETIME_END ||
04460           (Opcode <= INT_MAX &&
04461            (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
04462          "Opcode is not a memory-accessing opcode!");
04463 
04464   // Memoize the node unless it returns a flag.
04465   MemIntrinsicSDNode *N;
04466   if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
04467     FoldingSetNodeID ID;
04468     AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
04469     ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
04470     void *IP = nullptr;
04471     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
04472       cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
04473       return SDValue(E, 0);
04474     }
04475 
04476     N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
04477                                                dl.getDebugLoc(), VTList, Ops,
04478                                                NumOps, MemVT, MMO);
04479     CSEMap.InsertNode(N, IP);
04480   } else {
04481     N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(),
04482                                                dl.getDebugLoc(), VTList, Ops,
04483                                                NumOps, MemVT, MMO);
04484   }
04485   AllNodes.push_back(N);
04486   return SDValue(N, 0);
04487 }
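
// Editorial usage sketch (not part of SelectionDAG.cpp): wrapping a memory-
// touching intrinsic in an INTRINSIC_W_CHAIN node that carries a
// MachineMemOperand.  IntrinsicID and Ptr are assumed SDValues from the
// caller; describing the access as a 4-byte volatile read is purely
// illustrative, and the helper name is an assumption.
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDValue emitMemIntrinsicSketch(SelectionDAG &DAG, SDLoc dl,
                                      SDValue Chain, SDValue IntrinsicID,
                                      SDValue Ptr,
                                      MachinePointerInfo PtrInfo) {
  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
  SDValue Ops[] = { Chain, IntrinsicID, Ptr };
  return DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl, VTs, Ops, 3,
                                 MVT::i32, PtrInfo, /*Align=*/0, /*Vol=*/true,
                                 /*ReadMem=*/true, /*WriteMem=*/false);
}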
04488 
04489 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
04490 /// MachinePointerInfo record from it.  This is particularly useful because the
04491 /// code generator has many cases where it doesn't bother passing in a
04492 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
04493 static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) {
04494   // If this is FI+Offset, we can model it.
04495   if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
04496     return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset);
04497 
04498   // If this is (FI+Offset1)+Offset2, we can model it.
04499   if (Ptr.getOpcode() != ISD::ADD ||
04500       !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
04501       !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
04502     return MachinePointerInfo();
04503 
04504   int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
04505   return MachinePointerInfo::getFixedStack(FI, Offset+
04506                        cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
04507 }
04508 
04509 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
04510 /// MachinePointerInfo record from it.  This is particularly useful because the
04511 /// code generator has many cases where it doesn't bother passing in a
04512 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
04513 static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) {
04514   // If the 'Offset' value isn't a constant, we can't handle this.
04515   if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
04516     return InferPointerInfo(Ptr, OffsetNode->getSExtValue());
04517   if (OffsetOp.getOpcode() == ISD::UNDEF)
04518     return InferPointerInfo(Ptr);
04519   return MachinePointerInfo();
04520 }
04521 
04522 
04523 SDValue
04524 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
04525                       EVT VT, SDLoc dl, SDValue Chain,
04526                       SDValue Ptr, SDValue Offset,
04527                       MachinePointerInfo PtrInfo, EVT MemVT,
04528                       bool isVolatile, bool isNonTemporal, bool isInvariant,
04529                       unsigned Alignment, const MDNode *TBAAInfo,
04530                       const MDNode *Ranges) {
04531   assert(Chain.getValueType() == MVT::Other &&
04532         "Invalid chain type");
04533   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
04534     Alignment = getEVTAlignment(VT);
04535 
04536   unsigned Flags = MachineMemOperand::MOLoad;
04537   if (isVolatile)
04538     Flags |= MachineMemOperand::MOVolatile;
04539   if (isNonTemporal)
04540     Flags |= MachineMemOperand::MONonTemporal;
04541   if (isInvariant)
04542     Flags |= MachineMemOperand::MOInvariant;
04543 
04544   // If we don't have a PtrInfo, infer the trivial frame index case to simplify
04545   // clients.
04546   if (PtrInfo.V.isNull())
04547     PtrInfo = InferPointerInfo(Ptr, Offset);
04548 
04549   MachineFunction &MF = getMachineFunction();
04550   MachineMemOperand *MMO =
04551     MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
04552                             TBAAInfo, Ranges);
04553   return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
04554 }
04555 
04556 SDValue
04557 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
04558                       EVT VT, SDLoc dl, SDValue Chain,
04559                       SDValue Ptr, SDValue Offset, EVT MemVT,
04560                       MachineMemOperand *MMO) {
04561   if (VT == MemVT) {
04562     ExtType = ISD::NON_EXTLOAD;
04563   } else if (ExtType == ISD::NON_EXTLOAD) {
04564     assert(VT == MemVT && "Non-extending load from different memory type!");
04565   } else {
04566     // Extending load.
04567     assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
04568            "Should only be an extending load, not truncating!");
04569     assert(VT.isInteger() == MemVT.isInteger() &&
04570            "Cannot convert from FP to Int or Int -> FP!");
04571     assert(VT.isVector() == MemVT.isVector() &&
04572            "Cannot use an extending load to convert to or from a vector!");
04573     assert((!VT.isVector() ||
04574             VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
04575            "Cannot use an extending load to change the number of vector elements!");
04576   }
04577 
04578   bool Indexed = AM != ISD::UNINDEXED;
04579   assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
04580          "Unindexed load with an offset!");
04581 
04582   SDVTList VTs = Indexed ?
04583     getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
04584   SDValue Ops[] = { Chain, Ptr, Offset };
04585   FoldingSetNodeID ID;
04586   AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
04587   ID.AddInteger(MemVT.getRawBits());
04588   ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
04589                                      MMO->isNonTemporal(),
04590                                      MMO->isInvariant()));
04591   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
04592   void *IP = nullptr;
04593   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
04594     cast<LoadSDNode>(E)->refineAlignment(MMO);
04595     return SDValue(E, 0);
04596   }
04597   SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl.getIROrder(),
04598                                              dl.getDebugLoc(), VTs, AM, ExtType,
04599                                              MemVT, MMO);
04600   CSEMap.InsertNode(N, IP);
04601   AllNodes.push_back(N);
04602   return SDValue(N, 0);
04603 }
04604 
04605 SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
04606                               SDValue Chain, SDValue Ptr,
04607                               MachinePointerInfo PtrInfo,
04608                               bool isVolatile, bool isNonTemporal,
04609                               bool isInvariant, unsigned Alignment,
04610                               const MDNode *TBAAInfo,
04611                               const MDNode *Ranges) {
04612   SDValue Undef = getUNDEF(Ptr.getValueType());
04613   return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
04614                  PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
04615                  TBAAInfo, Ranges);
04616 }
04617 
04618 SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
04619                               SDValue Chain, SDValue Ptr,
04620                               MachineMemOperand *MMO) {
04621   SDValue Undef = getUNDEF(Ptr.getValueType());
04622   return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
04623                  VT, MMO);
04624 }
04625 
04626 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
04627                                  SDValue Chain, SDValue Ptr,
04628                                  MachinePointerInfo PtrInfo, EVT MemVT,
04629                                  bool isVolatile, bool isNonTemporal,
04630                                  unsigned Alignment, const MDNode *TBAAInfo) {
04631   SDValue Undef = getUNDEF(Ptr.getValueType());
04632   return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
04633                  PtrInfo, MemVT, isVolatile, isNonTemporal, false, Alignment,
04634                  TBAAInfo);
04635 }
04636 
04637 
04638 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
04639                                  SDValue Chain, SDValue Ptr, EVT MemVT,
04640                                  MachineMemOperand *MMO) {
04641   SDValue Undef = getUNDEF(Ptr.getValueType());
04642   return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
04643                  MemVT, MMO);
04644 }
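
// Editorial usage sketch (not part of SelectionDAG.cpp): an i8 value loaded
// from memory and zero-extended to i32 in a single node, using the
// MachinePointerInfo form of getExtLoad above.  Chain, Ptr and PtrInfo are
// assumed to come from the caller; the helper name is an assumption.
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDValue zextLoadByteSketch(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
                                  SDValue Ptr, MachinePointerInfo PtrInfo) {
  return DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain, Ptr, PtrInfo,
                        MVT::i8, /*isVolatile=*/false,
                        /*isNonTemporal=*/false, /*Alignment=*/1,
                        /*TBAAInfo=*/nullptr);
}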
04645 
04646 SDValue
04647 SelectionDAG::getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
04648                              SDValue Offset, ISD::MemIndexedMode AM) {
04649   LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
04650   assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
04651          "Load is already an indexed load!");
04652   return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
04653                  LD->getChain(), Base, Offset, LD->getPointerInfo(),
04654                  LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
04655                  false, LD->getAlignment());
04656 }
04657 
04658 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
04659                                SDValue Ptr, MachinePointerInfo PtrInfo,
04660                                bool isVolatile, bool isNonTemporal,
04661                                unsigned Alignment, const MDNode *TBAAInfo) {
04662   assert(Chain.getValueType() == MVT::Other &&
04663         "Invalid chain type");
04664   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
04665     Alignment = getEVTAlignment(Val.getValueType());
04666 
04667   unsigned Flags = MachineMemOperand::MOStore;
04668   if (isVolatile)
04669     Flags |= MachineMemOperand::MOVolatile;
04670   if (isNonTemporal)
04671     Flags |= MachineMemOperand::MONonTemporal;
04672 
04673   if (PtrInfo.V.isNull())
04674     PtrInfo = InferPointerInfo(Ptr);
04675 
04676   MachineFunction &MF = getMachineFunction();
04677   MachineMemOperand *MMO =
04678     MF.getMachineMemOperand(PtrInfo, Flags,
04679                             Val.getValueType().getStoreSize(), Alignment,
04680                             TBAAInfo);
04681 
04682   return getStore(Chain, dl, Val, Ptr, MMO);
04683 }
04684 
04685 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
04686                                SDValue Ptr, MachineMemOperand *MMO) {
04687   assert(Chain.getValueType() == MVT::Other &&
04688         "Invalid chain type");
04689   EVT VT = Val.getValueType();
04690   SDVTList VTs = getVTList(MVT::Other);
04691   SDValue Undef = getUNDEF(Ptr.getValueType());
04692   SDValue Ops[] = { Chain, Val, Ptr, Undef };
04693   FoldingSetNodeID ID;
04694   AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
04695   ID.AddInteger(VT.getRawBits());
04696   ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
04697                                      MMO->isNonTemporal(), MMO->isInvariant()));
04698   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
04699   void *IP = nullptr;
04700   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
04701     cast<StoreSDNode>(E)->refineAlignment(MMO);
04702     return SDValue(E, 0);
04703   }
04704   SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
04705                                               dl.getDebugLoc(), VTs,
04706                                               ISD::UNINDEXED, false, VT, MMO);
04707   CSEMap.InsertNode(N, IP);
04708   AllNodes.push_back(N);
04709   return SDValue(N, 0);
04710 }
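
// Editorial usage sketch (not part of SelectionDAG.cpp): a plain
// (non-truncating) store of Val through Ptr using the MachinePointerInfo
// overload above.  Alignment 0 makes the DAG substitute the natural alignment
// of Val's type.  The helper name and parameters are assumptions.
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDValue plainStoreSketch(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
                                SDValue Val, SDValue Ptr,
                                MachinePointerInfo PtrInfo) {
  return DAG.getStore(Chain, dl, Val, Ptr, PtrInfo, /*isVolatile=*/false,
                      /*isNonTemporal=*/false, /*Alignment=*/0,
                      /*TBAAInfo=*/nullptr);
}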
04711 
04712 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
04713                                     SDValue Ptr, MachinePointerInfo PtrInfo,
04714                                     EVT SVT,bool isVolatile, bool isNonTemporal,
04715                                     unsigned Alignment,
04716                                     const MDNode *TBAAInfo) {
04717   assert(Chain.getValueType() == MVT::Other &&
04718         "Invalid chain type");
04719   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
04720     Alignment = getEVTAlignment(SVT);
04721 
04722   unsigned Flags = MachineMemOperand::MOStore;
04723   if (isVolatile)
04724     Flags |= MachineMemOperand::MOVolatile;
04725   if (isNonTemporal)
04726     Flags |= MachineMemOperand::MONonTemporal;
04727 
04728   if (PtrInfo.V.isNull())
04729     PtrInfo = InferPointerInfo(Ptr);
04730 
04731   MachineFunction &MF = getMachineFunction();
04732   MachineMemOperand *MMO =
04733     MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment,
04734                             TBAAInfo);
04735 
04736   return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
04737 }
04738 
04739 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val,
04740                                     SDValue Ptr, EVT SVT,
04741                                     MachineMemOperand *MMO) {
04742   EVT VT = Val.getValueType();
04743 
04744   assert(Chain.getValueType() == MVT::Other &&
04745         "Invalid chain type");
04746   if (VT == SVT)
04747     return getStore(Chain, dl, Val, Ptr, MMO);
04748 
04749   assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
04750          "Should only be a truncating store, not extending!");
04751   assert(VT.isInteger() == SVT.isInteger() &&
04752          "Can't do FP-INT conversion!");
04753   assert(VT.isVector() == SVT.isVector() &&
04754          "Cannot use trunc store to convert to or from a vector!");
04755   assert((!VT.isVector() ||
04756           VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
04757          "Cannot use trunc store to change the number of vector elements!");
04758 
04759   SDVTList VTs = getVTList(MVT::Other);
04760   SDValue Undef = getUNDEF(Ptr.getValueType());
04761   SDValue Ops[] = { Chain, Val, Ptr, Undef };
04762   FoldingSetNodeID ID;
04763   AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
04764   ID.AddInteger(SVT.getRawBits());
04765   ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
04766                                      MMO->isNonTemporal(), MMO->isInvariant()));
04767   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
04768   void *IP = nullptr;
04769   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
04770     cast<StoreSDNode>(E)->refineAlignment(MMO);
04771     return SDValue(E, 0);
04772   }
04773   SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
04774                                               dl.getDebugLoc(), VTs,
04775                                               ISD::UNINDEXED, true, SVT, MMO);
04776   CSEMap.InsertNode(N, IP);
04777   AllNodes.push_back(N);
04778   return SDValue(N, 0);
04779 }
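
// Editorial usage sketch (not part of SelectionDAG.cpp): storing only the low
// 8 bits of an i32 value, i.e. a truncating store with memory type MVT::i8,
// through the MachinePointerInfo overload above.  The helper name and
// parameters are assumptions.
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDValue truncStoreByteSketch(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
                                    SDValue Val /*i32*/, SDValue Ptr,
                                    MachinePointerInfo PtrInfo) {
  return DAG.getTruncStore(Chain, dl, Val, Ptr, PtrInfo, MVT::i8,
                           /*isVolatile=*/false, /*isNonTemporal=*/false,
                           /*Alignment=*/1, /*TBAAInfo=*/nullptr);
}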
04780 
04781 SDValue
04782 SelectionDAG::getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base,
04783                               SDValue Offset, ISD::MemIndexedMode AM) {
04784   StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
04785   assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
04786          "Store is already an indexed store!");
04787   SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
04788   SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
04789   FoldingSetNodeID ID;
04790   AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
04791   ID.AddInteger(ST->getMemoryVT().getRawBits());
04792   ID.AddInteger(ST->getRawSubclassData());
04793   ID.AddInteger(ST->getPointerInfo().getAddrSpace());
04794   void *IP = nullptr;
04795   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
04796     return SDValue(E, 0);
04797 
04798   SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(),
04799                                               dl.getDebugLoc(), VTs, AM,
04800                                               ST->isTruncatingStore(),
04801                                               ST->getMemoryVT(),
04802                                               ST->getMemOperand());
04803   CSEMap.InsertNode(N, IP);
04804   AllNodes.push_back(N);
04805   return SDValue(N, 0);
04806 }
04807 
04808 SDValue SelectionDAG::getVAArg(EVT VT, SDLoc dl,
04809                                SDValue Chain, SDValue Ptr,
04810                                SDValue SV,
04811                                unsigned Align) {
04812   SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
04813   return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 4);
04814 }
04815 
04816 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
04817                               const SDUse *Ops, unsigned NumOps) {
04818   switch (NumOps) {
04819   case 0: return getNode(Opcode, DL, VT);
04820   case 1: return getNode(Opcode, DL, VT, Ops[0]);
04821   case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
04822   case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
04823   default: break;
04824   }
04825 
04826   // Copy from an SDUse array into an SDValue array for use with
04827   // the regular getNode logic.
04828   SmallVector<SDValue, 8> NewOps(Ops, Ops + NumOps);
04829   return getNode(Opcode, DL, VT, &NewOps[0], NumOps);
04830 }
04831 
04832 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
04833                               const SDValue *Ops, unsigned NumOps) {
04834   switch (NumOps) {
04835   case 0: return getNode(Opcode, DL, VT);
04836   case 1: return getNode(Opcode, DL, VT, Ops[0]);
04837   case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
04838   case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
04839   default: break;
04840   }
04841 
04842   switch (Opcode) {
04843   default: break;
04844   case ISD::SELECT_CC: {
04845     assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
04846     assert(Ops[0].getValueType() == Ops[1].getValueType() &&
04847            "LHS and RHS of condition must have same type!");
04848     assert(Ops[2].getValueType() == Ops[3].getValueType() &&
04849            "True and False arms of SelectCC must have same type!");
04850     assert(Ops[2].getValueType() == VT &&
04851            "select_cc node must be of same type as true and false value!");
04852     break;
04853   }
04854   case ISD::BR_CC: {
04855     assert(NumOps == 5 && "BR_CC takes 5 operands!");
04856     assert(Ops[2].getValueType() == Ops[3].getValueType() &&
04857            "LHS/RHS of comparison should match types!");
04858     break;
04859   }
04860   }
04861 
04862   // Memoize nodes.
04863   SDNode *N;
04864   SDVTList VTs = getVTList(VT);
04865 
04866   if (VT != MVT::Glue) {
04867     FoldingSetNodeID ID;
04868     AddNodeIDNode(ID, Opcode, VTs, Ops, NumOps);
04869     void *IP = nullptr;
04870 
04871     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
04872       return SDValue(E, 0);
04873 
04874     N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
04875                                    VTs, Ops, NumOps);
04876     CSEMap.InsertNode(N, IP);
04877   } else {
04878     N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
04879                                    VTs, Ops, NumOps);
04880   }
04881 
04882   AllNodes.push_back(N);
04883 #ifndef NDEBUG
04884   VerifySDNode(N);
04885 #endif
04886   return SDValue(N, 0);
04887 }
04888 
04889 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
04890                               ArrayRef<EVT> ResultTys,
04891                               const SDValue *Ops, unsigned NumOps) {
04892   return getNode(Opcode, DL, getVTList(ResultTys),
04893                  Ops, NumOps);
04894 }
04895 
04896 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
04897                               const EVT *VTs, unsigned NumVTs,
04898                               const SDValue *Ops, unsigned NumOps) {
04899   if (NumVTs == 1)
04900     return getNode(Opcode, DL, VTs[0], Ops, NumOps);
04901   return getNode(Opcode, DL, makeVTList(VTs, NumVTs), Ops, NumOps);
04902 }
04903 
04904 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
04905                               const SDValue *Ops, unsigned NumOps) {
04906   if (VTList.NumVTs == 1)
04907     return getNode(Opcode, DL, VTList.VTs[0], Ops, NumOps);
04908 
04909 #if 0
04910   switch (Opcode) {
04911   // FIXME: figure out how to safely handle things like
04912   // int foo(int x) { return 1 << (x & 255); }
04913   // int bar() { return foo(256); }
04914   case ISD::SRA_PARTS:
04915   case ISD::SRL_PARTS:
04916   case ISD::SHL_PARTS:
04917     if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
04918         cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
04919       return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
04920     else if (N3.getOpcode() == ISD::AND)
04921       if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
04922         // If the and is only masking out bits that cannot affect the shift,
04923         // eliminate the and.
04924         unsigned NumBits = VT.getScalarType().getSizeInBits()*2;
04925         if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
04926           return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
04927       }
04928     break;
04929   }
04930 #endif
04931 
04932   // Memoize the node unless it returns a flag.
04933   SDNode *N;
04934   if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
04935     FoldingSetNodeID ID;
04936     AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
04937     void *IP = nullptr;
04938     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
04939       return SDValue(E, 0);
04940 
04941     if (NumOps == 1) {
04942       N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
04943                                           DL.getDebugLoc(), VTList, Ops[0]);
04944     } else if (NumOps == 2) {
04945       N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
04946                                            DL.getDebugLoc(), VTList, Ops[0],
04947                                            Ops[1]);
04948     } else if (NumOps == 3) {
04949       N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
04950                                             DL.getDebugLoc(), VTList, Ops[0],
04951                                             Ops[1], Ops[2]);
04952     } else {
04953       N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
04954                                      VTList, Ops, NumOps);
04955     }
04956     CSEMap.InsertNode(N, IP);
04957   } else {
04958     if (NumOps == 1) {
04959       N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
04960                                           DL.getDebugLoc(), VTList, Ops[0]);
04961     } else if (NumOps == 2) {
04962       N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(),
04963                                            DL.getDebugLoc(), VTList, Ops[0],
04964                                            Ops[1]);
04965     } else if (NumOps == 3) {
04966       N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(),
04967                                             DL.getDebugLoc(), VTList, Ops[0],
04968                                             Ops[1], Ops[2]);
04969     } else {
04970       N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(),
04971                                      VTList, Ops, NumOps);
04972     }
04973   }
04974   AllNodes.push_back(N);
04975 #ifndef NDEBUG
04976   VerifySDNode(N);
04977 #endif
04978   return SDValue(N, 0);
04979 }
04980 
04981 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList) {
04982   return getNode(Opcode, DL, VTList, nullptr, 0);
04983 }
04984 
04985 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
04986                               SDValue N1) {
04987   SDValue Ops[] = { N1 };
04988   return getNode(Opcode, DL, VTList, Ops, 1);
04989 }
04990 
04991 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
04992                               SDValue N1, SDValue N2) {
04993   SDValue Ops[] = { N1, N2 };
04994   return getNode(Opcode, DL, VTList, Ops, 2);
04995 }
04996 
04997 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
04998                               SDValue N1, SDValue N2, SDValue N3) {
04999   SDValue Ops[] = { N1, N2, N3 };
05000   return getNode(Opcode, DL, VTList, Ops, 3);
05001 }
05002 
05003 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
05004                               SDValue N1, SDValue N2, SDValue N3,
05005                               SDValue N4) {
05006   SDValue Ops[] = { N1, N2, N3, N4 };
05007   return getNode(Opcode, DL, VTList, Ops, 4);
05008 }
05009 
05010 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
05011                               SDValue N1, SDValue N2, SDValue N3,
05012                               SDValue N4, SDValue N5) {
05013   SDValue Ops[] = { N1, N2, N3, N4, N5 };
05014   return getNode(Opcode, DL, VTList, Ops, 5);
05015 }
05016 
05017 SDVTList SelectionDAG::getVTList(EVT VT) {
05018   return makeVTList(SDNode::getValueTypeList(VT), 1);
05019 }
05020 
05021 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
05022   FoldingSetNodeID ID;
05023   ID.AddInteger(2U);
05024   ID.AddInteger(VT1.getRawBits());
05025   ID.AddInteger(VT2.getRawBits());
05026 
05027   void *IP = nullptr;
05028   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
05029   if (!Result) {
05030     EVT *Array = Allocator.Allocate<EVT>(2);
05031     Array[0] = VT1;
05032     Array[1] = VT2;
05033     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
05034     VTListMap.InsertNode(Result, IP);
05035   }
05036   return Result->getSDVTList();
05037 }
05038 
05039 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
05040   FoldingSetNodeID ID;
05041   ID.AddInteger(3U);
05042   ID.AddInteger(VT1.getRawBits());
05043   ID.AddInteger(VT2.getRawBits());
05044   ID.AddInteger(VT3.getRawBits());
05045 
05046   void *IP = nullptr;
05047   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
05048   if (!Result) {
05049     EVT *Array = Allocator.Allocate<EVT>(3);
05050     Array[0] = VT1;
05051     Array[1] = VT2;
05052     Array[2] = VT3;
05053     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
05054     VTListMap.InsertNode(Result, IP);
05055   }
05056   return Result->getSDVTList();
05057 }
05058 
05059 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
05060   FoldingSetNodeID ID;
05061   ID.AddInteger(4U);
05062   ID.AddInteger(VT1.getRawBits());
05063   ID.AddInteger(VT2.getRawBits());
05064   ID.AddInteger(VT3.getRawBits());
05065   ID.AddInteger(VT4.getRawBits());
05066 
05067   void *IP = nullptr;
05068   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
05069   if (!Result) {
05070     EVT *Array = Allocator.Allocate<EVT>(4);
05071     Array[0] = VT1;
05072     Array[1] = VT2;
05073     Array[2] = VT3;
05074     Array[3] = VT4;
05075     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
05076     VTListMap.InsertNode(Result, IP);
05077   }
05078   return Result->getSDVTList();
05079 }
05080 
05081 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
05082   unsigned NumVTs = VTs.size();
05083   FoldingSetNodeID ID;
05084   ID.AddInteger(NumVTs);
05085   for (unsigned index = 0; index < NumVTs; index++) {
05086     ID.AddInteger(VTs[index].getRawBits());
05087   }
05088 
05089   void *IP = nullptr;
05090   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
05091   if (!Result) {
05092     EVT *Array = Allocator.Allocate<EVT>(NumVTs);
05093     std::copy(VTs.begin(), VTs.end(), Array);
05094     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
05095     VTListMap.InsertNode(Result, IP);
05096   }
05097   return Result->getSDVTList();
05098 }
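
// Editorial sketch (not part of SelectionDAG.cpp): value-type lists are
// uniqued through VTListMap, so identical requests hand back the same
// interned EVT array.  The (i32, Other) pair is the usual shape for a node
// that produces a value plus an output chain.  The helper name is an
// assumption.
#include "llvm/CodeGen/SelectionDAG.h"
#include <cassert>
using namespace llvm;

static void vtListInterningSketch(SelectionDAG &DAG) {
  SDVTList A = DAG.getVTList(MVT::i32, MVT::Other);
  SDVTList B = DAG.getVTList(MVT::i32, MVT::Other);
  assert(A.VTs == B.VTs && "identical VT lists share one interned array");
  (void)A; (void)B;
}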
05099 
05100 
05101 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
05102 /// specified operands.  If the resultant node already exists in the DAG,
05103 /// this does not modify the specified node, instead it returns the node that
05104 /// already exists.  If the resultant node does not exist in the DAG, the
05105 /// input node is returned.  As a degenerate case, if you specify the same
05106 /// input operands as the node already has, the input node is returned.
05107 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
05108   assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
05109 
05110   // Check to see if there is no change.
05111   if (Op == N->getOperand(0)) return N;
05112 
05113   // See if the modified node already exists.
05114   void *InsertPos = nullptr;
05115   if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
05116     return Existing;
05117 
05118   // Nope, it doesn't.  Remove the node from its current place in the maps.
05119   if (InsertPos)
05120     if (!RemoveNodeFromCSEMaps(N))
05121       InsertPos = nullptr;
05122 
05123   // Now we update the operands.
05124   N->OperandList[0].set(Op);
05125 
05126   // If this gets put into a CSE map, add it.
05127   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
05128   return N;
05129 }
05130 
05131 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
05132   assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
05133 
05134   // Check to see if there is no change.
05135   if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
05136     return N;   // No operands changed, just return the input node.
05137 
05138   // See if the modified node already exists.
05139   void *InsertPos = nullptr;
05140   if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
05141     return Existing;
05142 
05143   // Nope it doesn't.  Remove the node from its current place in the maps.
05144   if (InsertPos)
05145     if (!RemoveNodeFromCSEMaps(N))
05146       InsertPos = nullptr;
05147 
05148   // Now we update the operands.
05149   if (N->OperandList[0] != Op1)
05150     N->OperandList[0].set(Op1);
05151   if (N->OperandList[1] != Op2)
05152     N->OperandList[1].set(Op2);
05153 
05154   // If this gets put into a CSE map, add it.
05155   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
05156   return N;
05157 }
05158 
05159 SDNode *SelectionDAG::
05160 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
05161   SDValue Ops[] = { Op1, Op2, Op3 };
05162   return UpdateNodeOperands(N, Ops, 3);
05163 }
05164 
05165 SDNode *SelectionDAG::
05166 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
05167                    SDValue Op3, SDValue Op4) {
05168   SDValue Ops[] = { Op1, Op2, Op3, Op4 };
05169   return UpdateNodeOperands(N, Ops, 4);
05170 }
05171 
05172 SDNode *SelectionDAG::
05173 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
05174                    SDValue Op3, SDValue Op4, SDValue Op5) {
05175   SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
05176   return UpdateNodeOperands(N, Ops, 5);
05177 }
05178 
05179 SDNode *SelectionDAG::
05180 UpdateNodeOperands(SDNode *N, const SDValue *Ops, unsigned NumOps) {
05181   assert(N->getNumOperands() == NumOps &&
05182          "Update with wrong number of operands");
05183 
05184   // Check to see if there is no change.
05185   bool AnyChange = false;
05186   for (unsigned i = 0; i != NumOps; ++i) {
05187     if (Ops[i] != N->getOperand(i)) {
05188       AnyChange = true;
05189       break;
05190     }
05191   }
05192 
05193   // No operands changed, just return the input node.
05194   if (!AnyChange) return N;
05195 
05196   // See if the modified node already exists.
05197   void *InsertPos = nullptr;
05198   if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos))
05199     return Existing;
05200 
05201   // Nope it doesn't.  Remove the node from its current place in the maps.
05202   if (InsertPos)
05203     if (!RemoveNodeFromCSEMaps(N))
05204       InsertPos = nullptr;
05205 
05206   // Now we update the operands.
05207   for (unsigned i = 0; i != NumOps; ++i)
05208     if (N->OperandList[i] != Ops[i])
05209       N->OperandList[i].set(Ops[i]);
05210 
05211   // If this gets put into a CSE map, add it.
05212   if (InsertPos) CSEMap.InsertNode(N, InsertPos);
05213   return N;
05214 }
05215 
05216 /// DropOperands - Release the operands and set this node to have
05217 /// zero operands.
05218 void SDNode::DropOperands() {
05219   // Unlike the code in MorphNodeTo that does this, we don't need to
05220   // watch for dead nodes here.
05221   for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
05222     SDUse &Use = *I++;
05223     Use.set(SDValue());
05224   }
05225 }
05226 
05227 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
05228 /// machine opcode.
05229 ///
05230 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
05231                                    EVT VT) {
05232   SDVTList VTs = getVTList(VT);
05233   return SelectNodeTo(N, MachineOpc, VTs, nullptr, 0);
05234 }
05235 
05236 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
05237                                    EVT VT, SDValue Op1) {
05238   SDVTList VTs = getVTList(VT);
05239   SDValue Ops[] = { Op1 };
05240   return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
05241 }
05242 
05243 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
05244                                    EVT VT, SDValue Op1,
05245                                    SDValue Op2) {
05246   SDVTList VTs = getVTList(VT);
05247   SDValue Ops[] = { Op1, Op2 };
05248   return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
05249 }
05250 
05251 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
05252                                    EVT VT, SDValue Op1,
05253                                    SDValue Op2, SDValue Op3) {
05254   SDVTList VTs = getVTList(VT);
05255   SDValue Ops[] = { Op1, Op2, Op3 };
05256   return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
05257 }
05258 
05259 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
05260                                    EVT VT, const SDValue *Ops,
05261                                    unsigned NumOps) {
05262   SDVTList VTs = getVTList(VT);
05263   return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
05264 }
05265 
05266 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
05267                                    EVT VT1, EVT VT2, const SDValue *Ops,
05268                                    unsigned NumOps) {
05269   SDVTList VTs = getVTList(VT1, VT2);
05270   return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
05271 }
05272 
05273 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
05274                                    EVT VT1, EVT VT2) {
05275   SDVTList VTs = getVTList(VT1, VT2);
05276   return SelectNodeTo(N, MachineOpc, VTs, (SDValue *)nullptr, 0);
05277 }
05278 
05279 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
05280                                    EVT VT1, EVT VT2, EVT VT3,
05281                                    const SDValue *Ops, unsigned NumOps) {
05282   SDVTList VTs = getVTList(VT1, VT2, VT3);
05283   return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
05284 }
05285 
05286 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
05287                                    EVT VT1, EVT VT2, EVT VT3, EVT VT4,
05288                                    const SDValue *Ops, unsigned NumOps) {
05289   SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
05290   return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
05291 }
05292 
05293 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
05294                                    EVT VT1, EVT VT2,
05295                                    SDValue Op1) {
05296   SDVTList VTs = getVTList(VT1, VT2);
05297   SDValue Ops[] = { Op1 };
05298   return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
05299 }
05300 
05301 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
05302                                    EVT VT1, EVT VT2,
05303                                    SDValue Op1, SDValue Op2) {
05304   SDVTList VTs = getVTList(VT1, VT2);
05305   SDValue Ops[] = { Op1, Op2 };
05306   return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
05307 }
05308 
05309 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
05310                                    EVT VT1, EVT VT2,
05311                                    SDValue Op1, SDValue Op2,
05312                                    SDValue Op3) {
05313   SDVTList VTs = getVTList(VT1, VT2);
05314   SDValue Ops[] = { Op1, Op2, Op3 };
05315   return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
05316 }
05317 
05318 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
05319                                    EVT VT1, EVT VT2, EVT VT3,
05320                                    SDValue Op1, SDValue Op2,
05321                                    SDValue Op3) {
05322   SDVTList VTs = getVTList(VT1, VT2, VT3);
05323   SDValue Ops[] = { Op1, Op2, Op3 };
05324   return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
05325 }
05326 
05327 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
05328                                    SDVTList VTs, const SDValue *Ops,
05329                                    unsigned NumOps) {
05330   N = MorphNodeTo(N, ~MachineOpc, VTs, Ops, NumOps);
05331   // Reset the NodeID to -1.
05332   N->setNodeId(-1);
05333   return N;
05334 }
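// [Illustrative sketch, not part of SelectionDAG.cpp] During instruction
// selection a target replaces an ISD node in place with one of its machine
// opcodes; MachineOpc stands in for a TableGen-generated instruction such as
// an integer add.  The returned node must be used from then on, since an
// identical machine node may already exist.
static SDNode *exampleSelectNodeTo(SelectionDAG &DAG, SDNode *N,
                                   unsigned MachineOpc) {
  SDValue LHS = N->getOperand(0), RHS = N->getOperand(1);
  return DAG.SelectNodeTo(N, MachineOpc, N->getValueType(0), LHS, RHS);
}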
05335 
05336 /// UpdadeSDLocOnMergedSDNode - If the opt level is -O0 then it throws away
05337 /// the line number information on the merged node since it is not possible to
05338 /// preserve the information that the operation is associated with multiple
05339 /// lines.  This will make the debugger work better at -O0, where there is a
05340 /// higher probability of having other instructions associated with that line.
05341 ///
05342 /// For IROrder, we keep the smaller of the two
05343 SDNode *SelectionDAG::UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc OLoc) {
05344   DebugLoc NLoc = N->getDebugLoc();
05345   if (!(NLoc.isUnknown()) && (OptLevel == CodeGenOpt::None) &&
05346     (OLoc.getDebugLoc() != NLoc)) {
05347     N->setDebugLoc(DebugLoc());
05348   }
05349   unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
05350   N->setIROrder(Order);
05351   return N;
05352 }
05353 
05354 /// MorphNodeTo - This *mutates* the specified node to have the specified
05355 /// return type, opcode, and operands.
05356 ///
05357 /// Note that MorphNodeTo returns the resultant node.  If there is already a
05358 /// node of the specified opcode and operands, it returns that node instead of
05359 /// the current one.  Note that the SDLoc need not be the same.
05360 ///
05361 /// Using MorphNodeTo is faster than creating a new node and swapping it in
05362 /// with ReplaceAllUsesWith both because it often avoids allocating a new
05363 /// node, and because it doesn't require CSE recalculation for any of
05364 /// the node's users.
05365 ///
05366 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
05367                                   SDVTList VTs, const SDValue *Ops,
05368                                   unsigned NumOps) {
05369   // If an identical node already exists, use it.
05370   void *IP = nullptr;
05371   if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
05372     FoldingSetNodeID ID;
05373     AddNodeIDNode(ID, Opc, VTs, Ops, NumOps);
05374     if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
05375       return UpdadeSDLocOnMergedSDNode(ON, SDLoc(N));
05376   }
05377 
05378   if (!RemoveNodeFromCSEMaps(N))
05379     IP = nullptr;
05380 
05381   // Start the morphing.
05382   N->NodeType = Opc;
05383   N->ValueList = VTs.VTs;
05384   N->NumValues = VTs.NumVTs;
05385 
05386   // Clear the operands list, updating used nodes to remove this from their
05387   // use list.  Keep track of any operands that become dead as a result.
05388   SmallPtrSet<SDNode*, 16> DeadNodeSet;
05389   for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
05390     SDUse &Use = *I++;
05391     SDNode *Used = Use.getNode();
05392     Use.set(SDValue());
05393     if (Used->use_empty())
05394       DeadNodeSet.insert(Used);
05395   }
05396 
05397   if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
05398     // Initialize the memory references information.
05399     MN->setMemRefs(nullptr, nullptr);
05400     // If NumOps is larger than the # of operands we can have in a
05401     // MachineSDNode, reallocate the operand list.
05402     if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
05403       if (MN->OperandsNeedDelete)
05404         delete[] MN->OperandList;
05405       if (NumOps > array_lengthof(MN->LocalOperands))
05406         // We're creating a final node that will live unmorphed for the
05407         // remainder of the current SelectionDAG iteration, so we can allocate
05408         // the operands directly out of a pool with no recycling metadata.
05409         MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
05410                          Ops, NumOps);
05411       else
05412         MN->InitOperands(MN->LocalOperands, Ops, NumOps);
05413       MN->OperandsNeedDelete = false;
05414     } else
05415       MN->InitOperands(MN->OperandList, Ops, NumOps);
05416   } else {
05417     // If NumOps is larger than the # of operands we currently have, reallocate
05418     // the operand list.
05419     if (NumOps > N->NumOperands) {
05420       if (N->OperandsNeedDelete)
05421         delete[] N->OperandList;
05422       N->InitOperands(new SDUse[NumOps], Ops, NumOps);
05423       N->OperandsNeedDelete = true;
05424     } else
05425       N->InitOperands(N->OperandList, Ops, NumOps);
05426   }
05427 
05428   // Delete any nodes that are still dead after adding the uses for the
05429   // new operands.
05430   if (!DeadNodeSet.empty()) {
05431     SmallVector<SDNode *, 16> DeadNodes;
05432     for (SmallPtrSet<SDNode *, 16>::iterator I = DeadNodeSet.begin(),
05433          E = DeadNodeSet.end(); I != E; ++I)
05434       if ((*I)->use_empty())
05435         DeadNodes.push_back(*I);
05436     RemoveDeadNodes(DeadNodes);
05437   }
05438 
05439   if (IP)
05440     CSEMap.InsertNode(N, IP);   // Memoize the new node.
05441   return N;
05442 }
05443 
05444 
05445 /// getMachineNode - These are used for target selectors to create a new node
05446 /// with specified return type(s), MachineInstr opcode, and operands.
05447 ///
05448 /// Note that getMachineNode returns the resultant node.  If there is already a
05449 /// node of the specified opcode and operands, it returns that node instead of
05450 /// the current one.
05451 MachineSDNode *
05452 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT) {
05453   SDVTList VTs = getVTList(VT);
05454   return getMachineNode(Opcode, dl, VTs, None);
05455 }
05456 
05457 MachineSDNode *
05458 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, SDValue Op1) {
05459   SDVTList VTs = getVTList(VT);
05460   SDValue Ops[] = { Op1 };
05461   return getMachineNode(Opcode, dl, VTs, Ops);
05462 }
05463 
05464 MachineSDNode *
05465 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
05466                              SDValue Op1, SDValue Op2) {
05467   SDVTList VTs = getVTList(VT);
05468   SDValue Ops[] = { Op1, Op2 };
05469   return getMachineNode(Opcode, dl, VTs, Ops);
05470 }
05471 
05472 MachineSDNode *
05473 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
05474                              SDValue Op1, SDValue Op2, SDValue Op3) {
05475   SDVTList VTs = getVTList(VT);
05476   SDValue Ops[] = { Op1, Op2, Op3 };
05477   return getMachineNode(Opcode, dl, VTs, Ops);
05478 }
05479 
05480 MachineSDNode *
05481 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
05482                              ArrayRef<SDValue> Ops) {
05483   SDVTList VTs = getVTList(VT);
05484   return getMachineNode(Opcode, dl, VTs, Ops);
05485 }
05486 
05487 MachineSDNode *
05488 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2) {
05489   SDVTList VTs = getVTList(VT1, VT2);
05490   return getMachineNode(Opcode, dl, VTs, None);
05491 }
05492 
05493 MachineSDNode *
05494 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
05495                              EVT VT1, EVT VT2, SDValue Op1) {
05496   SDVTList VTs = getVTList(VT1, VT2);
05497   SDValue Ops[] = { Op1 };
05498   return getMachineNode(Opcode, dl, VTs, Ops);
05499 }
05500 
05501 MachineSDNode *
05502 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
05503                              EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) {
05504   SDVTList VTs = getVTList(VT1, VT2);
05505   SDValue Ops[] = { Op1, Op2 };
05506   return getMachineNode(Opcode, dl, VTs, Ops);
05507 }
05508 
05509 MachineSDNode *
05510 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
05511                              EVT VT1, EVT VT2, SDValue Op1,
05512                              SDValue Op2, SDValue Op3) {
05513   SDVTList VTs = getVTList(VT1, VT2);
05514   SDValue Ops[] = { Op1, Op2, Op3 };
05515   return getMachineNode(Opcode, dl, VTs, Ops);
05516 }
05517 
05518 MachineSDNode *
05519 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
05520                              EVT VT1, EVT VT2,
05521                              ArrayRef<SDValue> Ops) {
05522   SDVTList VTs = getVTList(VT1, VT2);
05523   return getMachineNode(Opcode, dl, VTs, Ops);
05524 }
05525 
05526 MachineSDNode *
05527 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
05528                              EVT VT1, EVT VT2, EVT VT3,
05529                              SDValue Op1, SDValue Op2) {
05530   SDVTList VTs = getVTList(VT1, VT2, VT3);
05531   SDValue Ops[] = { Op1, Op2 };
05532   return getMachineNode(Opcode, dl, VTs, Ops);
05533 }
05534 
05535 MachineSDNode *
05536 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
05537                              EVT VT1, EVT VT2, EVT VT3,
05538                              SDValue Op1, SDValue Op2, SDValue Op3) {
05539   SDVTList VTs = getVTList(VT1, VT2, VT3);
05540   SDValue Ops[] = { Op1, Op2, Op3 };
05541   return getMachineNode(Opcode, dl, VTs, Ops);
05542 }
05543 
05544 MachineSDNode *
05545 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
05546                              EVT VT1, EVT VT2, EVT VT3,
05547                              ArrayRef<SDValue> Ops) {
05548   SDVTList VTs = getVTList(VT1, VT2, VT3);
05549   return getMachineNode(Opcode, dl, VTs, Ops);
05550 }
05551 
05552 MachineSDNode *
05553 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1,
05554                              EVT VT2, EVT VT3, EVT VT4,
05555                              ArrayRef<SDValue> Ops) {
05556   SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
05557   return getMachineNode(Opcode, dl, VTs, Ops);
05558 }
05559 
05560 MachineSDNode *
05561 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl,
05562                              ArrayRef<EVT> ResultTys,
05563                              ArrayRef<SDValue> Ops) {
05564   SDVTList VTs = getVTList(ResultTys);
05565   return getMachineNode(Opcode, dl, VTs, Ops);
05566 }
05567 
05568 MachineSDNode *
05569 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
05570                              ArrayRef<SDValue> OpsArray) {
05571   bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
05572   MachineSDNode *N;
05573   void *IP = nullptr;
05574   const SDValue *Ops = OpsArray.data();
05575   unsigned NumOps = OpsArray.size();
05576 
05577   if (DoCSE) {
05578     FoldingSetNodeID ID;
05579     AddNodeIDNode(ID, ~Opcode, VTs, Ops, NumOps);
05580     IP = nullptr;
05581     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
05582       return cast<MachineSDNode>(UpdadeSDLocOnMergedSDNode(E, DL));
05583     }
05584   }
05585 
05586   // Allocate a new MachineSDNode.
05587   N = new (NodeAllocator) MachineSDNode(~Opcode, DL.getIROrder(),
05588                                         DL.getDebugLoc(), VTs);
05589 
05590   // Initialize the operands list.
05591   if (NumOps > array_lengthof(N->LocalOperands))
05592     // We're creating a final node that will live unmorphed for the
05593     // remainder of the current SelectionDAG iteration, so we can allocate
05594     // the operands directly out of a pool with no recycling metadata.
05595     N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
05596                     Ops, NumOps);
05597   else
05598     N->InitOperands(N->LocalOperands, Ops, NumOps);
05599   N->OperandsNeedDelete = false;
05600 
05601   if (DoCSE)
05602     CSEMap.InsertNode(N, IP);
05603 
05604   AllNodes.push_back(N);
05605 #ifndef NDEBUG
05606   VerifyMachineNode(N);
05607 #endif
05608   return N;
05609 }
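// [Illustrative sketch, not part of SelectionDAG.cpp] Creating a fresh
// machine node with two results (a value and a chain), as a target selector
// would for a load-like instruction.  LoadOpc stands in for a target-specific
// TableGen-generated opcode; Base and Chain are assumed operands.  If an
// identical machine node already exists, that node is returned instead.
static MachineSDNode *exampleGetMachineNode(SelectionDAG &DAG, SDLoc dl,
                                            unsigned LoadOpc, SDValue Base,
                                            SDValue Chain) {
  return DAG.getMachineNode(LoadOpc, dl, MVT::i32, MVT::Other, Base, Chain);
}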
05610 
05611 /// getTargetExtractSubreg - A convenience function for creating
05612 /// TargetOpcode::EXTRACT_SUBREG nodes.
05613 SDValue
05614 SelectionDAG::getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT,
05615                                      SDValue Operand) {
05616   SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
05617   SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
05618                                   VT, Operand, SRIdxVal);
05619   return SDValue(Subreg, 0);
05620 }
05621 
05622 /// getTargetInsertSubreg - A convenience function for creating
05623 /// TargetOpcode::INSERT_SUBREG nodes.
05624 SDValue
05625 SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
05626                                     SDValue Operand, SDValue Subreg) {
05627   SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
05628   SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
05629                                   VT, Operand, Subreg, SRIdxVal);
05630   return SDValue(Result, 0);
05631 }
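// [Illustrative sketch, not part of SelectionDAG.cpp] The two helpers above
// wrap getMachineNode for the common subregister idioms.  SubRegIdx stands in
// for a target-specific index (e.g. a "low half" index from TableGen); Wide
// is an assumed wide (e.g. i64) value.
static SDValue exampleSubregRoundTrip(SelectionDAG &DAG, SDLoc dl,
                                      int SubRegIdx, SDValue Wide) {
  // Pull the low 32 bits out of the wide value...
  SDValue Lo = DAG.getTargetExtractSubreg(SubRegIdx, dl, MVT::i32, Wide);
  // ...and insert them back, yielding an equivalent wide value.
  return DAG.getTargetInsertSubreg(SubRegIdx, dl, Wide.getValueType(),
                                   Wide, Lo);
}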
05632 
05633 /// getNodeIfExists - Get the specified node if it's already available, or
05634 /// else return NULL.
05635 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
05636                                       const SDValue *Ops, unsigned NumOps) {
05637   if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
05638     FoldingSetNodeID ID;
05639     AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
05640     void *IP = nullptr;
05641     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
05642       return E;
05643   }
05644   return nullptr;
05645 }
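// [Illustrative sketch, not part of SelectionDAG.cpp] Probing the CSE map so
// a transformation only fires when an equivalent node is already present.
// X and Y are assumed values of the integer type VT.
static SDValue exampleGetNodeIfExists(SelectionDAG &DAG, EVT VT,
                                      SDValue X, SDValue Y) {
  SDValue Ops[] = { X, Y };
  if (SDNode *Existing =
          DAG.getNodeIfExists(ISD::ADD, DAG.getVTList(VT), Ops, 2))
    return SDValue(Existing, 0);   // Reuse the existing X+Y.
  return SDValue();                // No such node; the caller bails out.
}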
05646 
05647 /// getDbgValue - Creates a SDDbgValue node.
05648 ///
05649 SDDbgValue *
05650 SelectionDAG::getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R, uint64_t Off,
05651                           DebugLoc DL, unsigned O) {
05652   return new (Allocator) SDDbgValue(MDPtr, N, R, Off, DL, O);
05653 }
05654 
05655 SDDbgValue *
05656 SelectionDAG::getDbgValue(MDNode *MDPtr, const Value *C, uint64_t Off,
05657                           DebugLoc DL, unsigned O) {
05658   return new (Allocator) SDDbgValue(MDPtr, C, Off, DL, O);
05659 }
05660 
05661 SDDbgValue *
05662 SelectionDAG::getDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
05663                           DebugLoc DL, unsigned O) {
05664   return new (Allocator) SDDbgValue(MDPtr, FI, Off, DL, O);
05665 }
05666 
05667 namespace {
05668 
05669 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
05670 /// pointed to by a use iterator is deleted, increment the use iterator
05671 /// so that it doesn't dangle.
05672 ///
05673 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
05674   SDNode::use_iterator &UI;
05675   SDNode::use_iterator &UE;
05676 
05677   void NodeDeleted(SDNode *N, SDNode *E) override {
05678     // Increment the iterator as needed.
05679     while (UI != UE && N == *UI)
05680       ++UI;
05681   }
05682 
05683 public:
05684   RAUWUpdateListener(SelectionDAG &d,
05685                      SDNode::use_iterator &ui,
05686                      SDNode::use_iterator &ue)
05687     : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
05688 };
05689 
05690 }
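// [Illustrative sketch, not part of SelectionDAG.cpp] Clients can observe
// node deletions the same way RAUWUpdateListener does, by deriving from
// SelectionDAG::DAGUpdateListener; constructing the listener registers it
// with the DAG for the lifetime of the object.
namespace {
class DeletedNodeCounter : public SelectionDAG::DAGUpdateListener {
  unsigned &Count;
  void NodeDeleted(SDNode *, SDNode *) override { ++Count; }
public:
  DeletedNodeCounter(SelectionDAG &DAG, unsigned &C)
    : SelectionDAG::DAGUpdateListener(DAG), Count(C) {}
};
}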
05691 
05692 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
05693 /// This can cause recursive merging of nodes in the DAG.
05694 ///
05695 /// This version assumes From has a single result value.
05696 ///
05697 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
05698   SDNode *From = FromN.getNode();
05699   assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
05700          "Cannot replace with this method!");
05701   assert(From != To.getNode() && "Cannot replace uses of with self");
05702 
05703   // Iterate over all the existing uses of From. New uses will be added
05704   // to the beginning of the use list, which we avoid visiting.
05705   // This specifically avoids visiting uses of From that arise while the
05706   // replacement is happening, because any such uses would be the result
05707   // of CSE: If an existing node looks like From after one of its operands
05708   // is replaced by To, we don't want to replace all of its users with To
05709   // too. See PR3018 for more info.
05710   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
05711   RAUWUpdateListener Listener(*this, UI, UE);
05712   while (UI != UE) {
05713     SDNode *User = *UI;
05714 
05715     // This node is about to morph, remove its old self from the CSE maps.
05716     RemoveNodeFromCSEMaps(User);
05717 
05718     // A user can appear in a use list multiple times, and when this
05719     // happens the uses are usually next to each other in the list.
05720     // To help reduce the number of CSE recomputations, process all
05721     // the uses of this user that we can find this way.
05722     do {
05723       SDUse &Use = UI.getUse();
05724       ++UI;
05725       Use.set(To);
05726     } while (UI != UE && *UI == User);
05727 
05728     // Now that we have modified User, add it back to the CSE maps.  If it
05729     // already exists there, recursively merge the results together.
05730     AddModifiedNodeToCSEMaps(User);
05731   }
05732 
05733   // If we just RAUW'd the root, take note.
05734   if (FromN == getRoot())
05735     setRoot(To);
05736 }
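// [Illustrative sketch, not part of SelectionDAG.cpp] The typical combine
// pattern: every use of a single-result node N is rewired to a replacement
// value, after which N itself is usually dead and can be reclaimed.
static void exampleReplaceAllUses(SelectionDAG &DAG, SDNode *N,
                                  SDValue Replacement) {
  assert(N->getNumValues() == 1 && "this overload requires a single result");
  DAG.ReplaceAllUsesWith(SDValue(N, 0), Replacement);
}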
05737 
05738 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
05739 /// This can cause recursive merging of nodes in the DAG.
05740 ///
05741 /// This version assumes that for each value of From, there is a
05742 /// corresponding value in To in the same position with the same type.
05743 ///
05744 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
05745 #ifndef NDEBUG
05746   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
05747     assert((!From->hasAnyUseOfValue(i) ||
05748             From->getValueType(i) == To->getValueType(i)) &&
05749            "Cannot use this version of ReplaceAllUsesWith!");
05750 #endif
05751 
05752   // Handle the trivial case.
05753   if (From == To)
05754     return;
05755 
05756   // Iterate over just the existing users of From. See the comments in
05757   // the ReplaceAllUsesWith above.
05758   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
05759   RAUWUpdateListener Listener(*this, UI, UE);
05760   while (UI != UE) {
05761     SDNode *User = *UI;
05762 
05763     // This node is about to morph, remove its old self from the CSE maps.
05764     RemoveNodeFromCSEMaps(User);
05765 
05766     // A user can appear in a use list multiple times, and when this
05767     // happens the uses are usually next to each other in the list.
05768     // To help reduce the number of CSE recomputations, process all
05769     // the uses of this user that we can find this way.
05770     do {
05771       SDUse &Use = UI.getUse();
05772       ++UI;
05773       Use.setNode(To);
05774     } while (UI != UE && *UI == User);
05775 
05776     // Now that we have modified User, add it back to the CSE maps.  If it
05777     // already exists there, recursively merge the results together.
05778     AddModifiedNodeToCSEMaps(User);
05779   }
05780 
05781   // If we just RAUW'd the root, take note.
05782   if (From == getRoot().getNode())
05783     setRoot(SDValue(To, getRoot().getResNo()));
05784 }
05785 
05786 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
05787 /// This can cause recursive merging of nodes in the DAG.
05788 ///
05789 /// This version can replace From with any result values.  To must match the
05790 /// number and types of values returned by From.
05791 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
05792   if (From->getNumValues() == 1)  // Handle the simple case efficiently.
05793     return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
05794 
05795   // Iterate over just the existing users of From. See the comments in
05796   // the ReplaceAllUsesWith above.
05797   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
05798   RAUWUpdateListener Listener(*this, UI, UE);
05799   while (UI != UE) {
05800     SDNode *User = *UI;
05801 
05802     // This node is about to morph, remove its old self from the CSE maps.
05803     RemoveNodeFromCSEMaps(User);
05804 
05805     // A user can appear in a use list multiple times, and when this
05806     // happens the uses are usually next to each other in the list.
05807     // To help reduce the number of CSE recomputations, process all
05808     // the uses of this user that we can find this way.
05809     do {
05810       SDUse &Use = UI.getUse();
05811       const SDValue &ToOp = To[Use.getResNo()];
05812       ++UI;
05813       Use.set(ToOp);
05814     } while (UI != UE && *UI == User);
05815 
05816     // Now that we have modified User, add it back to the CSE maps.  If it
05817     // already exists there, recursively merge the results together.
05818     AddModifiedNodeToCSEMaps(User);
05819   }
05820 
05821   // If we just RAUW'd the root, take note.
05822   if (From == getRoot().getNode())
05823     setRoot(SDValue(To[getRoot().getResNo()]));
05824 }
05825 
05826 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
05827 /// uses of other values produced by From.getNode() alone.  This can cause
05828 /// recursive merging of nodes in the DAG, as for ReplaceAllUsesWith.
05829 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
05830   // Handle the really simple, really trivial case efficiently.
05831   if (From == To) return;
05832 
05833   // Handle the simple, trivial, case efficiently.
05834   if (From.getNode()->getNumValues() == 1) {
05835     ReplaceAllUsesWith(From, To);
05836     return;
05837   }
05838 
05839   // Iterate over just the existing users of From. See the comments in
05840   // the ReplaceAllUsesWith above.
05841   SDNode::use_iterator UI = From.getNode()->use_begin(),
05842                        UE = From.getNode()->use_end();
05843   RAUWUpdateListener Listener(*this, UI, UE);
05844   while (UI != UE) {
05845     SDNode *User = *UI;
05846     bool UserRemovedFromCSEMaps = false;
05847 
05848     // A user can appear in a use list multiple times, and when this
05849     // happens the uses are usually next to each other in the list.
05850     // To help reduce the number of CSE recomputations, process all
05851     // the uses of this user that we can find this way.
05852     do {
05853       SDUse &Use = UI.getUse();
05854 
05855       // Skip uses of different values from the same node.
05856       if (Use.getResNo() != From.getResNo()) {
05857         ++UI;
05858         continue;
05859       }
05860 
05861       // If this node hasn't been modified yet, it's still in the CSE maps,
05862       // so remove its old self from the CSE maps.
05863       if (!UserRemovedFromCSEMaps) {
05864         RemoveNodeFromCSEMaps(User);
05865         UserRemovedFromCSEMaps = true;
05866       }
05867 
05868       ++UI;
05869       Use.set(To);
05870     } while (UI != UE && *UI == User);
05871 
05872     // We are iterating over all uses of the From node, so if a use
05873     // doesn't use the specific value, no changes are made.
05874     if (!UserRemovedFromCSEMaps)
05875       continue;
05876 
05877     // Now that we have modified User, add it back to the CSE maps.  If it
05878     // already exists there, recursively merge the results together.
05879     AddModifiedNodeToCSEMaps(User);
05880   }
05881 
05882   // If we just RAUW'd the root, take note.
05883   if (From == getRoot())
05884     setRoot(To);
05885 }
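// [Illustrative sketch, not part of SelectionDAG.cpp] Rewiring only the chain
// result of an ordinary (unindexed) load -- result number 1 -- to a new
// chain, while leaving users of the loaded value (result number 0) untouched.
static void exampleReplaceChainOnly(SelectionDAG &DAG, LoadSDNode *Load,
                                    SDValue NewChain) {
  DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), NewChain);
}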
05886 
05887 namespace {
05888   /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
05889   /// to record information about a use.
05890   struct UseMemo {
05891     SDNode *User;
05892     unsigned Index;
05893     SDUse *Use;
05894   };
05895 
05896   /// operator< - Sort Memos by User.
05897   bool operator<(const UseMemo &L, const UseMemo &R) {
05898     return (intptr_t)L.User < (intptr_t)R.User;
05899   }
05900 }
05901 
05902 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
05903 /// uses of other values produced by From.getNode() alone.  The same value
05904 /// may appear in both the From and To list.  This can cause recursive
05905 /// merging of nodes in the DAG, as for ReplaceAllUsesWith.
05906 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
05907                                               const SDValue *To,
05908                                               unsigned Num){
05909   // Handle the simple, trivial case efficiently.
05910   if (Num == 1)
05911     return ReplaceAllUsesOfValueWith(*From, *To);
05912 
05913   // Read all the uses up front and make records of them.  This helps to
05914   // avoid processing new uses that are introduced during the
05915   // replacement process.
05916   SmallVector<UseMemo, 4> Uses;
05917   for (unsigned i = 0; i != Num; ++i) {
05918     unsigned FromResNo = From[i].getResNo();
05919     SDNode *FromNode = From[i].getNode();
05920     for (SDNode::use_iterator UI = FromNode->use_begin(),
05921          E = FromNode->use_end(); UI != E; ++UI) {
05922       SDUse &Use = UI.getUse();
05923       if (Use.getResNo() == FromResNo) {
05924         UseMemo Memo = { *UI, i, &Use };
05925         Uses.push_back(Memo);
05926       }
05927     }
05928   }
05929 
05930   // Sort the uses, so that all the uses from a given User are together.
05931   std::sort(Uses.begin(), Uses.end());
05932 
05933   for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
05934        UseIndex != UseIndexEnd; ) {
05935     // We know that this user uses some value of From.  If it is the right
05936     // value, update it.
05937     SDNode *User = Uses[UseIndex].User;
05938 
05939     // This node is about to morph, remove its old self from the CSE maps.
05940     RemoveNodeFromCSEMaps(User);
05941 
05942     // The Uses array is sorted, so all the uses for a given User
05943     // are next to each other in the list.
05944     // To help reduce the number of CSE recomputations, process all
05945     // the uses of this user that we can find this way.
05946     do {
05947       unsigned i = Uses[UseIndex].Index;
05948       SDUse &Use = *Uses[UseIndex].Use;
05949       ++UseIndex;
05950 
05951       Use.set(To[i]);
05952     } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
05953 
05954     // Now that we have modified User, add it back to the CSE maps.  If it
05955     // already exists there, recursively merge the results together.
05956     AddModifiedNodeToCSEMaps(User);
05957   }
05958 }
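// [Illustrative sketch, not part of SelectionDAG.cpp] Replacing both results
// of a two-result node in one call; a user of both values is removed from and
// re-added to the CSE maps only once, rather than once per value.
static void exampleReplaceBothResults(SelectionDAG &DAG, SDNode *Old,
                                      SDValue NewValue, SDValue NewChain) {
  SDValue From[] = { SDValue(Old, 0), SDValue(Old, 1) };
  SDValue To[]   = { NewValue, NewChain };
  DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
}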
05959 
05960 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
05961 /// based on their topological order.  It returns the number of nodes assigned,
05962 /// and leaves the AllNodes list itself sorted into topological order.
05963 unsigned SelectionDAG::AssignTopologicalOrder() {
05964 
05965   unsigned DAGSize = 0;
05966 
05967   // SortedPos tracks the progress of the algorithm. Nodes before it are
05968   // sorted, nodes after it are unsorted. When the algorithm completes
05969   // it is at the end of the list.
05970   allnodes_iterator SortedPos = allnodes_begin();
05971 
05972   // Visit all the nodes. Move nodes with no operands to the front of
05973   // the list immediately. Annotate nodes that do have operands with their
05974   // operand count. Before we do this, the Node Id fields of the nodes
05975   // may contain arbitrary values. After, the Node Id fields for nodes
05976   // before SortedPos will contain the topological sort index, and the
05977   // Node Id fields for nodes at SortedPos and after will contain the
05978   // count of outstanding operands.
05979   for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
05980     SDNode *N = I++;
05981     checkForCycles(N);
05982     unsigned Degree = N->getNumOperands();
05983     if (Degree == 0) {
05984       // A node with no operands, add it to the result array immediately.
05985       N->setNodeId(DAGSize++);
05986       allnodes_iterator Q = N;
05987       if (Q != SortedPos)
05988         SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
05989       assert(SortedPos != AllNodes.end() && "Overran node list");
05990       ++SortedPos;
05991     } else {
05992       // Temporarily use the Node Id as scratch space for the degree count.
05993       N->setNodeId(Degree);
05994     }
05995   }
05996 
05997   // Visit all the nodes. As we iterate, move nodes into sorted order,
05998   // such that by the time the end is reached all nodes will be sorted.
05999   for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ++I) {
06000     SDNode *N = I;
06001     checkForCycles(N);
06002     // N is in sorted position, so all its uses have one less operand
06003     // that needs to be sorted.
06004     for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
06005          UI != UE; ++UI) {
06006       SDNode *P = *UI;
06007       unsigned Degree = P->getNodeId();
06008       assert(Degree != 0 && "Invalid node degree");
06009       --Degree;
06010       if (Degree == 0) {
06011         // All of P's operands are sorted, so P may be sorted now.
06012         P->setNodeId(DAGSize++);
06013         if (P != SortedPos)
06014           SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
06015         assert(SortedPos != AllNodes.end() && "Overran node list");
06016         ++SortedPos;
06017       } else {
06018         // Update P's outstanding operand count.
06019         P->setNodeId(Degree);
06020       }
06021     }
06022     if (I == SortedPos) {
06023 #ifndef NDEBUG
06024       SDNode *S = ++I;
06025       dbgs() << "Overran sorted position:\n";
06026       S->dumprFull();
06027 #endif
06028       llvm_unreachable(nullptr);
06029     }
06030   }
06031 
06032   assert(SortedPos == AllNodes.end() &&
06033          "Topological sort incomplete!");
06034   assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
06035          "First node in topological sort is not the entry token!");
06036   assert(AllNodes.front().getNodeId() == 0 &&
06037          "First node in topological sort has non-zero id!");
06038   assert(AllNodes.front().getNumOperands() == 0 &&
06039          "First node in topological sort has operands!");
06040   assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
06041          "Last node in topological sort has unexpected id!");
06042   assert(AllNodes.back().use_empty() &&
06043          "Last node in topological sort has users!");
06044   assert(DAGSize == allnodes_size() && "Node count mismatch!");
06045   return DAGSize;
06046 }
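// [Illustrative sketch, not part of SelectionDAG.cpp] After the call, walking
// AllNodes visits each node after all of its operands, and node ids run from
// 0 to the returned count minus one.
static void exampleTopologicalWalk(SelectionDAG &DAG) {
  unsigned NumNodes = DAG.AssignTopologicalOrder();
  (void)NumNodes;
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
         E = DAG.allnodes_end(); I != E; ++I) {
    SDNode *N = I;   // Every operand of N has already been visited.
    (void)N;
  }
}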
06047 
06048 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
06049 /// value is produced by SD.
06050 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
06051   DbgInfo->add(DB, SD, isParameter);
06052   if (SD)
06053     SD->setHasDebugValue(true);
06054 }
06055 
06056 /// TransferDbgValues - Transfer SDDbgValues.
06057 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
06058   if (From == To || !From.getNode()->getHasDebugValue())
06059     return;
06060   SDNode *FromNode = From.getNode();
06061   SDNode *ToNode = To.getNode();
06062   ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
06063   SmallVector<SDDbgValue *, 2> ClonedDVs;
06064   for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
06065        I != E; ++I) {
06066     SDDbgValue *Dbg = *I;
06067     if (Dbg->getKind() == SDDbgValue::SDNODE) {
06068       SDDbgValue *Clone = getDbgValue(Dbg->getMDPtr(), ToNode, To.getResNo(),
06069                                       Dbg->getOffset(), Dbg->getDebugLoc(),
06070                                       Dbg->getOrder());
06071       ClonedDVs.push_back(Clone);
06072     }
06073   }
06074   for (SmallVectorImpl<SDDbgValue *>::iterator I = ClonedDVs.begin(),
06075          E = ClonedDVs.end(); I != E; ++I)
06076     AddDbgValue(*I, ToNode, false);
06077 }
06078 
06079 //===----------------------------------------------------------------------===//
06080 //                              SDNode Class
06081 //===----------------------------------------------------------------------===//
06082 
06083 HandleSDNode::~HandleSDNode() {
06084   DropOperands();
06085 }
06086 
06087 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
06088                                          DebugLoc DL, const GlobalValue *GA,
06089                                          EVT VT, int64_t o, unsigned char TF)
06090   : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
06091   TheGlobal = GA;
06092 }
06093 
06094 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, DebugLoc dl, EVT VT,
06095                                          SDValue X, unsigned SrcAS,
06096                                          unsigned DestAS)
06097  : UnarySDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT), X),
06098    SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
06099 
06100 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
06101                      EVT memvt, MachineMemOperand *mmo)
06102  : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
06103   SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
06104                                       MMO->isNonTemporal(), MMO->isInvariant());
06105   assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
06106   assert(isNonTemporal() == MMO->isNonTemporal() &&
06107          "Non-temporal encoding error!");
06108   assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
06109 }
06110 
06111 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
06112                      const SDValue *Ops, unsigned NumOps, EVT memvt,
06113                      MachineMemOperand *mmo)
06114    : SDNode(Opc, Order, dl, VTs, Ops, NumOps),
06115      MemoryVT(memvt), MMO(mmo) {
06116   SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
06117                                       MMO->isNonTemporal(), MMO->isInvariant());
06118   assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
06119   assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
06120 }
06121 
06122 /// Profile - Gather unique data for the node.
06123 ///
06124 void SDNode::Profile(FoldingSetNodeID &ID) const {
06125   AddNodeIDNode(ID, this);
06126 }
06127 
06128 namespace {
06129   struct EVTArray {
06130     std::vector<EVT> VTs;
06131 
06132     EVTArray() {
06133       VTs.reserve(MVT::LAST_VALUETYPE);
06134       for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
06135         VTs.push_back(MVT((MVT::SimpleValueType)i));
06136     }
06137   };
06138 }
06139 
06140 static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
06141 static ManagedStatic<EVTArray> SimpleVTArray;
06142 static ManagedStatic<sys::SmartMutex<true> > VTMutex;
06143 
06144 /// getValueTypeList - Return a pointer to the specified value type.
06145 ///
06146 const EVT *SDNode::getValueTypeList(EVT VT) {
06147   if (VT.isExtended()) {
06148     sys::SmartScopedLock<true> Lock(*VTMutex);
06149     return &(*EVTs->insert(VT).first);
06150   } else {
06151     assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
06152            "Value type out of range!");
06153     return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
06154   }
06155 }
06156 
06157 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
06158 /// indicated value.  This method ignores uses of other values defined by this
06159 /// operation.
06160 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
06161   assert(Value < getNumValues() && "Bad value!");
06162 
06163   // TODO: Only iterate over uses of a given value of the node
06164   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
06165     if (UI.getUse().getResNo() == Value) {
06166       if (NUses == 0)
06167         return false;
06168       --NUses;
06169     }
06170   }
06171 
06172   // Found exactly the right number of uses?
06173   return NUses == 0;
06174 }
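// [Illustrative sketch, not part of SelectionDAG.cpp] A common profitability
// check: the node's second result (e.g. a load's chain) is used exactly once.
static bool exampleSingleUseOfChain(const SDNode *N) {
  return N->getNumValues() > 1 && N->hasNUsesOfValue(1, 1);
}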
06175 
06176 
06177 /// hasAnyUseOfValue - Return true if there is any use of the indicated
06178 /// value. This method ignores uses of other values defined by this operation.
06179 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
06180   assert(Value < getNumValues() && "Bad value!");
06181 
06182   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
06183     if (UI.getUse().getResNo() == Value)
06184       return true;
06185 
06186   return false;
06187 }
06188 
06189 
06190 /// isOnlyUserOf - Return true if this node is the only use of N.
06191 ///
06192 bool SDNode::isOnlyUserOf(SDNode *N) const {
06193   bool Seen = false;
06194   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
06195     SDNode *User = *I;
06196     if (User == this)
06197       Seen = true;
06198     else
06199       return false;
06200   }
06201 
06202   return Seen;
06203 }
06204 
06205 /// isOperandOf - Return true if this value is an operand of N.
06206 ///
06207 bool SDValue::isOperandOf(SDNode *N) const {
06208   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
06209     if (*this == N->getOperand(i))
06210       return true;
06211   return false;
06212 }
06213 
06214 bool SDNode::isOperandOf(SDNode *N) const {
06215   for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
06216     if (this == N->OperandList[i].getNode())
06217       return true;
06218   return false;
06219 }
06220 
06221 /// reachesChainWithoutSideEffects - Return true if this operand (which must
06222 /// be a chain) reaches the specified operand without crossing any
06223 /// side-effecting instructions on any chain path.  In practice, this looks
06224 /// through token factors and non-volatile loads.  In order to remain efficient,
06225 /// this only looks a couple of nodes in; it does not do an exhaustive search.
06226 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
06227                                                unsigned Depth) const {
06228   if (*this == Dest) return true;
06229 
06230   // Don't search too deeply, we just want to be able to see through
06231   // TokenFactor's etc.
06232   if (Depth == 0) return false;
06233 
06234   // If this is a token factor, all inputs to the TF happen in parallel.  If any
06235   // of the operands of the TF does not reach dest, then we cannot do the xform.
06236   if (getOpcode() == ISD::TokenFactor) {
06237     for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
06238       if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
06239         return false;
06240     return true;
06241   }
06242 
06243   // Loads don't have side effects, look through them.
06244   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
06245     if (!Ld->isVolatile())
06246       return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
06247   }
06248   return false;
06249 }
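// [Illustrative sketch, not part of SelectionDAG.cpp] A combine checking that
// a store's chain reaches a load's output chain (result 1) while crossing
// only TokenFactors and non-volatile loads, before trying to fold the two
// memory operations together.
static bool exampleChainIsClear(StoreSDNode *Store, LoadSDNode *Load) {
  return Store->getChain().reachesChainWithoutSideEffects(SDValue(Load, 1), 2);
}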
06250 
06251 /// hasPredecessor - Return true if N is a predecessor of this node.
06252 /// N is either an operand of this node, or can be reached by recursively
06253 /// traversing up the operands.
06254 /// NOTE: This is an expensive method. Use it carefully.
06255 bool SDNode::hasPredecessor(const SDNode *N) const {
06256   SmallPtrSet<const SDNode *, 32> Visited;
06257   SmallVector<const SDNode *, 16> Worklist;
06258   return hasPredecessorHelper(N, Visited, Worklist);
06259 }
06260 
06261 bool
06262 SDNode::hasPredecessorHelper(const SDNode *N,
06263                              SmallPtrSet<const SDNode *, 32> &Visited,
06264                              SmallVectorImpl<const SDNode *> &Worklist) const {
06265   if (Visited.empty()) {
06266     Worklist.push_back(this);
06267   } else {
06268     // Take a look in the visited set. If we've already encountered this node
06269     // we needn't search further.
06270     if (Visited.count(N))
06271       return true;
06272   }
06273 
06274   // Haven't visited N yet. Continue the search.
06275   while (!Worklist.empty()) {
06276     const SDNode *M = Worklist.pop_back_val();
06277     for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
06278       SDNode *Op = M->getOperand(i).getNode();
06279       if (Visited.insert(Op))
06280         Worklist.push_back(Op);
06281       if (Op == N)
06282         return true;
06283     }
06284   }
06285 
06286   return false;
06287 }
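// [Illustrative sketch, not part of SelectionDAG.cpp] When several
// predecessor queries are made against the same node, sharing the Visited set
// and Worklist across calls avoids re-walking operands that were already
// explored by an earlier query.
static bool exampleHasEitherPredecessor(const SDNode *User, const SDNode *A,
                                        const SDNode *B) {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  return User->hasPredecessorHelper(A, Visited, Worklist) ||
         User->hasPredecessorHelper(B, Visited, Worklist);
}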
06288 
06289 uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
06290   assert(Num < NumOperands && "Invalid child # of SDNode!");
06291   return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
06292 }
06293 
06294 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
06295   assert(N->getNumValues() == 1 &&
06296          "Can't unroll a vector with multiple results!");
06297 
06298   EVT VT = N->getValueType(0);
06299   unsigned NE = VT.getVectorNumElements();
06300   EVT EltVT = VT.getVectorElementType();
06301   SDLoc dl(N);
06302 
06303   SmallVector<SDValue, 8> Scalars;
06304   SmallVector<SDValue, 4> Operands(N->getNumOperands());
06305 
06306