PPCISelLowering.cpp
00001 //===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
00002 //
00003 //                     The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 //
00010 // This file implements the PPCISelLowering class.
00011 //
00012 //===----------------------------------------------------------------------===//
00013 
00014 #include "PPCISelLowering.h"
00015 #include "MCTargetDesc/PPCPredicates.h"
00016 #include "PPCCallingConv.h"
00017 #include "PPCMachineFunctionInfo.h"
00018 #include "PPCPerfectShuffle.h"
00019 #include "PPCTargetMachine.h"
00020 #include "PPCTargetObjectFile.h"
00021 #include "llvm/ADT/STLExtras.h"
00022 #include "llvm/ADT/StringSwitch.h"
00023 #include "llvm/ADT/Triple.h"
00024 #include "llvm/CodeGen/CallingConvLower.h"
00025 #include "llvm/CodeGen/MachineFrameInfo.h"
00026 #include "llvm/CodeGen/MachineFunction.h"
00027 #include "llvm/CodeGen/MachineInstrBuilder.h"
00028 #include "llvm/CodeGen/MachineLoopInfo.h"
00029 #include "llvm/CodeGen/MachineRegisterInfo.h"
00030 #include "llvm/CodeGen/SelectionDAG.h"
00031 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
00032 #include "llvm/IR/CallingConv.h"
00033 #include "llvm/IR/Constants.h"
00034 #include "llvm/IR/DerivedTypes.h"
00035 #include "llvm/IR/Function.h"
00036 #include "llvm/IR/Intrinsics.h"
00037 #include "llvm/Support/CommandLine.h"
00038 #include "llvm/Support/ErrorHandling.h"
00039 #include "llvm/Support/MathExtras.h"
00040 #include "llvm/Support/raw_ostream.h"
00041 #include "llvm/Target/TargetOptions.h"
00042 using namespace llvm;
00043 
00044 // FIXME: Remove this once soft-float is supported.
00045 static cl::opt<bool> DisablePPCFloatInVariadic("disable-ppc-float-in-variadic",
00046   cl::desc("disable saving float registers for va_start on PPC"), cl::Hidden);
00047 
00048 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
00049   cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);
00050 
00051 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
00052   cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);
00053 
00054 static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
00055   cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);
00056 
00057 // FIXME: Remove this once the bug has been fixed!
00058 extern cl::opt<bool> ANDIGlueBug;
00059 
00060 PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM)
00061     : TargetLowering(TM),
00062       Subtarget(*TM.getSubtargetImpl()) {
00063   // Use _setjmp/_longjmp instead of setjmp/longjmp.
00064   setUseUnderscoreSetJmp(true);
00065   setUseUnderscoreLongJmp(true);
00066 
00067   // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
00068   // arguments are at least 4/8 bytes aligned.
00069   bool isPPC64 = Subtarget.isPPC64();
00070   setMinStackArgumentAlignment(isPPC64 ? 8 : 4);
00071 
00072   // Set up the register classes.
00073   addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
00074   addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
00075   addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
00076 
00077   // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
00078   for (MVT VT : MVT::integer_valuetypes()) {
00079     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
00080     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
00081   }
00082 
00083   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
00084 
00085   // PowerPC has pre-inc loads and stores.
00086   setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
00087   setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
00088   setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
00089   setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
00090   setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
00091   setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
00092   setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
00093   setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
00094   setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
00095   setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
00096 
00097   if (Subtarget.useCRBits()) {
00098     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
00099 
00100     if (isPPC64 || Subtarget.hasFPCVT()) {
00101       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
00102       AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
00103                          isPPC64 ? MVT::i64 : MVT::i32);
00104       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
00105       AddPromotedToType (ISD::UINT_TO_FP, MVT::i1, 
00106                          isPPC64 ? MVT::i64 : MVT::i32);
00107     } else {
00108       setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
00109       setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
00110     }
00111 
00112   // PowerPC does not support direct load/store of condition registers.
00113     setOperationAction(ISD::LOAD, MVT::i1, Custom);
00114     setOperationAction(ISD::STORE, MVT::i1, Custom);
00115 
00116     // FIXME: Remove this once the ANDI glue bug is fixed:
00117     if (ANDIGlueBug)
00118       setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
00119 
00120     for (MVT VT : MVT::integer_valuetypes()) {
00121       setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
00122       setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
00123       setTruncStoreAction(VT, MVT::i1, Expand);
00124     }
00125 
00126     addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
00127   }
00128 
00129   // This is used in the ppcf128->int sequence.  Note it has different semantics
00130   // from FP_ROUND:  that rounds to nearest, this rounds to zero.
00131   setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);
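  // For example, when lowering (i32 fptosi ppcf128 1.5), the value must first
  // be truncated toward zero so the result is 1; FP_ROUND's round-to-nearest
  // behavior would instead produce 2.0 before the conversion.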
00132 
00133   // We do not currently implement these libm ops for PowerPC.
00134   setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
00135   setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
00136   setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
00137   setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
00138   setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
00139   setOperationAction(ISD::FREM, MVT::ppcf128, Expand);
00140 
00141   // PowerPC has no SREM/UREM instructions
00142   setOperationAction(ISD::SREM, MVT::i32, Expand);
00143   setOperationAction(ISD::UREM, MVT::i32, Expand);
00144   setOperationAction(ISD::SREM, MVT::i64, Expand);
00145   setOperationAction(ISD::UREM, MVT::i64, Expand);
00146 
00147   // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
00148   setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
00149   setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
00150   setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
00151   setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
00152   setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
00153   setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
00154   setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
00155   setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
00156 
00157   // We don't support sin/cos/sqrt/fmod/pow
00158   setOperationAction(ISD::FSIN , MVT::f64, Expand);
00159   setOperationAction(ISD::FCOS , MVT::f64, Expand);
00160   setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
00161   setOperationAction(ISD::FREM , MVT::f64, Expand);
00162   setOperationAction(ISD::FPOW , MVT::f64, Expand);
00163   setOperationAction(ISD::FMA  , MVT::f64, Legal);
00164   setOperationAction(ISD::FSIN , MVT::f32, Expand);
00165   setOperationAction(ISD::FCOS , MVT::f32, Expand);
00166   setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
00167   setOperationAction(ISD::FREM , MVT::f32, Expand);
00168   setOperationAction(ISD::FPOW , MVT::f32, Expand);
00169   setOperationAction(ISD::FMA  , MVT::f32, Legal);
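  // FMA is legal here because PowerPC provides fused multiply-add natively
  // (fmadd/fmadds), while the transcendental and remainder operations above
  // are expanded into libcalls.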
00170 
00171   setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
00172 
00173   // Expand FSQRT unless hardware square root (or a usable reciprocal
00174   // estimate under unsafe FP math) is available.
00175   if (!Subtarget.hasFSQRT() &&
00175       !(TM.Options.UnsafeFPMath &&
00176         Subtarget.hasFRSQRTE() && Subtarget.hasFRE()))
00177     setOperationAction(ISD::FSQRT, MVT::f64, Expand);
00178 
00179   if (!Subtarget.hasFSQRT() &&
00180       !(TM.Options.UnsafeFPMath &&
00181         Subtarget.hasFRSQRTES() && Subtarget.hasFRES()))
00182     setOperationAction(ISD::FSQRT, MVT::f32, Expand);
00183 
00184   if (Subtarget.hasFCPSGN()) {
00185     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
00186     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
00187   } else {
00188     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
00189     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
00190   }
00191 
00192   if (Subtarget.hasFPRND()) {
00193     setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
00194     setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
00195     setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
00196     setOperationAction(ISD::FROUND, MVT::f64, Legal);
00197 
00198     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
00199     setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
00200     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
00201     setOperationAction(ISD::FROUND, MVT::f32, Legal);
00202   }
00203 
00204   // PowerPC does not have BSWAP, CTPOP or CTTZ
00205   setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
00206   setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
00207   setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
00208   setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
00209   setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
00210   setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
00211   setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
00212   setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
00213 
00214   if (Subtarget.hasPOPCNTD()) {
00215     setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
00216     setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
00217   } else {
00218     setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
00219     setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
00220   }
00221 
00222   // PowerPC does not have ROTR
00223   setOperationAction(ISD::ROTR, MVT::i32   , Expand);
00224   setOperationAction(ISD::ROTR, MVT::i64   , Expand);
00225 
00226   if (!Subtarget.useCRBits()) {
00227     // PowerPC does not have Select
00228     setOperationAction(ISD::SELECT, MVT::i32, Expand);
00229     setOperationAction(ISD::SELECT, MVT::i64, Expand);
00230     setOperationAction(ISD::SELECT, MVT::f32, Expand);
00231     setOperationAction(ISD::SELECT, MVT::f64, Expand);
00232   }
00233 
00234   // PowerPC wants to turn select_cc of FP into fsel when possible.
00235   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
00236   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
00237 
00238   // PowerPC wants to optimize integer setcc a bit
00239   if (!Subtarget.useCRBits())
00240     setOperationAction(ISD::SETCC, MVT::i32, Custom);
00241 
00242   // PowerPC does not have BRCOND which requires SetCC
00243   if (!Subtarget.useCRBits())
00244     setOperationAction(ISD::BRCOND, MVT::Other, Expand);
00245 
00246   setOperationAction(ISD::BR_JT,  MVT::Other, Expand);
00247 
00248   // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
00249   setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
00250 
00251   // PowerPC does not have [U|S]INT_TO_FP
00252   setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
00253   setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
00254 
00255   setOperationAction(ISD::BITCAST, MVT::f32, Expand);
00256   setOperationAction(ISD::BITCAST, MVT::i32, Expand);
00257   setOperationAction(ISD::BITCAST, MVT::i64, Expand);
00258   setOperationAction(ISD::BITCAST, MVT::f64, Expand);
00259 
00260   // We cannot sextinreg(i1).  Expand to shifts.
00261   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
00262 
00263   // NOTE: EH_SJLJ_SETJMP/_LONGJMP are NOT intended to support SjLj exception
00264   // handling; they are a light-weight setjmp/longjmp replacement used to
00265   // support continuations, user-level threading, and the like. As a result,
00266   // no other SjLj exception interfaces are implemented; please don't build
00267   // your own exception handling on top of them.
00268   // LLVM/Clang supports zero-cost DWARF exception handling.
00269   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
00270   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
00271 
00272   // We want to legalize GlobalAddress and ConstantPool nodes into the
00273   // appropriate instructions to materialize the address.
00274   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
00275   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
00276   setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
00277   setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
00278   setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
00279   setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
00280   setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
00281   setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
00282   setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
00283   setOperationAction(ISD::JumpTable,     MVT::i64, Custom);
00284 
00285   // TRAP is legal.
00286   setOperationAction(ISD::TRAP, MVT::Other, Legal);
00287 
00288   // TRAMPOLINE is custom lowered.
00289   setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
00290   setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
00291 
00292   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
00293   setOperationAction(ISD::VASTART           , MVT::Other, Custom);
00294 
00295   if (Subtarget.isSVR4ABI()) {
00296     if (isPPC64) {
00297       // VAARG always uses double-word chunks, so promote anything smaller.
00298       setOperationAction(ISD::VAARG, MVT::i1, Promote);
00299       AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
00300       setOperationAction(ISD::VAARG, MVT::i8, Promote);
00301       AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
00302       setOperationAction(ISD::VAARG, MVT::i16, Promote);
00303       AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
00304       setOperationAction(ISD::VAARG, MVT::i32, Promote);
00305       AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
00306       setOperationAction(ISD::VAARG, MVT::Other, Expand);
00307     } else {
00308       // VAARG is custom lowered with the 32-bit SVR4 ABI.
00309       setOperationAction(ISD::VAARG, MVT::Other, Custom);
00310       setOperationAction(ISD::VAARG, MVT::i64, Custom);
00311     }
00312   } else
00313     setOperationAction(ISD::VAARG, MVT::Other, Expand);
00314 
00315   if (Subtarget.isSVR4ABI() && !isPPC64)
00316     // VACOPY is custom lowered with the 32-bit SVR4 ABI.
00317     setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
00318   else
00319     setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
00320 
00321   // Use the default implementation.
00322   setOperationAction(ISD::VAEND             , MVT::Other, Expand);
00323   setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
00324   setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
00325   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
00326   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
00327 
00328   // We want to custom lower some of our intrinsics.
00329   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
00330 
00331   // To handle counter-based loop conditions.
00332   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);
00333 
00334   // Comparisons that require checking two conditions.
00335   setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
00336   setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
00337   setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
00338   setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
00339   setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
00340   setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
00341   setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
00342   setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
00343   setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
00344   setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
00345   setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
00346   setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
00347 
00348   if (Subtarget.has64BitSupport()) {
00349     // Targets with 64-bit support also have instructions for converting
00350     // between i64 and fp.
00350     setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
00351     setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
00352     setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
00353     setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
00354     // This is just the low 32 bits of a (signed) fp->i64 conversion.
00355     // We cannot do this with Promote because i64 is not a legal type.
00356     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
00357 
00358     if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
00359       setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
00360   } else {
00361     // PowerPC does not have FP_TO_UINT on 32-bit implementations.
00362     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
00363   }
00364 
00365   // With the instructions enabled under FPCVT, we can do everything.
00366   if (Subtarget.hasFPCVT()) {
00367     if (Subtarget.has64BitSupport()) {
00368       setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
00369       setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
00370       setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
00371       setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
00372     }
00373 
00374     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
00375     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
00376     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
00377     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
00378   }
00379 
00380   if (Subtarget.use64BitRegs()) {
00381     // 64-bit PowerPC implementations can support i64 types directly
00382     addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
00383     // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
00384     setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
00385     // 64-bit PowerPC wants to expand i128 shifts itself.
00386     setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
00387     setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
00388     setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
00389   } else {
00390     // 32-bit PowerPC wants to expand i64 shifts itself.
00391     setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
00392     setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
00393     setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
00394   }
00395 
00396   if (Subtarget.hasAltivec()) {
00397     // First set operation action for all vector types to expand. Then we
00398     // will selectively turn on ones that can be effectively codegen'd.
00399     for (MVT VT : MVT::vector_valuetypes()) {
00400       // add/sub are legal for all supported vector VTs.
00401       setOperationAction(ISD::ADD , VT, Legal);
00402       setOperationAction(ISD::SUB , VT, Legal);
00403 
00404       // We promote all shuffles to v16i8.
00405       setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
00406       AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
00407 
00408       // We promote all non-typed operations to v4i32.
00409       setOperationAction(ISD::AND   , VT, Promote);
00410       AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
00411       setOperationAction(ISD::OR    , VT, Promote);
00412       AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
00413       setOperationAction(ISD::XOR   , VT, Promote);
00414       AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
00415       setOperationAction(ISD::LOAD  , VT, Promote);
00416       AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
00417       setOperationAction(ISD::SELECT, VT, Promote);
00418       AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
00419       setOperationAction(ISD::STORE, VT, Promote);
00420       AddPromotedToType (ISD::STORE, VT, MVT::v4i32);
00421 
00422       // No other operations are legal.
00423       setOperationAction(ISD::MUL , VT, Expand);
00424       setOperationAction(ISD::SDIV, VT, Expand);
00425       setOperationAction(ISD::SREM, VT, Expand);
00426       setOperationAction(ISD::UDIV, VT, Expand);
00427       setOperationAction(ISD::UREM, VT, Expand);
00428       setOperationAction(ISD::FDIV, VT, Expand);
00429       setOperationAction(ISD::FREM, VT, Expand);
00430       setOperationAction(ISD::FNEG, VT, Expand);
00431       setOperationAction(ISD::FSQRT, VT, Expand);
00432       setOperationAction(ISD::FLOG, VT, Expand);
00433       setOperationAction(ISD::FLOG10, VT, Expand);
00434       setOperationAction(ISD::FLOG2, VT, Expand);
00435       setOperationAction(ISD::FEXP, VT, Expand);
00436       setOperationAction(ISD::FEXP2, VT, Expand);
00437       setOperationAction(ISD::FSIN, VT, Expand);
00438       setOperationAction(ISD::FCOS, VT, Expand);
00439       setOperationAction(ISD::FABS, VT, Expand);
00440       setOperationAction(ISD::FPOWI, VT, Expand);
00441       setOperationAction(ISD::FFLOOR, VT, Expand);
00442       setOperationAction(ISD::FCEIL,  VT, Expand);
00443       setOperationAction(ISD::FTRUNC, VT, Expand);
00444       setOperationAction(ISD::FRINT,  VT, Expand);
00445       setOperationAction(ISD::FNEARBYINT, VT, Expand);
00446       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
00447       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
00448       setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
00449       setOperationAction(ISD::MULHU, VT, Expand);
00450       setOperationAction(ISD::MULHS, VT, Expand);
00451       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
00452       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
00453       setOperationAction(ISD::UDIVREM, VT, Expand);
00454       setOperationAction(ISD::SDIVREM, VT, Expand);
00455       setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
00456       setOperationAction(ISD::FPOW, VT, Expand);
00457       setOperationAction(ISD::BSWAP, VT, Expand);
00458       setOperationAction(ISD::CTPOP, VT, Expand);
00459       setOperationAction(ISD::CTLZ, VT, Expand);
00460       setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
00461       setOperationAction(ISD::CTTZ, VT, Expand);
00462       setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
00463       setOperationAction(ISD::VSELECT, VT, Expand);
00464       setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
00465 
00466       for (MVT InnerVT : MVT::vector_valuetypes()) {
00467         setTruncStoreAction(VT, InnerVT, Expand);
00468         setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
00469         setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
00470         setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
00471       }
00472     }
00473 
00474     // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
00475     // with merges, splats, etc.
00476     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
00477 
00478     setOperationAction(ISD::AND   , MVT::v4i32, Legal);
00479     setOperationAction(ISD::OR    , MVT::v4i32, Legal);
00480     setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
00481     setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
00482     setOperationAction(ISD::SELECT, MVT::v4i32,
00483                        Subtarget.useCRBits() ? Legal : Expand);
00484     setOperationAction(ISD::STORE , MVT::v4i32, Legal);
00485     setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
00486     setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
00487     setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
00488     setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
00489     setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
00490     setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
00491     setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
00492     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
00493 
00494     addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
00495     addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
00496     addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
00497     addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);
00498 
00499     setOperationAction(ISD::MUL, MVT::v4f32, Legal);
00500     setOperationAction(ISD::FMA, MVT::v4f32, Legal);
00501 
00502     if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
00503       setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
00504       setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
00505     }
00506 
00507     setOperationAction(ISD::MUL, MVT::v4i32, Custom);
00508     setOperationAction(ISD::MUL, MVT::v8i16, Custom);
00509     setOperationAction(ISD::MUL, MVT::v16i8, Custom);
00510 
00511     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
00512     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
00513 
00514     setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
00515     setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
00516     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
00517     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
00518 
00519     // Altivec does not contain unordered floating-point compare instructions
00520     setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
00521     setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
00522     setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
00523     setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);
00524 
00525     if (Subtarget.hasVSX()) {
00526       setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
00527       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
00528 
00529       setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
00530       setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
00531       setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
00532       setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
00533       setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
00534 
00535       setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
00536 
00537       setOperationAction(ISD::MUL, MVT::v2f64, Legal);
00538       setOperationAction(ISD::FMA, MVT::v2f64, Legal);
00539 
00540       setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
00541       setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
00542 
00543       setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
00544       setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
00545       setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
00546       setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
00547       setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);
00548 
00549       // Share the Altivec comparison restrictions.
00550       setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
00551       setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
00552       setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
00553       setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);
00554 
00555       setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
00556       setOperationAction(ISD::STORE, MVT::v2f64, Legal);
00557 
00558       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);
00559 
00560       addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);
00561 
00562       addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
00563       addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);
00564 
00565       // VSX v2i64 only supports non-arithmetic operations.
00566       setOperationAction(ISD::ADD, MVT::v2i64, Expand);
00567       setOperationAction(ISD::SUB, MVT::v2i64, Expand);
00568 
00569       setOperationAction(ISD::SHL, MVT::v2i64, Expand);
00570       setOperationAction(ISD::SRA, MVT::v2i64, Expand);
00571       setOperationAction(ISD::SRL, MVT::v2i64, Expand);
00572 
00573       setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
00574 
00575       setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
00576       AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
00577       setOperationAction(ISD::STORE, MVT::v2i64, Promote);
00578       AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);
00579 
00580       setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);
00581 
00582       setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
00583       setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
00584       setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
00585       setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
00586 
00587       // Vector operation legalization checks the result type of
00588       // SIGN_EXTEND_INREG, overall legalization checks the inner type.
00589       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
00590       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
00591       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
00592       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
00593 
00594       addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
00595     }
00596   }
00597 
00598   if (Subtarget.has64BitSupport())
00599     setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
00600 
00601   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
00602 
00603   if (!isPPC64) {
00604     setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
00605     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
00606   }
00607 
00608   setBooleanContents(ZeroOrOneBooleanContent);
00609   // Altivec instructions set fields to all zeros or all ones.
00610   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
00611 
00612   if (!isPPC64) {
00613     // These libcalls are not available in 32-bit mode.
00614     setLibcallName(RTLIB::SHL_I128, nullptr);
00615     setLibcallName(RTLIB::SRL_I128, nullptr);
00616     setLibcallName(RTLIB::SRA_I128, nullptr);
00617   }
00618 
00619   if (isPPC64) {
00620     setStackPointerRegisterToSaveRestore(PPC::X1);
00621     setExceptionPointerRegister(PPC::X3);
00622     setExceptionSelectorRegister(PPC::X4);
00623   } else {
00624     setStackPointerRegisterToSaveRestore(PPC::R1);
00625     setExceptionPointerRegister(PPC::R3);
00626     setExceptionSelectorRegister(PPC::R4);
00627   }
00628 
00629   // We have target-specific dag combine patterns for the following nodes:
00630   setTargetDAGCombine(ISD::SINT_TO_FP);
00631   if (Subtarget.hasFPCVT())
00632     setTargetDAGCombine(ISD::UINT_TO_FP);
00633   setTargetDAGCombine(ISD::LOAD);
00634   setTargetDAGCombine(ISD::STORE);
00635   setTargetDAGCombine(ISD::BR_CC);
00636   if (Subtarget.useCRBits())
00637     setTargetDAGCombine(ISD::BRCOND);
00638   setTargetDAGCombine(ISD::BSWAP);
00639   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
00640   setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
00641   setTargetDAGCombine(ISD::INTRINSIC_VOID);
00642 
00643   setTargetDAGCombine(ISD::SIGN_EXTEND);
00644   setTargetDAGCombine(ISD::ZERO_EXTEND);
00645   setTargetDAGCombine(ISD::ANY_EXTEND);
00646 
00647   if (Subtarget.useCRBits()) {
00648     setTargetDAGCombine(ISD::TRUNCATE);
00649     setTargetDAGCombine(ISD::SETCC);
00650     setTargetDAGCombine(ISD::SELECT_CC);
00651   }
00652 
00653   // Use reciprocal estimates.
00654   if (TM.Options.UnsafeFPMath) {
00655     setTargetDAGCombine(ISD::FDIV);
00656     setTargetDAGCombine(ISD::FSQRT);
00657   }
00658 
00659   // Darwin long double math library functions have $LDBL128 appended.
00660   if (Subtarget.isDarwin()) {
00661     setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
00662     setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
00663     setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
00664     setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
00665     setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
00666     setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
00667     setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
00668     setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
00669     setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
00670     setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
00671   }
00672 
00673   // With 32 condition bits, we don't need to sink (and duplicate) compares
00674   // aggressively in CodeGenPrep.
00675   if (Subtarget.useCRBits())
00676     setHasMultipleConditionRegisters();
00677 
00678   setMinFunctionAlignment(2);
00679   if (Subtarget.isDarwin())
00680     setPrefFunctionAlignment(4);
00681 
00682   switch (Subtarget.getDarwinDirective()) {
00683   default: break;
00684   case PPC::DIR_970:
00685   case PPC::DIR_A2:
00686   case PPC::DIR_E500mc:
00687   case PPC::DIR_E5500:
00688   case PPC::DIR_PWR4:
00689   case PPC::DIR_PWR5:
00690   case PPC::DIR_PWR5X:
00691   case PPC::DIR_PWR6:
00692   case PPC::DIR_PWR6X:
00693   case PPC::DIR_PWR7:
00694   case PPC::DIR_PWR8:
00695     setPrefFunctionAlignment(4);
00696     setPrefLoopAlignment(4);
00697     break;
00698   }
00699 
00700   setInsertFencesForAtomic(true);
00701 
00702   if (Subtarget.enableMachineScheduler())
00703     setSchedulingPreference(Sched::Source);
00704   else
00705     setSchedulingPreference(Sched::Hybrid);
00706 
00707   computeRegisterProperties();
00708 
00709   // The Freescale cores do better with aggressive inlining of memcpy and
00710   // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
00711   if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
00712       Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
00713     MaxStoresPerMemset = 32;
00714     MaxStoresPerMemsetOptSize = 16;
00715     MaxStoresPerMemcpy = 32;
00716     MaxStoresPerMemcpyOptSize = 8;
00717     MaxStoresPerMemmove = 32;
00718     MaxStoresPerMemmoveOptSize = 8;
00719   }
00720 }
00721 
00722 /// getMaxByValAlign - Helper for getByValTypeAlignment to determine
00723 /// the desired ByVal argument alignment.
00724 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
00725                              unsigned MaxMaxAlign) {
00726   if (MaxAlign == MaxMaxAlign)
00727     return;
00728   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
00729     if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
00730       MaxAlign = 32;
00731     else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
00732       MaxAlign = 16;
00733   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
00734     unsigned EltAlign = 0;
00735     getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
00736     if (EltAlign > MaxAlign)
00737       MaxAlign = EltAlign;
00738   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
00739     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
00740       unsigned EltAlign = 0;
00741       getMaxByValAlign(STy->getElementType(i), EltAlign, MaxMaxAlign);
00742       if (EltAlign > MaxAlign)
00743         MaxAlign = EltAlign;
00744       if (MaxAlign == MaxMaxAlign)
00745         break;
00746     }
00747   }
00748 }
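// For example, with MaxMaxAlign == 16, a struct such as { i32, <4 x i32> }
// ends with MaxAlign == 16: the recursion visits the 128-bit vector member,
// raises the running maximum to 16, and the early-exit check then stops any
// further walking.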
00749 
00750 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
00751 /// function arguments in the caller parameter area.
00752 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty) const {
00753   // Darwin passes everything on 4 byte boundary.
00754   if (Subtarget.isDarwin())
00755     return 4;
00756 
00757   // 16-byte and wider vectors are passed on a 16-byte boundary.
00758   // Everything else is aligned to 8 bytes on PPC64 and 4 bytes on PPC32.
00759   unsigned Align = Subtarget.isPPC64() ? 8 : 4;
00760   if (Subtarget.hasAltivec() || Subtarget.hasQPX())
00761     getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
00762   return Align;
00763 }
00764 
00765 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
00766   switch (Opcode) {
00767   default: return nullptr;
00768   case PPCISD::FSEL:            return "PPCISD::FSEL";
00769   case PPCISD::FCFID:           return "PPCISD::FCFID";
00770   case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
00771   case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
00772   case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
00773   case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
00774   case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
00775   case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
00776   case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
00777   case PPCISD::FRE:             return "PPCISD::FRE";
00778   case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
00779   case PPCISD::STFIWX:          return "PPCISD::STFIWX";
00780   case PPCISD::VMADDFP:         return "PPCISD::VMADDFP";
00781   case PPCISD::VNMSUBFP:        return "PPCISD::VNMSUBFP";
00782   case PPCISD::VPERM:           return "PPCISD::VPERM";
00783   case PPCISD::CMPB:            return "PPCISD::CMPB";
00784   case PPCISD::Hi:              return "PPCISD::Hi";
00785   case PPCISD::Lo:              return "PPCISD::Lo";
00786   case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
00787   case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
00788   case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
00789   case PPCISD::SRL:             return "PPCISD::SRL";
00790   case PPCISD::SRA:             return "PPCISD::SRA";
00791   case PPCISD::SHL:             return "PPCISD::SHL";
00792   case PPCISD::CALL:            return "PPCISD::CALL";
00793   case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
00794   case PPCISD::CALL_TLS:        return "PPCISD::CALL_TLS";
00795   case PPCISD::CALL_NOP_TLS:    return "PPCISD::CALL_NOP_TLS";
00796   case PPCISD::MTCTR:           return "PPCISD::MTCTR";
00797   case PPCISD::BCTRL:           return "PPCISD::BCTRL";
00798   case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
00799   case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
00800   case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
00801   case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
00802   case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
00803   case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
00804   case PPCISD::VCMP:            return "PPCISD::VCMP";
00805   case PPCISD::VCMPo:           return "PPCISD::VCMPo";
00806   case PPCISD::LBRX:            return "PPCISD::LBRX";
00807   case PPCISD::STBRX:           return "PPCISD::STBRX";
00808   case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
00809   case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
00810   case PPCISD::LARX:            return "PPCISD::LARX";
00811   case PPCISD::STCX:            return "PPCISD::STCX";
00812   case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
00813   case PPCISD::BDNZ:            return "PPCISD::BDNZ";
00814   case PPCISD::BDZ:             return "PPCISD::BDZ";
00815   case PPCISD::MFFS:            return "PPCISD::MFFS";
00816   case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
00817   case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
00818   case PPCISD::CR6SET:          return "PPCISD::CR6SET";
00819   case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
00820   case PPCISD::ADDIS_TOC_HA:    return "PPCISD::ADDIS_TOC_HA";
00821   case PPCISD::LD_TOC_L:        return "PPCISD::LD_TOC_L";
00822   case PPCISD::ADDI_TOC_L:      return "PPCISD::ADDI_TOC_L";
00823   case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
00824   case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
00825   case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
00826   case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
00827   case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
00828   case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
00829   case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
00830   case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
00831   case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
00832   case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
00833   case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
00834   case PPCISD::SC:              return "PPCISD::SC";
00835   }
00836 }
00837 
00838 EVT PPCTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
00839   if (!VT.isVector())
00840     return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
00841   return VT.changeVectorElementTypeToInteger();
00842 }
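// For example, a setcc on v4f32 produces v4i32 here, while a scalar compare
// produces i1 when CR bits are in use and i32 otherwise.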
00843 
00844 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
00845   assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
00846   return true;
00847 }
00848 
00849 //===----------------------------------------------------------------------===//
00850 // Node matching predicates, for use by the tblgen matching code.
00851 //===----------------------------------------------------------------------===//
00852 
00853 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
00854 static bool isFloatingPointZero(SDValue Op) {
00855   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
00856     return CFP->getValueAPF().isZero();
00857   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
00858     // Maybe this has already been legalized into the constant pool?
00859     if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
00860       if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
00861         return CFP->getValueAPF().isZero();
00862   }
00863   return false;
00864 }
00865 
00866 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
00867 /// true if Op is undef or if it matches the specified value.
00868 static bool isConstantOrUndef(int Op, int Val) {
00869   return Op < 0 || Op == Val;
00870 }
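// Undef mask elements are encoded as negative values, so for instance
// isConstantOrUndef(-1, 5) and isConstantOrUndef(5, 5) both return true,
// while isConstantOrUndef(4, 5) does not.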
00871 
00872 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
00873 /// VPKUHUM instruction.
00874 /// The ShuffleKind distinguishes between big-endian operations with
00875 /// two different inputs (0), either-endian operations with two identical
00876 /// inputs (1), and little-endian operations with two different inputs (2).
00877 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
00878 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
00879                                SelectionDAG &DAG) {
00880   bool IsLE = DAG.getTarget().getDataLayout()->isLittleEndian();
00881   if (ShuffleKind == 0) {
00882     if (IsLE)
00883       return false;
00884     for (unsigned i = 0; i != 16; ++i)
00885       if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
00886         return false;
00887   } else if (ShuffleKind == 2) {
00888     if (!IsLE)
00889       return false;
00890     for (unsigned i = 0; i != 16; ++i)
00891       if (!isConstantOrUndef(N->getMaskElt(i), i*2))
00892         return false;
00893   } else if (ShuffleKind == 1) {
00894     unsigned j = IsLE ? 0 : 1;
00895     for (unsigned i = 0; i != 8; ++i)
00896       if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j) ||
00897           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j))
00898         return false;
00899   }
00900   return true;
00901 }
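// As an illustration, the big-endian two-input form (ShuffleKind 0) accepts
// only the odd-byte selection mask
//   <1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31>,
// i.e. the low byte of every halfword across both concatenated inputs, which
// is exactly what vpkuhum produces.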
00902 
00903 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
00904 /// VPKUWUM instruction.
00905 /// The ShuffleKind distinguishes between big-endian operations with
00906 /// two different inputs (0), either-endian operations with two identical
00907 /// inputs (1), and little-endian operations with two different inputs (2).
00908 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
00909 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
00910                                SelectionDAG &DAG) {
00911   bool IsLE = DAG.getTarget().getDataLayout()->isLittleEndian();
00912   if (ShuffleKind == 0) {
00913     if (IsLE)
00914       return false;
00915     for (unsigned i = 0; i != 16; i += 2)
00916       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
00917           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
00918         return false;
00919   } else if (ShuffleKind == 2) {
00920     if (!IsLE)
00921       return false;
00922     for (unsigned i = 0; i != 16; i += 2)
00923       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
00924           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
00925         return false;
00926   } else if (ShuffleKind == 1) {
00927     unsigned j = IsLE ? 0 : 2;
00928     for (unsigned i = 0; i != 8; i += 2)
00929       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
00930           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
00931           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
00932           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
00933         return false;
00934   }
00935   return true;
00936 }
00937 
00938 /// isVMerge - Common function, used to match vmrg* shuffles.
00939 ///
00940 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
00941                      unsigned LHSStart, unsigned RHSStart) {
00942   if (N->getValueType(0) != MVT::v16i8)
00943     return false;
00944   assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
00945          "Unsupported merge size!");
00946 
00947   for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
00948     for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
00949       if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
00950                              LHSStart+j+i*UnitSize) ||
00951           !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
00952                              RHSStart+j+i*UnitSize))
00953         return false;
00954     }
00955   return true;
00956 }
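// For instance, a big-endian vmrglb (UnitSize 1, LHSStart 8, RHSStart 24)
// matches the interleaving mask
//   <8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31>.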
00957 
00958 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
00959 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
00960 /// The ShuffleKind distinguishes between big-endian merges with two 
00961 /// different inputs (0), either-endian merges with two identical inputs (1),
00962 /// and little-endian merges with two different inputs (2).  For the latter,
00963 /// the input operands are swapped (see PPCInstrAltivec.td).
00964 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
00965                              unsigned ShuffleKind, SelectionDAG &DAG) {
00966   if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
00967     if (ShuffleKind == 1) // unary
00968       return isVMerge(N, UnitSize, 0, 0);
00969     else if (ShuffleKind == 2) // swapped
00970       return isVMerge(N, UnitSize, 0, 16);
00971     else
00972       return false;
00973   } else {
00974     if (ShuffleKind == 1) // unary
00975       return isVMerge(N, UnitSize, 8, 8);
00976     else if (ShuffleKind == 0) // normal
00977       return isVMerge(N, UnitSize, 8, 24);
00978     else
00979       return false;
00980   }
00981 }
00982 
00983 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
00984 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
00985 /// The ShuffleKind distinguishes between big-endian merges with two 
00986 /// different inputs (0), either-endian merges with two identical inputs (1),
00987 /// and little-endian merges with two different inputs (2).  For the latter,
00988 /// the input operands are swapped (see PPCInstrAltivec.td).
00989 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
00990                              unsigned ShuffleKind, SelectionDAG &DAG) {
00991   if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
00992     if (ShuffleKind == 1) // unary
00993       return isVMerge(N, UnitSize, 8, 8);
00994     else if (ShuffleKind == 2) // swapped
00995       return isVMerge(N, UnitSize, 8, 24);
00996     else
00997       return false;
00998   } else {
00999     if (ShuffleKind == 1) // unary
01000       return isVMerge(N, UnitSize, 0, 0);
01001     else if (ShuffleKind == 0) // normal
01002       return isVMerge(N, UnitSize, 0, 16);
01003     else
01004       return false;
01005   }
01006 }
01007 
01008 
01009 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
01010 /// amount, otherwise return -1.
01011 /// The ShuffleKind distinguishes between big-endian operations with two 
01012 /// different inputs (0), either-endian operations with two identical inputs
01013 /// (1), and little-endian operations with two different inputs (2).  For the
01014 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
01015 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
01016                              SelectionDAG &DAG) {
01017   if (N->getValueType(0) != MVT::v16i8)
01018     return -1;
01019 
01020   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
01021 
01022   // Find the first non-undef value in the shuffle mask.
01023   unsigned i;
01024   for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
01025     /*search*/;
01026 
01027   if (i == 16) return -1;  // all undef.
01028 
01029   // Otherwise, check to see if the rest of the elements are consecutively
01030   // numbered from this value.
01031   unsigned ShiftAmt = SVOp->getMaskElt(i);
01032   if (ShiftAmt < i) return -1;
01033 
01034   ShiftAmt -= i;
01035   bool isLE = DAG.getTarget().getDataLayout()->isLittleEndian();
01036 
01037   if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
01038     // Check the rest of the elements to see if they are consecutive.
01039     for (++i; i != 16; ++i)
01040       if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
01041         return -1;
01042   } else if (ShuffleKind == 1) {
01043     // Check the rest of the elements to see if they are consecutive.
01044     for (++i; i != 16; ++i)
01045       if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
01046         return -1;
01047   } else
01048     return -1;
01049 
01050   if (ShuffleKind == 2 && isLE)
01051     ShiftAmt = 16 - ShiftAmt;
01052 
01053   return ShiftAmt;
01054 }
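// For example, the big-endian two-input mask <3, 4, 5, ..., 18> is a vsldoi
// with a shift amount of 3; on little-endian (ShuffleKind 2) the same mask
// is reported as 16 - 3 = 13 to account for the swapped operands.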
01055 
01056 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
01057 /// specifies a splat of a single element that is suitable for input to
01058 /// VSPLTB/VSPLTH/VSPLTW.
01059 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
01060   assert(N->getValueType(0) == MVT::v16i8 &&
01061          (EltSize == 1 || EltSize == 2 || EltSize == 4));
01062 
01063   // This is a splat operation if each element of the permute is the same, and
01064   // if the value doesn't reference the second vector.
01065   unsigned ElementBase = N->getMaskElt(0);
01066 
01067   // FIXME: Handle UNDEF elements too!
01068   if (ElementBase >= 16)
01069     return false;
01070 
01071   // Check that the indices are consecutive, in the case of a multi-byte element
01072   // splatted with a v16i8 mask.
01073   for (unsigned i = 1; i != EltSize; ++i)
01074     if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
01075       return false;
01076 
01077   for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
01078     if (N->getMaskElt(i) < 0) continue;
01079     for (unsigned j = 0; j != EltSize; ++j)
01080       if (N->getMaskElt(i+j) != N->getMaskElt(j))
01081         return false;
01082   }
01083   return true;
01084 }
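// For example, with EltSize == 4 the mask
//   <4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7>
// is accepted: it splats word element 1 of the first input, suitable for
// vspltw.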
01085 
01086 /// isAllNegativeZeroVector - Returns true if all elements of build_vector
01087 /// are -0.0.
01088 bool PPC::isAllNegativeZeroVector(SDNode *N) {
01089   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
01090 
01091   APInt APVal, APUndef;
01092   unsigned BitSize;
01093   bool HasAnyUndefs;
01094 
01095   if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32, true))
01096     if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
01097       return CFP->getValueAPF().isNegZero();
01098 
01099   return false;
01100 }
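// For example, (v4f32 build_vector -0.0, -0.0, -0.0, -0.0) is recognized
// here; once a 32-bit constant splat is established, checking operand 0
// suffices for all lanes.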
01101 
01102 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
01103 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
01104 unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
01105                                 SelectionDAG &DAG) {
01106   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
01107   assert(isSplatShuffleMask(SVOp, EltSize));
01108   if (DAG.getTarget().getDataLayout()->isLittleEndian())
01109     return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
01110   else
01111     return SVOp->getMaskElt(0) / EltSize;
01112 }
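// For the splat mask <4, 5, 6, 7, 4, 5, 6, 7, ...> with EltSize == 4, this
// returns 1 on big-endian targets (element 4 / EltSize 4) and
// (16/4) - 1 - 1 == 2 on little-endian, reflecting the reversed element
// numbering.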
01113 
01114 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
01115 /// by using a vspltis[bhw] instruction of the specified element size, return
01116 /// the constant being splatted.  The ByteSize field indicates the number of
01117 /// bytes of each element [124] -> [bhw].
01118 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
01119   SDValue OpVal(nullptr, 0);
01120 
01121   // If ByteSize of the splat is bigger than the element size of the
01122   // build_vector, then we have a case where we are checking for a splat where
01123   // multiple elements of the buildvector are folded together into a single
01124   // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
01125   unsigned EltSize = 16/N->getNumOperands();
01126   if (EltSize < ByteSize) {
01127     unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
01128     SDValue UniquedVals[4];
01129     assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
01130 
01131     // See if the buildvector elements agree in each chunk position.
01132     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
01133       if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
01134       // If the element isn't a constant, bail fully out.
01135       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
01136 
01137 
01138       if (!UniquedVals[i&(Multiple-1)].getNode())
01139         UniquedVals[i&(Multiple-1)] = N->getOperand(i);
01140       else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
01141         return SDValue();  // no match.
01142     }
01143 
01144     // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
01145     // either constant or undef values that are identical for each chunk.  See
01146     // if these chunks can form into a larger vspltis*.
01147 
01148     // Check to see if all of the leading entries are either 0 or -1.  If
01149     // neither, then this won't fit into the immediate field.
01150     bool LeadingZero = true;
01151     bool LeadingOnes = true;
01152     for (unsigned i = 0; i != Multiple-1; ++i) {
01153       if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.
01154 
01155       LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
01156       LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
01157     }
01158     // Finally, check the least significant entry.
01159     if (LeadingZero) {
01160       if (!UniquedVals[Multiple-1].getNode())
01161         return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
01162       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
01163       if (Val < 16)
01164         return DAG.getTargetConstant(Val, MVT::i32);  // 0,0,0,4 -> vspltisw(4)
01165     }
01166     if (LeadingOnes) {
01167       if (!UniquedVals[Multiple-1].getNode())
01168         return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
01169       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
01170       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
01171         return DAG.getTargetConstant(Val, MVT::i32);
01172     }
01173 
01174     return SDValue();
01175   }
01176 
01177   // Check to see if this buildvec has a single non-undef value in its elements.
01178   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
01179     if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
01180     if (!OpVal.getNode())
01181       OpVal = N->getOperand(i);
01182     else if (OpVal != N->getOperand(i))
01183       return SDValue();
01184   }
01185 
01186   if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.
01187 
01188   unsigned ValSizeInBytes = EltSize;
01189   uint64_t Value = 0;
01190   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
01191     Value = CN->getZExtValue();
01192   } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
01193     assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
01194     Value = FloatToBits(CN->getValueAPF().convertToFloat());
01195   }
01196 
01197   // If the splat value is larger than the element value, then we can never do
01198   // this splat.  The only replicated value that could fit into the immediate
01199   // field would be zero, and we prefer to use vxor for that case.
01200   if (ValSizeInBytes < ByteSize) return SDValue();
01201 
01202   // If the element value is larger than the splat value, cut it in half and
01203   // check to see if the two halves are equal.  Continue doing this until we
01204   // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
01205   while (ValSizeInBytes > ByteSize) {
01206     ValSizeInBytes >>= 1;
01207 
01208     // If the top half equals the bottom half, we're still ok.
01209     if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
01210          (Value                        & ((1 << (8*ValSizeInBytes))-1)))
01211       return SDValue();
01212   }
01213 
01214   // Properly sign extend the value.
01215   int MaskVal = SignExtend32(Value, ByteSize * 8);
01216 
01217   // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
01218   if (MaskVal == 0) return SDValue();
01219 
01220   // Finally, if this value fits in a 5 bit sext field, return it
01221   if (SignExtend32<5>(MaskVal) == MaskVal)
01222     return DAG.getTargetConstant(MaskVal, MVT::i32);
01223   return SDValue();
01224 }
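// A worked instance of the halving loop above (a sketch, reusing the same
// Value/ValSizeInBytes/ByteSize names): splatting 0x01010101 with
// ByteSize == 1 starts at ValSizeInBytes == 4; the loop compares
// 0x0101 == 0x0101, then 0x01 == 0x01, so the value reduces to 0x01 and a
// single "vspltisb 1" suffices.  In plain C++ terms:
//
//   while (ValSizeInBytes > ByteSize) {
//     ValSizeInBytes >>= 1;
//     uint64_t Mask = (1ULL << (8 * ValSizeInBytes)) - 1;
//     if (((Value >> (8 * ValSizeInBytes)) & Mask) != (Value & Mask))
//       return SDValue();  // Halves differ, so this is not a smaller splat.
//   }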
01225 
01226 //===----------------------------------------------------------------------===//
01227 //  Addressing Mode Selection
01228 //===----------------------------------------------------------------------===//
01229 
01230 /// isIntS16Immediate - This method tests whether the node is a 32-bit
01231 /// or 64-bit constant whose value can be accurately represented as a
01232 /// sign extension from a 16-bit value.  If so, it returns true and sets
01233 /// Imm to that immediate.
01234 static bool isIntS16Immediate(SDNode *N, short &Imm) {
01235   if (!isa<ConstantSDNode>(N))
01236     return false;
01237 
01238   Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
01239   if (N->getValueType(0) == MVT::i32)
01240     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
01241   else
01242     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
01243 }
01244 static bool isIntS16Immediate(SDValue Op, short &Imm) {
01245   return isIntS16Immediate(Op.getNode(), Imm);
01246 }
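// For example (a sketch under the definitions above): an i32 constant
// 0xFFFF8000 truncates to Imm == -32768, and sign-extending -32768 back to
// 32 bits reproduces 0xFFFF8000, so the predicate returns true; an i32
// constant 0x00018000 yields the same Imm, but -32768 != 0x00018000, so it
// returns false.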
01247 
01248 
01249 /// SelectAddressRegReg - Given the specified address, check to see if it
01250 /// can be represented as an indexed [r+r] operation.  Returns false if it
01251 /// can be more efficiently represented with [r+imm].
01252 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
01253                                             SDValue &Index,
01254                                             SelectionDAG &DAG) const {
01255   short imm = 0;
01256   if (N.getOpcode() == ISD::ADD) {
01257     if (isIntS16Immediate(N.getOperand(1), imm))
01258       return false;    // r+i
01259     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
01260       return false;    // r+i
01261 
01262     Base = N.getOperand(0);
01263     Index = N.getOperand(1);
01264     return true;
01265   } else if (N.getOpcode() == ISD::OR) {
01266     if (isIntS16Immediate(N.getOperand(1), imm))
01267       return false;    // r+i; fold the immediate if we can.
01268 
01269     // If this is an or of disjoint bitfields, we can codegen this as an add
01270     // (for better address arithmetic) if the LHS and RHS of the OR are provably
01271     // disjoint.
01272     APInt LHSKnownZero, LHSKnownOne;
01273     APInt RHSKnownZero, RHSKnownOne;
01274     DAG.computeKnownBits(N.getOperand(0),
01275                          LHSKnownZero, LHSKnownOne);
01276 
01277     if (LHSKnownZero.getBoolValue()) {
01278       DAG.computeKnownBits(N.getOperand(1),
01279                            RHSKnownZero, RHSKnownOne);
01280       // If all of the bits are known zero on the LHS or RHS, the add won't
01281       // carry.
01282       if (~(LHSKnownZero | RHSKnownZero) == 0) {
01283         Base = N.getOperand(0);
01284         Index = N.getOperand(1);
01285         return true;
01286       }
01287     }
01288   }
01289 
01290   return false;
01291 }
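// An illustration of the disjoint-OR case (a sketch, not a sequence taken
// from a test): for an address formed as (X << 16) | (Y & 0xFFFF), the low
// 16 bits of the LHS and the high bits of the RHS are known zero, so
// ~(LHSKnownZero | RHSKnownZero) == 0 holds and the OR can be selected
// exactly as the add (X << 16) + (Y & 0xFFFF), feeding the [r+r] form.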
01292 
01293 // If we happen to be doing an i64 load or store into a stack slot that has
01294 // less than a 4-byte alignment, then the frame-index elimination may need to
01295 // use an indexed load or store instruction (because the offset may not be a
01296 // multiple of 4). The extra register needed to hold the offset comes from the
01297 // register scavenger, and it is possible that the scavenger will need to use
01298 // an emergency spill slot. As a result, we need to make sure that a spill slot
01299 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
01300 // stack slot.
01301 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
01302   // FIXME: This does not handle the LWA case.
01303   if (VT != MVT::i64)
01304     return;
01305 
01306   // NOTE: We'll exclude negative FIs here, which come from argument
01307   // lowering, because there are no known test cases triggering this problem
01308   // using packed structures (or similar). We can remove this exclusion if
01309   // we find such a test case. The reason why this is so test-case driven is
01310   // because this entire 'fixup' is only to prevent crashes (from the
01311   // register scavenger) on not-really-valid inputs. For example, if we have:
01312   //   %a = alloca i1
01313   //   %b = bitcast i1* %a to i64*
01314   //   store i64 1, i64* %b
01315   // then the store should really be marked as 'align 1', but is not. If it
01316   // were marked as 'align 1' then the indexed form would have been
01317   // instruction-selected initially, and the problem this 'fixup' is preventing
01318   // won't happen regardless.
01319   if (FrameIdx < 0)
01320     return;
01321 
01322   MachineFunction &MF = DAG.getMachineFunction();
01323   MachineFrameInfo *MFI = MF.getFrameInfo();
01324 
01325   unsigned Align = MFI->getObjectAlignment(FrameIdx);
01326   if (Align >= 4)
01327     return;
01328 
01329   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
01330   FuncInfo->setHasNonRISpills();
01331 }
01332 
01333 /// Returns true if the address N can be represented by a base register plus
01334 /// a signed 16-bit displacement [r+imm], and if it is not better
01335 /// represented as reg+reg.  If Aligned is true, only accept displacements
01336 /// suitable for STD and friends, i.e. multiples of 4.
01337 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
01338                                             SDValue &Base,
01339                                             SelectionDAG &DAG,
01340                                             bool Aligned) const {
01341   // FIXME dl should come from parent load or store, not from address
01342   SDLoc dl(N);
01343   // If this can be more profitably realized as r+r, fail.
01344   if (SelectAddressRegReg(N, Disp, Base, DAG))
01345     return false;
01346 
01347   if (N.getOpcode() == ISD::ADD) {
01348     short imm = 0;
01349     if (isIntS16Immediate(N.getOperand(1), imm) &&
01350         (!Aligned || (imm & 3) == 0)) {
01351       Disp = DAG.getTargetConstant(imm, N.getValueType());
01352       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
01353         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
01354         fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
01355       } else {
01356         Base = N.getOperand(0);
01357       }
01358       return true; // [r+i]
01359     } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
01360       // Match LOAD (ADD (X, Lo(G))).
01361       assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
01362              && "Cannot handle constant offsets yet!");
01363       Disp = N.getOperand(1).getOperand(0);  // The global address.
01364       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
01365              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
01366              Disp.getOpcode() == ISD::TargetConstantPool ||
01367              Disp.getOpcode() == ISD::TargetJumpTable);
01368       Base = N.getOperand(0);
01369       return true;  // [&g+r]
01370     }
01371   } else if (N.getOpcode() == ISD::OR) {
01372     short imm = 0;
01373     if (isIntS16Immediate(N.getOperand(1), imm) &&
01374         (!Aligned || (imm & 3) == 0)) {
01375       // If this is an or of disjoint bitfields, we can codegen this as an add
01376       // (for better address arithmetic) if the LHS and RHS of the OR are
01377       // provably disjoint.
01378       APInt LHSKnownZero, LHSKnownOne;
01379       DAG.computeKnownBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);
01380 
01381       if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
01382         // If all of the bits are known zero on the LHS or RHS, the add won't
01383         // carry.
01384         if (FrameIndexSDNode *FI =
01385               dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
01386           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
01387           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
01388         } else {
01389           Base = N.getOperand(0);
01390         }
01391         Disp = DAG.getTargetConstant(imm, N.getValueType());
01392         return true;
01393       }
01394     }
01395   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
01396     // Loading from a constant address.
01397 
01398     // If this address fits entirely in a 16-bit sext immediate field, codegen
01399     // this as "d, 0"
01400     short Imm;
01401     if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) {
01402       Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
01403       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
01404                              CN->getValueType(0));
01405       return true;
01406     }
01407 
01408     // Handle 32-bit sext immediates with LIS + addr mode.
01409     if ((CN->getValueType(0) == MVT::i32 ||
01410          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
01411         (!Aligned || (CN->getZExtValue() & 3) == 0)) {
01412       int Addr = (int)CN->getZExtValue();
01413 
01414       // Otherwise, break this down into an LIS + disp.
01415       Disp = DAG.getTargetConstant((short)Addr, MVT::i32);
01416 
01417       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
01418       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
01419       Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
01420       return true;
01421     }
01422   }
01423 
01424   Disp = DAG.getTargetConstant(0, getPointerTy());
01425   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
01426     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
01427     fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
01428   } else
01429     Base = N;
01430   return true;      // [r+0]
01431 }
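// A worked example of the LIS + disp decomposition above (a sketch): for
// Addr == 0x12348004, (short)Addr == -32764, so Disp == -32764 and
// Base == (0x12348004 - (-32764)) >> 16 == 0x1235.  "lis r, 0x1235"
// materializes 0x12350000, and 0x12350000 + (-32764) == 0x12348004; the
// borrow from the negative low half is absorbed by the rounded-up high half.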
01432 
01433 /// SelectAddressRegRegOnly - Given the specified address, force it to be
01434 /// represented as an indexed [r+r] operation.
01435 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
01436                                                 SDValue &Index,
01437                                                 SelectionDAG &DAG) const {
01438   // Check to see if we can easily represent this as an [r+r] address.  This
01439   // will fail if it thinks that the address is more profitably represented as
01440   // reg+imm, e.g. where imm = 0.
01441   if (SelectAddressRegReg(N, Base, Index, DAG))
01442     return true;
01443 
01444   // If the operand is an addition, always emit this as [r+r], since this is
01445   // better (for code size, and execution, as the memop does the add for free)
01446   // than emitting an explicit add.
01447   if (N.getOpcode() == ISD::ADD) {
01448     Base = N.getOperand(0);
01449     Index = N.getOperand(1);
01450     return true;
01451   }
01452 
01453   // Otherwise, do it the hard way, using R0 as the base register.
01454   Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
01455                          N.getValueType());
01456   Index = N;
01457   return true;
01458 }
01459 
01460 /// getPreIndexedAddressParts - Returns true if the node's address can be
01461 /// legally represented as a pre-indexed load / store address; if so, the
01462 /// base pointer, offset pointer, and addressing mode are set by reference.
01463 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
01464                                                   SDValue &Offset,
01465                                                   ISD::MemIndexedMode &AM,
01466                                                   SelectionDAG &DAG) const {
01467   if (DisablePPCPreinc) return false;
01468 
01469   bool isLoad = true;
01470   SDValue Ptr;
01471   EVT VT;
01472   unsigned Alignment;
01473   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
01474     Ptr = LD->getBasePtr();
01475     VT = LD->getMemoryVT();
01476     Alignment = LD->getAlignment();
01477   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
01478     Ptr = ST->getBasePtr();
01479     VT  = ST->getMemoryVT();
01480     Alignment = ST->getAlignment();
01481     isLoad = false;
01482   } else
01483     return false;
01484 
01485   // PowerPC doesn't have preinc load/store instructions for vectors.
01486   if (VT.isVector())
01487     return false;
01488 
01489   if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
01490 
01491     // Common code will reject creating a pre-inc form if the base pointer
01492     // is a frame index, or if N is a store and the base pointer is either
01493     // the same as or a predecessor of the value being stored.  Check for
01494     // those situations here, and try with swapped Base/Offset instead.
01495     bool Swap = false;
01496 
01497     if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
01498       Swap = true;
01499     else if (!isLoad) {
01500       SDValue Val = cast<StoreSDNode>(N)->getValue();
01501       if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
01502         Swap = true;
01503     }
01504 
01505     if (Swap)
01506       std::swap(Base, Offset);
01507 
01508     AM = ISD::PRE_INC;
01509     return true;
01510   }
01511 
01512   // LDU/STDU (used for i64) can only handle immediates that are a multiple of 4.
01513   if (VT != MVT::i64) {
01514     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false))
01515       return false;
01516   } else {
01517     // LDU/STU need an address with at least 4-byte alignment.
01518     if (Alignment < 4)
01519       return false;
01520 
01521     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true))
01522       return false;
01523   }
01524 
01525   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
01526     // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
01527     // sext i32 to i64 when addr mode is r+i.
01528     if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
01529         LD->getExtensionType() == ISD::SEXTLOAD &&
01530         isa<ConstantSDNode>(Offset))
01531       return false;
01532   }
01533 
01534   AM = ISD::PRE_INC;
01535   return true;
01536 }
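// For example (a sketch of the intended selection): a store through a
// pointer bumped by a constant stride, such as "*(p += 4) = x" on PPC32,
// can become a single "stwu x, 4(p)", which performs the store and writes
// the incremented address back into p in one instruction.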
01537 
01538 //===----------------------------------------------------------------------===//
01539 //  LowerOperation implementation
01540 //===----------------------------------------------------------------------===//
01541 
01542 /// GetLabelAccessInfo - Return true if we should reference labels using a
01543 /// PICBase, and set HiOpFlags and LoOpFlags to the target MO flags.
01544 static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags,
01545                                unsigned &LoOpFlags,
01546                                const GlobalValue *GV = nullptr) {
01547   HiOpFlags = PPCII::MO_HA;
01548   LoOpFlags = PPCII::MO_LO;
01549 
01550   // Don't use the pic base if not in PIC relocation model.
01551   bool isPIC = TM.getRelocationModel() == Reloc::PIC_;
01552 
01553   if (isPIC) {
01554     HiOpFlags |= PPCII::MO_PIC_FLAG;
01555     LoOpFlags |= PPCII::MO_PIC_FLAG;
01556   }
01557 
01558   // If this is a reference to a global value that requires a non-lazy-ptr, make
01559   // sure that instruction lowering adds it.
01560   if (GV && TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM)) {
01561     HiOpFlags |= PPCII::MO_NLP_FLAG;
01562     LoOpFlags |= PPCII::MO_NLP_FLAG;
01563 
01564     if (GV->hasHiddenVisibility()) {
01565       HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
01566       LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
01567     }
01568   }
01569 
01570   return isPIC;
01571 }
01572 
01573 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
01574                              SelectionDAG &DAG) {
01575   EVT PtrVT = HiPart.getValueType();
01576   SDValue Zero = DAG.getConstant(0, PtrVT);
01577   SDLoc DL(HiPart);
01578 
01579   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
01580   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
01581 
01582   // With PIC, the first instruction is actually "GR+hi(&G)".
01583   if (isPIC)
01584     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
01585                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
01586 
01587   // Generate non-pic code that has direct accesses to the constant pool.
01588   // The address of the global is just (hi(&g)+lo(&g)).
01589   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
01590 }
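// To make the Hi/Lo split concrete (a sketch): the non-PIC sequence is
// "addis r, 0, sym@ha" followed by "addi r, r, sym@l", where sym@ha is the
// high 16 bits rounded up by one whenever sym@l is negative as a signed
// 16-bit value, so that the signed addition of the low half reconstructs
// the full address.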
01591 
01592 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
01593                                              SelectionDAG &DAG) const {
01594   EVT PtrVT = Op.getValueType();
01595   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
01596   const Constant *C = CP->getConstVal();
01597 
01598   // 64-bit SVR4 ABI code is always position-independent.
01599   // The actual address of the GlobalValue is stored in the TOC.
01600   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
01601     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
01602     return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(CP), MVT::i64, GA,
01603                        DAG.getRegister(PPC::X2, MVT::i64));
01604   }
01605 
01606   unsigned MOHiFlag, MOLoFlag;
01607   bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
01608 
01609   if (isPIC && Subtarget.isSVR4ABI()) {
01610     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(),
01611                                            PPCII::MO_PIC_FLAG);
01612     SDLoc DL(CP);
01613     return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i32, GA,
01614                        DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT));
01615   }
01616 
01617   SDValue CPIHi =
01618     DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
01619   SDValue CPILo =
01620     DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
01621   return LowerLabelRef(CPIHi, CPILo, isPIC, DAG);
01622 }
01623 
01624 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
01625   EVT PtrVT = Op.getValueType();
01626   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
01627 
01628   // 64-bit SVR4 ABI code is always position-independent.
01629   // The actual address of the GlobalValue is stored in the TOC.
01630   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
01631     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
01632     return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(JT), MVT::i64, GA,
01633                        DAG.getRegister(PPC::X2, MVT::i64));
01634   }
01635 
01636   unsigned MOHiFlag, MOLoFlag;
01637   bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
01638 
01639   if (isPIC && Subtarget.isSVR4ABI()) {
01640     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
01641                                         PPCII::MO_PIC_FLAG);
01642     SDLoc DL(GA);
01643     return DAG.getNode(PPCISD::TOC_ENTRY, DL, PtrVT, GA,
01644                        DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT));
01645   }
01646 
01647   SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
01648   SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
01649   return LowerLabelRef(JTIHi, JTILo, isPIC, DAG);
01650 }
01651 
01652 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
01653                                              SelectionDAG &DAG) const {
01654   EVT PtrVT = Op.getValueType();
01655   BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
01656   const BlockAddress *BA = BASDN->getBlockAddress();
01657 
01658   // 64-bit SVR4 ABI code is always position-independent.
01659   // The actual BlockAddress is stored in the TOC.
01660   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
01661     SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
01662     return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(BASDN), MVT::i64, GA,
01663                        DAG.getRegister(PPC::X2, MVT::i64));
01664   }
01665 
01666   unsigned MOHiFlag, MOLoFlag;
01667   bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
01668   SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
01669   SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
01670   return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG);
01671 }
01672 
01673 // Generate a call to __tls_get_addr for the given GOT entry Op.
01674 std::pair<SDValue,SDValue>
01675 PPCTargetLowering::lowerTLSCall(SDValue Op, SDLoc dl,
01676                                 SelectionDAG &DAG) const {
01677 
01678   Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
01679   TargetLowering::ArgListTy Args;
01680   TargetLowering::ArgListEntry Entry;
01681   Entry.Node = Op;
01682   Entry.Ty = IntPtrTy;
01683   Args.push_back(Entry);
01684 
01685   TargetLowering::CallLoweringInfo CLI(DAG);
01686   CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
01687     .setCallee(CallingConv::C, IntPtrTy,
01688                DAG.getTargetExternalSymbol("__tls_get_addr", getPointerTy()),
01689                std::move(Args), 0);
01690 
01691   return LowerCallTo(CLI);
01692 }
01693 
01694 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
01695                                               SelectionDAG &DAG) const {
01696 
01697   // FIXME: TLS addresses currently use medium model code sequences,
01698   // which is the most useful form.  Eventually support for small and
01699   // large models could be added if users need it, at the cost of
01700   // additional complexity.
01701   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
01702   SDLoc dl(GA);
01703   const GlobalValue *GV = GA->getGlobal();
01704   EVT PtrVT = getPointerTy();
01705   bool is64bit = Subtarget.isPPC64();
01706   const Module *M = DAG.getMachineFunction().getFunction()->getParent();
01707   PICLevel::Level picLevel = M->getPICLevel();
01708 
01709   TLSModel::Model Model = getTargetMachine().getTLSModel(GV);
01710 
01711   if (Model == TLSModel::LocalExec) {
01712     SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
01713                                                PPCII::MO_TPREL_HA);
01714     SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
01715                                                PPCII::MO_TPREL_LO);
01716     SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2,
01717                                      is64bit ? MVT::i64 : MVT::i32);
01718     SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
01719     return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
01720   }
01721 
01722   if (Model == TLSModel::InitialExec) {
01723     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
01724     SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
01725                                                 PPCII::MO_TLS);
01726     SDValue GOTPtr;
01727     if (is64bit) {
01728       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
01729       GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
01730                            PtrVT, GOTReg, TGA);
01731     } else
01732       GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
01733     SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
01734                                    PtrVT, TGA, GOTPtr);
01735     return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
01736   }
01737 
01738   if (Model == TLSModel::GeneralDynamic) {
01739     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
01740                                              PPCII::MO_TLSGD);
01741     SDValue GOTPtr;
01742     if (is64bit) {
01743       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
01744       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
01745                                    GOTReg, TGA);
01746     } else {
01747       if (picLevel == PICLevel::Small)
01748         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
01749       else
01750         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
01751     }
01752     SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSGD_L, dl, PtrVT,
01753                                    GOTPtr, TGA);
01754     std::pair<SDValue, SDValue> CallResult = lowerTLSCall(GOTEntry, dl, DAG);
01755     return CallResult.first;
01756   }
01757 
01758   if (Model == TLSModel::LocalDynamic) {
01759     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
01760                                              PPCII::MO_TLSLD);
01761     SDValue GOTPtr;
01762     if (is64bit) {
01763       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
01764       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
01765                            GOTReg, TGA);
01766     } else {
01767       if (picLevel == PICLevel::Small)
01768         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
01769       else
01770         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
01771     }
01772     SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSLD_L, dl, PtrVT,
01773                                    GOTPtr, TGA);
01774     std::pair<SDValue, SDValue> CallResult = lowerTLSCall(GOTEntry, dl, DAG);
01775     SDValue TLSAddr = CallResult.first;
01776     SDValue Chain = CallResult.second;
01777     SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, PtrVT,
01778                                       Chain, TLSAddr, TGA);
01779     return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
01780   }
01781 
01782   llvm_unreachable("Unknown TLS model!");
01783 }
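// As a concrete instance (a sketch): for the local-exec model on 64-bit
// targets the code above builds "addis rX, r13, v@tprel@ha" followed by
// "addi rX, rX, v@tprel@l", with r13 as the thread pointer; the dynamic
// models instead compute a GOT entry and route it through the
// __tls_get_addr call emitted by lowerTLSCall.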
01784 
01785 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
01786                                               SelectionDAG &DAG) const {
01787   EVT PtrVT = Op.getValueType();
01788   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
01789   SDLoc DL(GSDN);
01790   const GlobalValue *GV = GSDN->getGlobal();
01791 
01792   // 64-bit SVR4 ABI code is always position-independent.
01793   // The actual address of the GlobalValue is stored in the TOC.
01794   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
01795     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
01796     return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i64, GA,
01797                        DAG.getRegister(PPC::X2, MVT::i64));
01798   }
01799 
01800   unsigned MOHiFlag, MOLoFlag;
01801   bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag, GV);
01802 
01803   if (isPIC && Subtarget.isSVR4ABI()) {
01804     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
01805                                             GSDN->getOffset(),
01806                                             PPCII::MO_PIC_FLAG);
01807     return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i32, GA,
01808                        DAG.getNode(PPCISD::GlobalBaseReg, DL, MVT::i32));
01809   }
01810 
01811   SDValue GAHi =
01812     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
01813   SDValue GALo =
01814     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
01815 
01816   SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG);
01817 
01818   // If the global reference is actually to a non-lazy-pointer, we have to do an
01819   // extra load to get the address of the global.
01820   if (MOHiFlag & PPCII::MO_NLP_FLAG)
01821     Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(),
01822                       false, false, false, 0);
01823   return Ptr;
01824 }
01825 
01826 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
01827   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
01828   SDLoc dl(Op);
01829 
01830   if (Op.getValueType() == MVT::v2i64) {
01831     // When the operands themselves are v2i64 values, we need to do something
01832     // special because VSX has no underlying comparison operations for these.
01833     if (Op.getOperand(0).getValueType() == MVT::v2i64) {
01834       // Equality can be handled by casting to the legal type for Altivec
01835       // comparisons, everything else needs to be expanded.
01836       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
01837         return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
01838                  DAG.getSetCC(dl, MVT::v4i32,
01839                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
01840                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
01841                    CC));
01842       }
01843 
01844       return SDValue();
01845     }
01846 
01847     // We handle most of these in the usual way.
01848     return Op;
01849   }
01850 
01851   // If we're comparing for equality to zero, expose the fact that this is
01852   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
01853   // fold the new nodes.
01854   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
01855     if (C->isNullValue() && CC == ISD::SETEQ) {
01856       EVT VT = Op.getOperand(0).getValueType();
01857       SDValue Zext = Op.getOperand(0);
01858       if (VT.bitsLT(MVT::i32)) {
01859         VT = MVT::i32;
01860         Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
01861       }
01862       unsigned Log2b = Log2_32(VT.getSizeInBits());
01863       SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
01864       SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
01865                                 DAG.getConstant(Log2b, MVT::i32));
01866       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
01867     }
01868     // Leave comparisons against 0 and -1 alone for now, since they're usually
01869     // optimized.  FIXME: revisit this when we can custom lower all setcc
01870     // optimizations.
01871     if (C->isAllOnesValue() || C->isNullValue())
01872       return SDValue();
01873   }
01874 
01875   // If we have an integer seteq/setne, turn it into a compare against zero
01876   // by xor'ing the rhs with the lhs, which is faster than setting a
01877   // condition register, reading it back out, and masking the correct bit.  The
01878   // normal approach here uses sub to do this instead of xor.  Using xor exposes
01879   // the result to other bit-twiddling opportunities.
01880   EVT LHSVT = Op.getOperand(0).getValueType();
01881   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
01882     EVT VT = Op.getValueType();
01883     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
01884                                 Op.getOperand(1));
01885     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, LHSVT), CC);
01886   }
01887   return SDValue();
01888 }
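// The ctlz/srl expansion above in concrete terms (a sketch): for a 32-bit
// value x, cntlzw yields 32 exactly when x == 0 and a value in [0, 31]
// otherwise, so "(ctlz x) >> 5" is 1 iff x == 0; the seteq-to-zero result
// is computed without reading a condition register.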
01889 
01890 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
01891                                       const PPCSubtarget &Subtarget) const {
01892   SDNode *Node = Op.getNode();
01893   EVT VT = Node->getValueType(0);
01894   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
01895   SDValue InChain = Node->getOperand(0);
01896   SDValue VAListPtr = Node->getOperand(1);
01897   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
01898   SDLoc dl(Node);
01899 
01900   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
01901 
01902   // gpr_index
01903   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
01904                                     VAListPtr, MachinePointerInfo(SV), MVT::i8,
01905                                     false, false, false, 0);
01906   InChain = GprIndex.getValue(1);
01907 
01908   if (VT == MVT::i64) {
01909     // Check whether GprIndex is odd; i64 args must start at an even index.
01910     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
01911                                  DAG.getConstant(1, MVT::i32));
01912     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
01913                                 DAG.getConstant(0, MVT::i32), ISD::SETNE);
01914     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
01915                                           DAG.getConstant(1, MVT::i32));
01916     // Align GprIndex to be even if it isn't
01917     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
01918                            GprIndex);
01919   }
01920 
01921   // fpr index is 1 byte after gpr
01922   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
01923                                DAG.getConstant(1, MVT::i32));
01924 
01925   // fpr
01926   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
01927                                     FprPtr, MachinePointerInfo(SV), MVT::i8,
01928                                     false, false, false, 0);
01929   InChain = FprIndex.getValue(1);
01930 
01931   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
01932                                        DAG.getConstant(8, MVT::i32));
01933 
01934   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
01935                                         DAG.getConstant(4, MVT::i32));
01936 
01937   // areas
01938   SDValue OverflowArea = DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr,
01939                                      MachinePointerInfo(), false, false,
01940                                      false, 0);
01941   InChain = OverflowArea.getValue(1);
01942 
01943   SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr,
01944                                     MachinePointerInfo(), false, false,
01945                                     false, 0);
01946   InChain = RegSaveArea.getValue(1);
01947 
01948   // select overflow_area if index >= 8
01949   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
01950                             DAG.getConstant(8, MVT::i32), ISD::SETLT);
01951 
01952   // adjustment constant gpr_index * 4/8
01953   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
01954                                     VT.isInteger() ? GprIndex : FprIndex,
01955                                     DAG.getConstant(VT.isInteger() ? 4 : 8,
01956                                                     MVT::i32));
01957 
01958   // OurReg = RegSaveArea + RegConstant
01959   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
01960                                RegConstant);
01961 
01962   // Floating types are 32 bytes into RegSaveArea
01963   if (VT.isFloatingPoint())
01964     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
01965                          DAG.getConstant(32, MVT::i32));
01966 
01967   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
01968   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
01969                                    VT.isInteger() ? GprIndex : FprIndex,
01970                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1,
01971                                                    MVT::i32));
01972 
01973   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
01974                               VT.isInteger() ? VAListPtr : FprPtr,
01975                               MachinePointerInfo(SV),
01976                               MVT::i8, false, false, 0);
01977 
01978   // determine if we should load from reg_save_area or overflow_area
01979   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
01980 
01981   // increase overflow_area by 4/8 if gpr/fpr index >= 8
01982   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
01983                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
01984                                           MVT::i32));
01985 
01986   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
01987                              OverflowAreaPlusN);
01988 
01989   InChain = DAG.getTruncStore(InChain, dl, OverflowArea,
01990                               OverflowAreaPtr,
01991                               MachinePointerInfo(),
01992                               MVT::i32, false, false, 0);
01993 
01994   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(),
01995                      false, false, false, 0);
01996 }
01997 
01998 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG,
01999                                        const PPCSubtarget &Subtarget) const {
02000   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
02001 
02002   // We have to copy the entire va_list struct:
02003   // 2*sizeof(char) + 2 bytes of alignment padding + 2*sizeof(char*) = 12 bytes
02004   return DAG.getMemcpy(Op.getOperand(0), Op,
02005                        Op.getOperand(1), Op.getOperand(2),
02006                        DAG.getConstant(12, MVT::i32), 8, false, true,
02007                        MachinePointerInfo(), MachinePointerInfo());
02008 }
02009 
02010 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
02011                                                   SelectionDAG &DAG) const {
02012   return Op.getOperand(0);
02013 }
02014 
02015 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
02016                                                 SelectionDAG &DAG) const {
02017   SDValue Chain = Op.getOperand(0);
02018   SDValue Trmp = Op.getOperand(1); // trampoline
02019   SDValue FPtr = Op.getOperand(2); // nested function
02020   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
02021   SDLoc dl(Op);
02022 
02023   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02024   bool isPPC64 = (PtrVT == MVT::i64);
02025   Type *IntPtrTy =
02026     DAG.getTargetLoweringInfo().getDataLayout()->getIntPtrType(
02027                                                              *DAG.getContext());
02028 
02029   TargetLowering::ArgListTy Args;
02030   TargetLowering::ArgListEntry Entry;
02031 
02032   Entry.Ty = IntPtrTy;
02033   Entry.Node = Trmp; Args.push_back(Entry);
02034 
02035   // TrampSize == (isPPC64 ? 48 : 40);
02036   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40,
02037                                isPPC64 ? MVT::i64 : MVT::i32);
02038   Args.push_back(Entry);
02039 
02040   Entry.Node = FPtr; Args.push_back(Entry);
02041   Entry.Node = Nest; Args.push_back(Entry);
02042 
02043   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
02044   TargetLowering::CallLoweringInfo CLI(DAG);
02045   CLI.setDebugLoc(dl).setChain(Chain)
02046     .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
02047                DAG.getExternalSymbol("__trampoline_setup", PtrVT),
02048                std::move(Args), 0);
02049 
02050   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
02051   return CallResult.second;
02052 }
02053 
02054 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
02055                                         const PPCSubtarget &Subtarget) const {
02056   MachineFunction &MF = DAG.getMachineFunction();
02057   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
02058 
02059   SDLoc dl(Op);
02060 
02061   if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
02062     // vastart just stores the address of the VarArgsFrameIndex slot into the
02063     // memory location argument.
02064     EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02065     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
02066     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
02067     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
02068                         MachinePointerInfo(SV),
02069                         false, false, 0);
02070   }
02071 
02072   // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
02073   // We suppose the given va_list is already allocated.
02074   //
02075   // typedef struct {
02076   //  char gpr;     /* index into the array of 8 GPRs
02077   //                 * stored in the register save area
02078   //                 * gpr=0 corresponds to r3,
02079   //                 * gpr=1 to r4, etc.
02080   //                 */
02081   //  char fpr;     /* index into the array of 8 FPRs
02082   //                 * stored in the register save area
02083   //                 * fpr=0 corresponds to f1,
02084   //                 * fpr=1 to f2, etc.
02085   //                 */
02086   //  char *overflow_arg_area;
02087   //                /* location on stack that holds
02088   //                 * the next overflow argument
02089   //                 */
02090   //  char *reg_save_area;
02091   //               /* where r3:r10 and f1:f8 (if saved)
02092   //                * are stored
02093   //                */
02094   // } va_list[1];
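  //
  // Concretely (a sketch of the 12-byte layout produced below): offset 0
  // holds gpr, offset 1 holds fpr, offsets 2-3 are padding so the pointers
  // are 4-byte aligned, offset 4 holds overflow_arg_area, and offset 8
  // holds reg_save_area.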
02095 
02096 
02097   SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), MVT::i32);
02098   SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), MVT::i32);
02099 
02100 
02101   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02102 
02103   SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
02104                                             PtrVT);
02105   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
02106                                  PtrVT);
02107 
02108   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
02109   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT);
02110 
02111   uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
02112   SDValue ConstStackOffset = DAG.getConstant(StackOffset, PtrVT);
02113 
02114   uint64_t FPROffset = 1;
02115   SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT);
02116 
02117   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
02118 
02119   // Store first byte : number of int regs
02120   SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR,
02121                                          Op.getOperand(1),
02122                                          MachinePointerInfo(SV),
02123                                          MVT::i8, false, false, 0);
02124   uint64_t nextOffset = FPROffset;
02125   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
02126                                   ConstFPROffset);
02127 
02128   // Store second byte : number of float regs
02129   SDValue secondStore =
02130     DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
02131                       MachinePointerInfo(SV, nextOffset), MVT::i8,
02132                       false, false, 0);
02133   nextOffset += StackOffset;
02134   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
02135 
02136   // Store second word : arguments given on stack
02137   SDValue thirdStore =
02138     DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
02139                  MachinePointerInfo(SV, nextOffset),
02140                  false, false, 0);
02141   nextOffset += FrameOffset;
02142   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
02143 
02144   // Store third word : arguments given in registers
02145   return DAG.getStore(thirdStore, dl, FR, nextPtr,
02146                       MachinePointerInfo(SV, nextOffset),
02147                       false, false, 0);
02148 
02149 }
02150 
02151 #include "PPCGenCallingConv.inc"
02152 
02153 // Function whose sole purpose is to kill compiler warnings 
02154 // stemming from unused functions included from PPCGenCallingConv.inc.
02155 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const {
02156   return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
02157 }
02158 
02159 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
02160                                       CCValAssign::LocInfo &LocInfo,
02161                                       ISD::ArgFlagsTy &ArgFlags,
02162                                       CCState &State) {
02163   return true;
02164 }
02165 
02166 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
02167                                              MVT &LocVT,
02168                                              CCValAssign::LocInfo &LocInfo,
02169                                              ISD::ArgFlagsTy &ArgFlags,
02170                                              CCState &State) {
02171   static const MCPhysReg ArgRegs[] = {
02172     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
02173     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
02174   };
02175   const unsigned NumArgRegs = array_lengthof(ArgRegs);
02176 
02177   unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
02178 
02179   // Skip one register if the first unallocated register has an even register
02180   // number and there are still argument registers available which have not been
02181   // allocated yet. RegNum is actually an index into ArgRegs, which means we
02182   // need to skip a register if RegNum is odd.
02183   if (RegNum != NumArgRegs && RegNum % 2 == 1) {
02184     State.AllocateReg(ArgRegs[RegNum]);
02185   }
02186 
02187   // Always return false here, as this function only makes sure that the first
02188   // unallocated register has an odd register number and does not actually
02189   // allocate a register for the current argument.
02190   return false;
02191 }
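// For example (a sketch of the effect): for f(int a, long long b) under the
// 32-bit SVR4 ABI, a is assigned r3 (index 0); when b is allocated the first
// free index is 1 (r4), which is odd, so r4 is skipped and b occupies the
// aligned pair r5:r6.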
02192 
02193 bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
02194                                                MVT &LocVT,
02195                                                CCValAssign::LocInfo &LocInfo,
02196                                                ISD::ArgFlagsTy &ArgFlags,
02197                                                CCState &State) {
02198   static const MCPhysReg ArgRegs[] = {
02199     PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
02200     PPC::F8
02201   };
02202 
02203   const unsigned NumArgRegs = array_lengthof(ArgRegs);
02204 
02205   unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
02206 
02207   // If there is only one Floating-point register left we need to put both f64
02208   // values of a split ppc_fp128 value on the stack.
02209   if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
02210     State.AllocateReg(ArgRegs[RegNum]);
02211   }
02212 
02213   // Always return false here, as this function only makes sure that the two f64
02214   // values a ppc_fp128 value is split into are both passed in registers or both
02215   // passed on the stack and does not actually allocate a register for the
02216   // current argument.
02217   return false;
02218 }
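// Similarly (a sketch): if a ppc_fp128 value arrives when only F8 remains
// free, F8 is deliberately consumed so that both f64 halves of the value are
// passed on the stack together rather than split between F8 and memory.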
02219 
02220 /// GetFPR - Get the set of FP registers that should be allocated for
02221 /// arguments on Darwin.
02222 static const MCPhysReg *GetFPR() {
02223   static const MCPhysReg FPR[] = {
02224     PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
02225     PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
02226   };
02227 
02228   return FPR;
02229 }
02230 
02231 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
02232 /// the stack.
02233 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
02234                                        unsigned PtrByteSize) {
02235   unsigned ArgSize = ArgVT.getStoreSize();
02236   if (Flags.isByVal())
02237     ArgSize = Flags.getByValSize();
02238 
02239   // Round up to multiples of the pointer size, except for array members,
02240   // which are always packed.
02241   if (!Flags.isInConsecutiveRegs())
02242     ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
02243 
02244   return ArgSize;
02245 }
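// For instance (a sketch): a 10-byte byval aggregate with PtrByteSize == 4
// reserves a 12-byte slot, while the same bytes passed as a packed array
// member (isInConsecutiveRegs) keep their exact 10-byte size.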
02246 
02247 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
02248 /// on the stack.
02249 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
02250                                             ISD::ArgFlagsTy Flags,
02251                                             unsigned PtrByteSize) {
02252   unsigned Align = PtrByteSize;
02253 
02254   // Altivec parameters are padded to a 16 byte boundary.
02255   if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
02256       ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
02257       ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64)
02258     Align = 16;
02259 
02260   // ByVal parameters are aligned as requested.
02261   if (Flags.isByVal()) {
02262     unsigned BVAlign = Flags.getByValAlign();
02263     if (BVAlign > PtrByteSize) {
02264       if (BVAlign % PtrByteSize != 0)
02265           llvm_unreachable(
02266             "ByVal alignment is not a multiple of the pointer size");
02267 
02268       Align = BVAlign;
02269     }
02270   }
02271 
02272   // Array members are always packed to their original alignment.
02273   if (Flags.isInConsecutiveRegs()) {
02274     // If the array member was split into multiple registers, the first
02275     // needs to be aligned to the size of the full type.  (Except for
02276     // ppcf128, which is only aligned as its f64 components.)
02277     if (Flags.isSplit() && OrigVT != MVT::ppcf128)
02278       Align = OrigVT.getStoreSize();
02279     else
02280       Align = ArgVT.getStoreSize();
02281   }
02282 
02283   return Align;
02284 }
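// For example (a sketch): a v4f32 Altivec argument is always aligned to 16
// bytes, and a byval with a requested 8-byte alignment on a target with
// 4-byte pointers raises Align from 4 to 8.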
02285 
02286 /// CalculateStackSlotUsed - Return whether this argument will use its
02287 /// stack slot (instead of being passed in registers).  ArgOffset,
02288 /// AvailableFPRs, and AvailableVRs must hold the current argument
02289 /// position, and will be updated to account for this argument.
02290 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
02291                                    ISD::ArgFlagsTy Flags,
02292                                    unsigned PtrByteSize,
02293                                    unsigned LinkageSize,
02294                                    unsigned ParamAreaSize,
02295                                    unsigned &ArgOffset,
02296                                    unsigned &AvailableFPRs,
02297                                    unsigned &AvailableVRs) {
02298   bool UseMemory = false;
02299 
02300   // Respect alignment of argument on the stack.
02301   unsigned Align =
02302     CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
02303   ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
02304   // If there's no space left in the argument save area, we must
02305   // use memory (this check also catches zero-sized arguments).
02306   if (ArgOffset >= LinkageSize + ParamAreaSize)
02307     UseMemory = true;
02308 
02309   // Allocate argument on the stack.
02310   ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
02311   if (Flags.isInConsecutiveRegsLast())
02312     ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
02313   // If we overran the argument save area, we must use memory
02314   // (this check catches arguments passed partially in memory)
02315   if (ArgOffset > LinkageSize + ParamAreaSize)
02316     UseMemory = true;
02317 
02318   // However, if the argument is actually passed in an FPR or a VR,
02319   // we don't use memory after all.
02320   if (!Flags.isByVal()) {
02321     if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
02322       if (AvailableFPRs > 0) {
02323         --AvailableFPRs;
02324         return false;
02325       }
02326     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
02327         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
02328         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64)
02329       if (AvailableVRs > 0) {
02330         --AvailableVRs;
02331         return false;
02332       }
02333   }
02334 
02335   return UseMemory;
02336 }
02337 
02338 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
02339 /// ensure the minimum alignment required by the target.
02340 static unsigned EnsureStackAlignment(const TargetMachine &Target,
02341                                      unsigned NumBytes) {
02342   unsigned TargetAlign =
02343       Target.getSubtargetImpl()->getFrameLowering()->getStackAlignment();
02344   unsigned AlignMask = TargetAlign - 1;
02345   NumBytes = (NumBytes + AlignMask) & ~AlignMask;
02346   return NumBytes;
02347 }
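// In numbers (a sketch): with a 16-byte target alignment, AlignMask == 15,
// so NumBytes == 52 becomes (52 + 15) & ~15 == 64, while an already-aligned
// 64 stays 64.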
02348 
02349 SDValue
02350 PPCTargetLowering::LowerFormalArguments(SDValue Chain,
02351                                         CallingConv::ID CallConv, bool isVarArg,
02352                                         const SmallVectorImpl<ISD::InputArg>
02353                                           &Ins,
02354                                         SDLoc dl, SelectionDAG &DAG,
02355                                         SmallVectorImpl<SDValue> &InVals)
02356                                           const {
02357   if (Subtarget.isSVR4ABI()) {
02358     if (Subtarget.isPPC64())
02359       return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
02360                                          dl, DAG, InVals);
02361     else
02362       return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
02363                                          dl, DAG, InVals);
02364   } else {
02365     return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
02366                                        dl, DAG, InVals);
02367   }
02368 }
02369 
02370 SDValue
02371 PPCTargetLowering::LowerFormalArguments_32SVR4(
02372                                       SDValue Chain,
02373                                       CallingConv::ID CallConv, bool isVarArg,
02374                                       const SmallVectorImpl<ISD::InputArg>
02375                                         &Ins,
02376                                       SDLoc dl, SelectionDAG &DAG,
02377                                       SmallVectorImpl<SDValue> &InVals) const {
02378 
02379   // 32-bit SVR4 ABI Stack Frame Layout:
02380   //              +-----------------------------------+
02381   //        +-->  |            Back chain             |
02382   //        |     +-----------------------------------+
02383   //        |     | Floating-point register save area |
02384   //        |     +-----------------------------------+
02385   //        |     |    General register save area     |
02386   //        |     +-----------------------------------+
02387   //        |     |          CR save word             |
02388   //        |     +-----------------------------------+
02389   //        |     |         VRSAVE save word          |
02390   //        |     +-----------------------------------+
02391   //        |     |         Alignment padding         |
02392   //        |     +-----------------------------------+
02393   //        |     |     Vector register save area     |
02394   //        |     +-----------------------------------+
02395   //        |     |       Local variable space        |
02396   //        |     +-----------------------------------+
02397   //        |     |        Parameter list area        |
02398   //        |     +-----------------------------------+
02399   //        |     |           LR save word            |
02400   //        |     +-----------------------------------+
02401   // SP-->  +---  |            Back chain             |
02402   //              +-----------------------------------+
02403   //
02404   // Specifications:
02405   //   System V Application Binary Interface PowerPC Processor Supplement
02406   //   AltiVec Technology Programming Interface Manual
02407 
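  // Hedged note: of the regions drawn above, only the fixed linkage area
  // (back chain word plus LR save word -- 8 bytes on 32-bit SVR4) is
  // reserved unconditionally via getLinkageSize() below; the save areas
  // and padding exist only when the function actually needs them.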
02408   MachineFunction &MF = DAG.getMachineFunction();
02409   MachineFrameInfo *MFI = MF.getFrameInfo();
02410   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
02411 
02412   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02413   // Potential tail calls could cause overwriting of argument stack slots.
02414   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
02415                        (CallConv == CallingConv::Fast));
02416   unsigned PtrByteSize = 4;
02417 
02418   // Assign locations to all of the incoming arguments.
02419   SmallVector<CCValAssign, 16> ArgLocs;
02420   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
02421                  *DAG.getContext());
02422 
02423   // Reserve space for the linkage area on the stack.
02424   unsigned LinkageSize = PPCFrameLowering::getLinkageSize(false, false, false);
02425   CCInfo.AllocateStack(LinkageSize, PtrByteSize);
02426 
02427   CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
02428 
02429   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
02430     CCValAssign &VA = ArgLocs[i];
02431 
02432     // Arguments stored in registers.
02433     if (VA.isRegLoc()) {
02434       const TargetRegisterClass *RC;
02435       EVT ValVT = VA.getValVT();
02436 
02437       switch (ValVT.getSimpleVT().SimpleTy) {
02438         default:
02439           llvm_unreachable("ValVT not supported by formal arguments Lowering");
02440         case MVT::i1:
02441         case MVT::i32:
02442           RC = &PPC::GPRCRegClass;
02443           break;
02444         case MVT::f32:
02445           RC = &PPC::F4RCRegClass;
02446           break;
02447         case MVT::f64:
02448           if (Subtarget.hasVSX())
02449             RC = &PPC::VSFRCRegClass;
02450           else
02451             RC = &PPC::F8RCRegClass;
02452           break;
02453         case MVT::v16i8:
02454         case MVT::v8i16:
02455         case MVT::v4i32:
02456         case MVT::v4f32:
02457           RC = &PPC::VRRCRegClass;
02458           break;
02459         case MVT::v2f64:
02460         case MVT::v2i64:
02461           RC = &PPC::VSHRCRegClass;
02462           break;
02463       }
02464 
02465       // Transform the arguments stored in physical registers into virtual ones.
02466       unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
02467       SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
02468                                             ValVT == MVT::i1 ? MVT::i32 : ValVT);
02469 
02470       if (ValVT == MVT::i1)
02471         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
02472 
02473       InVals.push_back(ArgValue);
02474     } else {
02475       // Argument stored in memory.
02476       assert(VA.isMemLoc());
02477 
02478       unsigned ArgSize = VA.getLocVT().getStoreSize();
02479       int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(),
02480                                       isImmutable);
02481 
02482       // Create load nodes to retrieve arguments from the stack.
02483       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
02484       InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
02485                                    MachinePointerInfo(),
02486                                    false, false, false, 0));
02487     }
02488   }
02489 
02490   // Assign locations to all of the incoming aggregate by-value arguments.
02491   // Aggregates passed by value are stored in the local variable space of the
02492   // caller's stack frame, right above the parameter list area.
02493   SmallVector<CCValAssign, 16> ByValArgLocs;
02494   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
02495                       ByValArgLocs, *DAG.getContext());
02496 
02497   // Reserve stack space for the allocations in CCInfo.
02498   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
02499 
02500   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
02501 
02502   // Area that is at least reserved in the caller of this function.
02503   unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
02504   MinReservedArea = std::max(MinReservedArea, LinkageSize);
02505 
02506   // Set the size that is at least reserved in the caller of this function.  Tail
02507   // call optimized function's reserved stack space needs to be aligned so that
02508   // taking the difference between two stack areas will result in an aligned
02509   // stack.
02510   MinReservedArea = EnsureStackAlignment(MF.getTarget(), MinReservedArea);
02511   FuncInfo->setMinReservedArea(MinReservedArea);
02512 
02513   SmallVector<SDValue, 8> MemOps;
02514 
02515   // If the function takes a variable number of arguments, make a frame index for
02516   // the start of the first vararg value... for expansion of llvm.va_start.
02517   if (isVarArg) {
02518     static const MCPhysReg GPArgRegs[] = {
02519       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
02520       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
02521     };
02522     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
02523 
02524     static const MCPhysReg FPArgRegs[] = {
02525       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
02526       PPC::F8
02527     };
02528     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
02529     if (DisablePPCFloatInVariadic)
02530       NumFPArgRegs = 0;
02531 
02532     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs,
02533                                                           NumGPArgRegs));
02534     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs,
02535                                                           NumFPArgRegs));
02536 
02537     // Make room for NumGPArgRegs and NumFPArgRegs.
02538     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
02539                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
02540 
02541     FuncInfo->setVarArgsStackOffset(
02542       MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
02543                              CCInfo.getNextStackOffset(), true));
02544 
02545     FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false));
02546     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
02547 
02548     // The fixed integer arguments of a variadic function are stored to the
02549     // VarArgsFrameIndex on the stack so that they may be loaded by dereferencing
02550     // the result of va_next.
02551     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
02552       // Get an existing live-in vreg, or add a new one.
02553       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
02554       if (!VReg)
02555         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
02556 
02557       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
02558       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
02559                                    MachinePointerInfo(), false, false, 0);
02560       MemOps.push_back(Store);
02561       // Increment the address by four for the next argument to store
02562       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
02563       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
02564     }
02565 
02566     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
02567     // is set.
02568     // The double arguments are stored to the VarArgsFrameIndex
02569     // on the stack.
02570     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
02571       // Get an existing live-in vreg, or add a new one.
02572       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
02573       if (!VReg)
02574         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
02575 
02576       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
02577       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
02578                                    MachinePointerInfo(), false, false, 0);
02579       MemOps.push_back(Store);
02580       // Increment the address by eight for the next argument to store
02581       SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8,
02582                                          PtrVT);
02583       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
02584     }
02585   }
02586 
02587   if (!MemOps.empty())
02588     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
02589 
02590   return Chain;
02591 }
02592 
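A hedged, C-level illustration of what the 32-bit SVR4 lowering above produces
(the signature is hypothetical): CC_PPC32_SVR4 assigns GPRs and FPRs
independently, so integer and floating-point arguments advance through r3..r10
and f1..f8 in parallel rather than shadowing each other.

    // Each comment shows where the incoming value materializes; arguments
    // beyond the register sets spill to the parameter list area.
    void callee(int a,      // r3
                double b,   // f1 (consumes no GPR under 32-bit SVR4)
                int c,      // r4
                float d);   // f2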
02593 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
02594 // value to MVT::i64 and then truncate to the correct register size.
02595 SDValue
02596 PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
02597                                      SelectionDAG &DAG, SDValue ArgVal,
02598                                      SDLoc dl) const {
02599   if (Flags.isSExt())
02600     ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
02601                          DAG.getValueType(ObjectVT));
02602   else if (Flags.isZExt())
02603     ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
02604                          DAG.getValueType(ObjectVT));
02605 
02606   return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
02607 }
02608 
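A sketch of the node chain this helper builds for a sign-extended i32 arriving
in an i64 GPR (the case Flags.isSExt() selects); AssertSext only records the
extension invariant so later combines can fold away the truncate:

    //   t1: i64 = CopyFromReg Chain, Register:i64 %vreg
    //   t2: i64 = AssertSext t1, ValueType:i32
    //   t3: i32 = truncate t2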
02609 SDValue
02610 PPCTargetLowering::LowerFormalArguments_64SVR4(
02611                                       SDValue Chain,
02612                                       CallingConv::ID CallConv, bool isVarArg,
02613                                       const SmallVectorImpl<ISD::InputArg>
02614                                         &Ins,
02615                                       SDLoc dl, SelectionDAG &DAG,
02616                                       SmallVectorImpl<SDValue> &InVals) const {
02617   // TODO: add description of PPC stack frame format, or at least some docs.
02618   //
02619   bool isELFv2ABI = Subtarget.isELFv2ABI();
02620   bool isLittleEndian = Subtarget.isLittleEndian();
02621   MachineFunction &MF = DAG.getMachineFunction();
02622   MachineFrameInfo *MFI = MF.getFrameInfo();
02623   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
02624 
02625   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
02626          "fastcc not supported on varargs functions");
02627 
02628   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02629   // Potential tail calls could cause overwriting of argument stack slots.
02630   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
02631                        (CallConv == CallingConv::Fast));
02632   unsigned PtrByteSize = 8;
02633 
02634   unsigned LinkageSize = PPCFrameLowering::getLinkageSize(true, false,
02635                                                           isELFv2ABI);
02636 
02637   static const MCPhysReg GPR[] = {
02638     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
02639     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
02640   };
02641 
02642   static const MCPhysReg *FPR = GetFPR();
02643 
02644   static const MCPhysReg VR[] = {
02645     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
02646     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
02647   };
02648   static const MCPhysReg VSRH[] = {
02649     PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8,
02650     PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13
02651   };
02652 
02653   const unsigned Num_GPR_Regs = array_lengthof(GPR);
02654   const unsigned Num_FPR_Regs = 13;
02655   const unsigned Num_VR_Regs  = array_lengthof(VR);
02656 
02657   // Do a first pass over the arguments to determine whether the ABI
02658   // guarantees that our caller has allocated the parameter save area
02659   // on its stack frame.  In the ELFv1 ABI, this is always the case;
02660   // in the ELFv2 ABI, it is true if this is a vararg function or if
02661   // any parameter is located in a stack slot.
02662 
02663   bool HasParameterArea = !isELFv2ABI || isVarArg;
02664   unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
02665   unsigned NumBytes = LinkageSize;
02666   unsigned AvailableFPRs = Num_FPR_Regs;
02667   unsigned AvailableVRs = Num_VR_Regs;
02668   for (unsigned i = 0, e = Ins.size(); i != e; ++i)
02669     if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
02670                                PtrByteSize, LinkageSize, ParamAreaSize,
02671                                NumBytes, AvailableFPRs, AvailableVRs))
02672       HasParameterArea = true;
02673 
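  // Hedged illustration of the rule just computed: an ELFv2 callee such as
  //   long f(long a, long b)
  // sees every parameter in registers, so HasParameterArea stays false,
  // whereas a vararg signature or any argument that CalculateStackSlotUsed
  // assigns to memory guarantees the caller materialized the save area.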
02674   // Add DAG nodes to load the arguments or copy them out of registers.  On
02675   // entry to a function on PPC, the arguments start after the linkage area,
02676   // although the first ones are often in registers.
02677 
02678   unsigned ArgOffset = LinkageSize;
02679   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
02680   SmallVector<SDValue, 8> MemOps;
02681   Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
02682   unsigned CurArgIdx = 0;
02683   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
02684     SDValue ArgVal;
02685     bool needsLoad = false;
02686     EVT ObjectVT = Ins[ArgNo].VT;
02687     EVT OrigVT = Ins[ArgNo].ArgVT;
02688     unsigned ObjSize = ObjectVT.getStoreSize();
02689     unsigned ArgSize = ObjSize;
02690     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
02691     std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx);
02692     CurArgIdx = Ins[ArgNo].OrigArgIndex;
02693 
02694     // We re-align the argument offset for each argument, except when using the
02695     // fast calling convention, where we instead make sure to do so only when
02696     // we'll actually use a stack slot.
02697     unsigned CurArgOffset, Align;
02698     auto ComputeArgOffset = [&]() {
02699       /* Respect alignment of argument on the stack.  */
02700       Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
02701       ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
02702       CurArgOffset = ArgOffset;
02703     };
02704 
02705     if (CallConv != CallingConv::Fast) {
02706       ComputeArgOffset();
02707 
02708       /* Compute GPR index associated with argument offset.  */
02709       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
02710       GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
02711     }
02712 
02713     // FIXME the codegen can be much improved in some cases.
02714     // We do not have to keep everything in memory.
02715     if (Flags.isByVal()) {
02716       if (CallConv == CallingConv::Fast)
02717         ComputeArgOffset();
02718 
02719       // ObjSize is the true size; ArgSize is ObjSize rounded up to whole registers.
02720       ObjSize = Flags.getByValSize();
02721       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
02722       // Empty aggregate parameters do not take up registers.  Examples:
02723       //   struct { } a;
02724       //   union  { } b;
02725       //   int c[0];
02726       // etc.  However, we have to provide a place-holder in InVals, so
02727       // pretend we have an 8-byte item at the current address for that
02728       // purpose.
02729       if (!ObjSize) {
02730         int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
02731         SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
02732         InVals.push_back(FIN);
02733         continue;
02734       }
02735 
02736       // Create a stack object covering all stack doublewords occupied
02737       // by the argument.  If the argument is (fully or partially) on
02738       // the stack, or if the argument is fully in registers but the
02739       // caller has allocated the parameter save area anyway, we can refer
02740       // directly to the caller's stack frame.  Otherwise, create a
02741       // local copy in our own frame.
02742       int FI;
02743       if (HasParameterArea ||
02744           ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
02745         FI = MFI->CreateFixedObject(ArgSize, ArgOffset, false, true);
02746       else
02747         FI = MFI->CreateStackObject(ArgSize, Align, false);
02748       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
02749 
02750       // Handle aggregates smaller than 8 bytes.
02751       if (ObjSize < PtrByteSize) {
02752         // The value of the object is its address, which differs from the
02753         // address of the enclosing doubleword on big-endian systems.
02754         SDValue Arg = FIN;
02755         if (!isLittleEndian) {
02756           SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, PtrVT);
02757           Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
02758         }
02759         InVals.push_back(Arg);
02760 
02761         if (GPR_idx != Num_GPR_Regs) {
02762           unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
02763           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
02764           SDValue Store;
02765 
02766           if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
02767             EVT ObjType = (ObjSize == 1 ? MVT::i8 :
02768                            (ObjSize == 2 ? MVT::i16 : MVT::i32));
02769             Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
02770                                       MachinePointerInfo(FuncArg),
02771                                       ObjType, false, false, 0);
02772           } else {
02773             // For sizes that don't fit a truncating store (3, 5, 6, 7),
02774             // store the whole register as-is to the parameter save area
02775             // slot.
02776             Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
02777                                  MachinePointerInfo(FuncArg),
02778                                  false, false, 0);
02779           }
02780 
02781           MemOps.push_back(Store);
02782         }
02783         // Whether we copied from a register or not, advance the offset
02784         // into the parameter save area by a full doubleword.
02785         ArgOffset += PtrByteSize;
02786         continue;
02787       }
02788 
02789       // The value of the object is its address, which is the address of
02790       // its first stack doubleword.
02791       InVals.push_back(FIN);
02792 
02793       // Store whatever pieces of the object are in registers to memory.
02794       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
02795         if (GPR_idx == Num_GPR_Regs)
02796           break;
02797 
02798         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
02799         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
02800         SDValue Addr = FIN;
02801         if (j) {
02802           SDValue Off = DAG.getConstant(j, PtrVT);
02803           Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
02804         }
02805         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
02806                                      MachinePointerInfo(FuncArg, j),
02807                                      false, false, 0);
02808         MemOps.push_back(Store);
02809         ++GPR_idx;
02810       }
02811       ArgOffset += ArgSize;
02812       continue;
02813     }
02814 
02815     switch (ObjectVT.getSimpleVT().SimpleTy) {
02816     default: llvm_unreachable("Unhandled argument type!");
02817     case MVT::i1:
02818     case MVT::i32:
02819     case MVT::i64:
02820       // These can be scalar arguments or elements of an integer array type
02821       // passed directly.  Clang may use those instead of "byval" aggregate
02822       // types to avoid forcing arguments to memory unnecessarily.
02823       if (GPR_idx != Num_GPR_Regs) {
02824         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
02825         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
02826 
02827         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
02828           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
02829           // value to MVT::i64 and then truncate to the correct register size.
02830           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
02831       } else {
02832         if (CallConv == CallingConv::Fast)
02833           ComputeArgOffset();
02834 
02835         needsLoad = true;
02836         ArgSize = PtrByteSize;
02837       }
02838       if (CallConv != CallingConv::Fast || needsLoad)
02839         ArgOffset += 8;
02840       break;
02841 
02842     case MVT::f32:
02843     case MVT::f64:
02844       // These can be scalar arguments or elements of a float array type
02845       // passed directly.  The latter are used to implement ELFv2 homogeneous
02846       // float aggregates.
02847       if (FPR_idx != Num_FPR_Regs) {
02848         unsigned VReg;
02849 
02850         if (ObjectVT == MVT::f32)
02851           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
02852         else
02853           VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() ?
02854                                             &PPC::VSFRCRegClass :
02855                                             &PPC::F8RCRegClass);
02856 
02857         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
02858         ++FPR_idx;
02859       } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
02860         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
02861         // once we support fp <-> gpr moves.
02862 
02863         // This can only ever happen in the presence of f32 array types,
02864         // since otherwise we never run out of FPRs before running out
02865         // of GPRs.
02866         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
02867         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
02868 
02869         if (ObjectVT == MVT::f32) {
02870           if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
02871             ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
02872                                  DAG.getConstant(32, MVT::i32));
02873           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
02874         }
02875 
02876         ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
02877       } else {
02878         if (CallConv == CallingConv::Fast)
02879           ComputeArgOffset();
02880 
02881         needsLoad = true;
02882       }
02883 
02884       // When passing an array of floats, the array occupies consecutive
02885       // space in the argument area; only round up to the next doubleword
02886       // at the end of the array.  Otherwise, each float takes 8 bytes.
02887       if (CallConv != CallingConv::Fast || needsLoad) {
02888         ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
02889         ArgOffset += ArgSize;
02890         if (Flags.isInConsecutiveRegsLast())
02891           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
02892       }
02893       break;
02894     case MVT::v4f32:
02895     case MVT::v4i32:
02896     case MVT::v8i16:
02897     case MVT::v16i8:
02898     case MVT::v2f64:
02899     case MVT::v2i64:
02900       // These can be scalar arguments or elements of a vector array type
02901       // passed directly.  The latter are used to implement ELFv2 homogeneous
02902       // vector aggregates.
02903       if (VR_idx != Num_VR_Regs) {
02904         unsigned VReg = (ObjectVT == MVT::v2f64 || ObjectVT == MVT::v2i64) ?
02905                         MF.addLiveIn(VSRH[VR_idx], &PPC::VSHRCRegClass) :
02906                         MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
02907         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
02908         ++VR_idx;
02909       } else {
02910         if (CallConv == CallingConv::Fast)
02911           ComputeArgOffset();
02912 
02913         needsLoad = true;
02914       }
02915       if (CallConv != CallingConv::Fast || needsLoad)
02916         ArgOffset += 16;
02917       break;
02918     }
02919 
02920     // We need to load the argument to a virtual register if we determined
02921     // above that we ran out of physical registers of the appropriate type.
02922     if (needsLoad) {
02923       if (ObjSize < ArgSize && !isLittleEndian)
02924         CurArgOffset += ArgSize - ObjSize;
02925       int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
02926       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
02927       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
02928                            false, false, false, 0);
02929     }
02930 
02931     InVals.push_back(ArgVal);
02932   }
02933 
02934   // Area that is at least reserved in the caller of this function.
02935   unsigned MinReservedArea;
02936   if (HasParameterArea)
02937     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
02938   else
02939     MinReservedArea = LinkageSize;
02940 
02941   // Set the size that is at least reserved in the caller of this function.  Tail
02942   // call optimized functions' reserved stack space needs to be aligned so that
02943   // taking the difference between two stack areas will result in an aligned
02944   // stack.
02945   MinReservedArea = EnsureStackAlignment(MF.getTarget(), MinReservedArea);
02946   FuncInfo->setMinReservedArea(MinReservedArea);
02947 
02948   // If the function takes a variable number of arguments, make a frame index for
02949   // the start of the first vararg value... for expansion of llvm.va_start.
02950   if (isVarArg) {
02951     int Depth = ArgOffset;
02952 
02953     FuncInfo->setVarArgsFrameIndex(
02954       MFI->CreateFixedObject(PtrByteSize, Depth, true));
02955     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
02956 
02957     // If this function is vararg, store any remaining integer argument regs
02958     // to their spots on the stack so that they may be loaded by dereferencing the
02959     // result of va_next.
02960     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
02961          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
02962       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
02963       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
02964       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
02965                                    MachinePointerInfo(), false, false, 0);
02966       MemOps.push_back(Store);
02967       // Increment the address by eight for the next argument to store
02968       SDValue PtrOff = DAG.getConstant(PtrByteSize, PtrVT);
02969       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
02970     }
02971   }
02972 
02973   if (!MemOps.empty())
02974     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
02975 
02976   return Chain;
02977 }
02978 
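A hedged C-level example of the ELFv2 homogeneous-aggregate paths above (type
and function names are hypothetical): small float arrays travel in consecutive
FPRs instead of a byval stack slot, and isInConsecutiveRegs() /
isInConsecutiveRegsLast() let the lowering round the offset up only once, at
the end of the array.

    typedef struct { float v[4]; } FQuad;  // homogeneous float aggregate
    void take(FQuad q);                    // v[0]..v[3] arrive in f1..f4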
02979 SDValue
02980 PPCTargetLowering::LowerFormalArguments_Darwin(
02981                                       SDValue Chain,
02982                                       CallingConv::ID CallConv, bool isVarArg,
02983                                       const SmallVectorImpl<ISD::InputArg>
02984                                         &Ins,
02985                                       SDLoc dl, SelectionDAG &DAG,
02986                                       SmallVectorImpl<SDValue> &InVals) const {
02987   // TODO: add description of PPC stack frame format, or at least some docs.
02988   //
02989   MachineFunction &MF = DAG.getMachineFunction();
02990   MachineFrameInfo *MFI = MF.getFrameInfo();
02991   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
02992 
02993   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02994   bool isPPC64 = PtrVT == MVT::i64;
02995   // Potential tail calls could cause overwriting of argument stack slots.
02996   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
02997                        (CallConv == CallingConv::Fast));
02998   unsigned PtrByteSize = isPPC64 ? 8 : 4;
02999 
03000   unsigned LinkageSize = PPCFrameLowering::getLinkageSize(isPPC64, true,
03001                                                           false);
03002   unsigned ArgOffset = LinkageSize;
03003   // Area that is at least reserved in caller of this function.
03004   unsigned MinReservedArea = ArgOffset;
03005 
03006   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
03007     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
03008     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
03009   };
03010   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
03011     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
03012     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
03013   };
03014 
03015   static const MCPhysReg *FPR = GetFPR();
03016 
03017   static const MCPhysReg VR[] = {
03018     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
03019     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
03020   };
03021 
03022   const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
03023   const unsigned Num_FPR_Regs = 13;
03024   const unsigned Num_VR_Regs  = array_lengthof(VR);
03025 
03026   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
03027 
03028   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
03029 
03030   // In 32-bit non-varargs functions, the stack space for vectors is after the
03031   // stack space for non-vectors.  We do not use this space unless we have
03032   // too many vectors to fit in registers, something that only occurs in
03033   // constructed examples, but we have to walk the arglist to figure
03034   // that out.  For the pathological case, compute VecArgOffset as the
03035   // start of the vector parameter area.  Computing VecArgOffset is the
03036   // entire point of the following loop.
03037   unsigned VecArgOffset = ArgOffset;
03038   if (!isVarArg && !isPPC64) {
03039     for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
03040          ++ArgNo) {
03041       EVT ObjectVT = Ins[ArgNo].VT;
03042       ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
03043 
03044       if (Flags.isByVal()) {
03045         // ObjSize is the true size; ArgSize is ObjSize rounded up to whole regs.
03046         unsigned ObjSize = Flags.getByValSize();
03047         unsigned ArgSize =
03048                 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
03049         VecArgOffset += ArgSize;
03050         continue;
03051       }
03052 
03053       switch(ObjectVT.getSimpleVT().SimpleTy) {
03054       default: llvm_unreachable("Unhandled argument type!");
03055       case MVT::i1:
03056       case MVT::i32:
03057       case MVT::f32:
03058         VecArgOffset += 4;
03059         break;
03060       case MVT::i64:  // PPC64
03061       case MVT::f64:
03062         // FIXME: We are guaranteed to be !isPPC64 at this point.
03063         // Does MVT::i64 apply?
03064         VecArgOffset += 8;
03065         break;
03066       case MVT::v4f32:
03067       case MVT::v4i32:
03068       case MVT::v8i16:
03069       case MVT::v16i8:
03070         // Nothing to do, we're only looking at non-vector args here.
03071         break;
03072       }
03073     }
03074   }
03075   // We've found where the vector parameter area in memory is.  Skip over the
03076   // first 12 vector parameters; those are passed in registers and don't use it.
03077   VecArgOffset = ((VecArgOffset+15)/16)*16;
03078   VecArgOffset += 12*16;
03079 
03080   // Add DAG nodes to load the arguments or copy them out of registers.  On
03081   // entry to a function on PPC, the arguments start after the linkage area,
03082   // although the first ones are often in registers.
03083 
03084   SmallVector<SDValue, 8> MemOps;
03085   unsigned nAltivecParamsAtEnd = 0;
03086   Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
03087   unsigned CurArgIdx = 0;
03088   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
03089     SDValue ArgVal;
03090     bool needsLoad = false;
03091     EVT ObjectVT = Ins[ArgNo].VT;
03092     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
03093     unsigned ArgSize = ObjSize;
03094     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
03095     std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx);
03096     CurArgIdx = Ins[ArgNo].OrigArgIndex;
03097 
03098     unsigned CurArgOffset = ArgOffset;
03099 
03100     // Varargs or 64-bit Altivec parameters are padded to a 16-byte boundary.
03101     if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
03102         ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
03103       if (isVarArg || isPPC64) {
03104         MinReservedArea = ((MinReservedArea+15)/16)*16;
03105         MinReservedArea += CalculateStackSlotSize(ObjectVT,
03106                                                   Flags,
03107                                                   PtrByteSize);
03108       } else nAltivecParamsAtEnd++;
03109     } else
03110       // Calculate min reserved area.
03111       MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
03112                                                 Flags,
03113                                                 PtrByteSize);
03114 
03115     // FIXME the codegen can be much improved in some cases.
03116     // We do not have to keep everything in memory.
03117     if (Flags.isByVal()) {
03118       // ObjSize is the true size; ArgSize is ObjSize rounded up to whole registers.
03119       ObjSize = Flags.getByValSize();
03120       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
03121       // Objects of size 1 and 2 are right justified, everything else is
03122       // left justified.  This means the memory address is adjusted forwards.
03123       if (ObjSize==1 || ObjSize==2) {
03124         CurArgOffset = CurArgOffset + (4 - ObjSize);
03125       }
03126       // The value of the object is its address.
03127       int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, false, true);
03128       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
03129       InVals.push_back(FIN);
03130       if (ObjSize==1 || ObjSize==2) {
03131         if (GPR_idx != Num_GPR_Regs) {
03132           unsigned VReg;
03133           if (isPPC64)
03134             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
03135           else
03136             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
03137           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
03138           EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
03139           SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
03140                                             MachinePointerInfo(FuncArg),
03141                                             ObjType, false, false, 0);
03142           MemOps.push_back(Store);
03143           ++GPR_idx;
03144         }
03145 
03146         ArgOffset += PtrByteSize;
03147 
03148         continue;
03149       }
03150       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
03151         // Store whatever pieces of the object are in registers
03152         // to memory.  ArgOffset will be the address of the beginning
03153         // of the object.
03154         if (GPR_idx != Num_GPR_Regs) {
03155           unsigned VReg;
03156           if (isPPC64)
03157             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
03158           else
03159             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
03160           int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
03161           SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
03162           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
03163           SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
03164                                        MachinePointerInfo(FuncArg, j),
03165                                        false, false, 0);
03166           MemOps.push_back(Store);
03167           ++GPR_idx;
03168           ArgOffset += PtrByteSize;
03169         } else {
03170           ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
03171           break;
03172         }
03173       }
03174       continue;
03175     }
03176 
03177     switch (ObjectVT.getSimpleVT().SimpleTy) {
03178     default: llvm_unreachable("Unhandled argument type!");
03179     case MVT::i1:
03180     case MVT::i32:
03181       if (!isPPC64) {
03182         if (GPR_idx != Num_GPR_Regs) {
03183           unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
03184           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
03185 
03186           if (ObjectVT == MVT::i1)
03187             ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);
03188 
03189           ++GPR_idx;
03190         } else {
03191           needsLoad = true;
03192           ArgSize = PtrByteSize;
03193         }
03194         // All int arguments reserve stack space in the Darwin ABI.
03195         ArgOffset += PtrByteSize;
03196         break;
03197       }
03198       // FALLTHROUGH
03199     case MVT::i64:  // PPC64
03200       if (GPR_idx != Num_GPR_Regs) {
03201         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
03202         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
03203 
03204         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
03205           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
03206           // value to MVT::i64 and then truncate to the correct register size.
03207           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
03208 
03209         ++GPR_idx;
03210       } else {
03211         needsLoad = true;
03212         ArgSize = PtrByteSize;
03213       }
03214       // All int arguments reserve stack space in the Darwin ABI.
03215       ArgOffset += 8;
03216       break;
03217 
03218     case MVT::f32:
03219     case MVT::f64:
03220       // Every 4 bytes of argument space consumes one of the GPRs available for
03221       // argument passing.
03222       if (GPR_idx != Num_GPR_Regs) {
03223         ++GPR_idx;
03224         if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
03225           ++GPR_idx;
03226       }
03227       if (FPR_idx != Num_FPR_Regs) {
03228         unsigned VReg;
03229 
03230         if (ObjectVT == MVT::f32)
03231           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
03232         else
03233           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
03234 
03235         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
03236         ++FPR_idx;
03237       } else {
03238         needsLoad = true;
03239       }
03240 
03241       // All FP arguments reserve stack space in the Darwin ABI.
03242       ArgOffset += isPPC64 ? 8 : ObjSize;
03243       break;
03244     case MVT::v4f32:
03245     case MVT::v4i32:
03246     case MVT::v8i16:
03247     case MVT::v16i8:
03248       // Note that vector arguments in registers don't reserve stack space,
03249       // except in varargs functions.
03250       if (VR_idx != Num_VR_Regs) {
03251         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
03252         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
03253         if (isVarArg) {
03254           while ((ArgOffset % 16) != 0) {
03255             ArgOffset += PtrByteSize;
03256             if (GPR_idx != Num_GPR_Regs)
03257               GPR_idx++;
03258           }
03259           ArgOffset += 16;
03260           GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
03261         }
03262         ++VR_idx;
03263       } else {
03264         if (!isVarArg && !isPPC64) {
03265           // Vectors go after all the nonvectors.
03266           CurArgOffset = VecArgOffset;
03267           VecArgOffset += 16;
03268         } else {
03269           // Vectors are aligned.
03270           ArgOffset = ((ArgOffset+15)/16)*16;
03271           CurArgOffset = ArgOffset;
03272           ArgOffset += 16;
03273         }
03274         needsLoad = true;
03275       }
03276       break;
03277     }
03278 
03279     // We need to load the argument to a virtual register if we determined above
03280     // that we ran out of physical registers of the appropriate type.
03281     if (needsLoad) {
03282       int FI = MFI->CreateFixedObject(ObjSize,
03283                                       CurArgOffset + (ArgSize - ObjSize),
03284                                       isImmutable);
03285       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
03286       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
03287                            false, false, false, 0);
03288     }
03289 
03290     InVals.push_back(ArgVal);
03291   }
03292 
03293   // Allow for Altivec parameters at the end, if needed.
03294   if (nAltivecParamsAtEnd) {
03295     MinReservedArea = ((MinReservedArea+15)/16)*16;
03296     MinReservedArea += 16*nAltivecParamsAtEnd;
03297   }
03298 
03299   // Area that is at least reserved in the caller of this function.
03300   MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
03301 
03302   // Set the size that is at least reserved in the caller of this function.  Tail
03303   // call optimized functions' reserved stack space needs to be aligned so that
03304   // taking the difference between two stack areas will result in an aligned
03305   // stack.
03306   MinReservedArea = EnsureStackAlignment(MF.getTarget(), MinReservedArea);
03307   FuncInfo->setMinReservedArea(MinReservedArea);
03308 
03309   // If the function takes a variable number of arguments, make a frame index for
03310   // the start of the first vararg value... for expansion of llvm.va_start.
03311   if (isVarArg) {
03312     int Depth = ArgOffset;
03313 
03314     FuncInfo->setVarArgsFrameIndex(
03315       MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
03316                              Depth, true));
03317     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
03318 
03319     // If this function is vararg, store any remaining integer argument regs
03320     // to their spots on the stack so that they may be loaded by dereferencing the
03321     // result of va_next.
03322     for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
03323       unsigned VReg;
03324 
03325       if (isPPC64)
03326         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
03327       else
03328         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
03329 
03330       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
03331       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
03332                                    MachinePointerInfo(), false, false, 0);
03333       MemOps.push_back(Store);
03334       // Increment the address by the pointer size for the next argument to store
03335       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
03336       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
03337     }
03338   }
03339 
03340   if (!MemOps.empty())
03341     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
03342 
03343   return Chain;
03344 }
03345 
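A hedged contrast with the SVR4 paths (hypothetical signature): under the
Darwin ABI every scalar argument reserves parameter area space even while
travelling in a register, and a floating-point argument additionally consumes
the GPR(s) shadowing its slot, exactly as the extra GPR_idx bumps above
implement.

    void f(int a,     // r3, with a pointer-sized slot still reserved
           double b,  // f1, and r4 (plus r5 on 32-bit) are skipped
           int c);    // r6 on 32-bit: the shadowed GPRs are spent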
03346 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
03347 /// adjusted to accommodate the arguments for the tailcall.
03348 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
03349                                    unsigned ParamSize) {
03350 
03351   if (!isTailCall) return 0;
03352 
03353   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
03354   unsigned CallerMinReservedArea = FI->getMinReservedArea();
03355   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
03356   // Remember only if the new adjustment is bigger.
03357   if (SPDiff < FI->getTailCallSPDelta())
03358     FI->setTailCallSPDelta(SPDiff);
03359 
03360   return SPDiff;
03361 }
03362 
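A hedged worked example of the delta above (numbers are illustrative):

    //   CallerMinReservedArea = 112, ParamSize = 144
    //   SPDiff = 112 - 144 = -32   // callee needs 32 bytes more
    //   -32 < getTailCallSPDelta() ? record it : keep the old minimum

Only the most negative delta survives setTailCallSPDelta, since the prologue
must be sized for the largest tail-call argument area encountered.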
03363 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
03364 /// for tail call optimization. Targets that want to do tail call
03365 /// optimization should implement this function.
03366 bool
03367 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
03368                                                      CallingConv::ID CalleeCC,
03369                                                      bool isVarArg,
03370                                       const SmallVectorImpl<ISD::InputArg> &Ins,
03371                                                      SelectionDAG& DAG) const {
03372   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
03373     return false;
03374 
03375   // Variable argument functions are not supported.
03376   if (isVarArg)
03377     return false;
03378 
03379   MachineFunction &MF = DAG.getMachineFunction();
03380   CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
03381   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
03382     // Functions containing by val parameters are not supported.
03383     for (unsigned i = 0; i != Ins.size(); i++) {
03384        ISD::ArgFlagsTy Flags = Ins[i].Flags;
03385        if (Flags.isByVal()) return false;
03386     }
03387 
03388     // Non-PIC/GOT tail calls are supported.
03389     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
03390       return true;
03391 
03392     // At the moment we can only do local tail calls (in the same module, hidden
03393     // or protected) if we are generating PIC.
03394     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
03395       return G->getGlobal()->hasHiddenVisibility()
03396           || G->getGlobal()->hasProtectedVisibility();
03397   }
03398 
03399   return false;
03400 }
03401 
03402 /// isBLACompatibleAddress - Return the immediate to use if the specified
03403 /// 32-bit value is representable in the immediate field of a BxA instruction.
03404 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
03405   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
03406   if (!C) return nullptr;
03407 
03408   int Addr = C->getZExtValue();
03409   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
03410       SignExtend32<26>(Addr) != Addr)
03411     return nullptr;  // Top 6 bits have to be sext of immediate.
03412 
03413   return DAG.getConstant((int)C->getZExtValue() >> 2,
03414                          DAG.getTargetLoweringInfo().getPointerTy()).getNode();
03415 }
03416 
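The encodability test above reads in isolation; a minimal standalone sketch
using the same llvm::SignExtend32 helper from llvm/Support/MathExtras.h
(already #included by this file):

    // An absolute target fits the BLA immediate iff it is 4-byte aligned
    // and its value survives sign-extension from bit 25 (26-bit field).
    static bool fitsBLAImmediate(int Addr) {
      return (Addr & 3) == 0 && llvm::SignExtend32<26>(Addr) == Addr;
    }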
03417 namespace {
03418 
03419 struct TailCallArgumentInfo {
03420   SDValue Arg;
03421   SDValue FrameIdxOp;
03422   int       FrameIdx;
03423 
03424   TailCallArgumentInfo() : FrameIdx(0) {}
03425 };
03426 
03427 }
03428 
03429 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slots.
03430 static void
03431 StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
03432                                            SDValue Chain,
03433                    const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
03434                    SmallVectorImpl<SDValue> &MemOpChains,
03435                    SDLoc dl) {
03436   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
03437     SDValue Arg = TailCallArgs[i].Arg;
03438     SDValue FIN = TailCallArgs[i].FrameIdxOp;
03439     int FI = TailCallArgs[i].FrameIdx;
03440     // Store relative to the frame pointer.
03441     MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, FIN,
03442                                        MachinePointerInfo::getFixedStack(FI),
03443                                        false, false, 0));
03444   }
03445 }
03446 
03447 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
03448 /// the appropriate stack slot for the tail call optimized function call.
03449 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
03450                                                MachineFunction &MF,
03451                                                SDValue Chain,
03452                                                SDValue OldRetAddr,
03453                                                SDValue OldFP,
03454                                                int SPDiff,
03455                                                bool isPPC64,
03456                                                bool isDarwinABI,
03457                                                SDLoc dl) {
03458   if (SPDiff) {
03459     // Calculate the new stack slot for the return address.
03460     int SlotSize = isPPC64 ? 8 : 4;
03461     int NewRetAddrLoc = SPDiff + PPCFrameLowering::getReturnSaveOffset(isPPC64,
03462                                                                    isDarwinABI);
03463     int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize,
03464                                                           NewRetAddrLoc, true);
03465     EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
03466     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
03467     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
03468                          MachinePointerInfo::getFixedStack(NewRetAddr),
03469                          false, false, 0);
03470 
03471     // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
03472     // slot as the FP is never overwritten.
03473     if (isDarwinABI) {
03474       int NewFPLoc =
03475         SPDiff + PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI);
03476       int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc,
03477                                                           true);
03478       SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
03479       Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
03480                            MachinePointerInfo::getFixedStack(NewFPIdx),
03481                            false, false, 0);
03482     }
03483   }
03484   return Chain;
03485 }
03486 
03487 /// CalculateTailCallArgDest - Remember the argument for later processing, and
03488 /// calculate the position of the argument.
03489 static void
03490 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
03491                          SDValue Arg, int SPDiff, unsigned ArgOffset,
03492                      SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
03493   int Offset = ArgOffset + SPDiff;
03494   uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
03495   int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
03496   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
03497   SDValue FIN = DAG.getFrameIndex(FI, VT);
03498   TailCallArgumentInfo Info;
03499   Info.Arg = Arg;
03500   Info.FrameIdxOp = FIN;
03501   Info.FrameIdx = FI;
03502   TailCallArguments.push_back(Info);
03503 }
03504 
03505 /// EmitTailCallLoadFPAndRetAddr - Emit loads from the frame pointer and
03506 /// return address stack slots. Returns the chain as result and the loaded
03507 /// values in LROpOut/FPOpOut. Used when tail calling.
03508 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
03509                                                         int SPDiff,
03510                                                         SDValue Chain,
03511                                                         SDValue &LROpOut,
03512                                                         SDValue &FPOpOut,
03513                                                         bool isDarwinABI,
03514                                                         SDLoc dl) const {
03515   if (SPDiff) {
03516     // Load the LR and FP stack slot for later adjusting.
03517     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
03518     LROpOut = getReturnAddrFrameIndex(DAG);
03519     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(),
03520                           false, false, false, 0);
03521     Chain = SDValue(LROpOut.getNode(), 1);
03522 
03523     // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
03524     // slot as the FP is never overwritten.
03525     if (isDarwinABI) {
03526       FPOpOut = getFramePointerFrameIndex(DAG);
03527       FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(),
03528                             false, false, false, 0);
03529       Chain = SDValue(FPOpOut.getNode(), 1);
03530     }
03531   }
03532   return Chain;
03533 }
03534 
03535 /// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
03536 /// specified by "Src" to address "Dst" of size "Size".  Alignment information
03537 /// is specified by the byval parameter attribute. The copy will be passed as
03538 /// a byval function parameter.
03539 /// Sometimes what we are copying is the end of a larger object, the part that
03540 /// does not fit in registers.
03541 static SDValue
03542 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
03543                           ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
03544                           SDLoc dl) {
03545   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
03546   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
03547                        false, false, MachinePointerInfo(),
03548                        MachinePointerInfo());
03549 }
03550 
03551 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
03552 /// tail calls.
03553 static void
03554 LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
03555                  SDValue Arg, SDValue PtrOff, int SPDiff,
03556                  unsigned ArgOffset, bool isPPC64, bool isTailCall,
03557                  bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
03558                  SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments,
03559                  SDLoc dl) {
03560   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
03561   if (!isTailCall) {
03562     if (isVector) {
03563       SDValue StackPtr;
03564       if (isPPC64)
03565         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
03566       else
03567         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
03568       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
03569                            DAG.getConstant(ArgOffset, PtrVT));
03570     }
03571     MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
03572                                        MachinePointerInfo(), false, false, 0));
03573   // Calculate and remember argument location.
03574   } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
03575                                   TailCallArguments);
03576 }
03577 
03578 static
03579 void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
03580                      SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes,
03581                      SDValue LROp, SDValue FPOp, bool isDarwinABI,
03582                      SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
03583   MachineFunction &MF = DAG.getMachineFunction();
03584 
03585   // Emit a sequence of copyto/copyfrom virtual registers for arguments that
03586   // might overwrite each other in case of tail call optimization.
03587   SmallVector<SDValue, 8> MemOpChains2;
03588   // Do not glue the preceding copytoreg nodes together with the following ops.
03589   InFlag = SDValue();
03590   StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
03591                                     MemOpChains2, dl);
03592   if (!MemOpChains2.empty())
03593     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
03594 
03595   // Store the return address to the appropriate stack slot.
03596   Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff,
03597                                         isPPC64, isDarwinABI, dl);
03598 
03599   // Emit callseq_end just before tailcall node.
03600   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
03601                              DAG.getIntPtrConstant(0, true), InFlag, dl);
03602   InFlag = Chain.getValue(1);
03603 }
03604 
03605 // Is this global address that of a function that can be called by name (as
03606 // opposed to something that must hold a descriptor for an indirect call)?
03607 static bool isFunctionGlobalAddress(SDValue Callee) {
03608   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
03609     if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
03610         Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
03611       return false;
03612 
03613     return G->getGlobal()->getType()->getElementType()->isFunctionTy();
03614   }
03615 
03616   return false;
03617 }
03618 
03619 static
03620 unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
03621                      SDValue &Chain, SDValue CallSeqStart, SDLoc dl, int SPDiff,
03622                      bool isTailCall, bool IsPatchPoint,
03623                      SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass,
03624                      SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
03625                      ImmutableCallSite *CS, const PPCSubtarget &Subtarget) {
03626 
03627   bool isPPC64 = Subtarget.isPPC64();
03628   bool isSVR4ABI = Subtarget.isSVR4ABI();
03629   bool isELFv2ABI = Subtarget.isELFv2ABI();
03630 
03631   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
03632   NodeTys.push_back(MVT::Other);   // Returns a chain
03633   NodeTys.push_back(MVT::Glue);    // Returns a flag for retval copy to use.
03634 
03635   unsigned CallOpc = PPCISD::CALL;
03636 
03637   bool needIndirectCall = true;
03638   if (!isSVR4ABI || !isPPC64)
03639     if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) {
03640       // If this is an absolute destination address, use the munged value.
03641       Callee = SDValue(Dest, 0);
03642       needIndirectCall = false;
03643     }
03644 
03645   if (isFunctionGlobalAddress(Callee)) {
03646     GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
03647     // A call to a TLS address is actually an indirect call to a
03648     // thread-specific pointer.
03649     unsigned OpFlags = 0;
03650     if ((DAG.getTarget().getRelocationModel() != Reloc::Static &&
03651          (Subtarget.getTargetTriple().isMacOSX() &&
03652           Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5)) &&
03653          (G->getGlobal()->isDeclaration() ||
03654           G->getGlobal()->isWeakForLinker())) ||
03655         (Subtarget.isTargetELF() && !isPPC64 &&
03656          !G->getGlobal()->hasLocalLinkage() &&
03657          DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
03658       // PC-relative references to external symbols should go through $stub,
03659       // unless we're building with the leopard linker or later, which
03660       // automatically synthesizes these stubs.
03661       OpFlags = PPCII::MO_PLT_OR_STUB;
03662     }
03663 
03664     // If the callee is a GlobalAddress/ExternalSymbol node (quite common, as
03665     // every direct call is), turn it into a TargetGlobalAddress /
03666     // TargetExternalSymbol node so that legalization doesn't touch it.
03667     Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
03668                                         Callee.getValueType(), 0, OpFlags);
03669     needIndirectCall = false;
03670   }
03671 
03672   if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
03673     unsigned char OpFlags = 0;
03674 
03675     if ((DAG.getTarget().getRelocationModel() != Reloc::Static &&
03676          (Subtarget.getTargetTriple().isMacOSX() &&
03677           Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5))) ||
03678         (Subtarget.isTargetELF() && !isPPC64 &&
03679          DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
03680       // PC-relative references to external symbols should go through $stub,
03681       // unless we're building with the leopard linker or later, which
03682       // automatically synthesizes these stubs.
03683       OpFlags = PPCII::MO_PLT_OR_STUB;
03684     }
03685 
03686     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(),
03687                                          OpFlags);
03688     needIndirectCall = false;
03689   }
03690 
03691   if (IsPatchPoint) {
03692     // We'll form an invalid direct call when lowering a patchpoint; the full
03693     // sequence for an indirect call is complicated, and many of the
03694     // instructions introduced might have side effects (and, thus, can't be
03695     // removed later). The call itself will be removed as soon as the
03696     // argument/return lowering is complete, so the fact that it has the wrong
03697     // kind of operands should not really matter.
03698     needIndirectCall = false;
03699   }
03700 
03701   if (needIndirectCall) {
03702     // Otherwise, this is an indirect call.  We have to use a MTCTR/BCTRL pair
03703     // to do the call; we can't use PPCISD::CALL.
03704     SDValue MTCTROps[] = {Chain, Callee, InFlag};
03705 
03706     if (isSVR4ABI && isPPC64 && !isELFv2ABI) {
03707       // Function pointers in the 64-bit SVR4 ABI do not point to the function
03708       // entry point, but to the function descriptor (the function entry point
03709       // address is part of the function descriptor though).
03710       // The function descriptor is a three doubleword structure with the
03711       // following fields: function entry point, TOC base address and
03712       // environment pointer.
03713       // Thus for a call through a function pointer, the following actions need
03714       // to be performed:
03715       //   1. Save the TOC of the caller in the TOC save area of its stack
03716       //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
03717       //   2. Load the address of the function entry point from the function
03718       //      descriptor.
03719       //   3. Load the TOC of the callee from the function descriptor into r2.
03720       //   4. Load the environment pointer from the function descriptor into
03721       //      r11.
03722       //   5. Branch to the function entry point address.
03723       //   6. On return of the callee, the TOC of the caller needs to be
03724       //      restored (this is done in FinishCall()).
03725       //
03726       // The loads are scheduled at the beginning of the call sequence, and the
03727       // register copies are flagged together to ensure that no other
03728       // operations can be scheduled in between. E.g. without flagging the
03729       // copies together, a TOC access in the caller could be scheduled between
03730       // the assignment of the callee TOC and the branch to the callee, which
03731       // results in the TOC access going through the TOC of the callee instead
03732       // of going through the TOC of the caller, which leads to incorrect code.
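      //
      // For illustration only, an ELFv1 function descriptor can be pictured
      // as this doubleword triple (the struct name is hypothetical; nothing
      // in the lowering depends on it):
      //
      //   struct FuncDescriptor {
      //     uint64_t EntryPoint; // offset 0:  first instruction of callee
      //     uint64_t TOCBase;    // offset 8:  callee's TOC, loaded into r2
      //     uint64_t EnvPtr;     // offset 16: environment pointer, into r11
      //   };
      //
      // The offsets 0, 8 and 16 used by the three loads below correspond to
      // these fields.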
03733 
03734       // Load the address of the function entry point from the function
03735       // descriptor.
03736       SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1);
03737       if (LDChain.getValueType() == MVT::Glue)
03738         LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2);
03739 
03740       bool LoadsInv = Subtarget.hasInvariantFunctionDescriptors();
03741 
03742       MachinePointerInfo MPI(CS ? CS->getCalledValue() : nullptr);
03743       SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI,
03744                                         false, false, LoadsInv, 8);
03745 
03746       // Load environment pointer into r11.
03747       SDValue PtrOff = DAG.getIntPtrConstant(16);
03748       SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff);
03749       SDValue LoadEnvPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddPtr,
03750                                        MPI.getWithOffset(16), false, false,
03751                                        LoadsInv, 8);
03752 
03753       SDValue TOCOff = DAG.getIntPtrConstant(8);
03754       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff);
03755       SDValue TOCPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddTOC,
03756                                    MPI.getWithOffset(8), false, false,
03757                                    LoadsInv, 8);
03758 
03759       SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr,
03760                                         InFlag);
03761       Chain = TOCVal.getValue(0);
03762       InFlag = TOCVal.getValue(1);
03763 
03764       SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr,
03765                                         InFlag);
03766 
03767       Chain = EnvVal.getValue(0);
03768       InFlag = EnvVal.getValue(1);
03769 
03770       MTCTROps[0] = Chain;
03771       MTCTROps[1] = LoadFuncPtr;
03772       MTCTROps[2] = InFlag;
03773     }
03774 
03775     Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys,
03776                         makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2));
03777     InFlag = Chain.getValue(1);
03778 
03779     NodeTys.clear();
03780     NodeTys.push_back(MVT::Other);
03781     NodeTys.push_back(MVT::Glue);
03782     Ops.push_back(Chain);
03783     CallOpc = PPCISD::BCTRL;
03784     Callee.setNode(nullptr);
03785     // Add use of X11 (holding environment pointer)
03786     if (isSVR4ABI && isPPC64 && !isELFv2ABI)
03787       Ops.push_back(DAG.getRegister(PPC::X11, PtrVT));
03788     // Add CTR register as callee so a bctr can be emitted later.
03789     if (isTailCall)
03790       Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT));
03791   }
03792 
03793   // If this is a direct call, pass the chain and the callee.
03794   if (Callee.getNode()) {
03795     Ops.push_back(Chain);
03796     Ops.push_back(Callee);
03797 
03798     // If this is a call to __tls_get_addr, find the symbol whose address
03799     // is to be taken and add it to the list.  This will be used to 
03800     // generate __tls_get_addr(<sym>@tlsgd) or __tls_get_addr(<sym>@tlsld).
03801     // We find the symbol by walking the chain to the CopyFromReg, walking
03802     // back from the CopyFromReg to the ADDI_TLSGD_L or ADDI_TLSLD_L, and
03803     // pulling the symbol from that node.
03804     if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
03805       if (!strcmp(S->getSymbol(), "__tls_get_addr")) {
03806         assert(!needIndirectCall && "Indirect call to __tls_get_addr???");
03807         SDNode *AddI = Chain.getNode()->getOperand(2).getNode();
03808         SDValue TGTAddr = AddI->getOperand(1);
03809         assert(TGTAddr.getNode()->getOpcode() == ISD::TargetGlobalTLSAddress &&
03810                "Didn't find target global TLS address where we expected one");
03811         Ops.push_back(TGTAddr);
03812         CallOpc = PPCISD::CALL_TLS;
03813       }
03814   }
03815   // If this is a tail call, add the stack pointer delta.
03816   if (isTailCall)
03817     Ops.push_back(DAG.getConstant(SPDiff, MVT::i32));
03818 
03819   // Add argument registers to the end of the list so that they are known live
03820   // into the call.
03821   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
03822     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
03823                                   RegsToPass[i].second.getValueType()));
03824 
03825   // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live
03826   // into the call.
03827   if (isSVR4ABI && isPPC64 && !IsPatchPoint)
03828     Ops.push_back(DAG.getRegister(PPC::X2, PtrVT));
03829 
03830   return CallOpc;
03831 }
03832 
03833 static
03834 bool isLocalCall(const SDValue &Callee)
03835 {
03836   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
03837     return !G->getGlobal()->isDeclaration() &&
03838            !G->getGlobal()->isWeakForLinker();
03839   return false;
03840 }
03841 
03842 SDValue
03843 PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
03844                                    CallingConv::ID CallConv, bool isVarArg,
03845                                    const SmallVectorImpl<ISD::InputArg> &Ins,
03846                                    SDLoc dl, SelectionDAG &DAG,
03847                                    SmallVectorImpl<SDValue> &InVals) const {
03848 
03849   SmallVector<CCValAssign, 16> RVLocs;
03850   CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
03851                     *DAG.getContext());
03852   CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC);
03853 
03854   // Copy all of the result registers out of their specified physreg.
03855   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
03856     CCValAssign &VA = RVLocs[i];
03857     assert(VA.isRegLoc() && "Can only return in registers!");
03858 
03859     SDValue Val = DAG.getCopyFromReg(Chain, dl,
03860                                      VA.getLocReg(), VA.getLocVT(), InFlag);
03861     Chain = Val.getValue(1);
03862     InFlag = Val.getValue(2);
03863 
03864     switch (VA.getLocInfo()) {
03865     default: llvm_unreachable("Unknown loc info!");
03866     case CCValAssign::Full: break;
03867     case CCValAssign::AExt:
03868       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
03869       break;
03870     case CCValAssign::ZExt:
03871       Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
03872                         DAG.getValueType(VA.getValVT()));
03873       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
03874       break;
03875     case CCValAssign::SExt:
03876       Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
03877                         DAG.getValueType(VA.getValVT()));
03878       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
03879       break;
03880     }
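    // For illustration: an i8 result returned sign-extended in an i32
    // register (e.g. on PPC32) arrives here as AssertSext(CopyFromReg, i8)
    // followed by a TRUNCATE back to i8; the AssertSext records that the
    // wider value is already sign-extended, so the truncation costs nothing.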
03881 
03882     InVals.push_back(Val);
03883   }
03884 
03885   return Chain;
03886 }
03887 
03888 SDValue
03889 PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
03890                               bool isTailCall, bool isVarArg, bool IsPatchPoint,
03891                               SelectionDAG &DAG,
03892                               SmallVector<std::pair<unsigned, SDValue>, 8>
03893                                 &RegsToPass,
03894                               SDValue InFlag, SDValue Chain,
03895                               SDValue CallSeqStart, SDValue &Callee,
03896                               int SPDiff, unsigned NumBytes,
03897                               const SmallVectorImpl<ISD::InputArg> &Ins,
03898                               SmallVectorImpl<SDValue> &InVals,
03899                               ImmutableCallSite *CS) const {
03900 
03901   bool isELFv2ABI = Subtarget.isELFv2ABI();
03902   std::vector<EVT> NodeTys;
03903   SmallVector<SDValue, 8> Ops;
03904   unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl,
03905                                  SPDiff, isTailCall, IsPatchPoint, RegsToPass,
03906                                  Ops, NodeTys, CS, Subtarget);
03907 
03908   // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
03909   if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
03910     Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
03911 
03912   // When performing tail call optimization, the callee pops its arguments off
03913   // the stack. Account for this here so these bytes can be pushed back on in
03914   // PPCFrameLowering::eliminateCallFramePseudoInstr.
03915   int BytesCalleePops =
03916     (CallConv == CallingConv::Fast &&
03917      getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0;
03918 
03919   // Add a register mask operand representing the call-preserved registers.
03920   const TargetRegisterInfo *TRI =
03921       getTargetMachine().getSubtargetImpl()->getRegisterInfo();
03922   const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
03923   assert(Mask && "Missing call preserved mask for calling convention");
03924   Ops.push_back(DAG.getRegisterMask(Mask));
03925 
03926   if (InFlag.getNode())
03927     Ops.push_back(InFlag);
03928 
03929   // Emit tail call.
03930   if (isTailCall) {
03931     assert(((Callee.getOpcode() == ISD::Register &&
03932              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
03933             Callee.getOpcode() == ISD::TargetExternalSymbol ||
03934             Callee.getOpcode() == ISD::TargetGlobalAddress ||
03935             isa<ConstantSDNode>(Callee)) &&
03936     "Expecting an global address, external symbol, absolute value or register");
03937 
03938     return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
03939   }
03940 
03941   // Add a NOP immediately after the branch instruction when using the 64-bit
03942   // SVR4 ABI. At link time, if caller and callee are in a different module and
03943   // thus have a different TOC, the call will be replaced with a call to a stub
03944   // function which saves the current TOC, loads the TOC of the callee and
03945   // branches to the callee. The NOP will be replaced with a load instruction
03946   // which restores the TOC of the caller from the TOC save slot of the current
03947   // stack frame. If caller and callee belong to the same module (and have the
03948   // same TOC), the NOP will remain unchanged.
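  //
  // A rough sketch of the resulting code (illustrative; the actual TOC save
  // offset comes from PPCFrameLowering::getTOCSaveOffset()):
  //
  //   bl callee                 # may be redirected to a TOC-switching stub
  //   nop                       # rewritten to "ld r2, <toc-save>(r1)" when
  //                             # the callee lives in another module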
03949 
03950   if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64() &&
03951       !IsPatchPoint) {
03952     if (CallOpc == PPCISD::BCTRL) {
03953       // This is a call through a function pointer.
03954       // Restore the caller TOC from the save area into R2.
03955       // See PrepareCall() for more information about calls through function
03956       // pointers in the 64-bit SVR4 ABI.
03957       // We are using a target-specific load with r2 hard coded, because the
03958       // result of a target-independent load would never go directly into r2,
03959       // since r2 is a reserved register (which prevents the register allocator
03960       // from allocating it), resulting in an additional register being
03961       // allocated and an unnecessary move instruction being generated.
03962       CallOpc = PPCISD::BCTRL_LOAD_TOC;
03963 
03964       EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
03965       SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
03966       unsigned TOCSaveOffset = PPCFrameLowering::getTOCSaveOffset(isELFv2ABI);
03967       SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset);
03968       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);
03969 
03970       // The address needs to go after the chain input but before the flag (or
03971       // any other variadic arguments).
03972       Ops.insert(std::next(Ops.begin()), AddTOC);
03973     } else if ((CallOpc == PPCISD::CALL) &&
03974                (!isLocalCall(Callee) ||
03975                 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
03976       // Otherwise insert NOP for non-local calls.
03977       CallOpc = PPCISD::CALL_NOP;
03978     } else if (CallOpc == PPCISD::CALL_TLS)
03979       // For 64-bit SVR4, TLS calls are always non-local.
03980       CallOpc = PPCISD::CALL_NOP_TLS;
03981   }
03982 
03983   Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
03984   InFlag = Chain.getValue(1);
03985 
03986   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
03987                              DAG.getIntPtrConstant(BytesCalleePops, true),
03988                              InFlag, dl);
03989   if (!Ins.empty())
03990     InFlag = Chain.getValue(1);
03991 
03992   return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
03993                          Ins, dl, DAG, InVals);
03994 }
03995 
03996 SDValue
03997 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
03998                              SmallVectorImpl<SDValue> &InVals) const {
03999   SelectionDAG &DAG                     = CLI.DAG;
04000   SDLoc &dl                             = CLI.DL;
04001   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
04002   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
04003   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
04004   SDValue Chain                         = CLI.Chain;
04005   SDValue Callee                        = CLI.Callee;
04006   bool &isTailCall                      = CLI.IsTailCall;
04007   CallingConv::ID CallConv              = CLI.CallConv;
04008   bool isVarArg                         = CLI.IsVarArg;
04009   bool IsPatchPoint                     = CLI.IsPatchPoint;
04010   ImmutableCallSite *CS                 = CLI.CS;
04011 
04012   if (isTailCall)
04013     isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
04014                                                    Ins, DAG);
04015 
04016   if (!isTailCall && CS && CS->isMustTailCall())
04017     report_fatal_error("failed to perform tail call elimination on a call "
04018                        "site marked musttail");
04019 
04020   if (Subtarget.isSVR4ABI()) {
04021     if (Subtarget.isPPC64())
04022       return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
04023                               isTailCall, IsPatchPoint, Outs, OutVals, Ins,
04024                               dl, DAG, InVals, CS);
04025     else
04026       return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
04027                               isTailCall, IsPatchPoint, Outs, OutVals, Ins,
04028                               dl, DAG, InVals, CS);
04029   }
04030 
04031   return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
04032                           isTailCall, IsPatchPoint, Outs, OutVals, Ins,
04033                           dl, DAG, InVals, CS);
04034 }
04035 
04036 SDValue
04037 PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
04038                                     CallingConv::ID CallConv, bool isVarArg,
04039                                     bool isTailCall, bool IsPatchPoint,
04040                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
04041                                     const SmallVectorImpl<SDValue> &OutVals,
04042                                     const SmallVectorImpl<ISD::InputArg> &Ins,
04043                                     SDLoc dl, SelectionDAG &DAG,
04044                                     SmallVectorImpl<SDValue> &InVals,
04045                                     ImmutableCallSite *CS) const {
04046   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
04047   // of the 32-bit SVR4 ABI stack frame layout.
04048 
04049   assert((CallConv == CallingConv::C ||
04050           CallConv == CallingConv::Fast) && "Unknown calling convention!");
04051 
04052   unsigned PtrByteSize = 4;
04053 
04054   MachineFunction &MF = DAG.getMachineFunction();
04055 
04056   // Mark this function as potentially containing a tail call. As a
04057   // consequence, the frame pointer will be used for dynamic alloca and for
04058   // restoring the caller's stack pointer in this function's epilog. This is
04059   // done because the tail-called function might overwrite the value in this
04060   // function's (MF) stack pointer stack slot 0(SP).
04061   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
04062       CallConv == CallingConv::Fast)
04063     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
04064 
04065   // Count how many bytes are to be pushed on the stack, including the linkage
04066   // area, parameter list area and the part of the local variable space which
04067   // contains copies of aggregates which are passed by value.
04068 
04069   // Assign locations to all of the outgoing arguments.
04070   SmallVector<CCValAssign, 16> ArgLocs;
04071   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
04072                  *DAG.getContext());
04073 
04074   // Reserve space for the linkage area on the stack.
04075   CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false, false),
04076                        PtrByteSize);
04077 
04078   if (isVarArg) {
04079     // Handle fixed and variable vector arguments differently.
04080     // Fixed vector arguments go into registers as long as registers are
04081     // available. Variable vector arguments always go into memory.
04082     unsigned NumArgs = Outs.size();
04083 
04084     for (unsigned i = 0; i != NumArgs; ++i) {
04085       MVT ArgVT = Outs[i].VT;
04086       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
04087       bool Result;
04088 
04089       if (Outs[i].IsFixed) {
04090         Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
04091                                CCInfo);
04092       } else {
04093         Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
04094                                       ArgFlags, CCInfo);
04095       }
04096 
04097       if (Result) {
04098 #ifndef NDEBUG
04099         errs() << "Call operand #" << i << " has unhandled type "
04100                << EVT(ArgVT).getEVTString() << "\n";
04101 #endif
04102         llvm_unreachable(nullptr);
04103       }
04104     }
04105   } else {
04106     // All arguments are treated the same.
04107     CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
04108   }
04109 
04110   // Assign locations to all of the outgoing aggregate by value arguments.
04111   SmallVector<CCValAssign, 16> ByValArgLocs;
04112   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
04113                       ByValArgLocs, *DAG.getContext());
04114 
04115   // Reserve stack space for the allocations in CCInfo.
04116   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
04117 
04118   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
04119 
04120   // Size of the linkage area, parameter list area and the part of the local
04121   // variable space where copies of aggregates which are passed by value are
04122   // stored.
04123   unsigned NumBytes = CCByValInfo.getNextStackOffset();
04124 
04125   // Calculate by how many bytes the stack has to be adjusted in case of tail
04126   // call optimization.
04127   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
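  // For illustration (assuming the usual convention that the diff is the
  // caller's reserved argument area minus the callee's): tail calling a
  // function that needs 64 bytes of argument space from one that reserved
  // only 32 gives SPDiff = 32 - 64 = -32, so the stack must be grown by 32
  // bytes.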
04128 
04129   // Adjust the stack pointer for the new arguments...
04130   // These operations are automatically eliminated by the prolog/epilog pass
04131   Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
04132                                dl);
04133   SDValue CallSeqStart = Chain;
04134 
04135   // Load the return address and frame pointer so they can be moved somewhere
04136   // else later.
04137   SDValue LROp, FPOp;
04138   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false,
04139                                        dl);
04140 
04141   // Set up a copy of the stack pointer for use loading and storing any
04142   // arguments that may not fit in the registers available for argument
04143   // passing.
04144   SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
04145 
04146   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
04147   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
04148   SmallVector<SDValue, 8> MemOpChains;
04149 
04150   bool seenFloatArg = false;
04151   // Walk the register/memloc assignments, inserting copies/loads.
04152   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
04155     CCValAssign &VA = ArgLocs[i];
04156     SDValue Arg = OutVals[i];
04157     ISD::ArgFlagsTy Flags = Outs[i].Flags;
04158 
04159     if (Flags.isByVal()) {
04160       // Argument is an aggregate which is passed by value, thus we need to
04161       // create a copy of it in the local variable space of the current stack
04162       // frame (which is the stack frame of the caller) and pass the address of
04163       // this copy to the callee.
04164       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
04165       CCValAssign &ByValVA = ByValArgLocs[j++];
04166       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
04167 
04168       // Memory reserved in the local variable space of the caller's stack frame.
04169       unsigned LocMemOffset = ByValVA.getLocMemOffset();
04170 
04171       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
04172       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
04173 
04174       // Create a copy of the argument in the local area of the current
04175       // stack frame.
04176       SDValue MemcpyCall =
04177         CreateCopyOfByValArgument(Arg, PtrOff,
04178                                   CallSeqStart.getNode()->getOperand(0),
04179                                   Flags, DAG, dl);
04180 
04181       // This must go outside the CALLSEQ_START..END.
04182       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
04183                            CallSeqStart.getNode()->getOperand(1),
04184                            SDLoc(MemcpyCall));
04185       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
04186                              NewCallSeqStart.getNode());
04187       Chain = CallSeqStart = NewCallSeqStart;
04188 
04189       // Pass the address of the aggregate copy on the stack either in a
04190       // physical register or in the parameter list area of the current stack
04191       // frame to the callee.
04192       Arg = PtrOff;
04193     }
04194 
04195     if (VA.isRegLoc()) {
04196       if (Arg.getValueType() == MVT::i1)
04197         Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg);
04198 
04199       seenFloatArg |= VA.getLocVT().isFloatingPoint();
04200       // Put argument in a physical register.
04201       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
04202     } else {
04203       // Put argument in the parameter list area of the current stack frame.
04204       assert(VA.isMemLoc());
04205       unsigned LocMemOffset = VA.getLocMemOffset();
04206 
04207       if (!isTailCall) {
04208         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
04209         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
04210 
04211         MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
04212                                            MachinePointerInfo(),
04213                                            false, false, 0));
04214       } else {
04215         // Calculate and remember argument location.
04216         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
04217                                  TailCallArguments);
04218       }
04219     }
04220   }
04221 
04222   if (!MemOpChains.empty())
04223     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
04224 
04225   // Build a sequence of copy-to-reg nodes chained together with token chain
04226   // and flag operands which copy the outgoing args into the appropriate regs.
04227   SDValue InFlag;
04228   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
04229     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
04230                              RegsToPass[i].second, InFlag);
04231     InFlag = Chain.getValue(1);
04232   }
04233 
04234   // Set CR bit 6 to true if this is a vararg call with floating args passed in
04235   // registers.
04236   if (isVarArg) {
04237     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
04238     SDValue Ops[] = { Chain, InFlag };
04239 
04240     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
04241                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
04242 
04243     InFlag = Chain.getValue(1);
04244   }
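
  // A sketch of what CR6SET/CR6UNSET become (mnemonics are illustrative of
  // the usual encoding):
  //
  //   creqv 6, 6, 6    # seenFloatArg: set CR bit 6
  //   crxor 6, 6, 6    # otherwise:    clear CR bit 6
  //
  // The callee's va_start-handling prologue tests this bit to decide whether
  // the FPR argument registers must be dumped to the register save area.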
04245 
04246   if (isTailCall)
04247     PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
04248                     false, TailCallArguments);
04249 
04250   return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, DAG,
04251                     RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
04252                     NumBytes, Ins, InVals, CS);
04253 }
04254 
04255 // Copy an argument into memory, being careful to do this outside the
04256 // call sequence for the call to which the argument belongs.
04257 SDValue
04258 PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
04259                                               SDValue CallSeqStart,
04260                                               ISD::ArgFlagsTy Flags,
04261                                               SelectionDAG &DAG,
04262                                               SDLoc dl) const {
04263   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
04264                         CallSeqStart.getNode()->getOperand(0),
04265                         Flags, DAG, dl);
04266   // The MEMCPY must go outside the CALLSEQ_START..END.
04267   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
04268                              CallSeqStart.getNode()->getOperand(1),
04269                              SDLoc(MemcpyCall));
04270   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
04271                          NewCallSeqStart.getNode());
04272   return NewCallSeqStart;
04273 }
04274 
04275 SDValue
04276 PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
04277                                     CallingConv::ID CallConv, bool isVarArg,
04278                                     bool isTailCall, bool IsPatchPoint,
04279                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
04280                                     const SmallVectorImpl<SDValue> &OutVals,
04281                                     const SmallVectorImpl<ISD::InputArg> &Ins,
04282                                     SDLoc dl, SelectionDAG &DAG,
04283                                     SmallVectorImpl<SDValue> &InVals,
04284                                     ImmutableCallSite *CS) const {
04285 
04286   bool isELFv2ABI = Subtarget.isELFv2ABI();
04287   bool isLittleEndian = Subtarget.isLittleEndian();
04288   unsigned NumOps = Outs.size();
04289 
04290   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
04291   unsigned PtrByteSize = 8;
04292 
04293   MachineFunction &MF = DAG.getMachineFunction();
04294 
04295   // Mark this function as potentially containing a tail call. As a
04296   // consequence, the frame pointer will be used for dynamic alloca and for
04297   // restoring the caller's stack pointer in this function's epilog. This is
04298   // done because the tail-called function might overwrite the value in this
04299   // function's (MF) stack pointer stack slot 0(SP).
04300   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
04301       CallConv == CallingConv::Fast)
04302     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
04303 
04304   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
04305          "fastcc not supported on varargs functions");
04306 
04307   // Count how many bytes are to be pushed on the stack, including the linkage
04308   // area, and parameter passing area.  On ELFv1, the linkage area is 48 bytes
04309   // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
04310   // area is 32 bytes reserved space for [SP][CR][LR][TOC].
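  // For illustration, as doubleword offsets from the SP:
  //
  //   ELFv1 (48 bytes): 0 back chain | 8 CR | 16 LR | 24, 32 reserved | 40 TOC
  //   ELFv2 (32 bytes): 0 back chain | 8 CR | 16 LR | 24 TOC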
04311   unsigned LinkageSize = PPCFrameLowering::getLinkageSize(true, false,
04312                                                           isELFv2ABI);
04313   unsigned NumBytes = LinkageSize;
04314   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
04315 
04316   static const MCPhysReg GPR[] = {
04317     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
04318     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
04319   };
04320   static const MCPhysReg *FPR = GetFPR();
04321 
04322   static const MCPhysReg VR[] = {
04323     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
04324     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
04325   };
04326   static const MCPhysReg VSRH[] = {
04327     PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8,
04328     PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13
04329   };
04330 
04331   const unsigned NumGPRs = array_lengthof(GPR);
04332   const unsigned NumFPRs = 13;
04333   const unsigned NumVRs  = array_lengthof(VR);
04334 
04335   // When using the fast calling convention, we don't provide backing for
04336   // arguments that will be in registers.
04337   unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
04338 
04339   // Add up all the space actually used.
04340   for (unsigned i = 0; i != NumOps; ++i) {
04341     ISD::ArgFlagsTy Flags = Outs[i].Flags;
04342     EVT ArgVT = Outs[i].VT;
04343     EVT OrigVT = Outs[i].ArgVT;
04344 
04345     if (CallConv == CallingConv::Fast) {
04346       if (Flags.isByVal())
04347         NumGPRsUsed += (Flags.getByValSize()+7)/8;
04348       else
04349         switch (ArgVT.getSimpleVT().SimpleTy) {
04350         default: llvm_unreachable("Unexpected ValueType for argument!");
04351         case MVT::i1:
04352         case MVT::i32:
04353         case MVT::i64:
04354           if (++NumGPRsUsed <= NumGPRs)
04355             continue;
04356           break;
04357         case MVT::f32:
04358         case MVT::f64:
04359           if (++NumFPRsUsed <= NumFPRs)
04360             continue;
04361           break;
04362         case MVT::v4f32:
04363         case MVT::v4i32:
04364         case MVT::v8i16:
04365         case MVT::v16i8:
04366         case MVT::v2f64:
04367         case MVT::v2i64:
04368           if (++NumVRsUsed <= NumVRs)
04369             continue;
04370           break;
04371         }
04372     }
04373 
04374     /* Respect alignment of argument on the stack.  */
04375     unsigned Align =
04376       CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
04377     NumBytes = ((NumBytes + Align - 1) / Align) * Align;
04378 
04379     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
04380     if (Flags.isInConsecutiveRegsLast())
04381       NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
04382   }
04383 
04384   unsigned NumBytesActuallyUsed = NumBytes;
04385 
04386   // The prolog code of the callee may store up to 8 GPR argument registers to
04387   // the stack, allowing va_start to index over them in memory if it is varargs.
04388   // Because we cannot tell if this is needed on the caller side, we have to
04389   // conservatively assume that it is needed.  As such, make sure we have at
04390   // least enough stack space for the caller to store the 8 GPRs.
04391   // FIXME: On ELFv2, it may be unnecessary to allocate the parameter area.
04392   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
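  // For illustration: on ELFv1 this floor is 48 + 8 * 8 = 112 bytes, and on
  // ELFv2 it is 32 + 64 = 96 bytes, even for a call that passes all of its
  // arguments in registers.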
04393 
04394   // Tail call needs the stack to be aligned.
04395   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
04396       CallConv == CallingConv::Fast)
04397     NumBytes = EnsureStackAlignment(MF.getTarget(), NumBytes);
04398 
04399   // Calculate by how many bytes the stack has to be adjusted in case of tail
04400   // call optimization.
04401   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
04402 
04403   // To protect arguments on the stack from being clobbered in a tail call,
04404   // force all the loads to happen before doing any other lowering.
04405   if (isTailCall)
04406     Chain = DAG.getStackArgumentTokenFactor(Chain);
04407 
04408   // Adjust the stack pointer for the new arguments...
04409   // These operations are automatically eliminated by the prolog/epilog pass
04410   Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
04411                                dl);
04412   SDValue CallSeqStart = Chain;
04413 
04414   // Load the return address and frame pointer so they can be moved somewhere
04415   // else later.
04416   SDValue LROp, FPOp;
04417   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
04418                                        dl);
04419 
04420   // Set up a copy of the stack pointer for use loading and storing any
04421   // arguments that may not fit in the registers available for argument
04422   // passing.
04423   SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
04424 
04425   // Figure out which arguments are going to go in registers, and which in
04426   // memory.  Also, if this is a vararg function, floating point arguments
04427   // must be stored to our stack, and loaded into integer regs as well, if
04428   // any integer regs are available for argument passing.
04429   unsigned ArgOffset = LinkageSize;
04430 
04431   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
04432   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
04433 
04434   SmallVector<SDValue, 8> MemOpChains;
04435   for (unsigned i = 0; i != NumOps; ++i) {
04436     SDValue Arg = OutVals[i];
04437     ISD::ArgFlagsTy Flags = Outs[i].Flags;
04438     EVT ArgVT = Outs[i].VT;
04439     EVT OrigVT = Outs[i].ArgVT;
04440 
04441     // PtrOff will be used to store the current argument to the stack if a
04442     // register cannot be found for it.
04443     SDValue PtrOff;
04444 
04445     // We re-align the argument offset for each argument, except when using
04446     // the fast calling convention, where we do so only when the argument
04447     // will actually use a stack slot.
04448     auto ComputePtrOff = [&]() {
04449       /* Respect alignment of argument on the stack.  */
04450       unsigned Align =
04451         CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
04452       ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
04453 
04454       PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
04455 
04456       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
04457     };
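    // For illustration: with ArgOffset == 52 (say, after a 4-byte float in a
    // consecutive-register run) and a 16-byte-aligned vector argument,
    // ((52 + 15) / 16) * 16 rounds ArgOffset up to 64 before the slot
    // address is formed.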
04458 
04459     if (CallConv != CallingConv::Fast) {
04460       ComputePtrOff();
04461 
04462       /* Compute GPR index associated with argument offset.  */
04463       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
04464       GPR_idx = std::min(GPR_idx, NumGPRs);
04465     }
04466 
04467     // Promote integers to 64-bit values.
04468     if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
04469       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
04470       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
04471       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
04472     }
04473 
04474     // FIXME memcpy is used way more than necessary.  Correctness first.
04475     // Note: "by value" is code for passing a structure by value, not
04476     // basic types.
04477     if (Flags.isByVal()) {
04478       // Note: Size includes alignment padding, so
04479       //   struct x { short a; char b; }
04480       // will have Size = 4.  With #pragma pack(1), it will have Size = 3.
04481       // These are the proper values we need for right-justifying the
04482       // aggregate in a parameter register.
04483       unsigned Size = Flags.getByValSize();
04484 
04485       // An empty aggregate parameter takes up no storage and no
04486       // registers.
04487       if (Size == 0)
04488         continue;
04489 
04490       if (CallConv == CallingConv::Fast)
04491         ComputePtrOff();
04492 
04493       // All aggregates smaller than 8 bytes must be passed right-justified.
04494       if (Size==1 || Size==2 || Size==4) {
04495         EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
04496         if (GPR_idx != NumGPRs) {
04497           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
04498                                         MachinePointerInfo(), VT,
04499                                         false, false, false, 0);
04500           MemOpChains.push_back(Load.getValue(1));
04501           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
04502 
04503           ArgOffset += PtrByteSize;
04504           continue;
04505         }
04506       }
04507 
04508       if (GPR_idx == NumGPRs && Size < 8) {
04509         SDValue AddPtr = PtrOff;
04510         if (!isLittleEndian) {
04511           SDValue Const = DAG.getConstant(PtrByteSize - Size,
04512                                           PtrOff.getValueType());
04513           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
04514         }
04515         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
04516                                                           CallSeqStart,
04517                                                           Flags, DAG, dl);
04518         ArgOffset += PtrByteSize;
04519         continue;
04520       }
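      // For illustration: on big-endian, a 3-byte aggregate placed in an
      // 8-byte slot is copied to PtrOff + (8 - 3) = PtrOff + 5, so a later
      // doubleword load of the slot sees the data right-justified in the
      // low-order bytes of the register.
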
04521       // Copy entire object into memory.  There are cases where gcc-generated
04522       // code assumes it is there, even if it could be put entirely into
04523       // registers.  (This is not what the doc says.)
04524 
04525       // FIXME: The above statement is likely due to a misunderstanding of the
04526       // documents.  All arguments must be copied into the parameter area BY
04527       // THE CALLEE in the event that the callee takes the address of any
04528       // formal argument.  That has not yet been implemented.  However, it is
04529       // reasonable to use the stack area as a staging area for the register
04530       // load.
04531 
04532       // Skip this for small aggregates, as we will use the same slot for a
04533       // right-justified copy, below.
04534       if (Size >= 8)
04535         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
04536                                                           CallSeqStart,
04537                                                           Flags, DAG, dl);
04538 
04539       // When a register is available, pass a small aggregate right-justified.
04540       if (Size < 8 && GPR_idx != NumGPRs) {
04541         // The easiest way to get this right-justified in a register
04542         // is to copy the structure into the rightmost portion of a
04543         // local variable slot, then load the whole slot into the
04544         // register.
04545         // FIXME: The memcpy seems to produce pretty awful code for
04546         // small aggregates, particularly for packed ones.
04547         // FIXME: It would be preferable to use the slot in the
04548         // parameter save area instead of a new local variable.
04549         SDValue AddPtr = PtrOff;
04550         if (!isLittleEndian) {
04551           SDValue Const = DAG.getConstant(8 - Size, PtrOff.getValueType());
04552           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
04553         }
04554         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
04555                                                           CallSeqStart,
04556                                                           Flags, DAG, dl);
04557 
04558         // Load the slot into the register.
04559         SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff,
04560                                    MachinePointerInfo(),
04561                                    false, false, false, 0);
04562         MemOpChains.push_back(Load.getValue(1));
04563         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
04564 
04565         // Done with this argument.
04566         ArgOffset += PtrByteSize;
04567         continue;
04568       }
04569 
04570       // For aggregates larger than PtrByteSize, copy the pieces of the
04571       // object that fit into registers from the parameter save area.
04572       for (unsigned j=0; j<Size; j+=PtrByteSize) {
04573         SDValue Const = DAG.getConstant(j, PtrOff.getValueType());
04574         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
04575         if (GPR_idx != NumGPRs) {
04576           SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
04577                                      MachinePointerInfo(),
04578                                      false, false, false, 0);
04579           MemOpChains.push_back(Load.getValue(1));
04580           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
04581           ArgOffset += PtrByteSize;
04582         } else {
04583           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
04584           break;
04585         }
04586       }
04587       continue;
04588     }
04589 
04590     switch (Arg.getSimpleValueType().SimpleTy) {
04591     default: llvm_unreachable("Unexpected ValueType for argument!");
04592     case MVT::i1:
04593     case MVT::i32:
04594     case MVT::i64:
04595       // These can be scalar arguments or elements of an integer array type
04596       // passed directly.  Clang may use those instead of "byval" aggregate
04597       // types to avoid forcing arguments to memory unnecessarily.
04598       if (GPR_idx != NumGPRs) {
04599         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
04600       } else {
04601         if (CallConv == CallingConv::Fast)
04602           ComputePtrOff();
04603 
04604         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
04605                          true, isTailCall, false, MemOpChains,
04606                          TailCallArguments, dl);
04607         if (CallConv == CallingConv::Fast)
04608           ArgOffset += PtrByteSize;
04609       }
04610       if (CallConv != CallingConv::Fast)
04611         ArgOffset += PtrByteSize;
04612       break;
04613     case MVT::f32:
04614     case MVT::f64: {
04615       // These can be scalar arguments or elements of a float array type
04616       // passed directly.  The latter are used to implement ELFv2 homogeneous
04617       // float aggregates.
04618 
04619       // Named arguments go into FPRs first, and once they overflow, the
04620       // remaining arguments go into GPRs and then the parameter save area.
04621       // Unnamed arguments for vararg functions always go to GPRs and
04622       // then the parameter save area.  For now, put all arguments to vararg
04623       // routines always in both locations (FPR *and* GPR or stack slot).
04624       bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
04625       bool NeededLoad = false;
04626 
04627       // First load the argument into the next available FPR.
04628       if (FPR_idx != NumFPRs)
04629         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
04630 
04631       // Next, load the argument into GPR or stack slot if needed.
04632       if (!NeedGPROrStack)
04633         ;
04634       else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) {
04635         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
04636         // once we support fp <-> gpr moves.
04637 
04638         // In the non-vararg case, this can only ever happen in the
04639         // presence of f32 array types, since otherwise we never run
04640         // out of FPRs before running out of GPRs.
04641         SDValue ArgVal;
04642 
04643         // Double values are always passed in a single GPR.
04644         if (Arg.getValueType() != MVT::f32) {
04645           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
04646 
04647         // Non-array float values are extended and passed in a GPR.
04648         } else if (!Flags.isInConsecutiveRegs()) {
04649           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
04650           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
04651 
04652         // If we have an array of floats, we collect every odd element
04653         // together with its predecessor into one GPR.
04654         } else if (ArgOffset % PtrByteSize != 0) {
04655           SDValue Lo, Hi;
04656           Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
04657           Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
04658           if (!isLittleEndian)
04659             std::swap(Lo, Hi);
04660           ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
04661 
04662         // The final element, if even, goes into the first half of a GPR.
04663         } else if (Flags.isInConsecutiveRegsLast()) {
04664           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
04665           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
04666           if (!isLittleEndian)
04667             ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
04668                                  DAG.getConstant(32, MVT::i32));
04669 
04670         // Non-final even elements are skipped; they will be handled together
04671         // with the subsequent argument on the next go-around.
04672         } else
04673           ArgVal = SDValue();
04674 
04675         if (ArgVal.getNode())
04676           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
04677       } else {
04678         if (CallConv == CallingConv::Fast)
04679           ComputePtrOff();
04680 
04681         // Single-precision floating-point values are mapped to the
04682         // second (rightmost) word of the stack doubleword.
04683         if (Arg.getValueType() == MVT::f32 &&
04684             !isLittleEndian && !Flags.isInConsecutiveRegs()) {
04685           SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType());
04686           PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
04687         }
04688 
04689         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
04690                          true, isTailCall, false, MemOpChains,
04691                          TailCallArguments, dl);
04692 
04693         NeededLoad = true;
04694       }
04695       // When passing an array of floats, the array occupies consecutive
04696       // space in the argument area; only round up to the next doubleword
04697       // at the end of the array.  Otherwise, each float takes 8 bytes.
04698       if (CallConv != CallingConv::Fast || NeededLoad) {
04699         ArgOffset += (Arg.getValueType() == MVT::f32 &&
04700                       Flags.isInConsecutiveRegs()) ? 4 : 8;
04701         if (Flags.isInConsecutiveRegsLast())
04702           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
04703       }
04704       break;
04705     }
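    // For illustration: once FPRs are exhausted, a homogeneous f32 aggregate
    // {f0, f1, f2} is lowered as: f0 (even offset, not last) is skipped; f1
    // (odd offset) is paired with f0 via BUILD_PAIR into one GPR; f2 (even
    // offset, last) is any-extended and, on big-endian, shifted into the
    // high half of its GPR.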
04706     case MVT::v4f32:
04707     case MVT::v4i32:
04708     case MVT::v8i16:
04709     case MVT::v16i8:
04710     case MVT::v2f64:
04711     case MVT::v2i64:
04712       // These can be scalar arguments or elements of a vector array type
04713       // passed directly.  The latter are used to implement ELFv2 homogeneous
04714       // vector aggregates.
04715 
04716       // For a varargs call, named arguments go into VRs or on the stack as
04717       // usual; unnamed arguments always go to the stack or the corresponding
04718       // GPRs when within range.  For now, we always put the value in both
04719       // locations (or even all three).
04720       if (isVarArg) {
04721         // We could elide this store in the case where the object fits
04722         // entirely in R registers.  Maybe later.
04723         SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
04724                                      MachinePointerInfo(), false, false, 0);
04725         MemOpChains.push_back(Store);
04726         if (VR_idx != NumVRs) {
04727           SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
04728                                      MachinePointerInfo(),
04729                                      false, false, false, 0);
04730           MemOpChains.push_back(Load.getValue(1));
04731 
04732           unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 ||
04733                            Arg.getSimpleValueType() == MVT::v2i64) ?
04734                           VSRH[VR_idx] : VR[VR_idx];
04735           ++VR_idx;
04736 
04737           RegsToPass.push_back(std::make_pair(VReg, Load));
04738         }
04739         ArgOffset += 16;
04740         for (unsigned i=0; i<16; i+=PtrByteSize) {
04741           if (GPR_idx == NumGPRs)
04742             break;
04743           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
04744                                   DAG.getConstant(i, PtrVT));
04745           SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(),
04746                                      false, false, false, 0);
04747           MemOpChains.push_back(Load.getValue(1));
04748           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
04749         }
04750         break;
04751       }
04752 
04753       // Non-varargs Altivec params go into VRs or on the stack.
04754       if (VR_idx != NumVRs) {
04755         unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 ||
04756                          Arg.getSimpleValueType() == MVT::v2i64) ?
04757                         VSRH[VR_idx] : VR[VR_idx];
04758         ++VR_idx;
04759 
04760         RegsToPass.push_back(std::make_pair(VReg, Arg));
04761       } else {
04762         if (CallConv == CallingConv::Fast)
04763           ComputePtrOff();
04764 
04765         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
04766                          true, isTailCall, true, MemOpChains,
04767                          TailCallArguments, dl);
04768         if (CallConv == CallingConv::Fast)
04769           ArgOffset += 16;
04770       }
04771 
04772       if (CallConv != CallingConv::Fast)
04773         ArgOffset += 16;
04774       break;
04775     }
04776   }
04777 
04778   assert(NumBytesActuallyUsed == ArgOffset);
04779   (void)NumBytesActuallyUsed;
04780 
04781   if (!MemOpChains.empty())
04782     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
04783 
04784   // Check if this is an indirect call (MTCTR/BCTRL).
04785   // See PrepareCall() for more information about calls through function
04786   // pointers in the 64-bit SVR4 ABI.
04787   if (!isTailCall && !IsPatchPoint &&
04788       !isFunctionGlobalAddress(Callee) &&
04789       !isa<ExternalSymbolSDNode>(Callee)) {
04790     // Load r2 into a virtual register and store it to the TOC save area.
04791     SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
04792     // TOC save area offset.
04793     unsigned TOCSaveOffset = PPCFrameLowering::getTOCSaveOffset(isELFv2ABI);
04794     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset);
04795     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
04796     Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
04797                          MachinePointerInfo::getStack(TOCSaveOffset),
04798                          false, false, 0);
04799     // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
04800     // This does not mean the MTCTR instruction must use R12; it's easier
04801     // to model this as an extra parameter, so do that.
04802     if (isELFv2ABI && !IsPatchPoint)
04803       RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
04804   }
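  // (For reference, the indirect-call sequence built here comes out roughly
  // as the following; the save-slot offset is illustrative and ABI-dependent:
  //    std 2, 24(1)    ; save the caller's TOC pointer to the TOC save slot
  //    mtctr 12        ; CTR = callee address (in R12 under ELFv2)
  //    bctrl           ; call through CTR
  // with the TOC restore emitted after the call returns.)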
04805 
04806   // Build a sequence of copy-to-reg nodes chained together with token chain
04807   // and flag operands which copy the outgoing args into the appropriate regs.
04808   SDValue InFlag;
04809   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
04810     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
04811                              RegsToPass[i].second, InFlag);
04812     InFlag = Chain.getValue(1);
04813   }
04814 
04815   if (isTailCall)
04816     PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp,
04817                     FPOp, true, TailCallArguments);
04818 
04819   return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, DAG,
04820                     RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
04821                     NumBytes, Ins, InVals, CS);
04822 }
04823 
04824 SDValue
04825 PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
04826                                     CallingConv::ID CallConv, bool isVarArg,
04827                                     bool isTailCall, bool IsPatchPoint,
04828                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
04829                                     const SmallVectorImpl<SDValue> &OutVals,
04830                                     const SmallVectorImpl<ISD::InputArg> &Ins,
04831                                     SDLoc dl, SelectionDAG &DAG,
04832                                     SmallVectorImpl<SDValue> &InVals,
04833                                     ImmutableCallSite *CS) const {
04834 
04835   unsigned NumOps = Outs.size();
04836 
04837   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
04838   bool isPPC64 = PtrVT == MVT::i64;
04839   unsigned PtrByteSize = isPPC64 ? 8 : 4;
04840 
04841   MachineFunction &MF = DAG.getMachineFunction();
04842 
04843   // Mark this function as potentially containing a function that contains a
04844   // tail call. As a consequence, the frame pointer will be used for dynamic
04845   // allocation and for restoring the caller's stack pointer in this function's
04846   // epilogue. This is done because, by tail calling, the called function might
04847   // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
04848   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
04849       CallConv == CallingConv::Fast)
04850     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
04851 
04852   // Count how many bytes are to be pushed on the stack, including the linkage
04853   // area, and parameter passing area.  We start with 24/48 bytes, which is
04854   // prereserved space for [SP][CR][LR][3 x unused].
04855   unsigned LinkageSize = PPCFrameLowering::getLinkageSize(isPPC64, true,
04856                                                           false);
04857   unsigned NumBytes = LinkageSize;
04858 
04859   // Add up all the space actually used.
04860   // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
04861   // they all go in registers, but we must reserve stack space for them for
04862   // possible use by the caller.  In varargs or 64-bit calls, parameters are
04863   // assigned stack space in order, with padding so Altivec parameters are
04864   // 16-byte aligned.
04865   unsigned nAltivecParamsAtEnd = 0;
04866   for (unsigned i = 0; i != NumOps; ++i) {
04867     ISD::ArgFlagsTy Flags = Outs[i].Flags;
04868     EVT ArgVT = Outs[i].VT;
04869     // Varargs Altivec parameters are padded to a 16 byte boundary.
04870     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
04871         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
04872         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
04873       if (!isVarArg && !isPPC64) {
04874         // Non-varargs Altivec parameters go after all the non-Altivec
04875         // parameters; handle those later so we know how much padding we need.
04876         nAltivecParamsAtEnd++;
04877         continue;
04878       }
04879       // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
04880       NumBytes = ((NumBytes+15)/16)*16;
04881     }
04882     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
04883   }
04884 
04885   // Allow for Altivec parameters at the end, if needed.
04886   if (nAltivecParamsAtEnd) {
04887     NumBytes = ((NumBytes+15)/16)*16;
04888     NumBytes += 16*nAltivecParamsAtEnd;
04889   }
04890 
04891   // The prolog code of the callee may store up to 8 GPR argument registers to
04892   // the stack, allowing va_start to index over them in memory if the callee is varargs.
04893   // Because we cannot tell if this is needed on the caller side, we have to
04894   // conservatively assume that it is needed.  As such, make sure we have at
04895   // least enough stack space for the caller to store the 8 GPRs.
04896   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
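  // (Worked example: on 32-bit Darwin, LinkageSize is 24 and PtrByteSize is
  // 4, so a call passing a single i32 computes NumBytes == 28; the line
  // above raises it to 24 + 8*4 == 56, enough for the callee to spill all
  // 8 GPR argument registers.)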
04897 
04898   // Tail call needs the stack to be aligned.
04899   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
04900       CallConv == CallingConv::Fast)
04901     NumBytes = EnsureStackAlignment(MF.getTarget(), NumBytes);
04902 
04903   // Calculate by how many bytes the stack has to be adjusted in case of tail
04904   // call optimization.
04905   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
04906 
04907   // To protect arguments on the stack from being clobbered in a tail call,
04908   // force all the loads to happen before doing any other lowering.
04909   if (isTailCall)
04910     Chain = DAG.getStackArgumentTokenFactor(Chain);
04911 
04912   // Adjust the stack pointer for the new arguments...
04913   // These operations are automatically eliminated by the prolog/epilog pass
04914   Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
04915                                dl);
04916   SDValue CallSeqStart = Chain;
04917 
04918   // Load the return address and frame pointer so they can be moved somewhere
04919   // else later.
04920   SDValue LROp, FPOp;
04921   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
04922                                        dl);
04923 
04924   // Set up a copy of the stack pointer for use loading and storing any
04925   // arguments that may not fit in the registers available for argument
04926   // passing.
04927   SDValue StackPtr;
04928   if (isPPC64)
04929     StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
04930   else
04931     StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
04932 
04933   // Figure out which arguments are going to go in registers, and which in
04934   // memory.  Also, if this is a vararg function, floating point values
04935   // must be stored to our stack, and also loaded into integer regs, if
04936   // any integer regs are available for argument passing.
04937   unsigned ArgOffset = LinkageSize;
04938   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
04939 
04940   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
04941     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
04942     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
04943   };
04944   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
04945     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
04946     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
04947   };
04948   static const MCPhysReg *FPR = GetFPR();
04949 
04950   static const MCPhysReg VR[] = {
04951     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
04952     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
04953   };
04954   const unsigned NumGPRs = array_lengthof(GPR_32);
04955   const unsigned NumFPRs = 13;
04956   const unsigned NumVRs  = array_lengthof(VR);
04957 
04958   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
04959 
04960   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
04961   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
04962 
04963   SmallVector<SDValue, 8> MemOpChains;
04964   for (unsigned i = 0; i != NumOps; ++i) {
04965     SDValue Arg = OutVals[i];
04966     ISD::ArgFlagsTy Flags = Outs[i].Flags;
04967 
04968     // PtrOff will be used to store the current argument to the stack if a
04969     // register cannot be found for it.
04970     SDValue PtrOff;
04971 
04972     PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
04973 
04974     PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
04975 
04976     // On PPC64, promote integers to 64-bit values.
04977     if (isPPC64 && Arg.getValueType() == MVT::i32) {
04978       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
04979       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
04980       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
04981     }
04982 
04983     // FIXME memcpy is used way more than necessary.  Correctness first.
04984     // Note: "by value" is code for passing a structure by value, not
04985     // basic types.
04986     if (Flags.isByVal()) {
04987       unsigned Size = Flags.getByValSize();
04988       // Very small objects are passed right-justified.  Everything else is
04989       // passed left-justified.
04990       if (Size==1 || Size==2) {
04991         EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
04992         if (GPR_idx != NumGPRs) {
04993           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
04994                                         MachinePointerInfo(), VT,
04995                                         false, false, false, 0);
04996           MemOpChains.push_back(Load.getValue(1));
04997           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
04998 
04999           ArgOffset += PtrByteSize;
05000         } else {
05001           SDValue Const = DAG.getConstant(PtrByteSize - Size,
05002                                           PtrOff.getValueType());
05003           SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
05004           Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
05005                                                             CallSeqStart,
05006                                                             Flags, DAG, dl);
05007           ArgOffset += PtrByteSize;
05008         }
05009         continue;
05010       }
05011       // Copy entire object into memory.  There are cases where gcc-generated
05012       // code assumes it is there, even if it could be put entirely into
05013       // registers.  (This is not what the doc says.)
05014       Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
05015                                                         CallSeqStart,
05016                                                         Flags, DAG, dl);
05017 
05018       // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
05019       // copy the pieces of the object that fit into registers from the
05020       // parameter save area.
05021       for (unsigned j=0; j<Size; j+=PtrByteSize) {
05022         SDValue Const = DAG.getConstant(j, PtrOff.getValueType());
05023         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
05024         if (GPR_idx != NumGPRs) {
05025           SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
05026                                      MachinePointerInfo(),
05027                                      false, false, false, 0);
05028           MemOpChains.push_back(Load.getValue(1));
05029           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
05030           ArgOffset += PtrByteSize;
05031         } else {
05032           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
05033           break;
05034         }
05035       }
05036       continue;
05037     }
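    // (Sketch of the byval path above on a 32-bit target: a 12-byte struct
    // is memcpy'd to its parameter-area slot and then reloaded as three
    // 4-byte pieces into the next three GPRs, while a 1- or 2-byte struct
    // takes the right-justified extending-load path instead.)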
05038 
05039     switch (Arg.getSimpleValueType().SimpleTy) {
05040     default: llvm_unreachable("Unexpected ValueType for argument!");
05041     case MVT::i1:
05042     case MVT::i32:
05043     case MVT::i64:
05044       if (GPR_idx != NumGPRs) {
05045         if (Arg.getValueType() == MVT::i1)
05046           Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
05047 
05048         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
05049       } else {
05050         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
05051                          isPPC64, isTailCall, false, MemOpChains,
05052                          TailCallArguments, dl);
05053       }
05054       ArgOffset += PtrByteSize;
05055       break;
05056     case MVT::f32:
05057     case MVT::f64:
05058       if (FPR_idx != NumFPRs) {
05059         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
05060 
05061         if (isVarArg) {
05062           SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
05063                                        MachinePointerInfo(), false, false, 0);
05064           MemOpChains.push_back(Store);
05065 
05066           // Float varargs are always shadowed in available integer registers
05067           if (GPR_idx != NumGPRs) {
05068             SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
05069                                        MachinePointerInfo(), false, false,
05070                                        false, 0);
05071             MemOpChains.push_back(Load.getValue(1));
05072             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
05073           }
05074           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
05075             SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType());
05076             PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
05077             SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
05078                                        MachinePointerInfo(),
05079                                        false, false, false, 0);
05080             MemOpChains.push_back(Load.getValue(1));
05081             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
05082           }
05083         } else {
05084           // If we have any FPRs remaining, we may also have GPRs remaining.
05085           // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
05086           // GPRs.
05087           if (GPR_idx != NumGPRs)
05088             ++GPR_idx;
05089           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
05090               !isPPC64)  // PPC64 has 64-bit GPRs, obviously :)
05091             ++GPR_idx;
05092         }
05093       } else
05094         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
05095                          isPPC64, isTailCall, false, MemOpChains,
05096                          TailCallArguments, dl);
05097       if (isPPC64)
05098         ArgOffset += 8;
05099       else
05100         ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
05101       break;
05102     case MVT::v4f32:
05103     case MVT::v4i32:
05104     case MVT::v8i16:
05105     case MVT::v16i8:
05106       if (isVarArg) {
05107         // These go aligned on the stack, or in the corresponding R registers
05108         // when within range.  The Darwin PPC ABI doc claims they also go in
05109         // V registers; in fact gcc does this only for arguments that are
05110         // prototyped, not for those that match the "...".  We do it for all
05111         // arguments; this seems to work.
05112         while (ArgOffset % 16 !=0) {
05113           ArgOffset += PtrByteSize;
05114           if (GPR_idx != NumGPRs)
05115             GPR_idx++;
05116         }
05117         // We could elide this store in the case where the object fits
05118         // entirely in R registers.  Maybe later.
05119         PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
05120                             DAG.getConstant(ArgOffset, PtrVT));
05121         SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
05122                                      MachinePointerInfo(), false, false, 0);
05123         MemOpChains.push_back(Store);
05124         if (VR_idx != NumVRs) {
05125           SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
05126                                      MachinePointerInfo(),
05127                                      false, false, false, 0);
05128           MemOpChains.push_back(Load.getValue(1));
05129           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
05130         }
05131         ArgOffset += 16;
05132         for (unsigned i=0; i<16; i+=PtrByteSize) {
05133           if (GPR_idx == NumGPRs)
05134             break;
05135           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
05136                                   DAG.getConstant(i, PtrVT));
05137           SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(),
05138                                      false, false, false, 0);
05139           MemOpChains.push_back(Load.getValue(1));
05140           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
05141         }
05142         break;
05143       }
05144 
05145       // Non-varargs Altivec params generally go in registers, but have
05146       // stack space allocated at the end.
05147       if (VR_idx != NumVRs) {
05148         // Doesn't have GPR space allocated.
05149         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
05150       } else if (nAltivecParamsAtEnd==0) {
05151         // We are emitting Altivec params in order.
05152         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
05153                          isPPC64, isTailCall, true, MemOpChains,
05154                          TailCallArguments, dl);
05155         ArgOffset += 16;
05156       }
05157       break;
05158     }
05159   }
05160   // If all Altivec parameters fit in registers, as they usually do,
05161   // they get stack space following the non-Altivec parameters.  We
05162   // don't track this here because nobody below needs it.
05163   // If there are more Altivec parameters than fit in registers, emit
05164   // the stores here.
05165   if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
05166     unsigned j = 0;
05167     // Offset is aligned; skip the first 12 params, which go in V registers.
05168     ArgOffset = ((ArgOffset+15)/16)*16;
05169     ArgOffset += 12*16;
05170     for (unsigned i = 0; i != NumOps; ++i) {
05171       SDValue Arg = OutVals[i];
05172       EVT ArgType = Outs[i].VT;
05173       if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
05174           ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
05175         if (++j > NumVRs) {
05176           SDValue PtrOff;
05177           // We are emitting Altivec params in order.
05178           LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
05179                            isPPC64, isTailCall, true, MemOpChains,
05180                            TailCallArguments, dl);
05181           ArgOffset += 16;
05182         }
05183       }
05184     }
05185   }
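  // (Illustration: NumVRs is 12 here (V2 through V13), so with, say, 13
  // non-varargs vector arguments on 32-bit Darwin only the 13th reaches the
  // store above, placed just past the 12*16 bytes reserved for the
  // in-register vectors.)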
05186 
05187   if (!MemOpChains.empty())
05188     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
05189 
05190   // On Darwin, R12 must contain the address of an indirect callee.  This does
05191   // not mean the MTCTR instruction must use R12; it's easier to model this as
05192   // an extra parameter, so do that.
05193   if (!isTailCall &&
05194       !isFunctionGlobalAddress(Callee) &&
05195       !isa<ExternalSymbolSDNode>(Callee) &&
05196       !isBLACompatibleAddress(Callee, DAG))
05197     RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
05198                                                    PPC::R12), Callee));
05199 
05200   // Build a sequence of copy-to-reg nodes chained together with token chain
05201   // and flag operands which copy the outgoing args into the appropriate regs.
05202   SDValue InFlag;
05203   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
05204     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
05205                              RegsToPass[i].second, InFlag);
05206     InFlag = Chain.getValue(1);
05207   }
05208 
05209   if (isTailCall)
05210     PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp,
05211                     FPOp, true, TailCallArguments);
05212 
05213   return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, DAG,
05214                     RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
05215                     NumBytes, Ins, InVals, CS);
05216 }
05217 
05218 bool
05219 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
05220                                   MachineFunction &MF, bool isVarArg,
05221                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
05222                                   LLVMContext &Context) const {
05223   SmallVector<CCValAssign, 16> RVLocs;
05224   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
05225   return CCInfo.CheckReturn(Outs, RetCC_PPC);
05226 }
05227 
05228 SDValue
05229 PPCTargetLowering::LowerReturn(SDValue Chain,
05230                                CallingConv::ID CallConv, bool isVarArg,
05231                                const SmallVectorImpl<ISD::OutputArg> &Outs,
05232                                const SmallVectorImpl<SDValue> &OutVals,
05233                                SDLoc dl, SelectionDAG &DAG) const {
05234 
05235   SmallVector<CCValAssign, 16> RVLocs;
05236   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
05237                  *DAG.getContext());
05238   CCInfo.AnalyzeReturn(Outs, RetCC_PPC);
05239 
05240   SDValue Flag;
05241   SmallVector<SDValue, 4> RetOps(1, Chain);
05242 
05243   // Copy the result values into the output registers.
05244   for (unsigned i = 0; i != RVLocs.size(); ++i) {
05245     CCValAssign &VA = RVLocs[i];
05246     assert(VA.isRegLoc() && "Can only return in registers!");
05247 
05248     SDValue Arg = OutVals[i];
05249 
05250     switch (VA.getLocInfo()) {
05251     default: llvm_unreachable("Unknown loc info!");
05252     case CCValAssign::Full: break;
05253     case CCValAssign::AExt:
05254       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
05255       break;
05256     case CCValAssign::ZExt:
05257       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
05258       break;
05259     case CCValAssign::SExt:
05260       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
05261       break;
05262     }
05263 
05264     Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
05265     Flag = Chain.getValue(1);
05266     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
05267   }
05268 
05269   RetOps[0] = Chain;  // Update chain.
05270 
05271   // Add the flag if we have it.
05272   if (Flag.getNode())
05273     RetOps.push_back(Flag);
05274 
05275   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
05276 }
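// (For illustration: returning a single i32 on 32-bit PPC is assigned to R3
// by RetCC_PPC, so the loop above emits one CopyToReg into R3 whose glue
// result is threaded into the final PPCISD::RET_FLAG, which selects to blr.)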
05277 
05278 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
05279                                    const PPCSubtarget &Subtarget) const {
05280   // When we pop the dynamic allocation we need to restore the SP link.
05281   SDLoc dl(Op);
05282 
05283   // Get the correct type for pointers.
05284   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
05285 
05286   // Construct the stack pointer operand.
05287   bool isPPC64 = Subtarget.isPPC64();
05288   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
05289   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
05290 
05291   // Get the operands for the STACKRESTORE.
05292   SDValue Chain = Op.getOperand(0);
05293   SDValue SaveSP = Op.getOperand(1);
05294 
05295   // Load the old link SP.
05296   SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
05297                                    MachinePointerInfo(),
05298                                    false, false, false, 0);
05299 
05300   // Restore the stack pointer.
05301   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
05302 
05303   // Store the old link SP.
05304   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
05305                       false, false, 0);
05306 }
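// (In pseudo-assembly, the 32-bit sequence above is roughly:
//    lwz r0, 0(r1)    ; load the old SP back-link
//    mr  r1, rSAVED   ; restore the saved stack pointer
//    stw r0, 0(r1)    ; re-establish the back-link at the new SP
// where rSAVED stands for whatever register holds SaveSP.)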
05307 
05308 
05309 
05310 SDValue
05311 PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
05312   MachineFunction &MF = DAG.getMachineFunction();
05313   bool isPPC64 = Subtarget.isPPC64();
05314   bool isDarwinABI = Subtarget.isDarwinABI();
05315   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
05316 
05317   // Get the current return address save index.  The users of this index
05318   // will be primarily DYNALLOC instructions.
05319   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
05320   int RASI = FI->getReturnAddrSaveIndex();
05321 
05322   // If the return address save index hasn't been defined yet.
05323   if (!RASI) {
05324     // Find out what the fixed offset of the return address save area is.
05325     int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI);
05326     // Allocate the frame index for the return address save area.
05327     RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
05328     // Save the result.
05329     FI->setReturnAddrSaveIndex(RASI);
05330   }
05331   return DAG.getFrameIndex(RASI, PtrVT);
05332 }
05333 
05334 SDValue
05335 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
05336   MachineFunction &MF = DAG.getMachineFunction();
05337   bool isPPC64 = Subtarget.isPPC64();
05338   bool isDarwinABI = Subtarget.isDarwinABI();
05339   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
05340 
05341   // Get current frame pointer save index.  The users of this index will be
05342   // primarily DYNALLOC instructions.
05343   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
05344   int FPSI = FI->getFramePointerSaveIndex();
05345 
05346   // If the frame pointer save index hasn't been defined yet.
05347   if (!FPSI) {
05348     // Find out what the fixed offset of the frame pointer save area is.
05349     int FPOffset = PPCFrameLowering::getFramePointerSaveOffset(isPPC64,
05350                                                            isDarwinABI);
05351 
05352     // Allocate the frame index for frame pointer save area.
05353     FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
05354     // Save the result.
05355     FI->setFramePointerSaveIndex(FPSI);
05356   }
05357   return DAG.getFrameIndex(FPSI, PtrVT);
05358 }
05359 
05360 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
05361                                          SelectionDAG &DAG,
05362                                          const PPCSubtarget &Subtarget) const {
05363   // Get the inputs.
05364   SDValue Chain = Op.getOperand(0);
05365   SDValue Size  = Op.getOperand(1);
05366   SDLoc dl(Op);
05367 
05368   // Get the correct type for pointers.
05369   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
05370   // Negate the size.
05371   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
05372                                   DAG.getConstant(0, PtrVT), Size);
05373   // Construct a node for the frame pointer save index.
05374   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
05375   // Build a DYNALLOC node.
05376   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
05377   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
05378   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
05379 }
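// (What reaches this hook is a dynamically sized alloca, e.g.
//    %buf = alloca i8, i32 %n
// The DYNALLOC node built above carries the negated size plus the frame
// pointer save index, and is later expanded into a stack pointer update
// that preserves the back-link.)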
05380 
05381 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
05382                                                SelectionDAG &DAG) const {
05383   SDLoc DL(Op);
05384   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
05385                      DAG.getVTList(MVT::i32, MVT::Other),
05386                      Op.getOperand(0), Op.getOperand(1));
05387 }
05388 
05389 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
05390                                                 SelectionDAG &DAG) const {
05391   SDLoc DL(Op);
05392   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
05393                      Op.getOperand(0), Op.getOperand(1));
05394 }
05395 
05396 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
05397   assert(Op.getValueType() == MVT::i1 &&
05398          "Custom lowering only for i1 loads");
05399 
05400   // First, load 8 bits into 32 bits, then truncate to 1 bit.
05401 
05402   SDLoc dl(Op);
05403   LoadSDNode *LD = cast<LoadSDNode>(Op);
05404 
05405   SDValue Chain = LD->getChain();
05406   SDValue BasePtr = LD->getBasePtr();
05407   MachineMemOperand *MMO = LD->getMemOperand();
05408 
05409   SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(), Chain,
05410                                  BasePtr, MVT::i8, MMO);
05411   SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
05412 
05413   SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
05414   return DAG.getMergeValues(Ops, dl);
05415 }
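// (Example of what this handles, in the typed-pointer IR syntax of this era:
//    %v = load i1* %p
// lowers to an 8-bit EXTLOAD into a pointer-sized value followed by a
// TRUNCATE back to i1, since PPC has byte loads but no 1-bit memory access.)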
05416 
05417 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
05418   assert(Op.getOperand(1).getValueType() == MVT::i1 &&
05419          "Custom lowering only for i1 stores");
05420 
05421   // First, zero extend to 32 bits, then use a truncating store to 8 bits.
05422 
05423   SDLoc dl(Op);
05424   StoreSDNode *ST = cast<StoreSDNode>(Op);
05425 
05426   SDValue Chain = ST->getChain();
05427   SDValue BasePtr = ST->getBasePtr();
05428   SDValue Value = ST->getValue();
05429   MachineMemOperand *MMO = ST->getMemOperand();
05430 
05431   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(), Value);
05432   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
05433 }
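// (The mirror image of LowerLOAD above: "store i1 %v, i1* %p" zero-extends
// %v to a pointer-sized value and emits a truncating 8-bit store.)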
05434 
05435 // FIXME: Remove this once the ANDI glue bug is fixed:
05436 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
05437   assert(Op.getValueType() == MVT::i1 &&
05438          "Custom lowering only for i1 results");
05439 
05440   SDLoc DL(Op);
05441   return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1,
05442                      Op.getOperand(0));
05443 }
05444 
05445 /// LowerSELECT_CC - Lower floating-point select_cc's into the fsel
05446 /// instruction when possible.
05447 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
05448   // Not FP? Not a fsel.
05449   if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
05450       !Op.getOperand(2).getValueType().isFloatingPoint())
05451     return Op;
05452 
05453   // We might be able to do better than this under some circumstances, but in
05454   // general, fsel-based lowering of select is a finite-math-only optimization.
05455   // For more information, see section F.3 of the 2.06 ISA specification.
05456   if (!DAG.getTarget().Options.NoInfsFPMath ||
05457       !DAG.getTarget().Options.NoNaNsFPMath)
05458     return Op;
05459 
05460   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
05461 
05462   EVT ResVT = Op.getValueType();
05463   EVT CmpVT = Op.getOperand(0).getValueType();
05464   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
05465   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
05466   SDLoc dl(Op);
05467 
05468   // If the RHS of the comparison is a 0.0, we don't need to do the
05469   // subtraction at all.
05470   SDValue Sel1;
05471   if (isFloatingPointZero(RHS))
05472     switch (CC) {
05473     default: break;       // SETUO etc aren't handled by fsel.
05474     case ISD::SETNE:
05475       std::swap(TV, FV);
05476     case ISD::SETEQ:
05477       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
05478         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
05479       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
05480       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
05481         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
05482       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
05483                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
05484     case ISD::SETULT:
05485     case ISD::SETLT:
05486       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
05487     case ISD::SETOGE:
05488     case ISD::SETGE:
05489       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
05490         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
05491       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
05492     case ISD::SETUGT:
05493     case ISD::SETGT:
05494       std::swap(TV, FV);  // fsel is natively setge, swap operands for setgt
05495     case ISD::SETOLE:
05496     case ISD::SETLE:
05497       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
05498         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
05499       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
05500                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
05501     }
05502 
05503   SDValue Cmp;
05504   switch (CC) {
05505   default: break;       // SETUO etc aren't handled by fsel.
05506   case ISD::SETNE:
05507     std::swap(TV, FV);
05508   case ISD::SETEQ:
05509     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS);
05510     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
05511       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
05512     Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
05513     if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
05514       Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
05515     return DAG.getNode(PPCISD::FSEL, dl, ResVT,
05516                        DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
05517   case ISD::SETULT:
05518   case ISD::SETLT:
05519     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS);
05520     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
05521       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
05522     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
05523   case ISD::SETOGE:
05524   case ISD::SETGE:
05525     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS);
05526     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
05527       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
05528     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
05529   case ISD::SETUGT:
05530   case ISD::SETGT:
05531     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS);
05532     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
05533       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
05534     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
05535   case ISD::SETOLE:
05536   case ISD::SETLE:
05537     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS);
05538     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
05539       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
05540     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
05541   }
05542   return Op;
05543 }
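// (Worked example, valid only under the finite-math checks above: for an
// f64 "select_cc setge a, b, x, y", the SETGE case computes Cmp = a - b and
// emits FSEL(Cmp, x, y), i.e. "x if a - b >= 0, else y".  With infinities
// permitted this breaks down: inf - inf is NaN, and fsel treats NaN as
// "not >= 0", which is why the transformation is gated the way it is.)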
05544 
05545 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
05546                                                SelectionDAG &DAG,
05547                                                SDLoc dl) const {
05548   assert(Op.getOperand(0).getValueType().isFloatingPoint());
05549   SDValue Src = Op.getOperand(0);
05550   if (Src.getValueType() == MVT::f32)
05551     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
05552 
05553   SDValue Tmp;
05554   switch (Op.getSimpleValueType().SimpleTy) {
05555   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
05556   case MVT::i32:
05557     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ :
05558                         (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ :
05559                                                    PPCISD::FCTIDZ),
05560                       dl, MVT::f64, Src);
05561     break;
05562   case MVT::i64:
05563     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
05564            "i64 FP_TO_UINT is supported only with FPCVT");
05565     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
05566                                                         PPCISD::FCTIDUZ,
05567                       dl, MVT::f64, Src);
05568     break;
05569   }
05570 
05571   // Convert the FP value to an int value through memory.
05572   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
05573     (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT());
05574   SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
05575   int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
05576   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(FI);
05577 
05578   // Emit a store to the stack slot.
05579   SDValue Chain;
05580   if (i32Stack) {
05581     MachineFunction &MF = DAG.getMachineFunction();
05582     MachineMemOperand *MMO =
05583       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4);
05584     SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
05585     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
05586               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
05587   } else
05588     Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr,
05589                          MPI, false, false, 0);
05590 
05591   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
05592   // add in a bias.
05593   if (Op.getValueType() == MVT::i32 && !i32Stack) {
05594     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
05595                         DAG.getConstant(4, FIPtr.getValueType()));
05596     MPI = MPI.getWithOffset(4);
05597   }
05598 
05599   RLI.Chain = Chain;
05600   RLI.Ptr = FIPtr;
05601   RLI.MPI = MPI;
05602 }
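// (Net effect, sketched for a signed f64 to i32 conversion when STFIWX is
// available:
//    fctiwz f0, f1       ; integer result in the low word of an FPR
//    stfiwx f0, 0, rSLOT ; store just that 32-bit word to the stack slot
// where rSLOT stands for the stack-slot address; RLI then describes the
// slot so the caller can load the integer back or reuse the address.)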
05603 
05604 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
05605                                           SDLoc dl) const {
05606   ReuseLoadInfo RLI;
05607   LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
05608 
05609   return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, false,
05610                      false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo,
05611                      RLI.Ranges);
05612 }
05613 
05614 // We're trying to insert a regular store, S, and then a load, L. If the
05615 // incoming value, O, is a load, we might just be able to have our load use the
05616 // address used by O. However, we don't know if anything else will store to
05617 // that address before we can load from it. To prevent this situation, we need
05618 // to insert our load, L, into the chain as a peer of O. To do this, we give L
05619 // the same chain operand as O, we create a token factor from the chain results
05620 // of O and L, and we replace all uses of O's chain result with that token
05621 // factor (see spliceIntoChain below for this last part).
05622 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
05623                                             ReuseLoadInfo &RLI,
05624                                             SelectionDAG &DAG,
05625                                             ISD::LoadExtType ET) const {
05626   SDLoc dl(Op);
05627   if (ET == ISD::NON_EXTLOAD &&
05628       (Op.getOpcode() == ISD::FP_TO_UINT ||
05629        Op.getOpcode() == ISD::FP_TO_SINT) &&
05630       isOperationLegalOrCustom(Op.getOpcode(),
05631                                Op.getOperand(0).getValueType())) {
05632 
05633     LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
05634     return true;
05635   }
05636 
05637   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
05638   if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
05639       LD->isNonTemporal())
05640     return false;
05641   if (LD->getMemoryVT() != MemVT)
05642     return false;
05643 
05644   RLI.Ptr = LD->getBasePtr();
05645   if (LD->isIndexed() && LD->getOffset().getOpcode() != ISD::UNDEF) {
05646     assert(LD->getAddressingMode() == ISD::PRE_INC &&
05647            "Non-pre-inc AM on PPC?");
05648     RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
05649                           LD->getOffset());
05650   }
05651 
05652   RLI.Chain = LD->getChain();
05653   RLI.MPI = LD->getPointerInfo();
05654   RLI.IsInvariant = LD->isInvariant();
05655   RLI.Alignment = LD->getAlignment();
05656   RLI.AAInfo = LD->getAAInfo();
05657   RLI.Ranges = LD->getRanges();
05658 
05659   RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
05660   return true;
05661 }
05662 
05663 // Given the head of the old chain, ResChain, insert a token factor containing
05664 // it and NewResChain, and make users of ResChain now be users of that token
05665 // factor.
05666 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
05667                                         SDValue NewResChain,
05668                                         SelectionDAG &DAG) const {
05669   if (!ResChain)
05670     return;
05671 
05672   SDLoc dl(NewResChain);
05673 
05674   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
05675                            NewResChain, DAG.getUNDEF(MVT::Other));
05676   assert(TF.getNode() != NewResChain.getNode() &&
05677          "A new TF really is required here");
05678 
05679   DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
05680   DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
05681 }
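// (Sketch: if O's chain result previously fed users U1..Un, those users now
// consume TokenFactor(O.chain.result, NewResChain) instead, so the new load
// sits beside O in the chain rather than after whatever used O.)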
05682 
05683 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
05684                                           SelectionDAG &DAG) const {
05685   SDLoc dl(Op);
05686   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
05687   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
05688     return SDValue();
05689 
05690   if (Op.getOperand(0).getValueType() == MVT::i1)
05691     return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
05692                        DAG.getConstantFP(1.0, Op.getValueType()),
05693                        DAG.getConstantFP(0.0, Op.getValueType()));
05694 
05695   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
05696          "UINT_TO_FP is supported only with FPCVT");
05697 
05698   // If we have FCFIDS, then use it when converting to single-precision.
05699   // Otherwise, convert to double-precision and then round.
05700   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) ?
05701                    (Op.getOpcode() == ISD::UINT_TO_FP ?
05702                     PPCISD::FCFIDUS : PPCISD::FCFIDS) :
05703                    (Op.getOpcode() == ISD::UINT_TO_FP ?
05704                     PPCISD::FCFIDU : PPCISD::FCFID);
05705   MVT      FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) ?
05706                    MVT::f32 : MVT::f64;
05707 
05708   if (Op.getOperand(0).getValueType() == MVT::i64) {
05709     SDValue SINT = Op.getOperand(0);
05710     // When converting to single-precision, we actually need to convert
05711     // to double-precision first and then round to single-precision.
05712     // To avoid double-rounding effects during that operation, we have
05713     // to prepare the input operand.  Bits that might be truncated when
05714     // converting to double-precision are replaced by a bit that won't
05715     // be lost at this stage, but is below the single-precision rounding
05716     // position.
05717     //
05718     // However, if -enable-unsafe-fp-math is in effect, accept double
05719     // rounding to avoid the extra overhead.
05720     if (Op.getValueType() == MVT::f32 &&
05721         !Subtarget.hasFPCVT() &&
05722         !DAG.getTarget().Options.UnsafeFPMath) {
05723 
05724       // Twiddle input to make sure the low 11 bits are zero.  (If this
05725       // is the case, we are guaranteed the value will fit into the 53 bit
05726       // mantissa of an IEEE double-precision value without rounding.)
05727       // If any of those low 11 bits were not zero originally, make sure
05728       // bit 12 (value 2048) is set instead, so that the final rounding
05729       // to single-precision gets the correct result.
05730       SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
05731                                   SINT, DAG.getConstant(2047, MVT::i64));
05732       Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
05733                           Round, DAG.getConstant(2047, MVT::i64));
05734       Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
05735       Round = DAG.getNode(ISD::AND, dl, MVT::i64,
05736                           Round, DAG.getConstant(-2048, MVT::i64));
05737 
05738       // However, we cannot use that value unconditionally: if the magnitude
05739       // of the input value is small, the bit-twiddling we did above might
05740       // end up visibly changing the output.  Fortunately, in that case, we
05741       // don't need to twiddle bits since the original input will convert
05742       // exactly to double-precision floating-point already.  Therefore,
05743       // construct a conditional to use the original value if the top 11
05744       // bits are all sign-bit copies, and use the rounded value computed
05745       // above otherwise.
05746       SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
05747                                  SINT, DAG.getConstant(53, MVT::i32));
05748       Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
05749                          Cond, DAG.getConstant(1, MVT::i64));
05750       Cond = DAG.getSetCC(dl, MVT::i32,
05751                           Cond, DAG.getConstant(1, MVT::i64), ISD::SETUGT);
05752 
05753       SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
05754     }
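    // (Worked check of the twiddling above: if SINT's low 11 bits are not
    // all zero, (SINT & 2047) + 2047 carries into bit 11, so after the OR
    // with SINT and the final AND with -2048 the low 11 bits are cleared
    // but bit 11 is set, a sticky bit below the f32 rounding position; if
    // they were already zero, the value is unchanged.)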
05755 
05756     ReuseLoadInfo RLI;
05757     SDValue Bits;
05758 
05759     MachineFunction &MF = DAG.getMachineFunction();
05760     if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
05761       Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, false,
05762                          false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo,
05763                          RLI.Ranges);
05764       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
05765     } else if (Subtarget.hasLFIWAX() &&
05766                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
05767       MachineMemOperand *MMO =
05768         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
05769                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
05770       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
05771       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
05772                                      DAG.getVTList(MVT::f64, MVT::Other),
05773                                      Ops, MVT::i32, MMO);
05774       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
05775     } else if (Subtarget.hasFPCVT() &&
05776                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
05777       MachineMemOperand *MMO =
05778         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
05779                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
05780       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
05781       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
05782                                      DAG.getVTList(MVT::f64, MVT::Other),
05783                                      Ops, MVT::i32, MMO);
05784       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
05785     } else if (((Subtarget.hasLFIWAX() &&
05786                  SINT.getOpcode() == ISD::SIGN_EXTEND) ||
05787                 (Subtarget.hasFPCVT() &&
05788                  SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
05789                SINT.getOperand(0).getValueType() == MVT::i32) {
05790       MachineFrameInfo *FrameInfo = MF.getFrameInfo();
05791       EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
05792 
05793       int FrameIdx = FrameInfo->CreateStackObject(4, 4, false);
05794       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
05795 
05796       SDValue Store =
05797         DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
05798                      MachinePointerInfo::getFixedStack(FrameIdx),
05799                      false, false, 0);
05800 
05801       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
05802              "Expected an i32 store");
05803 
05804       RLI.Ptr = FIdx;
05805       RLI.Chain = Store;
05806       RLI.MPI = MachinePointerInfo::getFixedStack(FrameIdx);
05807       RLI.Alignment = 4;
05808 
05809       MachineMemOperand *MMO =
05810         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
05811                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
05812       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
05813       Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
05814                                      PPCISD::LFIWZX : PPCISD::LFIWAX,
05815                                      dl, DAG.getVTList(MVT::f64, MVT::Other),
05816                                      Ops, MVT::i32, MMO);
05817     } else
05818       Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
05819 
05820     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits);
05821 
05822     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
05823       FP = DAG.getNode(ISD::FP_ROUND, dl,
05824                        MVT::f32, FP, DAG.getIntPtrConstant(0));
05825     return FP;
05826   }
05827 
05828   assert(Op.getOperand(0).getValueType() == MVT::i32 &&
05829          "Unhandled INT_TO_FP type in custom expander!");
05830   // Since we only generate this in 64-bit mode, we can take advantage of
05831   // 64-bit registers.  In particular, sign extend the input value into the
05832   // 64-bit register with extsw, store the WHOLE 64-bit value into the stack,
05833   // then lfd it and fcfid it.
05834   MachineFunction &MF = DAG.getMachineFunction();
05835   MachineFrameInfo *FrameInfo = MF.getFrameInfo();
05836   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
05837 
05838   SDValue Ld;
05839   if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
05840     ReuseLoadInfo RLI;
05841     bool ReusingLoad;
05842     if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI,
05843                                             DAG))) {
05844       int FrameIdx = FrameInfo->CreateStackObject(4, 4, false);
05845       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
05846 
05847       SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
05848                                    MachinePointerInfo::getFixedStack(FrameIdx),
05849                                    false, false, 0);
05850 
05851       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
05852              "Expected an i32 store");
05853 
05854       RLI.Ptr = FIdx;
05855       RLI.Chain = Store;
05856       RLI.MPI = MachinePointerInfo::getFixedStack(FrameIdx);
05857       RLI.Alignment = 4;
05858     }
05859 
05860     MachineMemOperand *MMO =
05861       MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
05862                               RLI.Alignment, RLI.AAInfo, RLI.Ranges);
05863     SDValue Ops[] = { RLI.Chain, RLI.Ptr };
05864     Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ?
05865                                    PPCISD::LFIWZX : PPCISD::LFIWAX,
05866                                  dl, DAG.getVTList(MVT::f64, MVT::Other),
05867                                  Ops, MVT::i32, MMO);
05868     if (ReusingLoad)
05869       spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
05870   } else {
05871     assert(Subtarget.isPPC64() &&
05872            "i32->FP without LFIWAX supported only on PPC64");
05873 
05874     int FrameIdx = FrameInfo->CreateStackObject(8, 8, false);
05875     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
05876 
05877     SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64,
05878                                 Op.getOperand(0));
05879 
05880     // STD the extended value into the stack slot.
05881     SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Ext64, FIdx,
05882                                  MachinePointerInfo::getFixedStack(FrameIdx),
05883                                  false, false, 0);
05884 
05885     // Load the value as a double.
05886     Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx,
05887                      MachinePointerInfo::getFixedStack(FrameIdx),
05888                      false, false, false, 0);
05889   }
05890 
05891   // FCFID it and return it.
05892   SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
05893   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
05894     FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0));
05895   return FP;
05896 }
05897 
05898 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
05899                                             SelectionDAG &DAG) const {
05900   SDLoc dl(Op);
05901   /*
05902    The rounding mode is in bits 30:31 of the FPSCR, and has the following
05903    settings:
05904      00 Round to nearest
05905      01 Round to 0
05906      10 Round to +inf
05907      11 Round to -inf
05908 
05909   FLT_ROUNDS, on the other hand, expects the following:
05910     -1 Undefined
05911      0 Round to 0
05912      1 Round to nearest
05913      2 Round to +inf
05914      3 Round to -inf
05915 
05916   To perform the conversion, we do:
05917     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
05918   */
05919 
05920   MachineFunction &MF = DAG.getMachineFunction();
05921   EVT VT = Op.getValueType();
05922   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
05923 
05924   // Save FP Control Word to register
05925   EVT NodeTys[] = {
05926     MVT::f64,    // return register
05927     MVT::Glue    // unused in this context
05928   };
05929   SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);
05930 
05931   // Save FP register to stack slot
05932   int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false);
05933   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
05934   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain,
05935                                StackSlot, MachinePointerInfo(), false, false,0);
05936 
05937   // Load FP Control Word from low 32 bits of stack slot.
05938   SDValue Four = DAG.getConstant(4, PtrVT);
05939   SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
05940   SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(),
05941                             false, false, false, 0);
05942 
05943   // Transform as necessary
05944   SDValue CWD1 =
05945     DAG.getNode(ISD::AND, dl, MVT::i32,
05946                 CWD, DAG.getConstant(3, MVT::i32));
05947   SDValue CWD2 =
05948     DAG.getNode(ISD::SRL, dl, MVT::i32,
05949                 DAG.getNode(ISD::AND, dl, MVT::i32,
05950                             DAG.getNode(ISD::XOR, dl, MVT::i32,
05951                                         CWD, DAG.getConstant(3, MVT::i32)),
05952                             DAG.getConstant(3, MVT::i32)),
05953                 DAG.getConstant(1, MVT::i32));
05954 
05955   SDValue RetVal =
05956     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
05957 
05958   return DAG.getNode((VT.getSizeInBits() < 16 ?
05959                       ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
05960 }
05961 
05962 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
05963   EVT VT = Op.getValueType();
05964   unsigned BitWidth = VT.getSizeInBits();
05965   SDLoc dl(Op);
05966   assert(Op.getNumOperands() == 3 &&
05967          VT == Op.getOperand(1).getValueType() &&
05968          "Unexpected SHL!");
05969 
05970   // Expand into a bunch of logical ops.  Note that these ops
05971   // depend on the PPC behavior for oversized shift amounts.
05972   SDValue Lo = Op.getOperand(0);
05973   SDValue Hi = Op.getOperand(1);
05974   SDValue Amt = Op.getOperand(2);
05975   EVT AmtVT = Amt.getValueType();
05976 
05977   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
05978                              DAG.getConstant(BitWidth, AmtVT), Amt);
05979   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
05980   SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
05981   SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
05982   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
05983                              DAG.getConstant(-BitWidth, AmtVT));
05984   SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
05985   SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
05986   SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
05987   SDValue OutOps[] = { OutLo, OutHi };
05988   return DAG.getMergeValues(OutOps, dl);
05989 }
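
// A scalar model of the expansion above (an illustrative sketch; ppc_shl,
// ppc_srl, and ModelSHLPartsHi are hypothetical helpers mimicking slw/srw,
// which read a 6-bit shift amount and produce 0 for amounts in [32,63]).
// That behavior is what makes the Tmp5/Tmp6 term safe: for Amt < 32,
// Amt - 32 wraps to an amount in [32,63] and Tmp6 vanishes; for Amt >= 32
// it supplies the Lo bits that land in the high word.
#include <cstdint>

static uint32_t ppc_shl(uint32_t X, uint32_t N) {
  N &= 63;                        // slw reads a 6-bit shift amount
  return N < 32 ? X << N : 0;     // amounts 32..63 yield zero
}
static uint32_t ppc_srl(uint32_t X, uint32_t N) {
  N &= 63;                        // srw reads a 6-bit shift amount
  return N < 32 ? X >> N : 0;     // amounts 32..63 yield zero
}

// Models OutHi for a 64-bit left shift held in two 32-bit parts; OutLo is
// simply ppc_shl(Lo, Amt). Valid for Amt in [0,63].
static uint32_t ModelSHLPartsHi(uint32_t Lo, uint32_t Hi, uint32_t Amt) {
  uint32_t Tmp2 = ppc_shl(Hi, Amt);       // Hi bits staying in the high word
  uint32_t Tmp3 = ppc_srl(Lo, 32 - Amt);  // Lo bits carried into the high word
  uint32_t Tmp6 = ppc_shl(Lo, Amt - 32);  // Lo bits when Amt > 32
  return Tmp2 | Tmp3 | Tmp6;
}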
05990 
05991 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
05992   EVT VT = Op.getValueType();
05993   SDLoc dl(Op);
05994   unsigned BitWidth = VT.getSizeInBits();
05995   assert(Op.getNumOperands() == 3 &&
05996          VT == Op.getOperand(1).getValueType() &&
05997          "Unexpected SRL!");
05998 
05999   // Expand into a bunch of logical ops.  Note that these ops
06000   // depend on the PPC behavior for oversized shift amounts.
06001   SDValue Lo = Op.getOperand(0);
06002   SDValue Hi = Op.getOperand(1);
06003   SDValue Amt = Op.getOperand(2);
06004   EVT AmtVT = Amt.getValueType();
06005 
06006   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
06007                              DAG.getConstant(BitWidth, AmtVT), Amt);
06008   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
06009   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
06010   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
06011   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
06012                              DAG.getConstant(-BitWidth, AmtVT));
06013   SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
06014   SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
06015   SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
06016   SDValue OutOps[] = { OutLo, OutHi };
06017   return DAG.getMergeValues(OutOps, dl);
06018 }
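
// Note: LowerSRL_PARTS is the exact mirror of LowerSHL_PARTS above; the
// same zero-for-oversized-amount behavior of slw/srw (see the sketch after
// LowerSHL_PARTS) makes the Tmp6 = Hi >> (Amt - 32) term safe for every
// shift amount.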
06019 
06020 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
06021   SDLoc dl(Op);
06022   EVT VT = Op.getValueType();
06023   unsigned BitWidth = VT.getSizeInBits();
06024   assert(Op.getNumOperands() == 3 &&
06025          VT == Op.getOperand(1).getValueType() &&
06026          "Unexpected SRA!");
06027 
06028   // Expand into a bunch of logical ops, followed by a select_cc.
06029   SDValue Lo = Op.getOperand(0);
06030   SDValue Hi = Op.getOperand(1);
06031   SDValue Amt = Op.getOperand(2);
06032   EVT AmtVT = Amt.getValueType();
06033 
06034   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
06035                              DAG.getConstant(BitWidth, AmtVT), Amt);
06036   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
06037   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
06038   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
06039   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
06040                              DAG.getConstant(-BitWidth, AmtVT));
06041   SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
06042   SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
06043   SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, AmtVT),
06044                                   Tmp4, Tmp6, ISD::SETLE);
06045   SDValue OutOps[] = { OutLo, OutHi };
06046   return DAG.getMergeValues(OutOps, dl);
06047 }
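
// A sketch of why SRA_PARTS ends in a select rather than the OR used above
// (ModelSRAPartsLo is an illustrative name, not LLVM API): sraw fills with
// copies of the sign bit for oversized shift amounts instead of producing
// zero, so the out-of-range term cannot simply be ORed in. Tmp5 = Amt - 32
// is <= 0 (signed) exactly when the two in-range terms (Tmp4) form the
// correct low word.
#include <cstdint>

// Models OutLo for a 64-bit arithmetic right shift held in two 32-bit
// parts; OutHi is simply Hi sra Amt. Valid for Amt in [0,63]; assumes the
// usual arithmetic behavior of >> on a signed left operand.
static uint32_t ModelSRAPartsLo(uint32_t Lo, int32_t Hi, unsigned Amt) {
  if ((int)(Amt - 32) <= 0) {   // the select_cc condition (Tmp5 <= 0)
    uint32_t FromLo = Amt < 32 ? Lo >> Amt : 0;                  // srw
    uint32_t FromHi = Amt > 0 ? (uint32_t)Hi << (32 - Amt) : 0;  // slw
    return FromLo | FromHi;     // Tmp4
  }
  return (uint32_t)(Hi >> (Amt - 32));  // Tmp6: sraw sign-extends
}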
06048 
06049 //===----------------------------------------------------------------------===//
06050 // Vector related lowering.
06051 //
06052 
06053 /// BuildSplatI - Build a canonical splat immediate of Val with an element
06054 /// size of SplatSize.  Cast the result to VT.
06055 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
06056                              SelectionDAG &DAG, SDLoc dl) {
06057   assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
06058 
06059   static const EVT VTys[] = { // canonical VT to use for each size.
06060     MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
06061   };
06062 
06063   EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
06064 
06065   // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
06066   if (Val == -1)
06067     SplatSize = 1;
06068 
06069   EVT CanonicalVT = VTys[SplatSize-1];
06070 
06071   // Build a canonical splat for this value.
06072   SDValue Elt = DAG.getConstant(Val, MVT::i32);
06073   SmallVector<SDValue, 8> Ops;
06074   Ops.assign(CanonicalVT.getVectorNumElements(), Elt);
06075   SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, Ops);
06076   return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res);
06077 }
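
// Worked example (illustrative): BuildSplatI(-5, 2, MVT::v8i16, DAG, dl)
// builds a v8i16 BUILD_VECTOR of eight -5 constants, which the instruction
// selector matches to a single "vspltish -5". A -1 splat is first forced to
// SplatSize 1 so that any requested width becomes "vspltisb -1" (all-ones
// bytes are identical for every element size); the result is then bitcast
// back to ReqVT.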
06078 
06079 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the
06080 /// specified intrinsic ID.
06081 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op,
06082                                 SelectionDAG &DAG, SDLoc dl,
06083                                 EVT DestVT = MVT::Other) {
06084   if (DestVT == MVT::Other) DestVT = Op.getValueType();
06085   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
06086                      DAG.getConstant(IID, MVT::i32), Op);
06087 }
06088 
06089 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
06090 /// specified intrinsic ID.
06091 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
06092                                 SelectionDAG &DAG, SDLoc dl,
06093                                 EVT DestVT = MVT::Other) {
06094   if (DestVT == MVT::Other) DestVT = LHS.getValueType();
06095   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
06096                      DAG.getConstant(IID, MVT::i32), LHS, RHS);
06097 }
06098 
06099 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
06100 /// specified intrinsic ID.
06101 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
06102                                 SDValue Op2, SelectionDAG &DAG,
06103                                 SDLoc dl, EVT DestVT = MVT::Other) {
06104   if (DestVT == MVT::Other) DestVT = Op0.getValueType();
06105   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
06106                      DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2);
06107 }
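
// Illustration (assumes the Altivec intrinsic IDs used elsewhere in this
// file): a single-instruction vector max can be built as
//   SDValue Max = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmaxsw, A, B,
//                                  DAG, dl);
// which produces INTRINSIC_WO_CHAIN(vmaxsw, A, B) and selects to one vmaxsw.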
06108 
06109 
06110 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
06111 /// amount.  The result has the specified value type.
06112 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt,
06113                              EVT VT, SelectionDAG &DAG, SDLoc dl) {
06114   // Force LHS/RHS to be the right type.
06115   LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
06116   RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
06117 
06118   int Ops[16];
06119   for (unsigned i = 0; i != 16; ++i)
06120     Ops[i] = i + Amt;
06121   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
06122   return DAG.getNode(ISD::BITCAST, dl, VT, T);
06123 }
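
// Worked example (illustrative): BuildVSLDOI(A, B, 4, MVT::v4i32, DAG, dl)
// builds the byte shuffle mask {4,5,...,19}: the last twelve bytes of A
// followed by the first four bytes of B, which is exactly "vsldoi A, B, 4".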
06124 
06125 // If this is a case we can't handle, return null and let the default
06126 // expansion code take care of it.  If we CAN select this case, and if it
06127 // selects to a single instruction, return Op.  Otherwise, if we can codegen
06128 // this case more efficiently than a constant pool load, lower it to the
06129 // sequence of ops that should be used.
06130 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
06131                                              SelectionDAG &DAG) const {
06132   SDLoc dl(Op);
06133   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
06134   assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
06135 
06136   // Check if this is a splat of a constant value.
06137   APInt APSplatBits, APSplatUndef;
06138   unsigned SplatBitSize;
06139   bool HasAnyUndefs;
06140   if (!BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
06141                              HasAnyUndefs, 0, true) || SplatBitSize > 32)
06142     return SDValue();
06143 
06144   unsigned SplatBits = APSplatBits.getZExtValue();
06145   unsigned SplatUndef = APSplatUndef.getZExtValue();
06146   unsigned SplatSize = SplatBitSize / 8;
06147 
06148   // First, handle single instruction cases.
06149 
06150   // All zeros?
06151   if (SplatBits == 0) {
06152     // Canonicalize all zero vectors to be v4i32.
06153     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
06154       SDValue Z = DAG.getConstant(0, MVT::i32);
06155       Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z);
06156       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
06157     }
06158     return Op;
06159   }
06160 
06161   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
06162   int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
06163                     (32-SplatBitSize));
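  // Worked example (illustrative): a v8i16 splat of 0xFFFE has
  // SplatBits = 0xFFFE and SplatBitSize = 16, so
  // SextVal = int32_t(0xFFFE0000) >> 16 = -2, which is within [-16,15]
  // and lowers to "vspltish -2".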
06164   if (SextVal >= -16 && SextVal <= 15)
06165     return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
06166 
06167 
06168