//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
#include "PPCTargetObjectFile.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

// FIXME: Remove this once soft-float is supported.
static cl::opt<bool> DisablePPCFloatInVariadic("disable-ppc-float-in-variadic",
cl::desc("disable saving float registers for va_start on PPC"), cl::Hidden);

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM)
    : TargetLowering(TM),
      Subtarget(*TM.getSubtargetImpl()) {
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
  addRegisterClass(MVT::f64, &PPC::F8RCRegClass);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::UINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
    setTruncStoreAction(MVT::i64, MVT::i1, Expand);
    setTruncStoreAction(MVT::i32, MVT::i1, Expand);
    setTruncStoreAction(MVT::i16, MVT::i1, Expand);
    setTruncStoreAction(MVT::i8, MVT::i1, Expand);

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // This is used in the ppcf128->int sequence.  Note it has different semantics
  // from FP_ROUND:  that rounds to nearest, this rounds to zero.
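  // Illustrative note (not from the original source): when the extra ppcf128
  // precision is discarded, round-to-nearest could bump a value just below
  // 2.0 up to 2.0, whereas this round-toward-zero form never produces a
  // result of greater magnitude than its input, which is what the
  // ppcf128->int sequence relies on.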
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FMA  , MVT::f64, Legal);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);
  setOperationAction(ISD::FMA  , MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Expand FSQRT unless we have a hardware square-root instruction, or can
  // use the reciprocal-estimate sequence under unsafe FP math.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath &&
        Subtarget.hasFRSQRTE() && Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath &&
        Subtarget.hasFRSQRTES() && Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, CTPOP or CTTZ.
  setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
  setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);

  if (Subtarget.hasPOPCNTD()) {
    setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
    setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
    setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32   , Expand);
  setOperationAction(ISD::ROTR, MVT::i64   , Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have SELECT.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT,  MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::f64, Expand);

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are NOT intended to support SjLj exception
  // handling; they are a lightweight setjmp/longjmp replacement used to
  // support continuations, user-level threading, and the like. As a result,
  // no other SjLj exception interfaces are implemented, so please don't build
  // your own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART           , MVT::Other, Custom);

  if (Subtarget.isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  if (Subtarget.isSVR4ABI() && !isPPC64)
    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY            , MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
      MVT::SimpleValueType VT = (MVT::SimpleValueType)i;

      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD , VT, Legal);
      setOperationAction(ISD::SUB , VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , VT, Promote);
      AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
      setOperationAction(ISD::OR    , VT, Promote);
      AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , VT, Promote);
      AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , VT, Promote);
      AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FPOWI, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL,  VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT,  VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTLZ, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

      for (unsigned j = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
           j <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++j) {
        MVT::SimpleValueType InnerVT = (MVT::SimpleValueType)j;
        setTruncStoreAction(VT, InnerVT, Expand);
      }
      setLoadExtAction(ISD::SEXTLOAD, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
      setOperationAction(ISD::VSELECT, MVT::v8i16, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
      setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      // VSX v2i64 only supports non-arithmetic operations.
      setOperationAction(ISD::ADD, MVT::v2i64, Expand);
      setOperationAction(ISD::SUB, MVT::v2i64, Expand);

      setOperationAction(ISD::SHL, MVT::v2i64, Expand);
      setOperationAction(ISD::SRA, MVT::v2i64, Expand);
      setOperationAction(ISD::SRL, MVT::v2i64, Expand);

      setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Vector operation legalization checks the result type of
      // SIGN_EXTEND_INREG, overall legalization checks the inner type.
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);
  // Altivec instructions set fields to all zeros or all ones.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  if (isPPC64) {
    setStackPointerRegisterToSaveRestore(PPC::X1);
    setExceptionPointerRegister(PPC::X3);
    setExceptionSelectorRegister(PPC::X4);
  } else {
    setStackPointerRegisterToSaveRestore(PPC::R1);
    setExceptionPointerRegister(PPC::R3);
    setExceptionSelectorRegister(PPC::R4);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits())
    setHasMultipleConditionRegisters();

  setMinFunctionAlignment(2);
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(4);

  setInsertFencesForAtomic(true);

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties();

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget.getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;

    setPrefFunctionAlignment(4);
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
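/// Illustrative example (not from the original source): for a struct such as
///   struct S { <4 x i32> V; i8 A[8]; };
/// with Altivec (MaxMaxAlign == 16), the 128-bit vector member raises
/// MaxAlign to 16, so S is given 16-byte ByVal alignment.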
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
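/// For example (illustrative, not from the original source): on a non-Darwin
/// PPC64 target with Altivec, an aggregate containing a <4 x i32> member gets
/// 16-byte alignment via getMaxByValAlign above, while a plain integer
/// aggregate falls back to the 8-byte default.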
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary. Everything
  // else gets an 8-byte boundary on PPC64 and a 4-byte boundary on PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return nullptr;
  case PPCISD::FSEL:            return "PPCISD::FSEL";
  case PPCISD::FCFID:           return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
  case PPCISD::FRE:             return "PPCISD::FRE";
  case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX:          return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:         return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:        return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:           return "PPCISD::VPERM";
  case PPCISD::Hi:              return "PPCISD::Hi";
  case PPCISD::Lo:              return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
  case PPCISD::LOAD:            return "PPCISD::LOAD";
  case PPCISD::LOAD_TOC:        return "PPCISD::LOAD_TOC";
  case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
  case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:             return "PPCISD::SRL";
  case PPCISD::SRA:             return "PPCISD::SRA";
  case PPCISD::SHL:             return "PPCISD::SHL";
  case PPCISD::CALL:            return "PPCISD::CALL";
  case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
  case PPCISD::CALL_TLS:        return "PPCISD::CALL_TLS";
  case PPCISD::CALL_NOP_TLS:    return "PPCISD::CALL_NOP_TLS";
  case PPCISD::MTCTR:           return "PPCISD::MTCTR";
  case PPCISD::BCTRL:           return "PPCISD::BCTRL";
  case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
  case PPCISD::VCMP:            return "PPCISD::VCMP";
  case PPCISD::VCMPo:           return "PPCISD::VCMPo";
  case PPCISD::LBRX:            return "PPCISD::LBRX";
  case PPCISD::STBRX:           return "PPCISD::STBRX";
  case PPCISD::LARX:            return "PPCISD::LARX";
  case PPCISD::STCX:            return "PPCISD::STCX";
  case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ:            return "PPCISD::BDNZ";
  case PPCISD::BDZ:             return "PPCISD::BDZ";
  case PPCISD::MFFS:            return "PPCISD::MFFS";
  case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET:          return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
  case PPCISD::ADDIS_TOC_HA:    return "PPCISD::ADDIS_TOC_HA";
  case PPCISD::LD_TOC_L:        return "PPCISD::LD_TOC_L";
  case PPCISD::ADDI_TOC_L:      return "PPCISD::ADDI_TOC_L";
  case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
  case PPCISD::SC:              return "PPCISD::SC";
  }
}

EVT PPCTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
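/// Illustrative example (not from the original source): for ShuffleKind 0 on
/// a big-endian target, VPKUHUM keeps the odd-numbered bytes of the 32-byte
/// concatenated input, so the expected mask is
///   <1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31>.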
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getSubtarget().getDataLayout()->isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
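/// Illustrative example (not from the original source): for ShuffleKind 0 on
/// a big-endian target, VPKUWUM keeps the low halfword of each word, so the
/// expected mask is the byte sequence
///   <2,3, 6,7, 10,11, 14,15, 18,19, 22,23, 26,27, 30,31>.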
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getSubtarget().getDataLayout()->isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2).  For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
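/// Illustrative example (not from the original source): with UnitSize == 4
/// and ShuffleKind 0 on a big-endian target, this reduces to
/// isVMerge(N, 4, 8, 24), i.e. the vmrglw byte mask
///   <8,9,10,11, 24,25,26,27, 12,13,14,15, 28,29,30,31>.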
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getSubtarget().getDataLayout()->isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2).  For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
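/// Illustrative example (not from the original source): with UnitSize == 4
/// and ShuffleKind 0 on a big-endian target, this reduces to
/// isVMerge(N, 4, 0, 16), i.e. the vmrghw byte mask
///   <0,1,2,3, 16,17,18,19, 4,5,6,7, 20,21,22,23>.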
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getSubtarget().getDataLayout()->isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2).  For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
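/// Illustrative example (not from the original source): on a big-endian
/// target with ShuffleKind 0, the mask
///   <3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18>
/// is a left shift of the concatenated inputs by 3 bytes, so this returns 3.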
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;

  ShiftAmt -= i;
  bool isLE = DAG.getTarget().getSubtargetImpl()->getDataLayout()->
    isLittleEndian();

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else if (ShuffleKind == 1) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (ShuffleKind == 2 && isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
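/// Illustrative example (not from the original source): splatting word 2 of a
/// v4i32 vector corresponds to EltSize == 4 with the v16i8 mask
///   <8,9,10,11, 8,9,10,11, 8,9,10,11, 8,9,10,11>.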
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte element
  // splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// isAllNegativeZeroVector - Returns true if all elements of build_vector
/// are -0.0.
bool PPC::isAllNegativeZeroVector(SDNode *N) {
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);

  APInt APVal, APUndef;
  unsigned BitSize;
  bool HasAnyUndefs;

  if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32, true))
    if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
      return CFP->getValueAPF().isNegZero();

  return false;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
                                SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  if (DAG.getSubtarget().getDataLayout()->isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
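/// Illustrative example (not from the original source): a v8i16 build_vector
/// of eight 1's queried with ByteSize == 2 can be materialized as
/// "vspltish 1", so this returns a target constant of 1; a splat of 100 fits
/// the element but not the 5-bit signed immediate, so it returns SDValue().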
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if the corresponding elements in the buildvector agree across chunks.
01110     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
01111       if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
01112       // If the element isn't a constant, bail fully out.
01113       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
01114 
01115 
01116       if (!UniquedVals[i&(Multiple-1)].getNode())
01117         UniquedVals[i&(Multiple-1)] = N->getOperand(i);
01118       else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
01119         return SDValue();  // no match.
01120     }
01121 
01122     // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
01123     // either constant or undef values that are identical for each chunk.  See
01124     // if these chunks can form into a larger vspltis*.
01125 
01126     // Check to see if all of the leading entries are either 0 or -1.  If
01127     // neither, then this won't fit into the immediate field.
01128     bool LeadingZero = true;
01129     bool LeadingOnes = true;
01130     for (unsigned i = 0; i != Multiple-1; ++i) {
01131       if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.
01132 
01133       LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
01134       LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
01135     }
01136     // Finally, check the least significant entry.
01137     if (LeadingZero) {
01138       if (!UniquedVals[Multiple-1].getNode())
01139         return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
01140       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
01141       if (Val < 16)
01142         return DAG.getTargetConstant(Val, MVT::i32);  // 0,0,0,4 -> vspltisw(4)
01143     }
01144     if (LeadingOnes) {
01145       if (!UniquedVals[Multiple-1].getNode())
01146         return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
01147       int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
01148       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
01149         return DAG.getTargetConstant(Val, MVT::i32);
01150     }
01151 
01152     return SDValue();
01153   }
01154 
01155   // Check to see if this buildvec has a single non-undef value in its elements.
01156   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
01157     if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
01158     if (!OpVal.getNode())
01159       OpVal = N->getOperand(i);
01160     else if (OpVal != N->getOperand(i))
01161       return SDValue();
01162   }
01163 
01164   if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.
01165 
01166   unsigned ValSizeInBytes = EltSize;
01167   uint64_t Value = 0;
01168   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
01169     Value = CN->getZExtValue();
01170   } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
01171     assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
01172     Value = FloatToBits(CN->getValueAPF().convertToFloat());
01173   }
01174 
01175   // If the splat value is larger than the element value, then we can never do
01176   // this splat.  The only replicated value that could fit into our immediate
01177   // field would be zero, and we prefer to use vxor for that.
01178   if (ValSizeInBytes < ByteSize) return SDValue();
01179 
01180   // If the element value is larger than the splat value, cut it in half and
01181   // check to see if the two halves are equal.  Continue doing this until we
01182   // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
01183   while (ValSizeInBytes > ByteSize) {
01184     ValSizeInBytes >>= 1;
01185 
01186     // If the top half equals the bottom half, we're still ok.
01187     if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
01188          (Value                        & ((1 << (8*ValSizeInBytes))-1)))
01189       return SDValue();
01190   }
01191 
01192   // Properly sign extend the value.
01193   int MaskVal = SignExtend32(Value, ByteSize * 8);
01194 
01195   // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
01196   if (MaskVal == 0) return SDValue();
01197 
01198   // Finally, if this value fits in a 5 bit sext field, return it
01199   if (SignExtend32<5>(MaskVal) == MaskVal)
01200     return DAG.getTargetConstant(MaskVal, MVT::i32);
01201   return SDValue();
01202 }
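
// Worked example (illustrative, not from the original source): for a
// build_vector whose i32 elements all equal 0x01010101, checked against
// ByteSize == 1:
//   Value = 0x01010101, ValSizeInBytes = 4
//   halve: ValSizeInBytes = 2, top 0x0101 == bottom 0x0101 -> keep going
//   halve: ValSizeInBytes = 1, top 0x01   == bottom 0x01   -> done
//   MaskVal = SignExtend32(0x01010101, 8) = 1
// which fits the 5-bit sext field, so the splat can be materialized as a
// single vspltisb(1).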
01203 
01204 //===----------------------------------------------------------------------===//
01205 //  Addressing Mode Selection
01206 //===----------------------------------------------------------------------===//
01207 
01208 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
01209 /// or 64-bit immediate, and if the value can be accurately represented as a
01210 /// sign extension from a 16-bit value.  If so, this returns true and stores
01211 /// the immediate in Imm.
01212 static bool isIntS16Immediate(SDNode *N, short &Imm) {
01213   if (!isa<ConstantSDNode>(N))
01214     return false;
01215 
01216   Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
01217   if (N->getValueType(0) == MVT::i32)
01218     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
01219   else
01220     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
01221 }
01222 static bool isIntS16Immediate(SDValue Op, short &Imm) {
01223   return isIntS16Immediate(Op.getNode(), Imm);
01224 }
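
// Illustrative values (a sketch of the check, assuming a 64-bit node):
//   0xFFFFFFFFFFFF8000 (-32768): the (short) truncation round-trips through
//   the sign extension, so the value is accepted.
//   0x0000000000008000 ( 32768): truncates to -32768 != 32768, so the value
//   is rejected.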
01225 
01226 
01227 /// SelectAddressRegReg - Given the specified address, check to see if it
01228 /// can be represented as an indexed [r+r] operation.  Returns false if it
01229 /// can be more efficiently represented with [r+imm].
01230 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
01231                                             SDValue &Index,
01232                                             SelectionDAG &DAG) const {
01233   short imm = 0;
01234   if (N.getOpcode() == ISD::ADD) {
01235     if (isIntS16Immediate(N.getOperand(1), imm))
01236       return false;    // r+i
01237     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
01238       return false;    // r+i
01239 
01240     Base = N.getOperand(0);
01241     Index = N.getOperand(1);
01242     return true;
01243   } else if (N.getOpcode() == ISD::OR) {
01244     if (isIntS16Immediate(N.getOperand(1), imm))
01245       return false;    // r+i; fold the immediate into [r+imm] instead.
01246 
01247     // If this is an or of disjoint bitfields, we can codegen this as an add
01248     // (for better address arithmetic) if the LHS and RHS of the OR are provably
01249     // disjoint.
01250     APInt LHSKnownZero, LHSKnownOne;
01251     APInt RHSKnownZero, RHSKnownOne;
01252     DAG.computeKnownBits(N.getOperand(0),
01253                          LHSKnownZero, LHSKnownOne);
01254 
01255     if (LHSKnownZero.getBoolValue()) {
01256       DAG.computeKnownBits(N.getOperand(1),
01257                            RHSKnownZero, RHSKnownOne);
01258       // If all of the bits are known zero on the LHS or RHS, the add won't
01259       // carry.
01260       if (~(LHSKnownZero | RHSKnownZero) == 0) {
01261         Base = N.getOperand(0);
01262         Index = N.getOperand(1);
01263         return true;
01264       }
01265     }
01266   }
01267 
01268   return false;
01269 }
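
// Illustrative case for the disjoint-OR path (a sketch, not from the
// source): with
//   LHS = (X << 4)   // low four bits known zero
//   RHS = (Y & 0xF)  // all upper bits known zero
// every bit position is known zero on at least one side, so
// LHS | RHS == LHS + RHS, and the OR can safely be selected as an
// [r+r] indexed address.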
01270 
01271 // If we happen to be doing an i64 load or store into a stack slot that has
01272 // less than a 4-byte alignment, then the frame-index elimination may need to
01273 // use an indexed load or store instruction (because the offset may not be a
01274 // multiple of 4). The extra register needed to hold the offset comes from the
01275 // register scavenger, and it is possible that the scavenger will need to use
01276 // an emergency spill slot. As a result, we need to make sure that a spill slot
01277 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
01278 // stack slot.
01279 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
01280   // FIXME: This does not handle the LWA case.
01281   if (VT != MVT::i64)
01282     return;
01283 
01284   // NOTE: We'll exclude negative FIs here, which come from argument
01285   // lowering, because there are no known test cases triggering this problem
01286   // using packed structures (or similar). We can remove this exclusion if
01287   // we find such a test case. The reason why this is so test-case driven is
01288   // because this entire 'fixup' is only to prevent crashes (from the
01289   // register scavenger) on not-really-valid inputs. For example, if we have:
01290   //   %a = alloca i1
01291   //   %b = bitcast i1* %a to i64*
01292   //   store i64 0, i64* %b
01293   // then the store should really be marked as 'align 1', but is not. If it
01294   // were marked as 'align 1' then the indexed form would have been
01295   // instruction-selected initially, and the problem this 'fixup' is preventing
01296   // won't happen regardless.
01297   if (FrameIdx < 0)
01298     return;
01299 
01300   MachineFunction &MF = DAG.getMachineFunction();
01301   MachineFrameInfo *MFI = MF.getFrameInfo();
01302 
01303   unsigned Align = MFI->getObjectAlignment(FrameIdx);
01304   if (Align >= 4)
01305     return;
01306 
01307   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
01308   FuncInfo->setHasNonRISpills();
01309 }
01310 
01311 /// Returns true if the address N can be represented by a base register plus
01312 /// a signed 16-bit displacement [r+imm], and if it is not better
01313 /// represented as reg+reg.  If Aligned is true, only accept displacements
01314 /// suitable for STD and friends, i.e. multiples of 4.
01315 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
01316                                             SDValue &Base,
01317                                             SelectionDAG &DAG,
01318                                             bool Aligned) const {
01319   // FIXME dl should come from parent load or store, not from address
01320   SDLoc dl(N);
01321   // If this can be more profitably realized as r+r, fail.
01322   if (SelectAddressRegReg(N, Disp, Base, DAG))
01323     return false;
01324 
01325   if (N.getOpcode() == ISD::ADD) {
01326     short imm = 0;
01327     if (isIntS16Immediate(N.getOperand(1), imm) &&
01328         (!Aligned || (imm & 3) == 0)) {
01329       Disp = DAG.getTargetConstant(imm, N.getValueType());
01330       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
01331         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
01332         fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
01333       } else {
01334         Base = N.getOperand(0);
01335       }
01336       return true; // [r+i]
01337     } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
01338       // Match LOAD (ADD (X, Lo(G))).
01339       assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
01340              && "Cannot handle constant offsets yet!");
01341       Disp = N.getOperand(1).getOperand(0);  // The global address.
01342       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
01343              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
01344              Disp.getOpcode() == ISD::TargetConstantPool ||
01345              Disp.getOpcode() == ISD::TargetJumpTable);
01346       Base = N.getOperand(0);
01347       return true;  // [&g+r]
01348     }
01349   } else if (N.getOpcode() == ISD::OR) {
01350     short imm = 0;
01351     if (isIntS16Immediate(N.getOperand(1), imm) &&
01352         (!Aligned || (imm & 3) == 0)) {
01353       // If this is an or of disjoint bitfields, we can codegen this as an add
01354       // (for better address arithmetic) if the LHS and RHS of the OR are
01355       // provably disjoint.
01356       APInt LHSKnownZero, LHSKnownOne;
01357       DAG.computeKnownBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);
01358 
01359       if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
01360         // If all of the bits are known zero on the LHS or RHS, the add won't
01361         // carry.
01362         if (FrameIndexSDNode *FI =
01363               dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
01364           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
01365           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
01366         } else {
01367           Base = N.getOperand(0);
01368         }
01369         Disp = DAG.getTargetConstant(imm, N.getValueType());
01370         return true;
01371       }
01372     }
01373   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
01374     // Loading from a constant address.
01375 
01376     // If this address fits entirely in a 16-bit sext immediate field, codegen
01377     // this as "d, 0"
01378     short Imm;
01379     if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) {
01380       Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
01381       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
01382                              CN->getValueType(0));
01383       return true;
01384     }
01385 
01386     // Handle 32-bit sext immediates with LIS + addr mode.
01387     if ((CN->getValueType(0) == MVT::i32 ||
01388          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
01389         (!Aligned || (CN->getZExtValue() & 3) == 0)) {
01390       int Addr = (int)CN->getZExtValue();
01391 
01392       // Otherwise, break this down into an LIS + disp.
01393       Disp = DAG.getTargetConstant((short)Addr, MVT::i32);
01394 
01395       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
01396       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
01397       Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
01398       return true;
01399     }
01400   }
01401 
01402   Disp = DAG.getTargetConstant(0, getPointerTy());
01403   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
01404     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
01405     fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
01406   } else
01407     Base = N;
01408   return true;      // [r+0]
01409 }
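
// Worked example of the LIS + disp breakdown above (illustrative):
// for Addr = 0x12348000, (short)Addr = -32768, so
//   Base = (0x12348000 - (-32768)) >> 16 = 0x12350000 >> 16 = 0x1235
//   Disp = -32768
// LIS materializes 0x12350000, and the memory operation's displacement of
// -32768 recovers the original address 0x12348000.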
01410 
01411 /// SelectAddressRegRegOnly - Given the specified address, force it to be
01412 /// represented as an indexed [r+r] operation.
01413 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
01414                                                 SDValue &Index,
01415                                                 SelectionDAG &DAG) const {
01416   // Check to see if we can easily represent this as an [r+r] address.  This
01417   // will fail if it thinks that the address is more profitably represented as
01418   // reg+imm, e.g. where imm = 0.
01419   if (SelectAddressRegReg(N, Base, Index, DAG))
01420     return true;
01421 
01422   // If the operand is an addition, always emit this as [r+r], since this is
01423   // better (for code size and execution, as the memop does the add for free)
01424   // than emitting an explicit add.
01425   if (N.getOpcode() == ISD::ADD) {
01426     Base = N.getOperand(0);
01427     Index = N.getOperand(1);
01428     return true;
01429   }
01430 
01431   // Otherwise, do it the hard way, using R0 as the base register.
01432   Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
01433                          N.getValueType());
01434   Index = N;
01435   return true;
01436 }
01437 
01438 /// getPreIndexedAddressParts - Returns true if the node's address can be
01439 /// legally represented as a pre-indexed load / store address, and returns the
01440 /// base pointer, offset pointer, and addressing mode by reference.
01441 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
01442                                                   SDValue &Offset,
01443                                                   ISD::MemIndexedMode &AM,
01444                                                   SelectionDAG &DAG) const {
01445   if (DisablePPCPreinc) return false;
01446 
01447   bool isLoad = true;
01448   SDValue Ptr;
01449   EVT VT;
01450   unsigned Alignment;
01451   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
01452     Ptr = LD->getBasePtr();
01453     VT = LD->getMemoryVT();
01454     Alignment = LD->getAlignment();
01455   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
01456     Ptr = ST->getBasePtr();
01457     VT  = ST->getMemoryVT();
01458     Alignment = ST->getAlignment();
01459     isLoad = false;
01460   } else
01461     return false;
01462 
01463   // PowerPC doesn't have preinc load/store instructions for vectors.
01464   if (VT.isVector())
01465     return false;
01466 
01467   if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
01468 
01469     // Common code will reject creating a pre-inc form if the base pointer
01470     // is a frame index, or if N is a store and the base pointer is either
01471     // the same as or a predecessor of the value being stored.  Check for
01472     // those situations here, and try with swapped Base/Offset instead.
01473     bool Swap = false;
01474 
01475     if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
01476       Swap = true;
01477     else if (!isLoad) {
01478       SDValue Val = cast<StoreSDNode>(N)->getValue();
01479       if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
01480         Swap = true;
01481     }
01482 
01483     if (Swap)
01484       std::swap(Base, Offset);
01485 
01486     AM = ISD::PRE_INC;
01487     return true;
01488   }
01489 
01490   // LDU/STU can only handle immediates that are a multiple of 4.
01491   if (VT != MVT::i64) {
01492     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false))
01493       return false;
01494   } else {
01495     // LDU/STU need an address with at least 4-byte alignment.
01496     if (Alignment < 4)
01497       return false;
01498 
01499     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true))
01500       return false;
01501   }
01502 
01503   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
01504     // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
01505     // sext i32 to i64 when addr mode is r+i.
01506     if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
01507         LD->getExtensionType() == ISD::SEXTLOAD &&
01508         isa<ConstantSDNode>(Offset))
01509       return false;
01510   }
01511 
01512   AM = ISD::PRE_INC;
01513   return true;
01514 }
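
// Illustrative effect (a sketch; exact selection depends on the patterns):
// a successful PRE_INC match lets a load-then-advance pair such as
//   lwz r5, 4(r4) ; addi r4, r4, 4
// become the single update-form instruction
//   lwzu r5, 4(r4)
// which loads from r4+4 and writes the incremented address back into r4.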
01515 
01516 //===----------------------------------------------------------------------===//
01517 //  LowerOperation implementation
01518 //===----------------------------------------------------------------------===//
01519 
01520 /// GetLabelAccessInfo - Return true if we should reference labels using a
01521 /// PICBase, and set HiOpFlags and LoOpFlags to the target MO flags.
01522 static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags,
01523                                unsigned &LoOpFlags,
01524                                const GlobalValue *GV = nullptr) {
01525   HiOpFlags = PPCII::MO_HA;
01526   LoOpFlags = PPCII::MO_LO;
01527 
01528   // Don't use the pic base if not in PIC relocation model.
01529   bool isPIC = TM.getRelocationModel() == Reloc::PIC_;
01530 
01531   if (isPIC) {
01532     HiOpFlags |= PPCII::MO_PIC_FLAG;
01533     LoOpFlags |= PPCII::MO_PIC_FLAG;
01534   }
01535 
01536   // If this is a reference to a global value that requires a non-lazy-ptr, make
01537   // sure that instruction lowering adds it.
01538   if (GV && TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM)) {
01539     HiOpFlags |= PPCII::MO_NLP_FLAG;
01540     LoOpFlags |= PPCII::MO_NLP_FLAG;
01541 
01542     if (GV->hasHiddenVisibility()) {
01543       HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
01544       LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
01545     }
01546   }
01547 
01548   return isPIC;
01549 }
01550 
01551 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
01552                              SelectionDAG &DAG) {
01553   EVT PtrVT = HiPart.getValueType();
01554   SDValue Zero = DAG.getConstant(0, PtrVT);
01555   SDLoc DL(HiPart);
01556 
01557   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
01558   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
01559 
01560   // With PIC, the first instruction is actually "GR+hi(&G)".
01561   if (isPIC)
01562     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
01563                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
01564 
01565   // Generate non-pic code that has direct accesses to the constant pool.
01566   // The address of the global is just (hi(&g)+lo(&g)).
01567   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
01568 }
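
// Worked example (illustrative): the lo part is a *signed* 16-bit value, so
// the ha part compensates for the sign, roughly
//   ha16(a) = (a + 0x8000) >> 16,  lo16(a) = (short)a
// For a = 0x10018000: lo16 = -32768, ha16 = 0x1002, and
//   (0x1002 << 16) + (-32768) = 0x10018000.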
01569 
01570 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
01571                                              SelectionDAG &DAG) const {
01572   EVT PtrVT = Op.getValueType();
01573   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
01574   const Constant *C = CP->getConstVal();
01575 
01576   // 64-bit SVR4 ABI code is always position-independent.
01577   // The actual address of the GlobalValue is stored in the TOC.
01578   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
01579     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
01580     return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(CP), MVT::i64, GA,
01581                        DAG.getRegister(PPC::X2, MVT::i64));
01582   }
01583 
01584   unsigned MOHiFlag, MOLoFlag;
01585   bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
01586 
01587   if (isPIC && Subtarget.isSVR4ABI()) {
01588     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(),
01589                                            PPCII::MO_PIC_FLAG);
01590     SDLoc DL(CP);
01591     return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i32, GA,
01592                        DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT));
01593   }
01594 
01595   SDValue CPIHi =
01596     DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
01597   SDValue CPILo =
01598     DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
01599   return LowerLabelRef(CPIHi, CPILo, isPIC, DAG);
01600 }
01601 
01602 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
01603   EVT PtrVT = Op.getValueType();
01604   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
01605 
01606   // 64-bit SVR4 ABI code is always position-independent.
01607   // The actual address of the GlobalValue is stored in the TOC.
01608   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
01609     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
01610     return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(JT), MVT::i64, GA,
01611                        DAG.getRegister(PPC::X2, MVT::i64));
01612   }
01613 
01614   unsigned MOHiFlag, MOLoFlag;
01615   bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
01616 
01617   if (isPIC && Subtarget.isSVR4ABI()) {
01618     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
01619                                         PPCII::MO_PIC_FLAG);
01620     SDLoc DL(GA);
01621     return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(JT), PtrVT, GA,
01622                        DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT));
01623   }
01624 
01625   SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
01626   SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
01627   return LowerLabelRef(JTIHi, JTILo, isPIC, DAG);
01628 }
01629 
01630 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
01631                                              SelectionDAG &DAG) const {
01632   EVT PtrVT = Op.getValueType();
01633   BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
01634   const BlockAddress *BA = BASDN->getBlockAddress();
01635 
01636   // 64-bit SVR4 ABI code is always position-independent.
01637   // The actual BlockAddress is stored in the TOC.
01638   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
01639     SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
01640     return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(BASDN), MVT::i64, GA,
01641                        DAG.getRegister(PPC::X2, MVT::i64));
01642   }
01643 
01644   unsigned MOHiFlag, MOLoFlag;
01645   bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
01646   SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
01647   SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
01648   return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG);
01649 }
01650 
01651 // Generate a call to __tls_get_addr for the given GOT entry Op.
01652 std::pair<SDValue,SDValue>
01653 PPCTargetLowering::lowerTLSCall(SDValue Op, SDLoc dl,
01654                                 SelectionDAG &DAG) const {
01655 
01656   Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
01657   TargetLowering::ArgListTy Args;
01658   TargetLowering::ArgListEntry Entry;
01659   Entry.Node = Op;
01660   Entry.Ty = IntPtrTy;
01661   Args.push_back(Entry);
01662 
01663   TargetLowering::CallLoweringInfo CLI(DAG);
01664   CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
01665     .setCallee(CallingConv::C, IntPtrTy,
01666                DAG.getTargetExternalSymbol("__tls_get_addr", getPointerTy()),
01667                std::move(Args), 0);
01668 
01669   return LowerCallTo(CLI);
01670 }
01671 
01672 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
01673                                               SelectionDAG &DAG) const {
01674 
01675   // FIXME: TLS addresses currently use medium model code sequences,
01676   // which is the most useful form.  Eventually support for small and
01677   // large models could be added if users need it, at the cost of
01678   // additional complexity.
01679   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
01680   SDLoc dl(GA);
01681   const GlobalValue *GV = GA->getGlobal();
01682   EVT PtrVT = getPointerTy();
01683   bool is64bit = Subtarget.isPPC64();
01684   const Module *M = DAG.getMachineFunction().getFunction()->getParent();
01685   PICLevel::Level picLevel = M->getPICLevel();
01686 
01687   TLSModel::Model Model = getTargetMachine().getTLSModel(GV);
01688 
01689   if (Model == TLSModel::LocalExec) {
01690     SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
01691                                                PPCII::MO_TPREL_HA);
01692     SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
01693                                                PPCII::MO_TPREL_LO);
01694     SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2,
01695                                      is64bit ? MVT::i64 : MVT::i32);
01696     SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
01697     return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
01698   }
01699 
01700   if (Model == TLSModel::InitialExec) {
01701     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
01702     SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
01703                                                 PPCII::MO_TLS);
01704     SDValue GOTPtr;
01705     if (is64bit) {
01706       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
01707       GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
01708                            PtrVT, GOTReg, TGA);
01709     } else
01710       GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
01711     SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
01712                                    PtrVT, TGA, GOTPtr);
01713     return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
01714   }
01715 
01716   if (Model == TLSModel::GeneralDynamic) {
01717     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
01718                                              PPCII::MO_TLSGD);
01719     SDValue GOTPtr;
01720     if (is64bit) {
01721       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
01722       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
01723                                    GOTReg, TGA);
01724     } else {
01725       if (picLevel == PICLevel::Small)
01726         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
01727       else
01728         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
01729     }
01730     SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSGD_L, dl, PtrVT,
01731                                    GOTPtr, TGA);
01732     std::pair<SDValue, SDValue> CallResult = lowerTLSCall(GOTEntry, dl, DAG);
01733     return CallResult.first;
01734   }
01735 
01736   if (Model == TLSModel::LocalDynamic) {
01737     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
01738                                              PPCII::MO_TLSLD);
01739     SDValue GOTPtr;
01740     if (is64bit) {
01741       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
01742       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
01743                            GOTReg, TGA);
01744     } else {
01745       if (picLevel == PICLevel::Small)
01746         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
01747       else
01748         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
01749     }
01750     SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSLD_L, dl, PtrVT,
01751                                    GOTPtr, TGA);
01752     std::pair<SDValue, SDValue> CallResult = lowerTLSCall(GOTEntry, dl, DAG);
01753     SDValue TLSAddr = CallResult.first;
01754     SDValue Chain = CallResult.second;
01755     SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, PtrVT,
01756                                       Chain, TLSAddr, TGA);
01757     return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
01758   }
01759 
01760   llvm_unreachable("Unknown TLS model!");
01761 }
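
// Summary of the four sequences built above (descriptive, not normative):
// local-exec resolves to a static offset from the thread pointer (r13 on
// 64-bit, r2 on 32-bit); initial-exec loads the TP offset from the GOT;
// general-dynamic calls __tls_get_addr on the GOT entry; and local-dynamic
// makes the same call once per module, then adds a DTPREL offset for the
// specific variable.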
01762 
01763 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
01764                                               SelectionDAG &DAG) const {
01765   EVT PtrVT = Op.getValueType();
01766   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
01767   SDLoc DL(GSDN);
01768   const GlobalValue *GV = GSDN->getGlobal();
01769 
01770   // 64-bit SVR4 ABI code is always position-independent.
01771   // The actual address of the GlobalValue is stored in the TOC.
01772   if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
01773     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
01774     return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i64, GA,
01775                        DAG.getRegister(PPC::X2, MVT::i64));
01776   }
01777 
01778   unsigned MOHiFlag, MOLoFlag;
01779   bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag, GV);
01780 
01781   if (isPIC && Subtarget.isSVR4ABI()) {
01782     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
01783                                             GSDN->getOffset(),
01784                                             PPCII::MO_PIC_FLAG);
01785     return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i32, GA,
01786                        DAG.getNode(PPCISD::GlobalBaseReg, DL, MVT::i32));
01787   }
01788 
01789   SDValue GAHi =
01790     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
01791   SDValue GALo =
01792     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
01793 
01794   SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG);
01795 
01796   // If the global reference is actually to a non-lazy-pointer, we have to do an
01797   // extra load to get the address of the global.
01798   if (MOHiFlag & PPCII::MO_NLP_FLAG)
01799     Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(),
01800                       false, false, false, 0);
01801   return Ptr;
01802 }
01803 
01804 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
01805   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
01806   SDLoc dl(Op);
01807 
01808   if (Op.getValueType() == MVT::v2i64) {
01809     // When the operands themselves are v2i64 values, we need to do something
01810     // special because VSX has no underlying comparison operations for these.
01811     if (Op.getOperand(0).getValueType() == MVT::v2i64) {
01812       // Equality can be handled by casting to the legal type for Altivec
01813       // comparisons, everything else needs to be expanded.
01814       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
01815         return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
01816                  DAG.getSetCC(dl, MVT::v4i32,
01817                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
01818                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
01819                    CC));
01820       }
01821 
01822       return SDValue();
01823     }
01824 
01825     // We handle most of these in the usual way.
01826     return Op;
01827   }
01828 
01829   // If we're comparing for equality to zero, expose the fact that this is
01830   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
01831   // fold the new nodes.
01832   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
01833     if (C->isNullValue() && CC == ISD::SETEQ) {
01834       EVT VT = Op.getOperand(0).getValueType();
01835       SDValue Zext = Op.getOperand(0);
01836       if (VT.bitsLT(MVT::i32)) {
01837         VT = MVT::i32;
01838         Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
01839       }
01840       unsigned Log2b = Log2_32(VT.getSizeInBits());
01841       SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
01842       SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
01843                                 DAG.getConstant(Log2b, MVT::i32));
01844       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
01845     }
01846     // Leave comparisons against 0 and -1 alone for now, since they're usually
01847     // optimized.  FIXME: revisit this when we can custom lower all setcc
01848     // optimizations.
01849     if (C->isAllOnesValue() || C->isNullValue())
01850       return SDValue();
01851   }
01852 
01853   // If we have an integer seteq/setne, turn it into a compare against zero
01854   // by xor'ing the rhs with the lhs, which is faster than setting a
01855   // condition register, reading it back out, and masking the correct bit.  The
01856   // normal approach here uses sub to do this instead of xor.  Using xor exposes
01857   // the result to other bit-twiddling opportunities.
01858   EVT LHSVT = Op.getOperand(0).getValueType();
01859   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
01860     EVT VT = Op.getValueType();
01861     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
01862                                 Op.getOperand(1));
01863     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, LHSVT), CC);
01864   }
01865   return SDValue();
01866 }
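
// Worked example of the ctlz/srl idiom (illustrative), for i32 x == 0:
//   x == 0:  ctlz(0)  = 32, and 32 >> 5 = 1
//   x != 0:  ctlz(x) <= 31, and ctlz(x) >> 5 = 0
// so (ctlz(x) >> log2(32)) is exactly the boolean result of the comparison.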
01867 
01868 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
01869                                       const PPCSubtarget &Subtarget) const {
01870   SDNode *Node = Op.getNode();
01871   EVT VT = Node->getValueType(0);
01872   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
01873   SDValue InChain = Node->getOperand(0);
01874   SDValue VAListPtr = Node->getOperand(1);
01875   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
01876   SDLoc dl(Node);
01877 
01878   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
01879 
01880   // gpr_index
01881   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
01882                                     VAListPtr, MachinePointerInfo(SV), MVT::i8,
01883                                     false, false, false, 0);
01884   InChain = GprIndex.getValue(1);
01885 
01886   if (VT == MVT::i64) {
01887     // Check whether GprIndex is odd
01888     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
01889                                  DAG.getConstant(1, MVT::i32));
01890     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
01891                                 DAG.getConstant(0, MVT::i32), ISD::SETNE);
01892     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
01893                                           DAG.getConstant(1, MVT::i32));
01894     // Align GprIndex to be even if it isn't
01895     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
01896                            GprIndex);
01897   }
01898 
01899   // fpr index is 1 byte after gpr
01900   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
01901                                DAG.getConstant(1, MVT::i32));
01902 
01903   // fpr
01904   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
01905                                     FprPtr, MachinePointerInfo(SV), MVT::i8,
01906                                     false, false, false, 0);
01907   InChain = FprIndex.getValue(1);
01908 
01909   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
01910                                        DAG.getConstant(8, MVT::i32));
01911 
01912   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
01913                                         DAG.getConstant(4, MVT::i32));
01914 
01915   // areas
01916   SDValue OverflowArea = DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr,
01917                                      MachinePointerInfo(), false, false,
01918                                      false, 0);
01919   InChain = OverflowArea.getValue(1);
01920 
01921   SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr,
01922                                     MachinePointerInfo(), false, false,
01923                                     false, 0);
01924   InChain = RegSaveArea.getValue(1);
01925 
01926   // select overflow_area if index >= 8
01927   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
01928                             DAG.getConstant(8, MVT::i32), ISD::SETLT);
01929 
01930   // adjustment constant gpr_index * 4/8
01931   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
01932                                     VT.isInteger() ? GprIndex : FprIndex,
01933                                     DAG.getConstant(VT.isInteger() ? 4 : 8,
01934                                                     MVT::i32));
01935 
01936   // OurReg = RegSaveArea + RegConstant
01937   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
01938                                RegConstant);
01939 
01940   // Floating types are 32 bytes into RegSaveArea
01941   if (VT.isFloatingPoint())
01942     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
01943                          DAG.getConstant(32, MVT::i32));
01944 
01945   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
01946   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
01947                                    VT.isInteger() ? GprIndex : FprIndex,
01948                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1,
01949                                                    MVT::i32));
01950 
01951   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
01952                               VT.isInteger() ? VAListPtr : FprPtr,
01953                               MachinePointerInfo(SV),
01954                               MVT::i8, false, false, 0);
01955 
01956   // determine if we should load from reg_save_area or overflow_area
01957   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
01958 
01959   // increase overflow_area by 4/8 if gpr/fpr index >= 8
01960   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
01961                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
01962                                           MVT::i32));
01963 
01964   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
01965                              OverflowAreaPlusN);
01966 
01967   InChain = DAG.getTruncStore(InChain, dl, OverflowArea,
01968                               OverflowAreaPtr,
01969                               MachinePointerInfo(),
01970                               MVT::i32, false, false, 0);
01971 
01972   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(),
01973                      false, false, false, 0);
01974 }
01975 
01976 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG,
01977                                        const PPCSubtarget &Subtarget) const {
01978   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
01979 
01980   // We have to copy the entire va_list struct:
01981   // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes
01982   return DAG.getMemcpy(Op.getOperand(0), Op,
01983                        Op.getOperand(1), Op.getOperand(2),
01984                        DAG.getConstant(12, MVT::i32), 8, false, true,
01985                        MachinePointerInfo(), MachinePointerInfo());
01986 }
01987 
01988 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
01989                                                   SelectionDAG &DAG) const {
01990   return Op.getOperand(0);
01991 }
01992 
01993 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
01994                                                 SelectionDAG &DAG) const {
01995   SDValue Chain = Op.getOperand(0);
01996   SDValue Trmp = Op.getOperand(1); // trampoline
01997   SDValue FPtr = Op.getOperand(2); // nested function
01998   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
01999   SDLoc dl(Op);
02000 
02001   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02002   bool isPPC64 = (PtrVT == MVT::i64);
02003   Type *IntPtrTy =
02004     DAG.getTargetLoweringInfo().getDataLayout()->getIntPtrType(
02005                                                              *DAG.getContext());
02006 
02007   TargetLowering::ArgListTy Args;
02008   TargetLowering::ArgListEntry Entry;
02009 
02010   Entry.Ty = IntPtrTy;
02011   Entry.Node = Trmp; Args.push_back(Entry);
02012 
02013   // TrampSize == (isPPC64 ? 48 : 40);
02014   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40,
02015                                isPPC64 ? MVT::i64 : MVT::i32);
02016   Args.push_back(Entry);
02017 
02018   Entry.Node = FPtr; Args.push_back(Entry);
02019   Entry.Node = Nest; Args.push_back(Entry);
02020 
02021   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
02022   TargetLowering::CallLoweringInfo CLI(DAG);
02023   CLI.setDebugLoc(dl).setChain(Chain)
02024     .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
02025                DAG.getExternalSymbol("__trampoline_setup", PtrVT),
02026                std::move(Args), 0);
02027 
02028   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
02029   return CallResult.second;
02030 }
02031 
02032 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
02033                                         const PPCSubtarget &Subtarget) const {
02034   MachineFunction &MF = DAG.getMachineFunction();
02035   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
02036 
02037   SDLoc dl(Op);
02038 
02039   if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
02040     // vastart just stores the address of the VarArgsFrameIndex slot into the
02041     // memory location argument.
02042     EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02043     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
02044     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
02045     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
02046                         MachinePointerInfo(SV),
02047                         false, false, 0);
02048   }
02049 
02050   // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
02051   // We assume the given va_list is already allocated.
02052   //
02053   // typedef struct {
02054   //  char gpr;     /* index into the array of 8 GPRs
02055   //                 * stored in the register save area
02056   //                 * gpr=0 corresponds to r3,
02057   //                 * gpr=1 to r4, etc.
02058   //                 */
02059   //  char fpr;     /* index into the array of 8 FPRs
02060   //                 * stored in the register save area
02061   //                 * fpr=0 corresponds to f1,
02062   //                 * fpr=1 to f2, etc.
02063   //                 */
02064   //  char *overflow_arg_area;
02065   //                /* location on stack that holds
02066   //                 * the next overflow argument
02067   //                 */
02068   //  char *reg_save_area;
02069   //               /* where r3:r10 and f1:f8 (if saved)
02070   //                * are stored
02071   //                */
02072   // } va_list[1];
02073 
02074 
02075   SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), MVT::i32);
02076   SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), MVT::i32);
02077 
02078 
02079   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02080 
02081   SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
02082                                             PtrVT);
02083   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
02084                                  PtrVT);
02085 
02086   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
02087   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT);
02088 
02089   uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
02090   SDValue ConstStackOffset = DAG.getConstant(StackOffset, PtrVT);
02091 
02092   uint64_t FPROffset = 1;
02093   SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT);
02094 
02095   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
02096 
02097   // Store first byte : number of int regs
02098   SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR,
02099                                          Op.getOperand(1),
02100                                          MachinePointerInfo(SV),
02101                                          MVT::i8, false, false, 0);
02102   uint64_t nextOffset = FPROffset;
02103   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
02104                                   ConstFPROffset);
02105 
02106   // Store second byte : number of float regs
02107   SDValue secondStore =
02108     DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
02109                       MachinePointerInfo(SV, nextOffset), MVT::i8,
02110                       false, false, 0);
02111   nextOffset += StackOffset;
02112   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
02113 
02114   // Store second word : arguments given on stack
02115   SDValue thirdStore =
02116     DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
02117                  MachinePointerInfo(SV, nextOffset),
02118                  false, false, 0);
02119   nextOffset += FrameOffset;
02120   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
02121 
02122   // Store third word : arguments given in registers
02123   return DAG.getStore(thirdStore, dl, FR, nextPtr,
02124                       MachinePointerInfo(SV, nextOffset),
02125                       false, false, 0);
02126 
02127 }
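
// Resulting byte layout (illustrative, PtrVT == i32 so FrameOffset = 4,
// StackOffset = 3, FPROffset = 1):
//   offset 0: gpr index          (1 byte)
//   offset 1: fpr index          (1 byte)
//   offset 2: 2 bytes of padding
//   offset 4: overflow_arg_area  (char *)
//   offset 8: reg_save_area      (char *)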
02128 
02129 #include "PPCGenCallingConv.inc"
02130 
02131 // Function whose sole purpose is to kill compiler warnings 
02132 // stemming from unused functions included from PPCGenCallingConv.inc.
02133 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const {
02134   return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
02135 }
02136 
02137 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
02138                                       CCValAssign::LocInfo &LocInfo,
02139                                       ISD::ArgFlagsTy &ArgFlags,
02140                                       CCState &State) {
02141   return true;
02142 }
02143 
02144 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
02145                                              MVT &LocVT,
02146                                              CCValAssign::LocInfo &LocInfo,
02147                                              ISD::ArgFlagsTy &ArgFlags,
02148                                              CCState &State) {
02149   static const MCPhysReg ArgRegs[] = {
02150     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
02151     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
02152   };
02153   const unsigned NumArgRegs = array_lengthof(ArgRegs);
02154 
02155   unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
02156 
02157   // Skip one register if the first unallocated register has an even register
02158   // number and there are still argument registers available which have not been
02159   // allocated yet. RegNum is actually an index into ArgRegs, which means we
02160   // need to skip a register if RegNum is odd.
02161   if (RegNum != NumArgRegs && RegNum % 2 == 1) {
02162     State.AllocateReg(ArgRegs[RegNum]);
02163   }
02164 
02165   // Always return false here, as this function only makes sure that the first
02166   // unallocated register has an odd register number and does not actually
02167   // allocate a register for the current argument.
02168   return false;
02169 }
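
// Example (illustrative): with only r3 allocated so far, the first free
// register is r4 (RegNum == 1, odd), so r4 is allocated here and skipped;
// a following i64 is then passed in the aligned pair r5:r6 and r4 simply
// goes unused.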
02170 
02171 bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
02172                                                MVT &LocVT,
02173                                                CCValAssign::LocInfo &LocInfo,
02174                                                ISD::ArgFlagsTy &ArgFlags,
02175                                                CCState &State) {
02176   static const MCPhysReg ArgRegs[] = {
02177     PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
02178     PPC::F8
02179   };
02180 
02181   const unsigned NumArgRegs = array_lengthof(ArgRegs);
02182 
02183   unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
02184 
02185   // If there is only one Floating-point register left we need to put both f64
02186   // values of a split ppc_fp128 value on the stack.
02187   if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
02188     State.AllocateReg(ArgRegs[RegNum]);
02189   }
02190 
02191   // Always return false here, as this function only makes sure that the two f64
02192   // values a ppc_fp128 value is split into are both passed in registers or both
02193   // passed on the stack and does not actually allocate a register for the
02194   // current argument.
02195   return false;
02196 }
02197 
02198 /// GetFPR - Get the set of FP registers that should be allocated for arguments
02199 /// on Darwin.
02200 static const MCPhysReg *GetFPR() {
02201   static const MCPhysReg FPR[] = {
02202     PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
02203     PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
02204   };
02205 
02206   return FPR;
02207 }
02208 
02209 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
02210 /// the stack.
02211 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
02212                                        unsigned PtrByteSize) {
02213   unsigned ArgSize = ArgVT.getStoreSize();
02214   if (Flags.isByVal())
02215     ArgSize = Flags.getByValSize();
02216 
02217   // Round up to multiples of the pointer size, except for array members,
02218   // which are always packed.
02219   if (!Flags.isInConsecutiveRegs())
02220     ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
02221 
02222   return ArgSize;
02223 }
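
// Example (illustrative): a 13-byte byval argument with PtrByteSize == 8
// reserves ((13 + 7) / 8) * 8 = 16 bytes of parameter area, while the same
// bytes as a consecutive-register array member stay packed at 13.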
02224 
02225 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
02226 /// on the stack.
02227 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
02228                                             ISD::ArgFlagsTy Flags,
02229                                             unsigned PtrByteSize) {
02230   unsigned Align = PtrByteSize;
02231 
02232   // Altivec parameters are padded to a 16 byte boundary.
02233   if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
02234       ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
02235       ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64)
02236     Align = 16;
02237 
02238   // ByVal parameters are aligned as requested.
02239   if (Flags.isByVal()) {
02240     unsigned BVAlign = Flags.getByValAlign();
02241     if (BVAlign > PtrByteSize) {
02242       if (BVAlign % PtrByteSize != 0)
02243         llvm_unreachable(
02244             "ByVal alignment is not a multiple of the pointer size");
02245 
02246       Align = BVAlign;
02247     }
02248   }
02249 
02250   // Array members are always packed to their original alignment.
02251   if (Flags.isInConsecutiveRegs()) {
02252     // If the array member was split into multiple registers, the first
02253     // needs to be aligned to the size of the full type.  (Except for
02254     // ppcf128, which is only aligned as its f64 components.)
02255     if (Flags.isSplit() && OrigVT != MVT::ppcf128)
02256       Align = OrigVT.getStoreSize();
02257     else
02258       Align = ArgVT.getStoreSize();
02259   }
02260 
02261   return Align;
02262 }
02263 
02264 /// CalculateStackSlotUsed - Return whether this argument will use its
02265 /// stack slot (instead of being passed in registers).  ArgOffset,
02266 /// AvailableFPRs, and AvailableVRs must hold the current argument
02267 /// position, and will be updated to account for this argument.
02268 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
02269                                    ISD::ArgFlagsTy Flags,
02270                                    unsigned PtrByteSize,
02271                                    unsigned LinkageSize,
02272                                    unsigned ParamAreaSize,
02273                                    unsigned &ArgOffset,
02274                                    unsigned &AvailableFPRs,
02275                                    unsigned &AvailableVRs) {
02276   bool UseMemory = false;
02277 
02278   // Respect alignment of argument on the stack.
02279   unsigned Align =
02280     CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
02281   ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
02282   // If there's no space left in the argument save area, we must
02283   // use memory (this check also catches zero-sized arguments).
02284   if (ArgOffset >= LinkageSize + ParamAreaSize)
02285     UseMemory = true;
02286 
02287   // Allocate argument on the stack.
02288   ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
02289   if (Flags.isInConsecutiveRegsLast())
02290     ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
02291   // If we overran the argument save area, we must use memory
02292   // (this check catches arguments passed partially in memory)
02293   if (ArgOffset > LinkageSize + ParamAreaSize)
02294     UseMemory = true;
02295 
02296   // However, if the argument is actually passed in an FPR or a VR,
02297   // we don't use memory after all.
02298   if (!Flags.isByVal()) {
02299     if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
02300       if (AvailableFPRs > 0) {
02301         --AvailableFPRs;
02302         return false;
02303       }
02304     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
02305         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
02306         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64)
02307       if (AvailableVRs > 0) {
02308         --AvailableVRs;
02309         return false;
02310       }
02311   }
02312 
02313   return UseMemory;
02314 }
02315 
02316 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
02317 /// ensure minimum alignment required for target.
02318 static unsigned EnsureStackAlignment(const TargetMachine &Target,
02319                                      unsigned NumBytes) {
02320   unsigned TargetAlign =
02321       Target.getSubtargetImpl()->getFrameLowering()->getStackAlignment();
02322   unsigned AlignMask = TargetAlign - 1;
02323   NumBytes = (NumBytes + AlignMask) & ~AlignMask;
02324   return NumBytes;
02325 }
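
// Example (illustrative): with a 16-byte target stack alignment,
// AlignMask == 15 and a 52-byte frame rounds up to (52 + 15) & ~15 == 64.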
02326 
02327 SDValue
02328 PPCTargetLowering::LowerFormalArguments(SDValue Chain,
02329                                         CallingConv::ID CallConv, bool isVarArg,
02330                                         const SmallVectorImpl<ISD::InputArg>
02331                                           &Ins,
02332                                         SDLoc dl, SelectionDAG &DAG,
02333                                         SmallVectorImpl<SDValue> &InVals)
02334                                           const {
02335   if (Subtarget.isSVR4ABI()) {
02336     if (Subtarget.isPPC64())
02337       return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
02338                                          dl, DAG, InVals);
02339     else
02340       return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
02341                                          dl, DAG, InVals);
02342   } else {
02343     return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
02344                                        dl, DAG, InVals);
02345   }
02346 }
02347 
02348 SDValue
02349 PPCTargetLowering::LowerFormalArguments_32SVR4(
02350                                       SDValue Chain,
02351                                       CallingConv::ID CallConv, bool isVarArg,
02352                                       const SmallVectorImpl<ISD::InputArg>
02353                                         &Ins,
02354                                       SDLoc dl, SelectionDAG &DAG,
02355                                       SmallVectorImpl<SDValue> &InVals) const {
02356 
02357   // 32-bit SVR4 ABI Stack Frame Layout:
02358   //              +-----------------------------------+
02359   //        +-->  |            Back chain             |
02360   //        |     +-----------------------------------+
02361   //        |     | Floating-point register save area |
02362   //        |     +-----------------------------------+
02363   //        |     |    General register save area     |
02364   //        |     +-----------------------------------+
02365   //        |     |          CR save word             |
02366   //        |     +-----------------------------------+
02367   //        |     |         VRSAVE save word          |
02368   //        |     +-----------------------------------+
02369   //        |     |         Alignment padding         |
02370   //        |     +-----------------------------------+
02371   //        |     |     Vector register save area     |
02372   //        |     +-----------------------------------+
02373   //        |     |       Local variable space        |
02374   //        |     +-----------------------------------+
02375   //        |     |        Parameter list area        |
02376   //        |     +-----------------------------------+
02377   //        |     |           LR save word            |
02378   //        |     +-----------------------------------+
02379   // SP-->  +---  |            Back chain             |
02380   //              +-----------------------------------+
02381   //
02382   // Specifications:
02383   //   System V Application Binary Interface PowerPC Processor Supplement
02384   //   AltiVec Technology Programming Interface Manual
02385 
02386   MachineFunction &MF = DAG.getMachineFunction();
02387   MachineFrameInfo *MFI = MF.getFrameInfo();
02388   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
02389 
02390   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02391   // Potential tail calls could cause overwriting of argument stack slots.
02392   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
02393                        (CallConv == CallingConv::Fast));
02394   unsigned PtrByteSize = 4;
02395 
02396   // Assign locations to all of the incoming arguments.
02397   SmallVector<CCValAssign, 16> ArgLocs;
02398   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
02399                  *DAG.getContext());
02400 
02401   // Reserve space for the linkage area on the stack.
02402   unsigned LinkageSize = PPCFrameLowering::getLinkageSize(false, false, false);
02403   CCInfo.AllocateStack(LinkageSize, PtrByteSize);
02404 
02405   CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
02406 
02407   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
02408     CCValAssign &VA = ArgLocs[i];
02409 
02410     // Arguments stored in registers.
02411     if (VA.isRegLoc()) {
02412       const TargetRegisterClass *RC;
02413       EVT ValVT = VA.getValVT();
02414 
02415       switch (ValVT.getSimpleVT().SimpleTy) {
02416         default:
02417           llvm_unreachable("ValVT not supported by formal arguments Lowering");
02418         case MVT::i1:
02419         case MVT::i32:
02420           RC = &PPC::GPRCRegClass;
02421           break;
02422         case MVT::f32:
02423           RC = &PPC::F4RCRegClass;
02424           break;
02425         case MVT::f64:
02426           if (Subtarget.hasVSX())
02427             RC = &PPC::VSFRCRegClass;
02428           else
02429             RC = &PPC::F8RCRegClass;
02430           break;
02431         case MVT::v16i8:
02432         case MVT::v8i16:
02433         case MVT::v4i32:
02434         case MVT::v4f32:
02435           RC = &PPC::VRRCRegClass;
02436           break;
02437         case MVT::v2f64:
02438         case MVT::v2i64:
02439           RC = &PPC::VSHRCRegClass;
02440           break;
02441       }
02442 
02443       // Transform the arguments stored in physical registers into virtual ones.
02444       unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
02445       SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
02446                                             ValVT == MVT::i1 ? MVT::i32 : ValVT);
02447 
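      // i1 arguments arrive promoted to i32 by the calling convention, so
      // copy them out of the register as i32 and truncate back to i1 below.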
02448       if (ValVT == MVT::i1)
02449         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
02450 
02451       InVals.push_back(ArgValue);
02452     } else {
02453       // Argument stored in memory.
02454       assert(VA.isMemLoc());
02455 
02456       unsigned ArgSize = VA.getLocVT().getStoreSize();
02457       int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(),
02458                                       isImmutable);
02459 
02460       // Create load nodes to retrieve arguments from the stack.
02461       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
02462       InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
02463                                    MachinePointerInfo(),
02464                                    false, false, false, 0));
02465     }
02466   }
02467 
02468   // Assign locations to all of the incoming aggregate by value arguments.
02469   // Aggregates passed by value are stored in the local variable space of the
02470   // caller's stack frame, right above the parameter list area.
02471   SmallVector<CCValAssign, 16> ByValArgLocs;
02472   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
02473                       ByValArgLocs, *DAG.getContext());
02474 
02475   // Reserve stack space for the allocations in CCInfo.
02476   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
02477 
02478   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
02479 
02480   // Area that is at least reserved in the caller of this function.
02481   unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
02482   MinReservedArea = std::max(MinReservedArea, LinkageSize);
02483 
02484   // Set the size that is at least reserved in the caller of this function.  Tail
02485   // call optimized function's reserved stack space needs to be aligned so that
02486   // taking the difference between two stack areas will result in an aligned
02487   // stack.
02488   MinReservedArea = EnsureStackAlignment(MF.getTarget(), MinReservedArea);
02489   FuncInfo->setMinReservedArea(MinReservedArea);
02490 
02491   SmallVector<SDValue, 8> MemOps;
02492 
02493   // If the function takes a variable number of arguments, make a frame index for
02494   // the start of the first vararg value... for expansion of llvm.va_start.
02495   if (isVarArg) {
02496     static const MCPhysReg GPArgRegs[] = {
02497       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
02498       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
02499     };
02500     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
02501 
02502     static const MCPhysReg FPArgRegs[] = {
02503       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
02504       PPC::F8
02505     };
02506     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
02507     if (DisablePPCFloatInVariadic)
02508       NumFPArgRegs = 0;
02509 
02510     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs,
02511                                                           NumGPArgRegs));
02512     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs,
02513                                                           NumFPArgRegs));
02514 
02515     // Make room for NumGPArgRegs and NumFPArgRegs.
02516     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
02517                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
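    // For example, with all 8 GPRs (4 bytes each) and all 8 FPRs (8 bytes
    // each) to save, Depth = 8*4 + 8*8 = 96 bytes.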
02518 
02519     FuncInfo->setVarArgsStackOffset(
02520       MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
02521                              CCInfo.getNextStackOffset(), true));
02522 
02523     FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false));
02524     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
02525 
02526     // The fixed integer arguments of a variadic function are stored to the
02527     // VarArgsFrameIndex on the stack so that they may be loaded by dereferencing
02528     // the result of va_next.
02529     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
02530       // Get an existing live-in vreg, or add a new one.
02531       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
02532       if (!VReg)
02533         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
02534 
02535       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
02536       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
02537                                    MachinePointerInfo(), false, false, 0);
02538       MemOps.push_back(Store);
02539       // Increment the address by four for the next argument to store
02540       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
02541       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
02542     }
02543 
02544     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
02545     // is set.
02546     // The double arguments are stored to the VarArgsFrameIndex
02547     // on the stack.
02548     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
02549       // Get an existing live-in vreg, or add a new one.
02550       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
02551       if (!VReg)
02552         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
02553 
02554       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
02555       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
02556                                    MachinePointerInfo(), false, false, 0);
02557       MemOps.push_back(Store);
02558       // Increment the address by eight for the next argument to store
02559       SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8,
02560                                          PtrVT);
02561       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
02562     }
02563   }
02564 
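  // Merge the vararg register-save stores into the chain with a TokenFactor
  // so that later chained operations depend on all of them.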
02565   if (!MemOps.empty())
02566     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
02567 
02568   return Chain;
02569 }
02570 
02571 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
02572 // value to MVT::i64 and then truncate to the correct register size.
02573 SDValue
02574 PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
02575                                      SelectionDAG &DAG, SDValue ArgVal,
02576                                      SDLoc dl) const {
02577   if (Flags.isSExt())
02578     ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
02579                          DAG.getValueType(ObjectVT));
02580   else if (Flags.isZExt())
02581     ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
02582                          DAG.getValueType(ObjectVT));
02583 
02584   return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
02585 }
02586 
02587 SDValue
02588 PPCTargetLowering::LowerFormalArguments_64SVR4(
02589                                       SDValue Chain,
02590                                       CallingConv::ID CallConv, bool isVarArg,
02591                                       const SmallVectorImpl<ISD::InputArg>
02592                                         &Ins,
02593                                       SDLoc dl, SelectionDAG &DAG,
02594                                       SmallVectorImpl<SDValue> &InVals) const {
02595   // TODO: add description of PPC stack frame format, or at least some docs.
02596   //
02597   bool isELFv2ABI = Subtarget.isELFv2ABI();
02598   bool isLittleEndian = Subtarget.isLittleEndian();
02599   MachineFunction &MF = DAG.getMachineFunction();
02600   MachineFrameInfo *MFI = MF.getFrameInfo();
02601   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
02602 
02603   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02604   // Potential tail calls could cause overwriting of argument stack slots.
02605   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
02606                        (CallConv == CallingConv::Fast));
02607   unsigned PtrByteSize = 8;
02608 
02609   unsigned LinkageSize = PPCFrameLowering::getLinkageSize(true, false,
02610                                                           isELFv2ABI);
02611 
02612   static const MCPhysReg GPR[] = {
02613     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
02614     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
02615   };
02616 
02617   static const MCPhysReg *FPR = GetFPR();
02618 
02619   static const MCPhysReg VR[] = {
02620     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
02621     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
02622   };
02623   static const MCPhysReg VSRH[] = {
02624     PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8,
02625     PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13
02626   };
02627 
02628   const unsigned Num_GPR_Regs = array_lengthof(GPR);
02629   const unsigned Num_FPR_Regs = 13;
02630   const unsigned Num_VR_Regs  = array_lengthof(VR);
02631 
02632   // Do a first pass over the arguments to determine whether the ABI
02633   // guarantees that our caller has allocated the parameter save area
02634   // on its stack frame.  In the ELFv1 ABI, this is always the case;
02635   // in the ELFv2 ABI, it is true if this is a vararg function or if
02636   // any parameter is located in a stack slot.
02637 
02638   bool HasParameterArea = !isELFv2ABI || isVarArg;
02639   unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
02640   unsigned NumBytes = LinkageSize;
02641   unsigned AvailableFPRs = Num_FPR_Regs;
02642   unsigned AvailableVRs = Num_VR_Regs;
02643   for (unsigned i = 0, e = Ins.size(); i != e; ++i)
02644     if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
02645                                PtrByteSize, LinkageSize, ParamAreaSize,
02646                                NumBytes, AvailableFPRs, AvailableVRs))
02647       HasParameterArea = true;
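  // For example, a non-vararg ELFv2 function whose arguments all fit in
  // registers needs no parameter save area, but one taking a ninth integer
  // argument spills it to a stack slot, so the area must exist.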
02648 
02649   // Add DAG nodes to load the arguments or copy them out of registers.  On
02650   // entry to a function on PPC, the arguments start after the linkage area,
02651   // although the first ones are often in registers.
02652 
02653   unsigned ArgOffset = LinkageSize;
02654   unsigned GPR_idx, FPR_idx = 0, VR_idx = 0;
02655   SmallVector<SDValue, 8> MemOps;
02656   Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
02657   unsigned CurArgIdx = 0;
02658   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
02659     SDValue ArgVal;
02660     bool needsLoad = false;
02661     EVT ObjectVT = Ins[ArgNo].VT;
02662     EVT OrigVT = Ins[ArgNo].ArgVT;
02663     unsigned ObjSize = ObjectVT.getStoreSize();
02664     unsigned ArgSize = ObjSize;
02665     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
02666     std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx);
02667     CurArgIdx = Ins[ArgNo].OrigArgIndex;
02668 
02669     // Respect the alignment of the argument on the stack.
02670     unsigned Align =
02671       CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
02672     ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
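    // E.g. a 16-byte aligned vector argument at ArgOffset 40 is placed at
    // ((40 + 15) / 16) * 16 = 48.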
02673     unsigned CurArgOffset = ArgOffset;
02674 
02675     // Compute the GPR index associated with the argument offset.
02676     GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
02677     GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
02678 
02679     // FIXME the codegen can be much improved in some cases.
02680     // We do not have to keep everything in memory.
02681     if (Flags.isByVal()) {
02682       // ObjSize is the true size; ArgSize is it rounded up to whole registers.
02683       ObjSize = Flags.getByValSize();
02684       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
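      // For example, a 13-byte struct has ObjSize = 13 but occupies
      // ArgSize = 16 bytes (two full doublewords) of the parameter area.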
02685       // Empty aggregate parameters do not take up registers.  Examples:
02686       //   struct { } a;
02687       //   union  { } b;
02688       //   int c[0];
02689       // etc.  However, we have to provide a place-holder in InVals, so
02690       // pretend we have an 8-byte item at the current address for that
02691       // purpose.
02692       if (!ObjSize) {
02693         int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
02694         SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
02695         InVals.push_back(FIN);
02696         continue;
02697       }
02698 
02699       // Create a stack object covering all stack doublewords occupied
02700       // by the argument.  If the argument is (fully or partially) on
02701       // the stack, or if the argument is fully in registers but the
02702       // caller has allocated the parameter save area anyway, we can refer
02703       // directly to the caller's stack frame.  Otherwise, create a
02704       // local copy in our own frame.
02705       int FI;
02706       if (HasParameterArea ||
02707           ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
02708         FI = MFI->CreateFixedObject(ArgSize, ArgOffset, false, true);
02709       else
02710         FI = MFI->CreateStackObject(ArgSize, Align, false);
02711       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
02712 
02713       // Handle aggregates smaller than 8 bytes.
02714       if (ObjSize < PtrByteSize) {
02715         // The value of the object is its address, which differs from the
02716         // address of the enclosing doubleword on big-endian systems.
02717         SDValue Arg = FIN;
02718         if (!isLittleEndian) {
02719           SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, PtrVT);
02720           Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
02721         }
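        // E.g. a 3-byte aggregate on a big-endian target is addressed at
        // FIN + (8 - 3) = FIN + 5, the last three bytes of its doubleword.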
02722         InVals.push_back(Arg);
02723 
02724         if (GPR_idx != Num_GPR_Regs) {
02725           unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
02726           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
02727           SDValue Store;
02728 
02729           if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
02730             EVT ObjType = (ObjSize == 1 ? MVT::i8 :
02731                            (ObjSize == 2 ? MVT::i16 : MVT::i32));
02732             Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
02733                                       MachinePointerInfo(FuncArg),
02734                                       ObjType, false, false, 0);
02735           } else {
02736             // For sizes that don't fit a truncating store (3, 5, 6, 7),
02737             // store the whole register as-is to the parameter save area
02738             // slot.
02739             Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
02740                                  MachinePointerInfo(FuncArg),
02741                                  false, false, 0);
02742           }
02743 
02744           MemOps.push_back(Store);
02745         }
02746         // Whether we copied from a register or not, advance the offset
02747         // into the parameter save area by a full doubleword.
02748         ArgOffset += PtrByteSize;
02749         continue;
02750       }
02751 
02752       // The value of the object is its address, which is the address of
02753       // its first stack doubleword.
02754       InVals.push_back(FIN);
02755 
02756       // Store whatever pieces of the object are in registers to memory.
02757       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
02758         if (GPR_idx == Num_GPR_Regs)
02759           break;
02760 
02761         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
02762         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
02763         SDValue Addr = FIN;
02764         if (j) {
02765           SDValue Off = DAG.getConstant(j, PtrVT);
02766           Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
02767         }
02768         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
02769                                      MachinePointerInfo(FuncArg, j),
02770                                      false, false, 0);
02771         MemOps.push_back(Store);
02772         ++GPR_idx;
02773       }
02774       ArgOffset += ArgSize;
02775       continue;
02776     }
02777 
02778     switch (ObjectVT.getSimpleVT().SimpleTy) {
02779     default: llvm_unreachable("Unhandled argument type!");
02780     case MVT::i1:
02781     case MVT::i32:
02782     case MVT::i64:
02783       // These can be scalar arguments or elements of an integer array type
02784       // passed directly.  Clang may use those instead of "byval" aggregate
02785       // types to avoid forcing arguments to memory unnecessarily.
02786       if (GPR_idx != Num_GPR_Regs) {
02787         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
02788         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
02789 
02790         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
02791           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
02792           // value to MVT::i64 and then truncate to the correct register size.
02793           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
02794       } else {
02795         needsLoad = true;
02796         ArgSize = PtrByteSize;
02797       }
02798       ArgOffset += 8;
02799       break;
02800 
02801     case MVT::f32:
02802     case MVT::f64:
02803       // These can be scalar arguments or elements of a float array type
02804       // passed directly.  The latter are used to implement ELFv2 homogeneous
02805       // float aggregates.
02806       if (FPR_idx != Num_FPR_Regs) {
02807         unsigned VReg;
02808 
02809         if (ObjectVT == MVT::f32)
02810           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
02811         else
02812           VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() ?
02813                                             &PPC::VSFRCRegClass :
02814                                             &PPC::F8RCRegClass);
02815 
02816         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
02817         ++FPR_idx;
02818       } else if (GPR_idx != Num_GPR_Regs) {
02819         // This can only ever happen in the presence of f32 array types,
02820         // since otherwise we never run out of FPRs before running out
02821         // of GPRs.
02822         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
02823         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
02824 
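        // The f32 sits in one 32-bit half of the doubleword GPR, determined
        // by its address and the endianness; shift the high half down before
        // truncating when necessary.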
02825         if (ObjectVT == MVT::f32) {
02826           if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
02827             ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
02828                                  DAG.getConstant(32, MVT::i32));
02829           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
02830         }
02831 
02832         ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
02833       } else {
02834         needsLoad = true;
02835       }
02836 
02837       // When passing an array of floats, the array occupies consecutive
02838       // space in the argument area; only round up to the next doubleword
02839       // at the end of the array.  Otherwise, each float takes 8 bytes.
02840       ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
02841       ArgOffset += ArgSize;
02842       if (Flags.isInConsecutiveRegsLast())
02843         ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
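      // E.g. three consecutive f32 elements occupy relative offsets 0, 4 and
      // 8 of the array; after the last one, ArgOffset rounds from 12 up to 16.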
02844       break;
02845     case MVT::v4f32:
02846     case MVT::v4i32:
02847     case MVT::v8i16:
02848     case MVT::v16i8:
02849     case MVT::v2f64:
02850     case MVT::v2i64:
02851       // These can be scalar arguments or elements of a vector array type
02852       // passed directly.  The latter are used to implement ELFv2 homogeneous
02853       // vector aggregates.
02854       if (VR_idx != Num_VR_Regs) {
02855         unsigned VReg = (ObjectVT == MVT::v2f64 || ObjectVT == MVT::v2i64) ?
02856                         MF.addLiveIn(VSRH[VR_idx], &PPC::VSHRCRegClass) :
02857                         MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
02858         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
02859         ++VR_idx;
02860       } else {
02861         needsLoad = true;
02862       }
02863       ArgOffset += 16;
02864       break;
02865     }
02866 
02867     // We need to load the argument to a virtual register if we determined
02868     // above that we ran out of physical registers of the appropriate type.
02869     if (needsLoad) {
02870       if (ObjSize < ArgSize && !isLittleEndian)
02871         CurArgOffset += ArgSize - ObjSize;
02872       int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
02873       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
02874       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
02875                            false, false, false, 0);
02876     }
02877 
02878     InVals.push_back(ArgVal);
02879   }
02880 
02881   // Area that is at least reserved in the caller of this function.
02882   unsigned MinReservedArea;
02883   if (HasParameterArea)
02884     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
02885   else
02886     MinReservedArea = LinkageSize;
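  // The 8 * PtrByteSize term above is the 64-byte parameter save area backing
  // the eight GPR argument registers; e.g. under ELFv1, with a 48-byte
  // linkage area, the caller reserves at least 48 + 64 = 112 bytes.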
02887 
02888   // Set the size that is at least reserved in the caller of this function.  Tail
02889   // call optimized functions' reserved stack space needs to be aligned so that
02890   // taking the difference between two stack areas will result in an aligned
02891   // stack.
02892   MinReservedArea = EnsureStackAlignment(MF.getTarget(), MinReservedArea);
02893   FuncInfo->setMinReservedArea(MinReservedArea);
02894 
02895   // If the function takes a variable number of arguments, make a frame index for
02896   // the start of the first vararg value... for expansion of llvm.va_start.
02897   if (isVarArg) {
02898     int Depth = ArgOffset;
02899 
02900     FuncInfo->setVarArgsFrameIndex(
02901       MFI->CreateFixedObject(PtrByteSize, Depth, true));
02902     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
02903 
02904     // If this function is vararg, store any remaining integer argument regs
02905     // to their spots on the stack so that they may be loaded by dereferencing the
02906     // result of va_next.
02907     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
02908          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
02909       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
02910       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
02911       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
02912                                    MachinePointerInfo(), false, false, 0);
02913       MemOps.push_back(Store);
02914       // Increment the address by eight for the next argument to store
02915       SDValue PtrOff = DAG.getConstant(PtrByteSize, PtrVT);
02916       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
02917     }
02918   }
02919 
02920   if (!MemOps.empty())
02921     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
02922 
02923   return Chain;
02924 }
02925 
02926 SDValue
02927 PPCTargetLowering::LowerFormalArguments_Darwin(
02928                                       SDValue Chain,
02929                                       CallingConv::ID CallConv, bool isVarArg,
02930                                       const SmallVectorImpl<ISD::InputArg>
02931                                         &Ins,
02932                                       SDLoc dl, SelectionDAG &DAG,
02933                                       SmallVectorImpl<SDValue> &InVals) const {
02934   // TODO: add description of PPC stack frame format, or at least some docs.
02935   //
02936   MachineFunction &MF = DAG.getMachineFunction();
02937   MachineFrameInfo *MFI = MF.getFrameInfo();
02938   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
02939 
02940   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02941   bool isPPC64 = PtrVT == MVT::i64;
02942   // Potential tail calls could cause overwriting of argument stack slots.
02943   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
02944                        (CallConv == CallingConv::Fast));
02945   unsigned PtrByteSize = isPPC64 ? 8 : 4;
02946 
02947   unsigned LinkageSize = PPCFrameLowering::getLinkageSize(isPPC64, true,
02948                                                           false);
02949   unsigned ArgOffset = LinkageSize;
02950   // Area that is at least reserved in caller of this function.
02951   unsigned MinReservedArea = ArgOffset;
02952 
02953   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
02954     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
02955     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
02956   };
02957   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
02958     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
02959     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
02960   };
02961 
02962   static const MCPhysReg *FPR = GetFPR();
02963 
02964   static const MCPhysReg VR[] = {
02965     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
02966     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
02967   };
02968 
02969   const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
02970   const unsigned Num_FPR_Regs = 13;
02971   const unsigned Num_VR_Regs  = array_lengthof(VR);
02972 
02973   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
02974 
02975   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
02976 
02977   // In 32-bit non-varargs functions, the stack space for vectors is after the
02978   // stack space for non-vectors.  We do not use this space unless we have
02979   // too many vectors to fit in registers, something that only occurs in
02980   // constructed examples, but we have to walk the arglist to figure
02981   // that out.  For the pathological case, compute VecArgOffset as the
02982   // start of the vector parameter area.  Computing VecArgOffset is the
02983   // entire point of the following loop.
02984   unsigned VecArgOffset = ArgOffset;
02985   if (!isVarArg && !isPPC64) {
02986     for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
02987          ++ArgNo) {
02988       EVT ObjectVT = Ins[ArgNo].VT;
02989       ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
02990 
02991       if (Flags.isByVal()) {
02992         // ObjSize is the true size; ArgSize is it rounded up to whole regs.
02993         unsigned ObjSize = Flags.getByValSize();
02994         unsigned ArgSize =
02995                 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
02996         VecArgOffset += ArgSize;
02997         continue;
02998       }
02999 
03000       switch(ObjectVT.getSimpleVT().SimpleTy) {
03001       default: llvm_unreachable("Unhandled argument type!");
03002       case MVT::i1:
03003       case MVT::i32:
03004       case MVT::f32:
03005         VecArgOffset += 4;
03006         break;
03007       case MVT::i64:  // PPC64
03008       case MVT::f64:
03009         // FIXME: We are guaranteed to be !isPPC64 at this point.
03010         // Does MVT::i64 apply?
03011         VecArgOffset += 8;
03012         break;
03013       case MVT::v4f32:
03014       case MVT::v4i32:
03015       case MVT::v8i16:
03016       case MVT::v16i8:
03017         // Nothing to do; we're only looking at non-vector args here.
03018         break;
03019       }
03020     }
03021   }
03022   // We've found where the vector parameter area in memory is.  Skip the
03023   // first 12 vector parameters (in V2-V13); these don't use that memory.
03024   VecArgOffset = ((VecArgOffset+15)/16)*16;
03025   VecArgOffset += 12*16;
03026 
03027   // Add DAG nodes to load the arguments or copy them out of registers.  On
03028   // entry to a function on PPC, the arguments start after the linkage area,
03029   // although the first ones are often in registers.
03030 
03031   SmallVector<SDValue, 8> MemOps;
03032   unsigned nAltivecParamsAtEnd = 0;
03033   Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
03034   unsigned CurArgIdx = 0;
03035   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
03036     SDValue ArgVal;
03037     bool needsLoad = false;
03038     EVT ObjectVT = Ins[ArgNo].VT;
03039     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
03040     unsigned ArgSize = ObjSize;
03041     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
03042     std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx);
03043     CurArgIdx = Ins[ArgNo].OrigArgIndex;
03044 
03045     unsigned CurArgOffset = ArgOffset;
03046 
03047     // Varargs or 64-bit AltiVec parameters are padded to a 16-byte boundary.
03048     if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
03049         ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
03050       if (isVarArg || isPPC64) {
03051         MinReservedArea = ((MinReservedArea+15)/16)*16;
03052         MinReservedArea += CalculateStackSlotSize(ObjectVT,
03053                                                   Flags,
03054                                                   PtrByteSize);
03055       } else  nAltivecParamsAtEnd++;
03056     } else
03057       // Calculate min reserved area.
03058       MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
03059                                                 Flags,
03060                                                 PtrByteSize);
03061 
03062     // FIXME the codegen can be much improved in some cases.
03063     // We do not have to keep everything in memory.
03064     if (Flags.isByVal()) {
03065       // ObjSize is the true size; ArgSize is it rounded up to whole registers.
03066       ObjSize = Flags.getByValSize();
03067       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
03068       // Objects of size 1 and 2 are right justified, everything else is
03069       // left justified.  This means the memory address is adjusted forwards.
03070       if (ObjSize==1 || ObjSize==2) {
03071         CurArgOffset = CurArgOffset + (4 - ObjSize);
03072       }
03073       // The value of the object is its address.
03074       int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, false, true);
03075       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
03076       InVals.push_back(FIN);
03077       if (ObjSize==1 || ObjSize==2) {
03078         if (GPR_idx != Num_GPR_Regs) {
03079           unsigned VReg;
03080           if (isPPC64)
03081             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
03082           else
03083             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
03084           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
03085           EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
03086           SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
03087                                             MachinePointerInfo(FuncArg),
03088                                             ObjType, false, false, 0);
03089           MemOps.push_back(Store);
03090           ++GPR_idx;
03091         }
03092 
03093         ArgOffset += PtrByteSize;
03094 
03095         continue;
03096       }
03097       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
03098         // Store whatever pieces of the object are in registers
03099         // to memory.  ArgOffset will be the address of the beginning
03100         // of the object.
03101         if (GPR_idx != Num_GPR_Regs) {
03102           unsigned VReg;
03103           if (isPPC64)
03104             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
03105           else
03106             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
03107           int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
03108           SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
03109           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
03110           SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
03111                                        MachinePointerInfo(FuncArg, j),
03112                                        false, false, 0);
03113           MemOps.push_back(Store);
03114           ++GPR_idx;
03115           ArgOffset += PtrByteSize;
03116         } else {
03117           ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
03118           break;
03119         }
03120       }
03121       continue;
03122     }
03123 
03124     switch (ObjectVT.getSimpleVT().SimpleTy) {
03125     default: llvm_unreachable("Unhandled argument type!");
03126     case MVT::i1:
03127     case MVT::i32:
03128       if (!isPPC64) {
03129         if (GPR_idx != Num_GPR_Regs) {
03130           unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
03131           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
03132 
03133           if (ObjectVT == MVT::i1)
03134             ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);
03135 
03136           ++GPR_idx;
03137         } else {
03138           needsLoad = true;
03139           ArgSize = PtrByteSize;
03140         }
03141         // All int arguments reserve stack space in the Darwin ABI.
03142         ArgOffset += PtrByteSize;
03143         break;
03144       }
03145       // FALLTHROUGH
03146     case MVT::i64:  // PPC64
03147       if (GPR_idx != Num_GPR_Regs) {
03148         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
03149         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
03150 
03151         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
03152           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
03153           // value to MVT::i64 and then truncate to the correct register size.
03154           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
03155 
03156         ++GPR_idx;
03157       } else {
03158         needsLoad = true;
03159         ArgSize = PtrByteSize;
03160       }
03161       // All int arguments reserve stack space in the Darwin ABI.
03162       ArgOffset += 8;
03163       break;
03164 
03165     case MVT::f32:
03166     case MVT::f64:
03167       // Every 4 bytes of argument space consumes one of the GPRs available for
03168       // argument passing.
03169       if (GPR_idx != Num_GPR_Regs) {
03170         ++GPR_idx;
03171         if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
03172           ++GPR_idx;
03173       }
03174       if (FPR_idx != Num_FPR_Regs) {
03175         unsigned VReg;
03176 
03177         if (ObjectVT == MVT::f32)
03178           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
03179         else
03180           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
03181 
03182         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
03183         ++FPR_idx;
03184       } else {
03185         needsLoad = true;
03186       }
03187 
03188       // All FP arguments reserve stack space in the Darwin ABI.
03189       ArgOffset += isPPC64 ? 8 : ObjSize;
03190       break;
03191     case MVT::v4f32:
03192     case MVT::v4i32:
03193     case MVT::v8i16:
03194     case MVT::v16i8:
03195       // Note that vector arguments in registers don't reserve stack space,
03196       // except in varargs functions.
03197       if (VR_idx != Num_VR_Regs) {
03198         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
03199         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
03200         if (isVarArg) {
03201           while ((ArgOffset % 16) != 0) {
03202             ArgOffset += PtrByteSize;
03203             if (GPR_idx != Num_GPR_Regs)
03204               GPR_idx++;
03205           }
03206           ArgOffset += 16;
03207           GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
03208         }
03209         ++VR_idx;
03210       } else {
03211         if (!isVarArg && !isPPC64) {
03212           // Vectors go after all the nonvectors.
03213           CurArgOffset = VecArgOffset;
03214           VecArgOffset += 16;
03215         } else {
03216           // Vectors are aligned.
03217           ArgOffset = ((ArgOffset+15)/16)*16;
03218           CurArgOffset = ArgOffset;
03219           ArgOffset += 16;
03220         }
03221         needsLoad = true;
03222       }
03223       break;
03224     }
03225 
03226     // We need to load the argument to a virtual register if we determined above
03227     // that we ran out of physical registers of the appropriate type.
03228     if (needsLoad) {
03229       int FI = MFI->CreateFixedObject(ObjSize,
03230                                       CurArgOffset + (ArgSize - ObjSize),
03231                                       isImmutable);
03232       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
03233       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
03234                            false, false, false, 0);
03235     }
03236 
03237     InVals.push_back(ArgVal);
03238   }
03239 
03240   // Allow for Altivec parameters at the end, if needed.
03241   if (nAltivecParamsAtEnd) {
03242     MinReservedArea = ((MinReservedArea+15)/16)*16;
03243     MinReservedArea += 16*nAltivecParamsAtEnd;
03244   }
03245 
03246   // Area that is at least reserved in the caller of this function.
03247   MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
03248 
03249   // Set the size that is at least reserved in the caller of this function.  Tail
03250   // call optimized functions' reserved stack space needs to be aligned so that
03251   // taking the difference between two stack areas will result in an aligned
03252   // stack.
03253   MinReservedArea = EnsureStackAlignment(MF.getTarget(), MinReservedArea);
03254   FuncInfo->setMinReservedArea(MinReservedArea);
03255 
03256   // If the function takes a variable number of arguments, make a frame index for
03257   // the start of the first vararg value... for expansion of llvm.va_start.
03258   if (isVarArg) {
03259     int Depth = ArgOffset;
03260 
03261     FuncInfo->setVarArgsFrameIndex(
03262       MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
03263                              Depth, true));
03264     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
03265 
03266     // If this function is vararg, store any remaining integer argument regs
03267     // to their spots on the stack so that they may be loaded by dereferencing the
03268     // result of va_next.
03269     for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
03270       unsigned VReg;
03271 
03272       if (isPPC64)
03273         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
03274       else
03275         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
03276 
03277       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
03278       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
03279                                    MachinePointerInfo(), false, false, 0);
03280       MemOps.push_back(Store);
03281       // Increment the address by the pointer size for the next argument to store
03282       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
03283       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
03284     }
03285   }
03286 
03287   if (!MemOps.empty())
03288     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
03289 
03290   return Chain;
03291 }
03292 
03293 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
03294 /// adjusted to accommodate the arguments for the tailcall.
03295 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
03296                                    unsigned ParamSize) {
03297 
03298   if (!isTailCall) return 0;
03299 
03300   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
03301   unsigned CallerMinReservedArea = FI->getMinReservedArea();
03302   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
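  // E.g. if the caller reserved 112 bytes and the tail call needs a 64-byte
  // parameter area, SPDiff = 112 - 64 = 48 and the caller's frame already
  // has enough room.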
03303   // Remember only if the new adjustment is bigger.
03304   if (SPDiff < FI->getTailCallSPDelta())
03305     FI->setTailCallSPDelta(SPDiff);
03306 
03307   return SPDiff;
03308 }
03309 
03310 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
03311 /// for tail call optimization. Targets which want to do tail call
03312 /// optimization should implement this function.
03313 bool
03314 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
03315                                                      CallingConv::ID CalleeCC,
03316                                                      bool isVarArg,
03317                                       const SmallVectorImpl<ISD::InputArg> &Ins,
03318                                                      SelectionDAG& DAG) const {
03319   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
03320     return false;
03321 
03322   // Variable argument functions are not supported.
03323   if (isVarArg)
03324     return false;
03325 
03326   MachineFunction &MF = DAG.getMachineFunction();
03327   CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
03328   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
03329     // Functions containing by val parameters are not supported.
03330     for (unsigned i = 0; i != Ins.size(); i++) {
03331        ISD::ArgFlagsTy Flags = Ins[i].Flags;
03332        if (Flags.isByVal()) return false;
03333     }
03334 
03335     // Non-PIC/GOT tail calls are supported.
03336     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
03337       return true;
03338 
03339     // At the moment we can only do local tail calls (in same module, hidden
03340     // or protected) if we are generating PIC.
03341     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
03342       return G->getGlobal()->hasHiddenVisibility()
03343           || G->getGlobal()->hasProtectedVisibility();
03344   }
03345 
03346   return false;
03347 }
03348 
03349 /// isBLACompatibleAddress - Return the immediate to use if the specified
03350 /// 32-bit value is representable in the immediate field of a BxA instruction.
03351 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
03352   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
03353   if (!C) return nullptr;
03354 
03355   int Addr = C->getZExtValue();
03356   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
03357       SignExtend32<26>(Addr) != Addr)
03358     return nullptr;  // The top 6 bits must be the sign extension of the immediate.
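  // For example, Addr = 0x1000000 is word-aligned and survives the 26-bit
  // sign-extension check, so the BLA immediate 0x1000000 >> 2 = 0x400000 is
  // produced below; Addr = 0x2000000 sets bit 25 and is rejected above.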
03359 
03360   return DAG.getConstant((int)C->getZExtValue() >> 2,
03361                          DAG.getTargetLoweringInfo().getPointerTy()).getNode();
03362 }
03363 
03364 namespace {
03365 
03366 struct TailCallArgumentInfo {
03367   SDValue Arg;
03368   SDValue FrameIdxOp;
03369   int       FrameIdx;
03370 
03371   TailCallArgumentInfo() : FrameIdx(0) {}
03372 };
03373 
03374 }
03375 
03376 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
03377 static void
03378 StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
03379                                            SDValue Chain,
03380                    const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
03381                    SmallVectorImpl<SDValue> &MemOpChains,
03382                    SDLoc dl) {
03383   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
03384     SDValue Arg = TailCallArgs[i].Arg;
03385     SDValue FIN = TailCallArgs[i].FrameIdxOp;
03386     int FI = TailCallArgs[i].FrameIdx;
03387     // Store relative to framepointer.
03388     MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, FIN,
03389                                        MachinePointerInfo::getFixedStack(FI),
03390                                        false, false, 0));
03391   }
03392 }
03393 
03394 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
03395 /// the appropriate stack slot for the tail call optimized function call.
03396 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
03397                                                MachineFunction &MF,
03398                                                SDValue Chain,
03399                                                SDValue OldRetAddr,
03400                                                SDValue OldFP,
03401                                                int SPDiff,
03402                                                bool isPPC64,
03403                                                bool isDarwinABI,
03404                                                SDLoc dl) {
03405   if (SPDiff) {
03406     // Calculate the new stack slot for the return address.
03407     int SlotSize = isPPC64 ? 8 : 4;
03408     int NewRetAddrLoc = SPDiff + PPCFrameLowering::getReturnSaveOffset(isPPC64,
03409                                                                    isDarwinABI);
03410     int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize,
03411                                                           NewRetAddrLoc, true);
03412     EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
03413     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
03414     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
03415                          MachinePointerInfo::getFixedStack(NewRetAddr),
03416                          false, false, 0);
03417 
03418     // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
03419     // slot as the FP is never overwritten.
03420     if (isDarwinABI) {
03421       int NewFPLoc =
03422         SPDiff + PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI);
03423       int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc,
03424                                                           true);
03425       SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
03426       Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
03427                            MachinePointerInfo::getFixedStack(NewFPIdx),
03428                            false, false, 0);
03429     }
03430   }
03431   return Chain;
03432 }
03433 
03434 /// CalculateTailCallArgDest - Remember the argument for later processing and
03435 /// calculate its position on the stack.
03436 static void
03437 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
03438                          SDValue Arg, int SPDiff, unsigned ArgOffset,
03439                      SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
03440   int Offset = ArgOffset + SPDiff;
03441   uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
03442   int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
03443   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
03444   SDValue FIN = DAG.getFrameIndex(FI, VT);
03445   TailCallArgumentInfo Info;
03446   Info.Arg = Arg;
03447   Info.FrameIdxOp = FIN;
03448   Info.FrameIdx = FI;
03449   TailCallArguments.push_back(Info);
03450 }
03451 
03452 /// EmitTailCallLoadFPAndRetAddr - Emit loads from the frame pointer and return
03453 /// address stack slots. Returns the chain as result and the loaded values in
03454 /// LROpOut/FPOpOut. Used when tail calling.
03455 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
03456                                                         int SPDiff,
03457                                                         SDValue Chain,
03458                                                         SDValue &LROpOut,
03459                                                         SDValue &FPOpOut,
03460                                                         bool isDarwinABI,
03461                                                         SDLoc dl) const {
03462   if (SPDiff) {
03463     // Load the LR and FP stack slot for later adjusting.
03464     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
03465     LROpOut = getReturnAddrFrameIndex(DAG);
03466     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(),
03467                           false, false, false, 0);
03468     Chain = SDValue(LROpOut.getNode(), 1);
03469 
03470     // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
03471     // slot as the FP is never overwritten.
03472     if (isDarwinABI) {
03473       FPOpOut = getFramePointerFrameIndex(DAG);
03474       FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(),
03475                             false, false, false, 0);
03476       Chain = SDValue(FPOpOut.getNode(), 1);
03477     }
03478   }
03479   return Chain;
03480 }
03481 
03482 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
03483 /// by "Src" to address "Dst" of size "Size".  Alignment information is
03484 /// specified by the specific parameter attribute. The copy will be passed as
03485 /// a byval function parameter.
03486 /// Sometimes what we are copying is the end of a larger object, the part that
03487 /// does not fit in registers.
03488 static SDValue
03489 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
03490                           ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
03491                           SDLoc dl) {
03492   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
03493   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
03494                        false, false, MachinePointerInfo(),
03495                        MachinePointerInfo());
03496 }
03497 
03498 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
03499 /// tail calls.
03500 static void
03501 LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
03502                  SDValue Arg, SDValue PtrOff, int SPDiff,
03503                  unsigned ArgOffset, bool isPPC64, bool isTailCall,
03504                  bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
03505                  SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments,
03506                  SDLoc dl) {
03507   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
03508   if (!isTailCall) {
03509     if (isVector) {
03510       SDValue StackPtr;
03511       if (isPPC64)
03512         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
03513       else
03514         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
03515       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
03516                            DAG.getConstant(ArgOffset, PtrVT));
03517     }
03518     MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
03519                                        MachinePointerInfo(), false, false, 0));
03520   // Calculate and remember argument location.
03521   } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
03522                                   TailCallArguments);
03523 }
03524 
03525 static
03526 void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
03527                      SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes,
03528                      SDValue LROp, SDValue FPOp, bool isDarwinABI,
03529                      SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
03530   MachineFunction &MF = DAG.getMachineFunction();
03531 
03532   // Emit a sequence of copyto/copyfrom virtual registers for arguments that
03533   // might overwrite each other in case of tail call optimization.
03534   SmallVector<SDValue, 8> MemOpChains2;
03535   // Do not glue the preceding copytoreg nodes to the stores that follow.
03536   InFlag = SDValue();
03537   StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
03538                                     MemOpChains2, dl);
03539   if (!MemOpChains2.empty())
03540     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
03541 
03542   // Store the return address to the appropriate stack slot.
03543   Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff,
03544                                         isPPC64, isDarwinABI, dl);
03545 
03546   // Emit callseq_end just before tailcall node.
03547   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
03548                              DAG.getIntPtrConstant(0, true), InFlag, dl);
03549   InFlag = Chain.getValue(1);
03550 }
03551 
03552 static
03553 unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
03554                      SDValue &Chain, SDLoc dl, int SPDiff, bool isTailCall,
03555                      SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass,
03556                      SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
03557                      const PPCSubtarget &Subtarget) {
03558 
03559   bool isPPC64 = Subtarget.isPPC64();
03560   bool isSVR4ABI = Subtarget.isSVR4ABI();
03561   bool isELFv2ABI = Subtarget.isELFv2ABI();
03562 
03563   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
03564   NodeTys.push_back(MVT::Other);   // Returns a chain
03565   NodeTys.push_back(MVT::Glue);    // Returns a flag for retval copy to use.
03566 
03567   unsigned CallOpc = PPCISD::CALL;
03568 
03569   bool needIndirectCall = true;
03570   if (!isSVR4ABI || !isPPC64)
03571     if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) {
03572       // If this is an absolute destination address, use the munged value.
03573       Callee = SDValue(Dest, 0);
03574       needIndirectCall = false;
03575     }
03576 
03577   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
03578     unsigned OpFlags = 0;
03579     if ((DAG.getTarget().getRelocationModel() != Reloc::Static &&
03580          (Subtarget.getTargetTriple().isMacOSX() &&
03581           Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5)) &&
03582          (G->getGlobal()->isDeclaration() ||
03583           G->getGlobal()->isWeakForLinker())) ||
03584         (Subtarget.isTargetELF() && !isPPC64 &&
03585          !G->getGlobal()->hasLocalLinkage() &&
03586          DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
03587       // PC-relative references to external symbols should go through $stub,
03588       // unless we're building with the leopard linker or later, which
03589       // automatically synthesizes these stubs.
03590       OpFlags = PPCII::MO_PLT_OR_STUB;
03591     }
03592 
03593     // If the callee is a GlobalAddress/ExternalSymbol node (quite common,
03594     // every direct call is), turn it into a TargetGlobalAddress /
03595     // TargetExternalSymbol node so that legalize doesn't hack it.
03596     Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
03597                                         Callee.getValueType(), 0, OpFlags);
03598     needIndirectCall = false;
03599   }
03600 
03601   if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
03602     unsigned char OpFlags = 0;
03603 
03604     if ((DAG.getTarget().getRelocationModel() != Reloc::Static &&
03605          (Subtarget.getTargetTriple().isMacOSX() &&
03606           Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5))) ||
03607         (Subtarget.isTargetELF() && !isPPC64 &&
03608          DAG.getTarget().getRelocationModel() == Reloc::PIC_) ) {
03609       // PC-relative references to external symbols should go through $stub,
03610       // unless we're building with the leopard linker or later, which
03611       // automatically synthesizes these stubs.
03612       OpFlags = PPCII::MO_PLT_OR_STUB;
03613     }
03614 
03615     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(),
03616                                          OpFlags);
03617     needIndirectCall = false;
03618   }
03619 
03620   if (needIndirectCall) {
03621     // Otherwise, this is an indirect call.  We have to use an MTCTR/BCTRL
03622     // pair to do the call, so we can't use PPCISD::CALL.
03623     SDValue MTCTROps[] = {Chain, Callee, InFlag};
03624 
03625     if (isSVR4ABI && isPPC64 && !isELFv2ABI) {
03626       // Function pointers in the 64-bit SVR4 ABI do not point to the function
03627       // entry point, but to the function descriptor (the function entry point
03628       // address is part of the function descriptor though).
03629       // The function descriptor is a three doubleword structure with the
03630       // following fields: function entry point, TOC base address and
03631       // environment pointer.
03632       // Thus for a call through a function pointer, the following actions need
03633       // to be performed:
03634       //   1. Save the TOC of the caller in the TOC save area of its stack
03635       //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
03636       //   2. Load the address of the function entry point from the function
03637       //      descriptor.
03638       //   3. Load the TOC of the callee from the function descriptor into r2.
03639       //   4. Load the environment pointer from the function descriptor into
03640       //      r11.
03641       //   5. Branch to the function entry point address.
03642       //   6. On return of the callee, the TOC of the caller needs to be
03643       //      restored (this is done in FinishCall()).
03644       //
03645       // All those operations are flagged together to ensure that no other
03646       // operations can be scheduled in between. E.g. without flagging the
03647       // operations together, a TOC access in the caller could be scheduled
03648       // between the load of the callee TOC and the branch to the callee, which
03649       // results in the TOC access going through the TOC of the callee instead
03650       // of going through the TOC of the caller, which leads to incorrect code.
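            //
            // Illustrative layout (an assumed C struct for exposition only,
            // matching the offsets used by the loads below):
            //   struct FunctionDescriptor {
            //     void *EntryPoint;   // +0:  moved into CTR and branched to
            //     void *TOCBase;      // +8:  loaded into r2
            //     void *EnvPointer;   // +16: loaded into r11
            //   };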
03651 
03652       // Load the address of the function entry point from the function
03653       // descriptor.
03654       SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other, MVT::Glue);
03655       SDValue LoadFuncPtr = DAG.getNode(PPCISD::LOAD, dl, VTs,
03656                               makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2));
03657       Chain = LoadFuncPtr.getValue(1);
03658       InFlag = LoadFuncPtr.getValue(2);
03659 
03660       // Load environment pointer into r11.
03661       // Offset of the environment pointer within the function descriptor.
03662       SDValue PtrOff = DAG.getIntPtrConstant(16);
03663 
03664       SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff);
03665       SDValue LoadEnvPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, Chain, AddPtr,
03666                                        InFlag);
03667       Chain = LoadEnvPtr.getValue(1);
03668       InFlag = LoadEnvPtr.getValue(2);
03669 
03670       SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr,
03671                                         InFlag);
03672       Chain = EnvVal.getValue(0);
03673       InFlag = EnvVal.getValue(1);
03674 
03675       // Load TOC of the callee into r2. We are using a target-specific load
03676       // with r2 hard coded, because the result of a target-independent load
03677       // would never go directly into r2, since r2 is a reserved register (which
03678       // prevents the register allocator from allocating it), resulting in an
03679       // additional register being allocated and an unnecessary move instruction
03680       // being generated.
03681       VTs = DAG.getVTList(MVT::Other, MVT::Glue);
03682       SDValue TOCOff = DAG.getIntPtrConstant(8);
03683       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff);
03684       SDValue LoadTOCPtr = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain,
03685                                        AddTOC, InFlag);
03686       Chain = LoadTOCPtr.getValue(0);
03687       InFlag = LoadTOCPtr.getValue(1);
03688 
03689       MTCTROps[0] = Chain;
03690       MTCTROps[1] = LoadFuncPtr;
03691       MTCTROps[2] = InFlag;
03692     }
03693 
03694     Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys,
03695                         makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2));
03696     InFlag = Chain.getValue(1);
03697 
03698     NodeTys.clear();
03699     NodeTys.push_back(MVT::Other);
03700     NodeTys.push_back(MVT::Glue);
03701     Ops.push_back(Chain);
03702     CallOpc = PPCISD::BCTRL;
03703     Callee.setNode(nullptr);
03704     // Add use of X11 (holding environment pointer)
03705     if (isSVR4ABI && isPPC64 && !isELFv2ABI)
03706       Ops.push_back(DAG.getRegister(PPC::X11, PtrVT));
03707     // Add CTR register as callee so a bctr can be emitted later.
03708     if (isTailCall)
03709       Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT));
03710   }
03711 
03712   // If this is a direct call, pass the chain and the callee.
03713   if (Callee.getNode()) {
03714     Ops.push_back(Chain);
03715     Ops.push_back(Callee);
03716 
03717     // If this is a call to __tls_get_addr, find the symbol whose address
03718     // is to be taken and add it to the list.  This will be used to 
03719     // generate __tls_get_addr(<sym>@tlsgd) or __tls_get_addr(<sym>@tlsld).
03720     // We find the symbol by walking the chain to the CopyFromReg, walking
03721     // back from the CopyFromReg to the ADDI_TLSGD_L or ADDI_TLSLD_L, and
03722     // pulling the symbol from that node.
03723     if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
03724       if (!strcmp(S->getSymbol(), "__tls_get_addr")) {
03725         assert(!needIndirectCall && "Indirect call to __tls_get_addr???");
03726         SDNode *AddI = Chain.getNode()->getOperand(2).getNode();
03727         SDValue TGTAddr = AddI->getOperand(1);
03728         assert(TGTAddr.getNode()->getOpcode() == ISD::TargetGlobalTLSAddress &&
03729                "Didn't find target global TLS address where we expected one");
03730         Ops.push_back(TGTAddr);
03731         CallOpc = PPCISD::CALL_TLS;
03732       }
03733   }
03734   // If this is a tail call, add the stack pointer delta.
03735   if (isTailCall)
03736     Ops.push_back(DAG.getConstant(SPDiff, MVT::i32));
03737 
03738   // Add argument registers to the end of the list so that they are known live
03739   // into the call.
03740   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
03741     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
03742                                   RegsToPass[i].second.getValueType()));
03743 
03744   // Direct calls in the ELFv2 ABI need the TOC register live into the call.
03745   if (Callee.getNode() && isELFv2ABI)
03746     Ops.push_back(DAG.getRegister(PPC::X2, PtrVT));
03747 
03748   return CallOpc;
03749 }
03750 
03751 static
03752 bool isLocalCall(const SDValue &Callee)
03753 {
03754   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
03755     return !G->getGlobal()->isDeclaration() &&
03756            !G->getGlobal()->isWeakForLinker();
03757   return false;
03758 }
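      // (Illustrative: a call to a strong definition in the current module is
      // "local"; a declaration or weak definition may resolve to another
      // module's copy at link time, so it is not.)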
03759 
03760 SDValue
03761 PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
03762                                    CallingConv::ID CallConv, bool isVarArg,
03763                                    const SmallVectorImpl<ISD::InputArg> &Ins,
03764                                    SDLoc dl, SelectionDAG &DAG,
03765                                    SmallVectorImpl<SDValue> &InVals) const {
03766 
03767   SmallVector<CCValAssign, 16> RVLocs;
03768   CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
03769                     *DAG.getContext());
03770   CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC);
03771 
03772   // Copy all of the result registers out of their specified physreg.
03773   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
03774     CCValAssign &VA = RVLocs[i];
03775     assert(VA.isRegLoc() && "Can only return in registers!");
03776 
03777     SDValue Val = DAG.getCopyFromReg(Chain, dl,
03778                                      VA.getLocReg(), VA.getLocVT(), InFlag);
03779     Chain = Val.getValue(1);
03780     InFlag = Val.getValue(2);
03781 
03782     switch (VA.getLocInfo()) {
03783     default: llvm_unreachable("Unknown loc info!");
03784     case CCValAssign::Full: break;
03785     case CCValAssign::AExt:
03786       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
03787       break;
03788     case CCValAssign::ZExt:
03789       Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
03790                         DAG.getValueType(VA.getValVT()));
03791       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
03792       break;
03793     case CCValAssign::SExt:
03794       Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
03795                         DAG.getValueType(VA.getValVT()));
03796       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
03797       break;
03798     }
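          // (For instance, a callee returning 'signed char' comes back in the
          // low bits of r3; the SExt case above wraps the copied value in
          // AssertSext and truncates it to the declared i8 type. This note is
          // illustrative, not tied to a specific test.)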
03799 
03800     InVals.push_back(Val);
03801   }
03802 
03803   return Chain;
03804 }
03805 
03806 SDValue
03807 PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
03808                               bool isTailCall, bool isVarArg,
03809                               SelectionDAG &DAG,
03810                               SmallVector<std::pair<unsigned, SDValue>, 8>
03811                                 &RegsToPass,
03812                               SDValue InFlag, SDValue Chain,
03813                               SDValue &Callee,
03814                               int SPDiff, unsigned NumBytes,
03815                               const SmallVectorImpl<ISD::InputArg> &Ins,
03816                               SmallVectorImpl<SDValue> &InVals) const {
03817 
03818   bool isELFv2ABI = Subtarget.isELFv2ABI();
03819   std::vector<EVT> NodeTys;
03820   SmallVector<SDValue, 8> Ops;
03821   unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff,
03822                                  isTailCall, RegsToPass, Ops, NodeTys,
03823                                  Subtarget);
03824 
03825   // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
03826   if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
03827     Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
03828 
03829   // When performing tail call optimization the callee pops its arguments off
03830   // the stack. Account for this here so these bytes can be pushed back on in
03831   // PPCFrameLowering::eliminateCallFramePseudoInstr.
03832   int BytesCalleePops =
03833     (CallConv == CallingConv::Fast &&
03834      getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0;
03835 
03836   // Add a register mask operand representing the call-preserved registers.
03837   const TargetRegisterInfo *TRI =
03838       getTargetMachine().getSubtargetImpl()->getRegisterInfo();
03839   const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
03840   assert(Mask && "Missing call preserved mask for calling convention");
03841   Ops.push_back(DAG.getRegisterMask(Mask));
03842 
03843   if (InFlag.getNode())
03844     Ops.push_back(InFlag);
03845 
03846   // Emit tail call.
03847   if (isTailCall) {
03848     assert(((Callee.getOpcode() == ISD::Register &&
03849              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
03850             Callee.getOpcode() == ISD::TargetExternalSymbol ||
03851             Callee.getOpcode() == ISD::TargetGlobalAddress ||
03852             isa<ConstantSDNode>(Callee)) &&
03853     "Expecting an global address, external symbol, absolute value or register");
03854 
03855     return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops);
03856   }
03857 
03858   // Add a NOP immediately after the branch instruction when using the 64-bit
03859   // SVR4 ABI. At link time, if caller and callee are in different modules and
03860   // thus have different TOCs, the call will be replaced with a call to a stub
03861   // function which saves the current TOC, loads the TOC of the callee and
03862   // branches to the callee. The NOP will be replaced with a load instruction
03863   // which restores the TOC of the caller from the TOC save slot of the current
03864   // stack frame. If caller and callee belong to the same module (and have the
03865   // same TOC), the NOP will remain unchanged.
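        //
        // Illustrative assembly (assumed, for exposition only):
        //   bl callee      # may be redirected to a TOC-saving stub at link time
        //   nop            # may become: ld r2, 40(r1)  (ld r2, 24(r1) on ELFv2)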
03866 
03867   bool needsTOCRestore = false;
03868   if (!isTailCall && Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
03869     if (CallOpc == PPCISD::BCTRL) {
03870       // This is a call through a function pointer.
03871       // Restore the caller TOC from the save area into R2.
03872       // See PrepareCall() for more information about calls through function
03873       // pointers in the 64-bit SVR4 ABI.
03874       // We are using a target-specific load with r2 hard coded, because the
03875       // result of a target-independent load would never go directly into r2,
03876       // since r2 is a reserved register (which prevents the register allocator
03877       // from allocating it), resulting in an additional register being
03878       // allocated and an unnecessary move instruction being generated.
03879       needsTOCRestore = true;
03880     } else if ((CallOpc == PPCISD::CALL) &&
03881                (!isLocalCall(Callee) ||
03882                 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
03883       // Otherwise insert NOP for non-local calls.
03884       CallOpc = PPCISD::CALL_NOP;
03885     } else if (CallOpc == PPCISD::CALL_TLS)
03886       // For 64-bit SVR4, TLS calls are always non-local.
03887       CallOpc = PPCISD::CALL_NOP_TLS;
03888   }
03889 
03890   Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
03891   InFlag = Chain.getValue(1);
03892 
03893   if (needsTOCRestore) {
03894     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
03895     EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
03896     SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
03897     unsigned TOCSaveOffset = PPCFrameLowering::getTOCSaveOffset(isELFv2ABI);
03898     SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset);
03899     SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);
03900     Chain = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain, AddTOC, InFlag);
03901     InFlag = Chain.getValue(1);
03902   }
03903 
03904   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
03905                              DAG.getIntPtrConstant(BytesCalleePops, true),
03906                              InFlag, dl);
03907   if (!Ins.empty())
03908     InFlag = Chain.getValue(1);
03909 
03910   return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
03911                          Ins, dl, DAG, InVals);
03912 }
03913 
03914 SDValue
03915 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
03916                              SmallVectorImpl<SDValue> &InVals) const {
03917   SelectionDAG &DAG                     = CLI.DAG;
03918   SDLoc &dl                             = CLI.DL;
03919   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
03920   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
03921   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
03922   SDValue Chain                         = CLI.Chain;
03923   SDValue Callee                        = CLI.Callee;
03924   bool &isTailCall                      = CLI.IsTailCall;
03925   CallingConv::ID CallConv              = CLI.CallConv;
03926   bool isVarArg                         = CLI.IsVarArg;
03927 
03928   if (isTailCall)
03929     isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
03930                                                    Ins, DAG);
03931 
03932   if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall())
03933     report_fatal_error("failed to perform tail call elimination on a call "
03934                        "site marked musttail");
03935 
03936   if (Subtarget.isSVR4ABI()) {
03937     if (Subtarget.isPPC64())
03938       return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
03939                               isTailCall, Outs, OutVals, Ins,
03940                               dl, DAG, InVals);
03941     else
03942       return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
03943                               isTailCall, Outs, OutVals, Ins,
03944                               dl, DAG, InVals);
03945   }
03946 
03947   return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
03948                           isTailCall, Outs, OutVals, Ins,
03949                           dl, DAG, InVals);
03950 }
03951 
03952 SDValue
03953 PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
03954                                     CallingConv::ID CallConv, bool isVarArg,
03955                                     bool isTailCall,
03956                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
03957                                     const SmallVectorImpl<SDValue> &OutVals,
03958                                     const SmallVectorImpl<ISD::InputArg> &Ins,
03959                                     SDLoc dl, SelectionDAG &DAG,
03960                                     SmallVectorImpl<SDValue> &InVals) const {
03961   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
03962   // of the 32-bit SVR4 ABI stack frame layout.
03963 
03964   assert((CallConv == CallingConv::C ||
03965           CallConv == CallingConv::Fast) && "Unknown calling convention!");
03966 
03967   unsigned PtrByteSize = 4;
03968 
03969   MachineFunction &MF = DAG.getMachineFunction();
03970 
03971   // Mark this function as potentially containing a tail call. As a
03972   // consequence, the frame pointer will be used for dynamic alloca and for
03973   // restoring the caller's stack pointer in this function's epilogue. This is
03974   // done because the tail-called function might overwrite the value in this
03975   // function's (MF) stack pointer save slot at 0(SP).
03976   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
03977       CallConv == CallingConv::Fast)
03978     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
03979 
03980   // Count how many bytes are to be pushed on the stack, including the linkage
03981   // area, parameter list area and the part of the local variable space which
03982   // contains copies of aggregates which are passed by value.
03983 
03984   // Assign locations to all of the outgoing arguments.
03985   SmallVector<CCValAssign, 16> ArgLocs;
03986   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
03987                  *DAG.getContext());
03988 
03989   // Reserve space for the linkage area on the stack.
03990   CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false, false),
03991                        PtrByteSize);
03992 
03993   if (isVarArg) {
03994     // Handle fixed and variable vector arguments differently.
03995     // Fixed vector arguments go into registers as long as registers are
03996     // available. Variable vector arguments always go into memory.
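          // (Illustrative: for a call f(v, ...) where 'v' is a fixed 'vector
          // int' parameter and a second vector is passed through the ellipsis,
          // 'v' may be assigned one of v2-v13 by CC_PPC32_SVR4, while the
          // variadic vector always gets a memory slot from
          // CC_PPC32_SVR4_VarArg.)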
03997     unsigned NumArgs = Outs.size();
03998 
03999     for (unsigned i = 0; i != NumArgs; ++i) {
04000       MVT ArgVT = Outs[i].VT;
04001       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
04002       bool Result;
04003 
04004       if (Outs[i].IsFixed) {
04005         Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
04006                                CCInfo);
04007       } else {
04008         Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
04009                                       ArgFlags, CCInfo);
04010       }
04011 
04012       if (Result) {
04013 #ifndef NDEBUG
04014         errs() << "Call operand #" << i << " has unhandled type "
04015                << EVT(ArgVT).getEVTString() << "\n";
04016 #endif
04017         llvm_unreachable(nullptr);
04018       }
04019     }
04020   } else {
04021     // All arguments are treated the same.
04022     CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
04023   }
04024 
04025   // Assign locations to all of the outgoing aggregate by value arguments.
04026   SmallVector<CCValAssign, 16> ByValArgLocs;
04027   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
04028                       ByValArgLocs, *DAG.getContext());
04029 
04030   // Reserve stack space for the allocations in CCInfo.
04031   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
04032 
04033   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
04034 
04035   // Size of the linkage area, parameter list area, and the part of the
04036   // local variable space where copies of aggregates which are passed by
04037   // value are stored.
04038   unsigned NumBytes = CCByValInfo.getNextStackOffset();
04039 
04040   // Calculate by how many bytes the stack has to be adjusted in case of tail
04041   // call optimization.
04042   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
04043 
04044   // Adjust the stack pointer for the new arguments...
04045   // These operations are automatically eliminated by the prolog/epilog pass
04046   Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
04047                                dl);
04048   SDValue CallSeqStart = Chain;
04049 
04050   // Load the return address and frame pointer so they can be moved somewhere
04051   // else later.
04052   SDValue LROp, FPOp;
04053   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false,
04054                                        dl);
04055 
04056   // Set up a copy of the stack pointer for use loading and storing any
04057   // arguments that may not fit in the registers available for argument
04058   // passing.
04059   SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
04060 
04061   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
04062   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
04063   SmallVector<SDValue, 8> MemOpChains;
04064 
04065   bool seenFloatArg = false;
04066   // Walk the register/memloc assignments, inserting copies/loads.
04067   for (unsigned i = 0, j = 0, e = ArgLocs.size();
04068        i != e;
04069        ++i) {
04070     CCValAssign &VA = ArgLocs[i];
04071     SDValue Arg = OutVals[i];
04072     ISD::ArgFlagsTy Flags = Outs[i].Flags;
04073 
04074     if (Flags.isByVal()) {
04075       // Argument is an aggregate which is passed by value, thus we need to
04076       // create a copy of it in the local variable space of the current stack
04077       // frame (which is the stack frame of the caller) and pass the address of
04078       // this copy to the callee.
04079       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
04080       CCValAssign &ByValVA = ByValArgLocs[j++];
04081       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
04082 
04083       // Memory reserved in the local variable space of the caller's stack frame.
04084       unsigned LocMemOffset = ByValVA.getLocMemOffset();
04085 
04086       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
04087       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
04088 
04089       // Create a copy of the argument in the local area of the current
04090       // stack frame.
04091       SDValue MemcpyCall =
04092         CreateCopyOfByValArgument(Arg, PtrOff,
04093                                   CallSeqStart.getNode()->getOperand(0),
04094                                   Flags, DAG, dl);
04095 
04096       // This must go outside the CALLSEQ_START..END.
04097       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
04098                            CallSeqStart.getNode()->getOperand(1),
04099                            SDLoc(MemcpyCall));
04100       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
04101                              NewCallSeqStart.getNode());
04102       Chain = CallSeqStart = NewCallSeqStart;
04103 
04104       // Pass the address of the aggregate copy on the stack either in a
04105       // physical register or in the parameter list area of the current stack
04106       // frame to the callee.
04107       Arg = PtrOff;
04108     }
04109 
04110     if (VA.isRegLoc()) {
04111       if (Arg.getValueType() == MVT::i1)
04112         Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg);
04113 
04114       seenFloatArg |= VA.getLocVT().isFloatingPoint();
04115       // Put argument in a physical register.
04116       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
04117     } else {
04118       // Put argument in the parameter list area of the current stack frame.
04119       assert(VA.isMemLoc());
04120       unsigned LocMemOffset = VA.getLocMemOffset();
04121 
04122       if (!isTailCall) {
04123         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
04124         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
04125 
04126         MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
04127                                            MachinePointerInfo(),
04128                                            false, false, 0));
04129       } else {
04130         // Calculate and remember argument location.
04131         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
04132                                  TailCallArguments);
04133       }
04134     }
04135   }
04136 
04137   if (!MemOpChains.empty())
04138     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
04139 
04140   // Build a sequence of copy-to-reg nodes chained together with token chain
04141   // and flag operands which copy the outgoing args into the appropriate regs.
04142   SDValue InFlag;
04143   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
04144     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
04145                              RegsToPass[i].second, InFlag);
04146     InFlag = Chain.getValue(1);
04147   }
04148 
04149   // Set CR bit 6 to true if this is a vararg call with floating args passed in
04150   // registers.
04151   if (isVarArg) {
04152     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
04153     SDValue Ops[] = { Chain, InFlag };
04154 
04155     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
04156                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
04157 
04158     InFlag = Chain.getValue(1);
04159   }
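        // (Background, per the 32-bit SVR4 ABI: the callee's varargs prologue
        // tests CR bit 6 to decide whether the FP argument registers need to
        // be saved for later use by va_arg.)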
04160 
04161   if (isTailCall)
04162     PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
04163                     false, TailCallArguments);
04164 
04165   return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
04166                     RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
04167                     Ins, InVals);
04168 }
04169 
04170 // Copy an argument into memory, being careful to do this outside the
04171 // call sequence for the call to which the argument belongs.
04172 SDValue
04173 PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
04174                                               SDValue CallSeqStart,
04175                                               ISD::ArgFlagsTy Flags,
04176                                               SelectionDAG &DAG,
04177                                               SDLoc dl) const {
04178   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
04179                         CallSeqStart.getNode()->getOperand(0),
04180                         Flags, DAG, dl);
04181   // The MEMCPY must go outside the CALLSEQ_START..END.
04182   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
04183                              CallSeqStart.getNode()->getOperand(1),
04184                              SDLoc(MemcpyCall));
04185   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
04186                          NewCallSeqStart.getNode());
04187   return NewCallSeqStart;
04188 }
04189 
04190 SDValue
04191 PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
04192                                     CallingConv::ID CallConv, bool isVarArg,
04193                                     bool isTailCall,
04194                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
04195                                     const SmallVectorImpl<SDValue> &OutVals,
04196                                     const SmallVectorImpl<ISD::InputArg> &Ins,
04197                                     SDLoc dl, SelectionDAG &DAG,
04198                                     SmallVectorImpl<SDValue> &InVals) const {
04199 
04200   bool isELFv2ABI = Subtarget.isELFv2ABI();
04201   bool isLittleEndian = Subtarget.isLittleEndian();
04202   unsigned NumOps = Outs.size();
04203 
04204   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
04205   unsigned PtrByteSize = 8;
04206 
04207   MachineFunction &MF = DAG.getMachineFunction();
04208 
04209   // Mark this function as potentially containing a tail call. As a
04210   // consequence, the frame pointer will be used for dynamic alloca and for
04211   // restoring the caller's stack pointer in this function's epilogue. This is
04212   // done because the tail-called function might overwrite the value in this
04213   // function's (MF) stack pointer save slot at 0(SP).
04214   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
04215       CallConv == CallingConv::Fast)
04216     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
04217 
04218   // Count how many bytes are to be pushed on the stack, including the linkage
04219   // area, and parameter passing area.  On ELFv1, the linkage area is 48 bytes
04220   // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
04221   // area is 32 bytes reserved space for [SP][CR][LR][TOC].
04222   unsigned LinkageSize = PPCFrameLowering::getLinkageSize(true, false,
04223                                                           isELFv2ABI);
04224   unsigned NumBytes = LinkageSize;
04225 
04226   // Add up all the space actually used.
04227   for (unsigned i = 0; i != NumOps; ++i) {
04228     ISD::ArgFlagsTy Flags = Outs[i].Flags;
04229     EVT ArgVT = Outs[i].VT;
04230     EVT OrigVT = Outs[i].ArgVT;
04231 
04232     /* Respect alignment of argument on the stack.  */
04233     unsigned Align =
04234       CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
04235     NumBytes = ((NumBytes + Align - 1) / Align) * Align;
04236 
04237     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
04238     if (Flags.isInConsecutiveRegsLast())
04239       NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
04240   }
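        // (Worked example, illustrative: for f(int, double, vector int), the
        // slots after the linkage area fall at +0 (int, promoted to a
        // doubleword), +8 (double), and +16 (vector, already 16-byte aligned),
        // so the loop above adds 32 bytes to LinkageSize.)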
04241 
04242   unsigned NumBytesActuallyUsed = NumBytes;
04243 
04244   // The prolog code of a varargs callee may store up to 8 GPR argument
04245   // registers to the stack so that va_start can index over them in memory.
04246   // Because we cannot tell if this is needed on the caller side, we have to
04247   // conservatively assume that it is needed.  As such, make sure we have at
04248   // least enough stack space for the caller to store the 8 GPRs.
04249   // FIXME: On ELFv2, it may be unnecessary to allocate the parameter area.
04250   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
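        // (On ELFv1 this floor is 48 + 8*8 = 112 bytes; on ELFv2 it is
        // 32 + 8*8 = 96 bytes.)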
04251 
04252   // Tail call needs the stack to be aligned.
04253   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
04254       CallConv == CallingConv::Fast)
04255     NumBytes = EnsureStackAlignment(MF.getTarget(), NumBytes);
04256 
04257   // Calculate by how many bytes the stack has to be adjusted in case of tail
04258   // call optimization.
04259   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
04260 
04261   // To protect arguments on the stack from being clobbered in a tail call,
04262   // force all the loads to happen before doing any other lowering.
04263   if (isTailCall)
04264     Chain = DAG.getStackArgumentTokenFactor(Chain);
04265 
04266   // Adjust the stack pointer for the new arguments...
04267   // These operations are automatically eliminated by the prolog/epilog pass
04268   Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
04269                                dl);
04270   SDValue CallSeqStart = Chain;
04271 
04272   // Load the return address and frame pointer so they can be moved somewhere
04273   // else later.
04274   SDValue LROp, FPOp;
04275   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
04276                                        dl);
04277 
04278   // Set up a copy of the stack pointer for use loading and storing any
04279   // arguments that may not fit in the registers available for argument
04280   // passing.
04281   SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
04282 
04283   // Figure out which arguments are going to go in registers, and which in
04284   // memory.  Also, if this is a vararg function, floating point operations
04285   // must be stored to our stack, and loaded into integer regs as well, if
04286   // any integer regs are available for argument passing.
04287   unsigned ArgOffset = LinkageSize;
04288   unsigned GPR_idx, FPR_idx = 0, VR_idx = 0;
04289 
04290   static const MCPhysReg GPR[] = {
04291     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
04292     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
04293   };
04294   static const MCPhysReg *FPR = GetFPR();
04295 
04296   static const MCPhysReg VR[] = {
04297     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
04298     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
04299   };
04300   static const MCPhysReg VSRH[] = {
04301     PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8,
04302     PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13
04303   };
04304 
04305   const unsigned NumGPRs = array_lengthof(GPR);
04306   const unsigned NumFPRs = 13;
04307   const unsigned NumVRs  = array_lengthof(VR);
04308 
04309   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
04310   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
04311 
04312   SmallVector<SDValue, 8> MemOpChains;
04313   for (unsigned i = 0; i != NumOps; ++i) {
04314     SDValue Arg = OutVals[i];
04315     ISD::ArgFlagsTy Flags = Outs[i].Flags;
04316     EVT ArgVT = Outs[i].VT;
04317     EVT OrigVT = Outs[i].ArgVT;
04318 
04319     /* Respect alignment of argument on the stack.  */
04320     unsigned Align =
04321       CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
04322     ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
04323 
04324     /* Compute GPR index associated with argument offset.  */
04325     GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
04326     GPR_idx = std::min(GPR_idx, NumGPRs);
04327 
04328     // PtrOff will be used to store the current argument to the stack if a
04329     // register cannot be found for it.
04330     SDValue PtrOff;
04331 
04332     PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
04333 
04334     PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
04335 
04336     // Promote integers to 64-bit values.
04337     if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
04338       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
04339       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
04340       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
04341     }
04342 
04343     // FIXME memcpy is used way more than necessary.  Correctness first.
04344     // Note: "by value" is code for passing a structure by value, not
04345     // basic types.
04346     if (Flags.isByVal()) {
04347       // Note: Size includes alignment padding, so
04348       //   struct x { short a; char b; }
04349       // will have Size = 4.  With #pragma pack(1), it will have Size = 3.
04350       // These are the proper values we need for right-justifying the
04351       // aggregate in a parameter register.
04352       unsigned Size = Flags.getByValSize();
04353 
04354       // An empty aggregate parameter takes up no storage and no
04355       // registers.
04356       if (Size == 0)
04357         continue;
04358 
04359       // All aggregates smaller than 8 bytes must be passed right-justified.
04360       if (Size==1 || Size==2 || Size==4) {
04361         EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
04362         if (GPR_idx != NumGPRs) {
04363           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
04364                                         MachinePointerInfo(), VT,
04365                                         false, false, false, 0);
04366           MemOpChains.push_back(Load.getValue(1));
04367           RegsToPass.push_back(std::make_pair(GPR[GPR_idx], Load));
04368 
04369           ArgOffset += PtrByteSize;
04370           continue;
04371         }
04372       }
04373 
04374       if (GPR_idx == NumGPRs && Size < 8) {
04375         SDValue AddPtr = PtrOff;
04376         if (!isLittleEndian) {
04377           SDValue Const = DAG.getConstant(PtrByteSize - Size,
04378                                           PtrOff.getValueType());
04379           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
04380         }
04381         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
04382                                                           CallSeqStart,
04383                                                           Flags, DAG, dl);
04384         ArgOffset += PtrByteSize;
04385         continue;
04386       }
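            // (Illustrative: for a 3-byte aggregate on big-endian, AddPtr above
            // is PtrOff + (8 - 3), so the bytes land in the least-significant
            // end of the doubleword slot, where the callee expects a small
            // right-justified aggregate in memory.)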
04387       // Copy entire object into memory.  There are cases where gcc-generated
04388       // code assumes it is there, even if it could be put entirely into
04389       // registers.  (This is not what the doc says.)
04390 
04391       // FIXME: The above statement is likely due to a misunderstanding of the
04392       // documents.  All arguments must be copied into the parameter area BY
04393       // THE CALLEE in the event that the callee takes the address of any
04394       // formal argument.  That has not yet been implemented.  However, it is
04395       // reasonable to use the stack area as a staging area for the register
04396       // load.
04397 
04398       // Skip this for small aggregates, as we will use the same slot for a
04399       // right-justified copy, below.
04400       if (Size >= 8)
04401         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
04402                                                           CallSeqStart,
04403                                                           Flags, DAG, dl);
04404 
04405       // When a register is available, pass a small aggregate right-justified.
04406       if (Size < 8 && GPR_idx != NumGPRs) {
04407         // The easiest way to get this right-justified in a register
04408         // is to copy the structure into the rightmost portion of a
04409         // local variable slot, then load the whole slot into the
04410         // register.
04411         // FIXME: The memcpy seems to produce pretty awful code for
04412         // small aggregates, particularly for packed ones.
04413         // FIXME: It would be preferable to use the slot in the
04414         // parameter save area instead of a new local variable.
04415         SDValue AddPtr = PtrOff;
04416         if (!isLittleEndian) {
04417           SDValue Const = DAG.getConstant(8 - Size, PtrOff.getValueType());
04418           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
04419         }
04420         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
04421                                                           CallSeqStart,
04422                                                           Flags, DAG, dl);
04423 
04424         // Load the slot into the register.
04425         SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff,
04426                                    MachinePointerInfo(),
04427                                    false, false, false, 0);
04428         MemOpChains.push_back(Load.getValue(1));
04429         RegsToPass.push_back(std::make_pair(GPR[GPR_idx], Load));
04430 
04431         // Done with this argument.
04432         ArgOffset += PtrByteSize;
04433         continue;
04434       }
04435 
04436       // For aggregates larger than PtrByteSize, copy the pieces of the
04437       // object that fit into registers from the parameter save area.
04438       for (unsigned j=0; j<Size; j+=PtrByteSize) {
04439         SDValue Const = DAG.getConstant(j, PtrOff.getValueType());
04440         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
04441         if (GPR_idx != NumGPRs) {
04442           SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
04443                                      MachinePointerInfo(),
04444                                      false, false, false, 0);
04445           MemOpChains.push_back(Load.getValue(1));
04446           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
04447           ArgOffset += PtrByteSize;
04448         } else {
04449           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
04450           break;
04451         }
04452       }
04453       continue;
04454     }
04455 
04456     switch (Arg.getSimpleValueType().SimpleTy) {
04457     default: llvm_unreachable("Unexpected ValueType for argument!");
04458     case MVT::i1:
04459     case MVT::i32:
04460     case MVT::i64:
04461       // These can be scalar arguments or elements of an integer array type
04462       // passed directly.  Clang may use those instead of "byval" aggregate
04463       // types to avoid forcing arguments to memory unnecessarily.
04464       if (GPR_idx != NumGPRs) {
04465         RegsToPass.push_back(std::make_pair(GPR[GPR_idx], Arg));
04466       } else {
04467         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
04468                          true, isTailCall, false, MemOpChains,
04469                          TailCallArguments, dl);
04470       }
04471       ArgOffset += PtrByteSize;
04472       break;
04473     case MVT::f32:
04474     case MVT::f64: {
04475       // These can be scalar arguments or elements of a float array type
04476       // passed directly.  The latter are used to implement ELFv2 homogenous
04477       // float aggregates.
04478 
04479       // Named arguments go into FPRs first, and once they overflow, the
04480       // remaining arguments go into GPRs and then the parameter save area.
04481       // Unnamed arguments for vararg functions always go to GPRs and
04482       // then the parameter save area.  For now, put all arguments to vararg
04483       // routines always in both locations (FPR *and* GPR or stack slot).
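            // (Illustrative: within an ELFv2 homogeneous float aggregate,
            // elements are drawn from FPRs while they last; once FPRs are
            // exhausted, consecutive f32 pairs are packed into single GPRs by
            // the BUILD_PAIR logic below.)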
04484       bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
04485 
04486       // First load the argument into the next available FPR.
04487       if (FPR_idx != NumFPRs)
04488         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
04489 
04490       // Next, load the argument into GPR or stack slot if needed.
04491       if (!NeedGPROrStack)
04492         ;
04493       else if (GPR_idx != NumGPRs) {
04494         // In the non-vararg case, this can only ever happen in the
04495         // presence of f32 array types, since otherwise we never run
04496         // out of FPRs before running out of GPRs.
04497         SDValue ArgVal;
04498 
04499         // Double values are always passed in a single GPR.
04500         if (Arg.getValueType() != MVT::f32) {
04501           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
04502 
04503         // Non-array float values are extended and passed in a GPR.
04504         } else if (!Flags.isInConsecutiveRegs()) {
04505           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
04506           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
04507 
04508         // If we have an array of floats, we collect every odd element
04509         // together with its predecessor into one GPR.
04510         } else if (ArgOffset % PtrByteSize != 0) {
04511           SDValue Lo, Hi;
04512           Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
04513           Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
04514           if (!isLittleEndian)
04515             std::swap(Lo, Hi);
04516           ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
04517 
04518         // The final element, if even, goes into the first half of a GPR.
04519         } else if (Flags.isInConsecutiveRegsLast()) {
04520           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
04521           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
04522           if (!isLittleEndian)
04523             ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
04524                                  DAG.getConstant(32, MVT::i32));
04525 
04526         // Non-final even elements are skipped; they will be handled together
04527         // with the subsequent argument on the next go-around.
04528         } else
04529           ArgVal = SDValue();
04530 
04531         if (ArgVal.getNode())
04532           RegsToPass.push_back(std::make_pair(GPR[GPR_idx], ArgVal));
04533       } else {
04534         // Single-precision floating-point values are mapped to the
04535         // second (rightmost) word of the stack doubleword.
04536         if (Arg.getValueType() == MVT::f32 &&
04537             !isLittleEndian && !Flags.isInConsecutiveRegs()) {
04538           SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType());
04539           PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
04540         }
04541 
04542         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
04543                          true, isTailCall, false, MemOpChains,
04544                          TailCallArguments, dl);
04545       }
04546       // When passing an array of floats, the array occupies consecutive
04547       // space in the argument area; only round up to the next doubleword
04548       // at the end of the array.  Otherwise, each float takes 8 bytes.
04549       ArgOffset += (Arg.getValueType() == MVT::f32 &&
04550                     Flags.isInConsecutiveRegs()) ? 4 : 8;
04551       if (Flags.isInConsecutiveRegsLast())
04552         ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
04553       break;
04554     }
04555     case MVT::v4f32:
04556     case MVT::v4i32:
04557     case MVT::v8i16:
04558     case MVT::v16i8:
04559     case MVT::v2f64:
04560     case MVT::v2i64:
04561       // These can be scalar arguments or elements of a vector array type
04562       // passed directly.  The latter are used to implement ELFv2 homogenous
04563       // vector aggregates.
04564 
04565       // For a varargs call, named arguments go into VRs or on the stack as
04566       // usual; unnamed arguments always go to the stack or the corresponding
04567       // GPRs when within range.  For now, we always put the value in both
04568       // locations (or even all three).
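            // (Illustrative: a vector passed through '...' is stored to its
            // stack slot, reloaded into the next free VR if one remains, and
            // also reloaded into up to two GPRs, so the callee finds it in
            // whichever location its va_arg implementation reads.)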
04569       if (isVarArg) {
04570         // We could elide this store in the case where the object fits
04571         // entirely in R registers.  Maybe later.
04572         SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
04573                                      MachinePointerInfo(), false, false, 0);
04574         MemOpChains.push_back(Store);
04575         if (VR_idx != NumVRs) {
04576           SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
04577                                      MachinePointerInfo(),
04578                                      false, false, false, 0);
04579           MemOpChains.push_back(Load.getValue(1));
04580 
04581           unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 ||
04582                            Arg.getSimpleValueType() == MVT::v2i64) ?
04583                           VSRH[VR_idx] : VR[VR_idx];
04584           ++VR_idx;
04585 
04586           RegsToPass.push_back(std::make_pair(VReg, Load));
04587         }
04588         ArgOffset += 16;
04589         for (unsigned i=0; i<16; i+=PtrByteSize) {
04590           if (GPR_idx == NumGPRs)
04591             break;
04592           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
04593                                   DAG.getConstant(i, PtrVT));
04594           SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(),
04595                                      false, false, false, 0);
04596           MemOpChains.push_back(Load.getValue(1));
04597           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
04598         }
04599         break;
04600       }
04601 
04602       // Non-varargs Altivec params go into VRs or on the stack.
04603       if (VR_idx != NumVRs) {
04604         unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 ||
04605                          Arg.getSimpleValueType() == MVT::v2i64) ?
04606                         VSRH[VR_idx] : VR[VR_idx];
04607         ++VR_idx;
04608 
04609         RegsToPass.push_back(std::make_pair(VReg, Arg));
04610       } else {
04611         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
04612                          true, isTailCall, true, MemOpChains,
04613                          TailCallArguments, dl);
04614       }
04615       ArgOffset += 16;
04616       break;
04617     }
04618   }
04619 
04620   assert(NumBytesActuallyUsed == ArgOffset);
04621   (void)NumBytesActuallyUsed;
04622 
04623   if (!MemOpChains.empty())
04624     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
04625 
04626   // Check if this is an indirect call (MTCTR/BCTRL).
04627   // See PrepareCall() for more information about calls through function
04628   // pointers in the 64-bit SVR4 ABI.
04629   if (!isTailCall &&
04630       !isa<GlobalAddressSDNode>(Callee) &&
04631       !isa<ExternalSymbolSDNode>(Callee)) {
04632     // Load r2 into a virtual register and store it to the TOC save area.
04633     SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
04634     // TOC save area offset.
04635     unsigned TOCSaveOffset = PPCFrameLowering::getTOCSaveOffset(isELFv2ABI);
04636     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset);
04637     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
04638     Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, MachinePointerInfo(),
04639                          false, false, 0);
04640     // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
04641     // This does not mean the MTCTR instruction must use R12; it's easier
04642     // to model this as an extra parameter, so do that.
04643     if (isELFv2ABI)
04644       RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
04645   }
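        // (Illustrative result, assuming ELFv1: the store above becomes
        // "std r2, 40(r1)", letting FinishCall reload the caller's TOC from
        // the save slot after the bctrl.)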
04646 
04647   // Build a sequence of copy-to-reg nodes chained together with token chain
04648   // and flag operands which copy the outgoing args into the appropriate regs.
04649   SDValue InFlag;
04650   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
04651     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
04652                              RegsToPass[i].second, InFlag);
04653     InFlag = Chain.getValue(1);
04654   }
04655 
04656   if (isTailCall)
04657     PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp,
04658                     FPOp, true, TailCallArguments);
04659 
04660   return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
04661                     RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
04662                     Ins, InVals);
04663 }
04664 
04665 SDValue
04666 PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
04667                                     CallingConv::ID CallConv, bool isVarArg,
04668                                     bool isTailCall,
04669                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
04670                                     const SmallVectorImpl<SDValue> &OutVals,
04671                                     const SmallVectorImpl<ISD::InputArg> &Ins,
04672                                     SDLoc dl, SelectionDAG &DAG,
04673                                     SmallVectorImpl<SDValue> &InVals) const {
04674 
04675   unsigned NumOps = Outs.size();
04676 
04677   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
04678   bool isPPC64 = PtrVT == MVT::i64;
04679   unsigned PtrByteSize = isPPC64 ? 8 : 4;
04680 
04681   MachineFunction &MF = DAG.getMachineFunction();
04682 
04683   // Mark this function as potentially containing a tail call. As a
04684   // consequence, the frame pointer will be used for dynamic alloca and for
04685   // restoring the caller's stack pointer in this function's epilogue. This is
04686   // done because the tail-called function might overwrite the value in this
04687   // function's (MF) stack pointer save slot at 0(SP).
04688   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
04689       CallConv == CallingConv::Fast)
04690     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
04691 
04692   // Count how many bytes are to be pushed on the stack, including the linkage
04693   // area, and parameter passing area.  We start with 24/48 bytes, which is
04694   // prereserved space for [SP][CR][LR][3 x unused].
04695   unsigned LinkageSize = PPCFrameLowering::getLinkageSize(isPPC64, true,
04696                                                           false);
04697   unsigned NumBytes = LinkageSize;
04698 
04699   // Add up all the space actually used.
04700   // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
04701   // they all go in registers, but we must reserve stack space for them for
04702   // possible use by the caller.  In varargs or 64-bit calls, parameters are
04703   // assigned stack space in order, with padding so Altivec parameters are
04704   // 16-byte aligned.
04705   unsigned nAltivecParamsAtEnd = 0;
04706   for (unsigned i = 0; i != NumOps; ++i) {
04707     ISD::ArgFlagsTy Flags = Outs[i].Flags;
04708     EVT ArgVT = Outs[i].VT;
04709     // Varargs Altivec parameters are padded to a 16 byte boundary.
04710     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
04711         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
04712         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
04713       if (!isVarArg && !isPPC64) {
04714         // Non-varargs Altivec parameters go after all the non-Altivec
04715         // parameters; handle those later so we know how much padding we need.
04716         nAltivecParamsAtEnd++;
04717         continue;
04718       }
04719       // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
04720       NumBytes = ((NumBytes+15)/16)*16;
04721     }
04722     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
04723   }
04724 
04725   // Allow for Altivec parameters at the end, if needed.
04726   if (nAltivecParamsAtEnd) {
04727     NumBytes = ((NumBytes+15)/16)*16;
04728     NumBytes += 16*nAltivecParamsAtEnd;
04729   }
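        // (Illustrative arithmetic: with NumBytes = 52 and two Altivec
        // parameters at the end, the rounding gives 64 and the total becomes
        // 64 + 2*16 = 96.)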
04730 
04731   // The prolog code of a varargs callee may store up to 8 GPR argument
04732   // registers to the stack so that va_start can index over them in memory.
04733   // Because we cannot tell if this is needed on the caller side, we have to
04734   // conservatively assume that it is needed.  As such, make sure we have at
04735   // least enough stack space for the caller to store the 8 GPRs.
04736   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
04737 
04738   // Tail call needs the stack to be aligned.
04739   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
04740       CallConv == CallingConv::Fast)
04741     NumBytes = EnsureStackAlignment(MF.getTarget(), NumBytes);
04742 
04743   // Calculate by how many bytes the stack has to be adjusted in case of tail
04744   // call optimization.
04745   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
04746 
04747   // To protect arguments on the stack from being clobbered in a tail call,
04748   // force all the loads to happen before doing any other lowering.
04749   if (isTailCall)
04750     Chain = DAG.getStackArgumentTokenFactor(Chain);
04751 
04752   // Adjust the stack pointer for the new arguments...
04753   // These operations are automatically eliminated by the prolog/epilog pass
04754   Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
04755                                dl);
04756   SDValue CallSeqStart = Chain;
04757 
04758   // Load the return address and frame pointer so they can be moved somewhere
04759   // else later.
04760   SDValue LROp, FPOp;
04761   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
04762                                        dl);
04763 
04764   // Set up a copy of the stack pointer for use loading and storing any
04765   // arguments that may not fit in the registers available for argument
04766   // passing.
04767   SDValue StackPtr;
04768   if (isPPC64)
04769     StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
04770   else
04771     StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
04772 
04773   // Figure out which arguments are going to go in registers, and which in
04774   // memory.  Also, if this is a vararg function, floating point operations
04775   // must be stored to our stack, and loaded into integer regs as well, if
04776   // any integer regs are available for argument passing.
04777   unsigned ArgOffset = LinkageSize;
04778   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
04779 
04780   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
04781     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
04782     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
04783   };
04784   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
04785     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
04786     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
04787   };
04788   static const MCPhysReg *FPR = GetFPR();
04789 
04790   static const MCPhysReg VR[] = {
04791     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
04792     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
04793   };
04794   const unsigned NumGPRs = array_lengthof(GPR_32);
04795   const unsigned NumFPRs = 13;
04796   const unsigned NumVRs  = array_lengthof(VR);
04797 
04798   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
04799 
04800   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
04801   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
04802 
04803   SmallVector<SDValue, 8> MemOpChains;
04804   for (unsigned i = 0; i != NumOps; ++i) {
04805     SDValue Arg = OutVals[i];
04806     ISD::ArgFlagsTy Flags = Outs[i].Flags;
04807 
04808     // PtrOff will be used to store the current argument to the stack if a
04809     // register cannot be found for it.
04810     SDValue PtrOff;
04811 
04812     PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
04813 
04814     PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
04815 
04816     // On PPC64, promote integers to 64-bit values.
04817     if (isPPC64 && Arg.getValueType() == MVT::i32) {
04818       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
04819       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
04820       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
04821     }
04822 
04823     // FIXME memcpy is used way more than necessary.  Correctness first.
04824     // Note: "by value" is code for passing a structure by value, not
04825     // basic types.
04826     if (Flags.isByVal()) {
04827       unsigned Size = Flags.getByValSize();
04828       // Very small objects are passed right-justified.  Everything else is
04829       // passed left-justified.
04830       if (Size==1 || Size==2) {
04831         EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
04832         if (GPR_idx != NumGPRs) {
04833           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
04834                                         MachinePointerInfo(), VT,
04835                                         false, false, false, 0);
04836           MemOpChains.push_back(Load.getValue(1));
04837           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
04838 
04839           ArgOffset += PtrByteSize;
04840         } else {
04841           SDValue Const = DAG.getConstant(PtrByteSize - Size,
04842                                           PtrOff.getValueType());
04843           SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
04844           Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
04845                                                             CallSeqStart,
04846                                                             Flags, DAG, dl);
04847           ArgOffset += PtrByteSize;
04848         }
04849         continue;
04850       }
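            // (Right-justification means a 1- or 2-byte object that spills to
            //  memory is stored at PtrOff + (PtrByteSize - Size): on PPC32 a
            //  2-byte struct occupies bytes 2-3 of its 4-byte slot, which is
            //  where a big-endian callee expects the low-order bytes.)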
04851       // Copy entire object into memory.  There are cases where gcc-generated
04852       // code assumes it is there, even if it could be put entirely into
04853       // registers.  (This is not what the doc says.)
04854       Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
04855                                                         CallSeqStart,
04856                                                         Flags, DAG, dl);
04857 
04858       // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
04859       // copy the pieces of the object that fit into registers from the
04860       // parameter save area.
04861       for (unsigned j=0; j<Size; j+=PtrByteSize) {
04862         SDValue Const = DAG.getConstant(j, PtrOff.getValueType());
04863         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
04864         if (GPR_idx != NumGPRs) {
04865           SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
04866                                      MachinePointerInfo(),
04867                                      false, false, false, 0);
04868           MemOpChains.push_back(Load.getValue(1));
04869           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
04870           ArgOffset += PtrByteSize;
04871         } else {
04872           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
04873           break;
04874         }
04875       }
04876       continue;
04877     }
04878 
04879     switch (Arg.getSimpleValueType().SimpleTy) {
04880     default: llvm_unreachable("Unexpected ValueType for argument!");
04881     case MVT::i1:
04882     case MVT::i32:
04883     case MVT::i64:
04884       if (GPR_idx != NumGPRs) {
04885         if (Arg.getValueType() == MVT::i1)
04886           Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
04887 
04888         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
04889       } else {
04890         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
04891                          isPPC64, isTailCall, false, MemOpChains,
04892                          TailCallArguments, dl);
04893       }
04894       ArgOffset += PtrByteSize;
04895       break;
04896     case MVT::f32:
04897     case MVT::f64:
04898       if (FPR_idx != NumFPRs) {
04899         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
04900 
04901         if (isVarArg) {
04902           SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
04903                                        MachinePointerInfo(), false, false, 0);
04904           MemOpChains.push_back(Store);
04905 
04906           // Float varargs are always shadowed in available integer registers
04907           if (GPR_idx != NumGPRs) {
04908             SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
04909                                        MachinePointerInfo(), false, false,
04910                                        false, 0);
04911             MemOpChains.push_back(Load.getValue(1));
04912             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
04913           }
04914           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
04915             SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType());
04916             PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
04917             SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
04918                                        MachinePointerInfo(),
04919                                        false, false, false, 0);
04920             MemOpChains.push_back(Load.getValue(1));
04921             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
04922           }
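                // (Varargs floats are mirrored into GPRs because the callee
                //  may read any anonymous argument out of the GPR save area
                //  via va_arg; a 32-bit target needs two GPRs for an f64,
                //  hence the second 4-byte load above.)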
04923         } else {
04924           // If we have any FPRs remaining, we may also have GPRs remaining.
04925           // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
04926           // GPRs.
04927           if (GPR_idx != NumGPRs)
04928             ++GPR_idx;
04929           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
04930               !isPPC64)  // PPC64 has 64-bit GPR's obviously :)
04931             ++GPR_idx;
04932         }
04933       } else
04934         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
04935                          isPPC64, isTailCall, false, MemOpChains,
04936                          TailCallArguments, dl);
04937       if (isPPC64)
04938         ArgOffset += 8;
04939       else
04940         ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
04941       break;
04942     case MVT::v4f32:
04943     case MVT::v4i32:
04944     case MVT::v8i16:
04945     case MVT::v16i8:
04946       if (isVarArg) {
04947         // These go aligned on the stack, or in the corresponding R registers
04948         // when within range.  The Darwin PPC ABI doc claims they also go in
04949         // V registers; in fact gcc does this only for arguments that are
04950         // prototyped, not for those that match the ellipsis (...).  We do it
04951         // for all arguments, which seems to work.
04952         while (ArgOffset % 16 != 0) {
04953           ArgOffset += PtrByteSize;
04954           if (GPR_idx != NumGPRs)
04955             GPR_idx++;
04956         }
04957         // We could elide this store in the case where the object fits
04958         // entirely in R registers.  Maybe later.
04959         PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
04960                             DAG.getConstant(ArgOffset, PtrVT));
04961         SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
04962                                      MachinePointerInfo(), false, false, 0);
04963         MemOpChains.push_back(Store);
04964         if (VR_idx != NumVRs) {
04965           SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
04966                                      MachinePointerInfo(),
04967                                      false, false, false, 0);
04968           MemOpChains.push_back(Load.getValue(1));
04969           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
04970         }
04971         ArgOffset += 16;
04972         for (unsigned i=0; i<16; i+=PtrByteSize) {
04973           if (GPR_idx == NumGPRs)
04974             break;
04975           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
04976                                   DAG.getConstant(i, PtrVT));
04977           SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(),
04978                                      false, false, false, 0);
04979           MemOpChains.push_back(Load.getValue(1));
04980           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
04981         }
04982         break;
04983       }
04984 
04985       // Non-varargs Altivec params generally go in registers, but have
04986       // stack space allocated at the end.
04987       if (VR_idx != NumVRs) {
04988         // Doesn't have GPR space allocated.
04989         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
04990       } else if (nAltivecParamsAtEnd==0) {
04991         // We are emitting Altivec params in order.
04992         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
04993                          isPPC64, isTailCall, true, MemOpChains,
04994                          TailCallArguments, dl);
04995         ArgOffset += 16;
04996       }
04997       break;
04998     }
04999   }
05000   // If all Altivec parameters fit in registers, as they usually do,
05001   // they get stack space following the non-Altivec parameters.  We
05002   // don't track this here because nobody below needs it.
05003   // If there are more Altivec parameters than fit in registers, emit
05004   // the stores here.
05005   if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
05006     unsigned j = 0;
05007     // Offset is aligned; skip 1st 12 params which go in V registers.
05008     ArgOffset = ((ArgOffset+15)/16)*16;
05009     ArgOffset += 12*16;
05010     for (unsigned i = 0; i != NumOps; ++i) {
05011       SDValue Arg = OutVals[i];
05012       EVT ArgType = Outs[i].VT;
05013       if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
05014           ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
05015         if (++j > NumVRs) {
05016           SDValue PtrOff;
05017           // We are emitting Altivec params in order.
05018           LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
05019                            isPPC64, isTailCall, true, MemOpChains,
05020                            TailCallArguments, dl);
05021           ArgOffset += 16;
05022         }
05023       }
05024     }
05025   }
05026 
05027   if (!MemOpChains.empty())
05028     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
05029 
05030   // On Darwin, R12 must contain the address of an indirect callee.  This does
05031   // not mean the MTCTR instruction must use R12; it's easier to model this as
05032   // an extra parameter, so do that.
05033   if (!isTailCall &&
05034       !isa<GlobalAddressSDNode>(Callee) &&
05035       !isa<ExternalSymbolSDNode>(Callee) &&
05036       !isBLACompatibleAddress(Callee, DAG))
05037     RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
05038                                                    PPC::R12), Callee));
05039 
05040   // Build a sequence of copy-to-reg nodes chained together with token chain
05041   // and flag operands which copy the outgoing args into the appropriate regs.
05042   SDValue InFlag;
05043   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
05044     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
05045                              RegsToPass[i].second, InFlag);
05046     InFlag = Chain.getValue(1);
05047   }
05048 
05049   if (isTailCall)
05050     PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp,
05051                     FPOp, true, TailCallArguments);
05052 
05053   return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
05054                     RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
05055                     Ins, InVals);
05056 }
05057 
05058 bool
05059 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
05060                                   MachineFunction &MF, bool isVarArg,
05061                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
05062                                   LLVMContext &Context) const {
05063   SmallVector<CCValAssign, 16> RVLocs;
05064   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
05065   return CCInfo.CheckReturn(Outs, RetCC_PPC);
05066 }
05067 
05068 SDValue
05069 PPCTargetLowering::LowerReturn(SDValue Chain,
05070                                CallingConv::ID CallConv, bool isVarArg,
05071                                const SmallVectorImpl<ISD::OutputArg> &Outs,
05072                                const SmallVectorImpl<SDValue> &OutVals,
05073                                SDLoc dl, SelectionDAG &DAG) const {
05074 
05075   SmallVector<CCValAssign, 16> RVLocs;
05076   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
05077                  *DAG.getContext());
05078   CCInfo.AnalyzeReturn(Outs, RetCC_PPC);
05079 
05080   SDValue Flag;
05081   SmallVector<SDValue, 4> RetOps(1, Chain);
05082 
05083   // Copy the result values into the output registers.
05084   for (unsigned i = 0; i != RVLocs.size(); ++i) {
05085     CCValAssign &VA = RVLocs[i];
05086     assert(VA.isRegLoc() && "Can only return in registers!");
05087 
05088     SDValue Arg = OutVals[i];
05089 
05090     switch (VA.getLocInfo()) {
05091     default: llvm_unreachable("Unknown loc info!");
05092     case CCValAssign::Full: break;
05093     case CCValAssign::AExt:
05094       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
05095       break;
05096     case CCValAssign::ZExt:
05097       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
05098       break;
05099     case CCValAssign::SExt:
05100       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
05101       break;
05102     }
05103 
05104     Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
05105     Flag = Chain.getValue(1);
05106     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
05107   }
05108 
05109   RetOps[0] = Chain;  // Update chain.
05110 
05111   // Add the flag if we have it.
05112   if (Flag.getNode())
05113     RetOps.push_back(Flag);
05114 
05115   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
05116 }
05117 
05118 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
05119                                    const PPCSubtarget &Subtarget) const {
05120   // When we pop the dynamic allocation we need to restore the SP link.
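        // (On PPC the word at 0(SP) is the back chain -- a pointer to the
        //  caller's frame -- so after moving SP we must re-store the saved
        //  link at the new top of stack, which is what the load/copy/store
        //  sequence below does.)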
05121   SDLoc dl(Op);
05122 
05123   // Get the correct type for pointers.
05124   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
05125 
05126   // Construct the stack pointer operand.
05127   bool isPPC64 = Subtarget.isPPC64();
05128   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
05129   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
05130 
05131   // Get the operands for the STACKRESTORE.
05132   SDValue Chain = Op.getOperand(0);
05133   SDValue SaveSP = Op.getOperand(1);
05134 
05135   // Load the old link SP.
05136   SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
05137                                    MachinePointerInfo(),
05138                                    false, false, false, 0);
05139 
05140   // Restore the stack pointer.
05141   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
05142 
05143   // Store the old link SP.
05144   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
05145                       false, false, 0);
05146 }
05147 
05148 
05149 
05150 SDValue
05151 PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
05152   MachineFunction &MF = DAG.getMachineFunction();
05153   bool isPPC64 = Subtarget.isPPC64();
05154   bool isDarwinABI = Subtarget.isDarwinABI();
05155   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
05156 
05157   // Get the current return address save index.  The users of this index
05158   // are primarily the tail call lowering code.
05159   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
05160   int RASI = FI->getReturnAddrSaveIndex();
05161 
05162   // If the return address save index hasn't been defined yet.
05163   if (!RASI) {
05164     // Find out the fixed offset of the return address save area.
05165     int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI);
05166     // Allocate the frame index for the return address save area.
05167     RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, true);
05168     // Save the result.
05169     FI->setReturnAddrSaveIndex(RASI);
05170   }
05171   return DAG.getFrameIndex(RASI, PtrVT);
05172 }
05173 
05174 SDValue
05175 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
05176   MachineFunction &MF = DAG.getMachineFunction();
05177   bool isPPC64 = Subtarget.isPPC64();
05178   bool isDarwinABI = Subtarget.isDarwinABI();
05179   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
05180 
05181   // Get current frame pointer save index.  The users of this index will be
05182   // primarily DYNALLOC instructions.
05183   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
05184   int FPSI = FI->getFramePointerSaveIndex();
05185 
05186   // If the frame pointer save index hasn't been defined yet.
05187   if (!FPSI) {
05188     // Find out what the fix offset of the frame pointer save area.
05189     int FPOffset = PPCFrameLowering::getFramePointerSaveOffset(isPPC64,
05190                                                            isDarwinABI);
05191 
05192     // Allocate the frame index for frame pointer save area.
05193     FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
05194     // Save the result.
05195     FI->setFramePointerSaveIndex(FPSI);
05196   }
05197   return DAG.getFrameIndex(FPSI, PtrVT);
05198 }
05199 
05200 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
05201                                          SelectionDAG &DAG,
05202                                          const PPCSubtarget &Subtarget) const {
05203   // Get the inputs.
05204   SDValue Chain = Op.getOperand(0);
05205   SDValue Size  = Op.getOperand(1);
05206   SDLoc dl(Op);
05207 
05208   // Get the correct type for pointers.
05209   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
05210   // Negate the size.
05211   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
05212                                   DAG.getConstant(0, PtrVT), Size);
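        // (The size is negated because the stack grows downward: the DYNALLOC
        //  pseudo is later expanded into a store-with-update (stwux/stdux)
        //  sequence that decrements SP by the allocation size and re-stores
        //  the back chain at the new SP.)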
05213   // Construct a node for the frame pointer save index.
05214   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
05215   // Build a DYNALLOC node.
05216   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
05217   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
05218   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
05219 }
05220 
05221 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
05222                                                SelectionDAG &DAG) const {
05223   SDLoc DL(Op);
05224   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
05225                      DAG.getVTList(MVT::i32, MVT::Other),
05226                      Op.getOperand(0), Op.getOperand(1));
05227 }
05228 
05229 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
05230                                                 SelectionDAG &DAG) const {
05231   SDLoc DL(Op);
05232   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
05233                      Op.getOperand(0), Op.getOperand(1));
05234 }
05235 
05236 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
05237   assert(Op.getValueType() == MVT::i1 &&
05238          "Custom lowering only for i1 loads");
05239 
05240   // First, extend-load 8 bits to the pointer width, then truncate to 1 bit.
05241 
05242   SDLoc dl(Op);
05243   LoadSDNode *LD = cast<LoadSDNode>(Op);
05244 
05245   SDValue Chain = LD->getChain();
05246   SDValue BasePtr = LD->getBasePtr();
05247   MachineMemOperand *MMO = LD->getMemOperand();
05248 
05249   SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(), Chain,
05250                                  BasePtr, MVT::i8, MMO);
05251   SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
05252 
05253   SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
05254   return DAG.getMergeValues(Ops, dl);
05255 }
05256 
05257 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
05258   assert(Op.getOperand(1).getValueType() == MVT::i1 &&
05259          "Custom lowering only for i1 stores");
05260 
05261   // First, zero extend to the pointer width, then use a truncating store to 8 bits.
05262 
05263   SDLoc dl(Op);
05264   StoreSDNode *ST = cast<StoreSDNode>(Op);
05265 
05266   SDValue Chain = ST->getChain();
05267   SDValue BasePtr = ST->getBasePtr();
05268   SDValue Value = ST->getValue();
05269   MachineMemOperand *MMO = ST->getMemOperand();
05270 
05271   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(), Value);
05272   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
05273 }
05274 
05275 // FIXME: Remove this once the ANDI glue bug is fixed:
05276 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
05277   assert(Op.getValueType() == MVT::i1 &&
05278          "Custom lowering only for i1 results");
05279 
05280   SDLoc DL(Op);
05281   return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1,
05282                      Op.getOperand(0));
05283 }
05284 
05285 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
05286 /// possible.
05287 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
05288   // Not FP? Not a fsel.
05289   if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
05290       !Op.getOperand(2).getValueType().isFloatingPoint())
05291     return Op;
05292 
05293   // We might be able to do better than this under some circumstances, but in
05294   // general, fsel-based lowering of select is a finite-math-only optimization.
05295   // For more information, see section F.3 of the 2.06 ISA specification.
05296   if (!DAG.getTarget().Options.NoInfsFPMath ||
05297       !DAG.getTarget().Options.NoNaNsFPMath)
05298     return Op;
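        // (fsel selects its second operand when the first compares >= 0.0 --
        //  a NaN does not -- so unordered predicates cannot be honored, and
        //  the FSUB used below to materialize the comparison value can
        //  overflow to infinity; hence the transformation is only sound when
        //  NaNs and infinities are assumed absent.)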
05299 
05300   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
05301 
05302   EVT ResVT = Op.getValueType();
05303   EVT CmpVT = Op.getOperand(0).getValueType();
05304   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
05305   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
05306   SDLoc dl(Op);
05307 
05308   // If the RHS of the comparison is a 0.0, we don't need to do the
05309   // subtraction at all.
05310   SDValue Sel1;
05311   if (isFloatingPointZero(RHS))
05312     switch (CC) {
05313     default: break;       // SETUO etc aren't handled by fsel.
05314     case ISD::SETNE:
05315       std::swap(TV, FV);
05316     case ISD::SETEQ:
05317       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
05318         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
05319       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
05320       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
05321         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
05322       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
05323                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
05324     case ISD::SETULT:
05325     case ISD::SETLT:
05326       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
05327     case ISD::SETOGE:
05328     case ISD::SETGE:
05329       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
05330         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
05331       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
05332     case ISD::SETUGT:
05333     case ISD::SETGT:
05334       std::swap(TV, FV);  // fsel is natively setge, swap operands for setgt
05335     case ISD::SETOLE:
05336     case ISD::SETLE:
05337       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
05338         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
05339       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
05340                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
05341     }
05342 
05343   SDValue Cmp;
05344   switch (CC) {
05345   default: break;       // SETUO etc aren't handled by fsel.
05346   case ISD::SETNE:
05347     std::swap(TV, FV);
05348   case ISD::SETEQ:
05349     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS);
05350     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
05351       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
05352     Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
05353     if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
05354       Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
05355     return DAG.getNode(PPCISD::FSEL, dl, ResVT,
05356                        DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
05357   case ISD::SETULT:
05358   case ISD::SETLT:
05359     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS);
05360     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
05361       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
05362     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
05363   case ISD::SETOGE:
05364   case ISD::SETGE:
05365     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS);
05366     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
05367       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
05368     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
05369   case ISD::SETUGT:
05370   case ISD::SETGT:
05371     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS);
05372     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
05373       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
05374     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
05375   case ISD::SETOLE:
05376   case ISD::SETLE:
05377     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS);
05378     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
05379       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
05380     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
05381   }
05382   return Op;
05383 }
05384 
05385 // FIXME: Split this code up when LegalizeDAGTypes lands.
05386 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
05387                                            SDLoc dl) const {
05388   assert(Op.getOperand(0).getValueType().isFloatingPoint());
05389   SDValue Src = Op.getOperand(0);
05390   if (Src.getValueType() == MVT::f32)
05391     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
05392 
05393   SDValue Tmp;
05394   switch (Op.getSimpleValueType().SimpleTy) {
05395   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
05396   case MVT::i32:
05397     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ :
05398                         (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ :
05399                                                    PPCISD::FCTIDZ),
05400                       dl, MVT::f64, Src);
05401     break;
05402   case MVT::i64:
05403     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
05404            "i64 FP_TO_UINT is supported only with FPCVT");
05405     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
05406                                                         PPCISD::FCTIDUZ,
05407                       dl, MVT::f64, Src);
05408     break;
05409   }
05410 
05411   // Convert the FP value to an int value through memory.
05412   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
05413     (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT());
05414   SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
05415   int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
05416   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(FI);
05417 
05418   // Emit a store to the stack slot.
05419   SDValue Chain;
05420   if (i32Stack) {
05421     MachineFunction &MF = DAG.getMachineFunction();
05422     MachineMemOperand *MMO =
05423       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4);
05424     SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
05425     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
05426               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
05427   } else
05428     Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr,
05429                          MPI, false, false, 0);
05430 
05431   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
05432   // add in a bias.
05433   if (Op.getValueType() == MVT::i32 && !i32Stack) {
05434     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
05435                         DAG.getConstant(4, FIPtr.getValueType()));
05436     MPI = MachinePointerInfo();
05437   }
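        // (fctiwz/fctiwuz leave the 32-bit result in the low-order word of
        //  the f64 register, and this path stores all 8 bytes; in big-endian
        //  layout that word sits at offset 4, hence the bias above.)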
05438 
05439   return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, MPI,
05440                      false, false, false, 0);
05441 }
05442 
05443 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
05444                                            SelectionDAG &DAG) const {
05445   SDLoc dl(Op);
05446   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
05447   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
05448     return SDValue();
05449 
05450   if (Op.getOperand(0).getValueType() == MVT::i1)
05451     return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
05452                        DAG.getConstantFP(1.0, Op.getValueType()),
05453                        DAG.getConstantFP(0.0, Op.getValueType()));
05454 
05455   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
05456          "UINT_TO_FP is supported only with FPCVT");
05457 
05458   // If we have FCFIDS, then use it when converting to single-precision.
05459   // Otherwise, convert to double-precision and then round.
05460   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) ?
05461                    (Op.getOpcode() == ISD::UINT_TO_FP ?
05462                     PPCISD::FCFIDUS : PPCISD::FCFIDS) :
05463                    (Op.getOpcode() == ISD::UINT_TO_FP ?
05464                     PPCISD::FCFIDU : PPCISD::FCFID);
05465   MVT      FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) ?
05466                    MVT::f32 : MVT::f64;
05467 
05468   if (Op.getOperand(0).getValueType() == MVT::i64) {
05469     SDValue SINT = Op.getOperand(0);
05470     // When converting to single-precision, we actually need to convert
05471     // to double-precision first and then round to single-precision.
05472     // To avoid double-rounding effects during that operation, we have
05473     // to prepare the input operand.  Bits that might be truncated when
05474     // converting to double-precision are replaced by a bit that won't
05475     // be lost at this stage, but is below the single-precision rounding
05476     // position.
05477     //
05478     // However, if -enable-unsafe-fp-math is in effect, accept double
05479     // rounding to avoid the extra overhead.
05480     if (Op.getValueType() == MVT::f32 &&
05481         !Subtarget.hasFPCVT() &&
05482         !DAG.getTarget().Options.UnsafeFPMath) {
05483 
05484       // Twiddle input to make sure the low 11 bits are zero.  (If this
05485       // is the case, we are guaranteed the value will fit into the 53 bit
05486       // mantissa of an IEEE double-precision value without rounding.)
05487       // If any of those low 11 bits were not zero originally, make sure
05488       // bit 12 (value 2048) is set instead, so that the final rounding
05489       // to single-precision gets the correct result.
05490       SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
05491                                   SINT, DAG.getConstant(2047, MVT::i64));
05492       Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
05493                           Round, DAG.getConstant(2047, MVT::i64));
05494       Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
05495       Round = DAG.getNode(ISD::AND, dl, MVT::i64,
05496                           Round, DAG.getConstant(-2048, MVT::i64));
05497 
05498       // However, we cannot use that value unconditionally: if the magnitude
05499       // of the input value is small, the bit-twiddling we did above might
05500       // end up visibly changing the output.  Fortunately, in that case, we
05501       // don't need to twiddle bits since the original input will convert
05502       // exactly to double-precision floating-point already.  Therefore,
05503       // construct a conditional to use the original value if the top 11
05504       // bits are all sign-bit copies, and use the rounded value computed
05505       // above otherwise.
05506       SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
05507                                  SINT, DAG.getConstant(53, MVT::i32));
05508       Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
05509                          Cond, DAG.getConstant(1, MVT::i64));
05510       Cond = DAG.getSetCC(dl, MVT::i32,
05511                           Cond, DAG.getConstant(1, MVT::i64), ISD::SETUGT);
05512 
05513       SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
05514     }
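          // (Illustrative trace: for SINT = 0x20000000000007FF the low 11
          //  bits are nonzero, so (SINT & 2047) + 2047 = 4094 sets bit 11;
          //  OR-ing with SINT and clearing the low 11 bits gives
          //  0x2000000000000800, which is exactly representable in an f64
          //  mantissa and carries a sticky bit that keeps the final round to
          //  f32 correct.)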
05515 
05516     SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
05517     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits);
05518 
05519     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
05520       FP = DAG.getNode(ISD::FP_ROUND, dl,
05521                        MVT::f32, FP, DAG.getIntPtrConstant(0));
05522     return FP;
05523   }
05524 
05525   assert(Op.getOperand(0).getValueType() == MVT::i32 &&
05526          "Unhandled INT_TO_FP type in custom expander!");
05527   // Since we only generate this in 64-bit mode, we can take advantage of
05528   // 64-bit registers.  In particular, sign extend the input value into a
05529   // 64-bit register with extsw, store the WHOLE 64-bit value onto the
05530   // stack, then lfd it and fcfid it.
05531   MachineFunction &MF = DAG.getMachineFunction();
05532   MachineFrameInfo *FrameInfo = MF.getFrameInfo();
05533   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
05534 
05535   SDValue Ld;
05536   if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
05537     int FrameIdx = FrameInfo->CreateStackObject(4, 4, false);
05538     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
05539 
05540     SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
05541                                  MachinePointerInfo::getFixedStack(FrameIdx),
05542                                  false, false, 0);
05543 
05544     assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
05545            "Expected an i32 store");
05546     MachineMemOperand *MMO =
05547       MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
05548                               MachineMemOperand::MOLoad, 4, 4);
05549     SDValue Ops[] = { Store, FIdx };
05550     Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ?
05551                                    PPCISD::LFIWZX : PPCISD::LFIWAX,
05552                                  dl, DAG.getVTList(MVT::f64, MVT::Other),
05553                                  Ops, MVT::i32, MMO);
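          // (lfiwax/lfiwzx load a 32-bit integer from memory directly into an
          //  FPR, sign- or zero-extended to 64 bits, so the i32 reaches the
          //  FPU through a 4-byte stack slot without needing the 64-bit GPRs
          //  that the fallback path below requires.)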
05554   } else {
05555     assert(Subtarget.isPPC64() &&
05556            "i32->FP without LFIWAX supported only on PPC64");
05557 
05558     int FrameIdx = FrameInfo->CreateStackObject(8, 8, false);
05559     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
05560 
05561     SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64,
05562                                 Op.getOperand(0));
05563 
05564     // STD the extended value into the stack slot.
05565     SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Ext64, FIdx,
05566                                  MachinePointerInfo::getFixedStack(FrameIdx),
05567                                  false, false, 0);
05568 
05569     // Load the value as a double.
05570     Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx,
05571                      MachinePointerInfo::getFixedStack(FrameIdx),
05572                      false, false, false, 0);
05573   }
05574 
05575   // FCFID it and return it.
05576   SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
05577   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
05578     FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0));
05579   return FP;
05580 }
05581 
05582 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
05583                                             SelectionDAG &DAG) const {
05584   SDLoc dl(Op);
05585   /*
05586    The rounding mode is in bits 30:31 of the FPSCR, and has the following
05587    settings:
05588      00 Round to nearest
05589      01 Round to 0
05590      10 Round to +inf
05591      11 Round to -inf
05592 
05593   FLT_ROUNDS, on the other hand, expects the following:
05594     -1 Undefined
05595      0 Round to 0
05596      1 Round to nearest
05597      2 Round to +inf
05598      3 Round to -inf
05599 
05600   To perform the conversion, we do:
05601     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
05602   */
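        /*
         Spot-checking the formula with x = FPSCR & 3:
           x = 0 (nearest): 0 ^ ((~0 & 3) >> 1) = 0 ^ 1 = 1
           x = 1 (to 0):    1 ^ ((~1 & 3) >> 1) = 1 ^ 1 = 0
           x = 2 (to +inf): 2 ^ ((~2 & 3) >> 1) = 2 ^ 0 = 2
           x = 3 (to -inf): 3 ^ ((~3 & 3) >> 1) = 3 ^ 0 = 3
        */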
05603 
05604   MachineFunction &MF = DAG.getMachineFunction();
05605   EVT VT = Op.getValueType();
05606   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
05607 
05608   // Save FP Control Word to register
05609   EVT NodeTys[] = {
05610     MVT::f64,    // return register
05611     MVT::Glue    // unused in this context
05612   };
05613   SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);
05614 
05615   // Save FP register to stack slot
05616   int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false);
05617   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
05618   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain,
05619                                StackSlot, MachinePointerInfo(), false, false,0);
05620 
05621   // Load FP Control Word from low 32 bits of stack slot.
05622   SDValue Four = DAG.getConstant(4, PtrVT);
05623   SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
05624   SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(),
05625                             false, false, false, 0);
05626 
05627   // Transform as necessary
05628   SDValue CWD1 =
05629     DAG.getNode(ISD::AND, dl, MVT::i32,
05630                 CWD, DAG.getConstant(3, MVT::i32));
05631   SDValue CWD2 =
05632     DAG.getNode(ISD::SRL, dl, MVT::i32,
05633                 DAG.getNode(ISD::AND, dl, MVT::i32,
05634                             DAG.getNode(ISD::XOR, dl, MVT::i32,
05635                                         CWD, DAG.getConstant(3, MVT::i32)),
05636                             DAG.getConstant(3, MVT::i32)),
05637                 DAG.getConstant(1, MVT::i32));
05638 
05639   SDValue RetVal =
05640     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
05641 
05642   return DAG.getNode((VT.getSizeInBits() < 16 ?
05643                       ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
05644 }
05645 
05646 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
05647   EVT VT = Op.getValueType();
05648   unsigned BitWidth = VT.getSizeInBits();
05649   SDLoc dl(Op);
05650   assert(Op.getNumOperands() == 3 &&
05651          VT == Op.getOperand(1).getValueType() &&
05652          "Unexpected SHL!");
05653 
05654   // Expand into a bunch of logical ops.  Note that these ops
05655   // depend on the PPC behavior for oversized shift amounts.
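        // (PPC shifts yield 0 when the shift amount is at least the register
        //  width but below twice the width.  For Amt < BitWidth, Tmp5 =
        //  Amt - BitWidth is such an oversized amount, so Tmp6 is 0 and
        //  OutHi = (Hi << Amt) | (Lo >> (BitWidth - Amt)); for
        //  Amt >= BitWidth, Tmp2 and Tmp3 are 0 and OutHi is
        //  Lo << (Amt - BitWidth).)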
05656   SDValue Lo = Op.getOperand(0);
05657   SDValue Hi = Op.getOperand(1);
05658   SDValue Amt = Op.getOperand(2);
05659   EVT AmtVT = Amt.getValueType();
05660 
05661   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
05662                              DAG.getConstant(BitWidth, AmtVT), Amt);
05663   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
05664   SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
05665   SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
05666   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
05667                              DAG.getConstant(-BitWidth, AmtVT));
05668   SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
05669   SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
05670   SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
05671   SDValue OutOps[] = { OutLo, OutHi };
05672   return DAG.getMergeValues(OutOps, dl);
05673 }
05674 
05675 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
05676   EVT VT = Op.getValueType();
05677   SDLoc dl(Op);
05678   unsigned BitWidth = VT.getSizeInBits();
05679   assert(Op.getNumOperands() == 3 &&
05680          VT == Op.getOperand(1).getValueType() &&
05681          "Unexpected SRL!");
05682 
05683   // Expand into a bunch of logical ops.  Note that these ops
05684   // depend on the PPC behavior for oversized shift amounts.
05685   SDValue Lo = Op.getOperand(0);
05686   SDValue Hi = Op.getOperand(1);
05687   SDValue Amt = Op.getOperand(2);
05688   EVT AmtVT = Amt.getValueType();
05689 
05690   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
05691                              DAG.getConstant(BitWidth, AmtVT), Amt);
05692   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
05693   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
05694   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
05695   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
05696                              DAG.getConstant(-BitWidth, AmtVT));
05697   SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
05698   SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
05699   SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
05700   SDValue OutOps[] = { OutLo, OutHi };
05701   return DAG.getMergeValues(OutOps, dl);
05702 }
05703 
05704 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
05705   SDLoc dl(Op);
05706   EVT VT = Op.getValueType();
05707   unsigned BitWidth = VT.getSizeInBits();
05708   assert(Op.getNumOperands() == 3 &&
05709          VT == Op.getOperand(1).getValueType() &&
05710          "Unexpected SRA!");
05711 
05712   // Expand into a bunch of logical ops, followed by a select_cc.
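        // (The select_cc at the end picks Tmp4 = (Lo >> Amt) |
        //  (Hi << (BitWidth - Amt)) while Amt - BitWidth <= 0, and
        //  Tmp6 = Hi >>arith (Amt - BitWidth) otherwise, because an
        //  arithmetic shift must fill OutLo with sign bits rather than zeros
        //  once the amount reaches the register width.)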
05713   SDValue Lo = Op.getOperand(0);
05714   SDValue Hi = Op.getOperand(1);
05715   SDValue Amt = Op.getOperand(2);
05716   EVT AmtVT = Amt.getValueType();
05717 
05718   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
05719                              DAG.getConstant(BitWidth, AmtVT), Amt);
05720   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
05721   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
05722   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
05723   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
05724                              DAG.getConstant(-BitWidth, AmtVT));
05725   SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
05726   SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
05727   SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, AmtVT),
05728                                   Tmp4, Tmp6, ISD::SETLE);
05729   SDValue OutOps[] = { OutLo, OutHi };
05730   return DAG.getMergeValues(OutOps, dl);
05731 }
05732 
05733 //===----------------------------------------------------------------------===//
05734 // Vector related lowering.
05735 //
05736 
05737 /// BuildSplatI - Build a canonical splati of Val with an element size of
05738 /// SplatSize.  Cast the result to VT.
05739 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
05740                              SelectionDAG &DAG, SDLoc dl) {
05741   assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
05742 
05743   static const EVT VTys[] = { // canonical VT to use for each size.
05744     MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
05745   };
05746 
05747   EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
05748 
05749   // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
05750   if (Val == -1)
05751     SplatSize = 1;
05752 
05753   EVT CanonicalVT = VTys[SplatSize-1];
05754 
05755   // Build a canonical splat for this value.
05756   SDValue Elt = DAG.getConstant(Val, MVT::i32);
05757   SmallVector<SDValue, 8> Ops;
05758   Ops.assign(CanonicalVT.getVectorNumElements(), Elt);
05759   SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, Ops);
05760   return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res);
05761 }
05762 
05763 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the
05764 /// specified intrinsic ID.
05765 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op,
05766                                 SelectionDAG &DAG, SDLoc dl,
05767                                 EVT DestVT = MVT::Other) {
05768   if (DestVT == MVT::Other) DestVT = Op.getValueType();
05769   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
05770                      DAG.getConstant(IID, MVT::i32), Op);
05771 }
05772 
05773 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
05774 /// specified intrinsic ID.
05775 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
05776                                 SelectionDAG &DAG, SDLoc dl,
05777                                 EVT DestVT = MVT::Other) {
05778   if (DestVT == MVT::Other) DestVT = LHS.getValueType();
05779   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
05780                      DAG.getConstant(IID, MVT::i32), LHS, RHS);
05781 }
05782 
05783 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
05784 /// specified intrinsic ID.
05785 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
05786                                 SDValue Op2, SelectionDAG &DAG,
05787                                 SDLoc dl, EVT DestVT = MVT::Other) {
05788   if (DestVT == MVT::Other) DestVT = Op0.getValueType();
05789   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
05790                      DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2);
05791 }
05792 
05793 
05794 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
05795 /// amount.  The result has the specified value type.
05796 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt,
05797                              EVT VT, SelectionDAG &DAG, SDLoc dl) {
05798   // Force LHS/RHS to be the right type.
05799   LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
05800   RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
05801 
05802   int Ops[16];
05803   for (unsigned i = 0; i != 16; ++i)
05804     Ops[i] = i + Amt;
05805   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
05806   return DAG.getNode(ISD::BITCAST, dl, VT, T);
05807 }
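      // (A shuffle mask of {Amt, Amt+1, ..., Amt+15} over the concatenated
      //  LHS:RHS bytes is exactly what vsldoi computes, so the shuffle built
      //  above matches a single vsldoi instruction.)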
05808 
05809 // If this is a case we can't handle, return null and let the default
05810 // expansion code take care of it.  If we CAN select this case, and if it
05811 // selects to a single instruction, return Op.  Otherwise, if we can codegen
05812 // this case more efficiently than a constant pool load, lower it to the
05813 // sequence of ops that should be used.
05814 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
05815                                              SelectionDAG &DAG) const {
05816   SDLoc dl(Op);
05817   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
05818   assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
05819 
05820   // Check if this is a splat of a constant value.
05821   APInt APSplatBits, APSplatUndef;
05822   unsigned SplatBitSize;
05823   bool HasAnyUndefs;
05824   if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
05825                              HasAnyUndefs, 0, true) || SplatBitSize > 32)
05826     return SDValue();
05827 
05828   unsigned SplatBits = APSplatBits.getZExtValue();
05829   unsigned SplatUndef = APSplatUndef.getZExtValue();
05830   unsigned SplatSize = SplatBitSize / 8;
05831 
05832   // First, handle single instruction cases.
05833 
05834   // All zeros?
05835   if (SplatBits == 0) {
05836     // Canonicalize all zero vectors to be v4i32.
05837     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
05838       SDValue Z = DAG.getConstant(0, MVT::i32);
05839       Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z);
05840       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
05841     }
05842     return Op;
05843   }
05844 
05845   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
05846   int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
05847                      (32-SplatBitSize));
05848   if (SextVal >= -16 && SextVal <= 15)
05849     return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
05850 
05851 
05852   // Two instruction sequences.
05853 
05854   // If this value is in the range [-32,30] and is even, use:
05855   //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
05856   // If this value is in the range [17,31] and is odd, use:
05857   //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
05858   // If this value is in the range [-31,-17] and is odd, use:
05859   //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
05860   // Note the last two are three-instruction sequences.
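        // (For example, 30 becomes vspltisw(15) followed by a vector add of
        //  the splat to itself, and 27 becomes vspltisw(11) minus
        //  vspltisw(-16), since 11 - (-16) = 27.)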
05861   if (SextVal >= -32 && SextVal <= 31) {
05862     // To avoid having these optimizations undone by constant folding,
05863     // we convert to a pseudo that will be expanded later into one of
05864     // the above forms.
05865     SDValue Elt = DAG.getConstant(SextVal, MVT::i32);
05866     EVT VT = (SplatSize == 1 ? MVT::v16i8 :
05867               (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
05868     SDValue EltSize = DAG.getConstant(SplatSize, MVT::i32);
05869     SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
05870     if (VT == Op.getValueType())
05871       return RetVal;
05872     else
05873       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
05874   }
05875 
05876   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
05877   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
05878   // for fneg/fabs.
05879   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
05880     // Make an all-ones vector with vspltisw -1:
05881     SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
05882 
05883     // Make the VSLW intrinsic, computing 0x8000_0000.
05884     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
05885                                    OnesV, DAG, dl);
05886 
05887     // xor by OnesV to invert it.
05888     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
05889     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
05890   }
05891 
05892   // The remaining cases assume either big endian element order or
05893   // a splat-size that equates to the element size of the vector
05894   // to be built.  An example that doesn't work for little endian is
05895   // {0, -1, 0, -1, 0, -1, 0, -1} which has a splat size of 32 bits
05896   // and a vector element size of 16 bits.  The code below will
05897   // produce the vector in big endian element order, which for little
05898   // endian is {-1, 0, -1, 0, -1, 0, -1, 0}.
05899 
05900   // For now, just avoid these optimizations in that case.
05901   // FIXME: Develop correct optimizations for LE with mismatched
05902   // splat and element sizes.
05903 
05904   if (Subtarget.isLittleEndian() &&
05905       SplatSize != Op.getValueType().getVectorElementType().getSizeInBits())
05906     return SDValue();
05907 
05908   // Check to see if this is a wide variety of vsplti*, binop self cases.
05909   static const signed char SplatCsts[] = {
05910     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
05911     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
05912   };
05913 
05914   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
05915     // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
05916     // cases which are ambiguous (e.g. formation of 0x8000_0000).
05917     int i = SplatCsts[idx];
05918 
05919     // Figure out what shift amount will be used by altivec if shifted by i in
05920     // this splat size.
05921     unsigned TypeShiftAmt = i & (SplatBitSize-1);
05922 
05923     // vsplti + shl self.
05924     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
05925       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
05926       static const unsigned IIDs[] = { // Intrinsic to use for each size.
05927         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
05928         Intrinsic::ppc_altivec_vslw
05929       };
05930       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
05931       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
05932     }
05933 
05934     // vsplti + srl self.
05935     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
05936       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
05937       static const unsigned IIDs[] = { // Intrinsic to use for each size.
05938         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
05939         Intrinsic::ppc_altivec_vsrw
05940       };
05941       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
05942       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
05943     }
05944 
05945     // vsplti + sra self.  (Needs an arithmetic shift, unlike srl above.)
05946     if (SextVal == (int)(i >> TypeShiftAmt)) {
05947       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
05948       static const unsigned IIDs[] = { // Intrinsic to use for each size.
05949         Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
05950         Intrinsic::ppc_altivec_vsraw
05951       };
05952       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
05953       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
05954     }
05955 
05956     // vsplti + rol self.
05957     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
05958                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
05959       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
05960       static const unsigned IIDs[] = { // Intrinsic to use for each size.
05961         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
05962         Intrinsic::ppc_altivec_vrlw
05963       };
05964       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
05965       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
05966     }
05967 
05968     // t = vsplti c, result = vsldoi t, t, 1
05969     if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
05970       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
05971       return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl);
05972     }
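    // Worked example for the case above (big-endian element order): i = 5
    // with SplatSize == 2 splats the halfword 0x0005; as bytes that is
    // 00 05 00 05 ..., and vsldoi by 1 rotates it to 05 00 05 00 ..., i.e.
    // halfwords of 0x0500 == (5 << 8) | 0 == SextVal.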
05973     // t = vsplti c, result = vsldoi t, t, 2
05974     if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
05975       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
05976       return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl);
05977     }
05978     // t = vsplti c, result = vsldoi t, t, 3
05979     if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
05980       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
05981       return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl);
05982     }
05983   }
05984 
05985   return SDValue();
05986 }
05987 
05988 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
05989 /// the specified operations to build the shuffle.
05990 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
05991                                       SDValue RHS, SelectionDAG &DAG,
05992                                       SDLoc dl) {
05993   unsigned OpNum = (PFEntry >> 26) & 0x0F;
05994   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
05995   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
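  // A sketch of the PFEntry encoding consumed here: bits [31:30] hold the
  // cost, [29:26] the opcode (OpNum), [25:13] the LHS ID, and [12:0] the
  // RHS ID.  Each 13-bit ID packs four source-element digits in base 9,
  // (((a*9+b)*9+c)*9+d), with digit 8 meaning "undef"; e.g. the identity
  // <0,1,2,3> encodes as ((0*9+1)*9+2)*9+3 == 102, matched in OP_COPY below.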
05996 
05997   enum {
05998     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
05999     OP_VMRGHW,
06000     OP_VMRGLW,
06001     OP_VSPLTISW0,
06002     OP_VSPLTISW1,
06003     OP_VSPLTISW2,
06004     OP_VSPLTISW3,
06005     OP_VSLDOI4,
06006     OP_VSLDOI8,
06007     OP_VSLDOI12
06008   };
06009 
06010   if (OpNum == OP_COPY) {
06011     if (LHSID == (1*9+2)*9+3) return LHS;
06012     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
06013     return RHS;
06014   }
06015 
06016   SDValue OpLHS, OpRHS;
06017   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
06018   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
06019 
06020   int ShufIdxs[16];
06021   switch (OpNum) {
06022   default: llvm_unreachable("Unknown i32 permute!");
06023   case OP_VMRGHW:
06024     ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
06025     ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
06026     ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
06027     ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
06028     break;
06029   case OP_VMRGLW:
06030     ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
06031     ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
06032     ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
06033     ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
06034     break;
06035   case OP_VSPLTISW0:
06036     for (unsigned i = 0; i != 16; ++i)
06037       ShufIdxs[i] = (i&3)+0;
06038     break;
06039   case OP_VSPLTISW1:
06040     for (unsigned i = 0; i != 16; ++i)
06041       ShufIdxs[i] = (i&3)+4;
06042     break;
06043   case OP_VSPLTISW2:
06044     for (unsigned i = 0; i != 16; ++i)
06045       ShufIdxs[i] = (i&3)+8;
06046     break;
06047   case OP_VSPLTISW3:
06048     for (unsigned i = 0; i != 16; ++i)
06049       ShufIdxs[i] = (i&3)+12;
06050     break;
06051   case OP_VSLDOI4:
06052     return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
06053   case OP_VSLDOI8:
06054     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
06055   case OP_VSLDOI12:
06056     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
06057   }
06058   EVT VT = OpLHS.getValueType();
06059   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
06060   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
06061   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
06062   return DAG.getNode(ISD::BITCAST, dl, VT, T);
06063 }
06064 
06065 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
06066 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
06067 /// return the code it can be lowered into.  Worst case, it can always be
06068 /// lowered into a vperm.
06069 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
06070                                                SelectionDAG &DAG) const {
06071   SDLoc dl(Op);
06072   SDValue V1 = Op.getOperand(0);
06073   SDValue V2 = Op.getOperand(1);
06074   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
06075   EVT VT = Op.getValueType();
06076   bool isLittleEndian = Subtarget.isLittleEndian();
06077 
06078   // Cases that are handled by instructions that take permute immediates
06079   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
06080   // selected by the instruction selector.
06081   if (V2.getOpcode() == ISD::UNDEF) {
06082     if (PPC::isSplatShuffleMask(SVOp, 1) ||
06083         PPC::isSplatShuffleMask(SVOp, 2) ||
06084         PPC::isSplatShuffleMask(SVOp, 4) ||
06085         PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
06086         PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
06087         PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
06088         PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
06089         PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
06090         PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
06091         PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
06092         PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
06093         PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG)) {
06094       return Op;
06095     }
06096   }
06097 
06098   // Altivec has a variety of "shuffle immediates" that take two vector inputs
06099   // and produce a fixed permutation.  If any of these match, do not lower to
06100   // VPERM.
06101   unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
06102   if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
06103       PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
06104       PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
06105       PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
06106       PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
06107       PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
06108       PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
06109       PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
06110       PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG))
06111     return Op;
06112 
06113   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
06114   // perfect shuffle table to emit an optimal matching sequence.
06115   ArrayRef<int> PermMask = SVOp->getMask();
06116 
06117   unsigned PFIndexes[4];
06118   bool isFourElementShuffle = true;
06119   for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
06120     unsigned EltNo = 8;   // Start out undef.
06121     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
06122       if (PermMask[i*4+j] < 0)
06123         continue;   // Undef, ignore it.
06124 
06125       unsigned ByteSource = PermMask[i*4+j];
06126       if ((ByteSource & 3) != j) {
06127         isFourElementShuffle = false;
06128         break;
06129       }
06130 
06131       if (EltNo == 8) {
06132         EltNo = ByteSource/4;
06133       } else if (EltNo != ByteSource/4) {
06134         isFourElementShuffle = false;
06135         break;
06136       }
06137     }
06138     PFIndexes[i] = EltNo;
06139   }
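  // Worked example: the byte mask <4,5,6,7, 16,17,18,19, 0,1,2,3,
  // 28,29,30,31> passes the checks above (each group of four bytes is a
  // contiguous, word-aligned run from one source word) and yields
  // PFIndexes == {1, 4, 0, 7}.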
06140 
06141   // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
06142   // perfect shuffle table to determine if it is cost effective to do this as
06143   // discrete instructions, or whether we should use a vperm.
06144   // For now, we skip this for little endian until such time as we have a
06145   // little-endian perfect shuffle table.
06146   if (isFourElementShuffle && !isLittleEndian) {
06147     // Compute the index in the perfect shuffle table.
06148     unsigned PFTableIndex =
06149       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
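    // Each PFIndex is in [0, 8] (eight source words plus "undef"), so this
    // base-9 index ranges over all 9^4 == 6561 entries of PerfectShuffleTable.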
06150 
06151     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
06152     unsigned Cost  = (PFEntry >> 30);
06153 
06154     // Determining when to avoid vperm is tricky.  Many things affect the cost
06155     // of vperm, particularly how many times the perm mask needs to be computed.
06156     // For example, if the perm mask can be hoisted out of a loop or is already
06157     // used (perhaps because there are multiple permutes with the same shuffle
06158     // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
06159     // the loop requires an extra register.
06160     //
06161     // As a compromise, we only emit discrete instructions if the shuffle can be
06162     // generated in 3 or fewer operations.  When we have loop information
06163     // available, if this block is within a loop, we should avoid using vperm
06164     // for 3-operation perms and use a constant pool load instead.
06165     if (Cost < 3)
06166       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
06167   }
06168 
06169   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
06170   // vector that will get spilled to the constant pool.
06171   if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
06172 
06173   // The VECTOR_SHUFFLE mask is almost exactly what we want for vperm, except
06174   // that it is in input element units, not in bytes.  Convert now.
06175 
06176   // For little endian, the order of the input vectors is reversed, and
06177   // the permutation mask is complemented with respect to 31.  This is
06178   // necessary to produce proper semantics with the big-endian-biased vperm
06179   // instruction.
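  // Concretely: for big endian, mask byte k selects byte k of the 32-byte
  // concatenation (V1:V2).  Under little-endian numbering that same physical
  // byte is found at position 31-k of (V2:V1), which is why the loop below
  // emits 31 - (SrcElt*BytesPerElement+j) and the final VPERM takes its
  // inputs in reversed order.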
06180   EVT EltVT = V1.getValueType().getVectorElementType();
06181   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
06182 
06183   SmallVector<SDValue, 16> ResultMask;
06184   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
06185     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
06186 
06187     for (unsigned j = 0; j != BytesPerElement; ++j)
06188       if (isLittleEndian)
06189         ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement+j),
06190                                              MVT::i32));
06191       else
06192         ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
06193                                              MVT::i32));
06194   }
06195 
06196   SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
06197                                   ResultMask);
06198   if (isLittleEndian)
06199     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
06200                        V2, V1, VPermMask);
06201   else
06202     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
06203                        V1, V2, VPermMask);
06204 }