ARMISelLowering.cpp

00001 //===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
00002 //
00003 //                     The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 //
00010 // This file defines the interfaces that ARM uses to lower LLVM code into a
00011 // selection DAG.
00012 //
00013 //===----------------------------------------------------------------------===//
00014 
00015 #include "ARMISelLowering.h"
00016 #include "ARMCallingConv.h"
00017 #include "ARMConstantPoolValue.h"
00018 #include "ARMMachineFunctionInfo.h"
00019 #include "ARMPerfectShuffle.h"
00020 #include "ARMSubtarget.h"
00021 #include "ARMTargetMachine.h"
00022 #include "ARMTargetObjectFile.h"
00023 #include "MCTargetDesc/ARMAddressingModes.h"
00024 #include "llvm/ADT/Statistic.h"
00025 #include "llvm/ADT/StringExtras.h"
00026 #include "llvm/CodeGen/CallingConvLower.h"
00027 #include "llvm/CodeGen/IntrinsicLowering.h"
00028 #include "llvm/CodeGen/MachineBasicBlock.h"
00029 #include "llvm/CodeGen/MachineFrameInfo.h"
00030 #include "llvm/CodeGen/MachineFunction.h"
00031 #include "llvm/CodeGen/MachineInstrBuilder.h"
00032 #include "llvm/CodeGen/MachineJumpTableInfo.h"
00033 #include "llvm/CodeGen/MachineModuleInfo.h"
00034 #include "llvm/CodeGen/MachineRegisterInfo.h"
00035 #include "llvm/CodeGen/SelectionDAG.h"
00036 #include "llvm/IR/CallingConv.h"
00037 #include "llvm/IR/Constants.h"
00038 #include "llvm/IR/Function.h"
00039 #include "llvm/IR/GlobalValue.h"
00040 #include "llvm/IR/IRBuilder.h"
00041 #include "llvm/IR/Instruction.h"
00042 #include "llvm/IR/Instructions.h"
00043 #include "llvm/IR/Intrinsics.h"
00044 #include "llvm/IR/Type.h"
00045 #include "llvm/MC/MCSectionMachO.h"
00046 #include "llvm/Support/CommandLine.h"
00047 #include "llvm/Support/Debug.h"
00048 #include "llvm/Support/ErrorHandling.h"
00049 #include "llvm/Support/MathExtras.h"
00050 #include "llvm/Target/TargetOptions.h"
00051 #include <utility>
00052 using namespace llvm;
00053 
00054 #define DEBUG_TYPE "arm-isel"
00055 
00056 STATISTIC(NumTailCalls, "Number of tail calls");
00057 STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
00058 STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
00059 
00060 cl::opt<bool>
00061 EnableARMLongCalls("arm-long-calls", cl::Hidden,
00062   cl::desc("Generate calls via indirect call instructions"),
00063   cl::init(false));
00064 
00065 static cl::opt<bool>
00066 ARMInterworking("arm-interworking", cl::Hidden,
00067   cl::desc("Enable / disable ARM interworking (for debugging only)"),
00068   cl::init(true));
00069 
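// ARMCCState is a CCState that additionally records whether calling-convention
// analysis is being performed for a call site or for prologue (formal argument)
// generation, storing the context in CCState's CallOrPrologue field.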
00070 namespace {
00071   class ARMCCState : public CCState {
00072   public:
00073     ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
00074                SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
00075                ParmContext PC)
00076         : CCState(CC, isVarArg, MF, locs, C) {
00077       assert(((PC == Call) || (PC == Prologue)) &&
00078              "ARMCCState users must specify whether their context is call "
00079              "or prologue generation.");
00080       CallOrPrologue = PC;
00081     }
00082   };
00083 }
00084 
00085 // The APCS parameter registers.
00086 static const MCPhysReg GPRArgRegs[] = {
00087   ARM::R0, ARM::R1, ARM::R2, ARM::R3
00088 };
00089 
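// addTypeForNEON registers the common operation actions for a NEON vector type:
// loads/stores and bitwise operations are promoted to the given wider types when
// they differ from VT, lane insert/extract, shuffles and shifts get custom
// lowering, and operations NEON cannot perform (selects, division, remainder)
// are expanded.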
00090 void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
00091                                        MVT PromotedBitwiseVT) {
00092   if (VT != PromotedLdStVT) {
00093     setOperationAction(ISD::LOAD, VT, Promote);
00094     AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);
00095 
00096     setOperationAction(ISD::STORE, VT, Promote);
00097     AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
00098   }
00099 
00100   MVT ElemTy = VT.getVectorElementType();
00101   if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
00102     setOperationAction(ISD::SETCC, VT, Custom);
00103   setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
00104   setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
00105   if (ElemTy == MVT::i32) {
00106     setOperationAction(ISD::SINT_TO_FP, VT, Custom);
00107     setOperationAction(ISD::UINT_TO_FP, VT, Custom);
00108     setOperationAction(ISD::FP_TO_SINT, VT, Custom);
00109     setOperationAction(ISD::FP_TO_UINT, VT, Custom);
00110   } else {
00111     setOperationAction(ISD::SINT_TO_FP, VT, Expand);
00112     setOperationAction(ISD::UINT_TO_FP, VT, Expand);
00113     setOperationAction(ISD::FP_TO_SINT, VT, Expand);
00114     setOperationAction(ISD::FP_TO_UINT, VT, Expand);
00115   }
00116   setOperationAction(ISD::BUILD_VECTOR,      VT, Custom);
00117   setOperationAction(ISD::VECTOR_SHUFFLE,    VT, Custom);
00118   setOperationAction(ISD::CONCAT_VECTORS,    VT, Legal);
00119   setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
00120   setOperationAction(ISD::SELECT,            VT, Expand);
00121   setOperationAction(ISD::SELECT_CC,         VT, Expand);
00122   setOperationAction(ISD::VSELECT,           VT, Expand);
00123   setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
00124   if (VT.isInteger()) {
00125     setOperationAction(ISD::SHL, VT, Custom);
00126     setOperationAction(ISD::SRA, VT, Custom);
00127     setOperationAction(ISD::SRL, VT, Custom);
00128   }
00129 
00130   // Promote all bit-wise operations.
00131   if (VT.isInteger() && VT != PromotedBitwiseVT) {
00132     setOperationAction(ISD::AND, VT, Promote);
00133     AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
00134     setOperationAction(ISD::OR,  VT, Promote);
00135     AddPromotedToType (ISD::OR,  VT, PromotedBitwiseVT);
00136     setOperationAction(ISD::XOR, VT, Promote);
00137     AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
00138   }
00139 
00140   // Neon does not support vector divide/remainder operations.
00141   setOperationAction(ISD::SDIV, VT, Expand);
00142   setOperationAction(ISD::UDIV, VT, Expand);
00143   setOperationAction(ISD::FDIV, VT, Expand);
00144   setOperationAction(ISD::SREM, VT, Expand);
00145   setOperationAction(ISD::UREM, VT, Expand);
00146   setOperationAction(ISD::FREM, VT, Expand);
00147 }
00148 
00149 void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
00150   addRegisterClass(VT, &ARM::DPRRegClass);
00151   addTypeForNEON(VT, MVT::f64, MVT::v2i32);
00152 }
00153 
00154 void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
00155   addRegisterClass(VT, &ARM::DPairRegClass);
00156   addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
00157 }
00158 
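// Select the lowering object file implementation from the target triple:
// Mach-O and Windows (COFF) use the generic implementations, everything else
// uses the ARM ELF variant.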
00159 static TargetLoweringObjectFile *createTLOF(const Triple &TT) {
00160   if (TT.isOSBinFormatMachO())
00161     return new TargetLoweringObjectFileMachO();
00162   if (TT.isOSWindows())
00163     return new TargetLoweringObjectFileCOFF();
00164   return new ARMElfTargetObjectFile();
00165 }
00166 
00167 ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM)
00168     : TargetLowering(TM, createTLOF(Triple(TM.getTargetTriple()))) {
00169   Subtarget = &TM.getSubtarget<ARMSubtarget>();
00170   RegInfo = TM.getSubtargetImpl()->getRegisterInfo();
00171   Itins = TM.getSubtargetImpl()->getInstrItineraryData();
00172 
00173   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
00174 
00175   if (Subtarget->isTargetMachO()) {
00176     // Uses VFP for Thumb libfuncs if available.
00177     if (Subtarget->isThumb() && Subtarget->hasVFP2() &&
00178         Subtarget->hasARMOps() && !TM.Options.UseSoftFloat) {
00179       // Single-precision floating-point arithmetic.
00180       setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
00181       setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
00182       setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
00183       setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");
00184 
00185       // Double-precision floating-point arithmetic.
00186       setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
00187       setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
00188       setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
00189       setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");
00190 
00191       // Single-precision comparisons.
00192       setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
00193       setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
00194       setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
00195       setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
00196       setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
00197       setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
00198       setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
00199       setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");
00200 
00201       setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
00202       setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
00203       setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
00204       setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
00205       setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
00206       setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
00207       setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
00208       setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);
00209 
00210       // Double-precision comparisons.
00211       setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
00212       setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
00213       setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
00214       setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
00215       setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
00216       setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
00217       setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
00218       setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");
00219 
00220       setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
00221       setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
00222       setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
00223       setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
00224       setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
00225       setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
00226       setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
00227       setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);
00228 
00229       // Floating-point to integer conversions.
00230       // i64 conversions are done via library routines even when generating VFP
00231       // instructions, so use the same ones.
00232       setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
00233       setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
00234       setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
00235       setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");
00236 
00237       // Conversions between floating types.
00238       setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
00239       setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");
00240 
00241       // Integer to floating-point conversions.
00242       // i64 conversions are done via library routines even when generating VFP
00243       // instructions, so use the same ones.
00244       // FIXME: There appears to be some naming inconsistency in ARM libgcc:
00245       // e.g., __floatunsidf vs. __floatunssidfvfp.
00246       setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
00247       setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
00248       setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
00249       setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
00250     }
00251   }
00252 
00253   // These libcalls are not available on 32-bit targets.
00254   setLibcallName(RTLIB::SHL_I128, nullptr);
00255   setLibcallName(RTLIB::SRL_I128, nullptr);
00256   setLibcallName(RTLIB::SRA_I128, nullptr);
00257 
00258   if (Subtarget->isAAPCS_ABI() && !Subtarget->isTargetMachO() &&
00259       !Subtarget->isTargetWindows()) {
00260     static const struct {
00261       const RTLIB::Libcall Op;
00262       const char * const Name;
00263       const CallingConv::ID CC;
00264       const ISD::CondCode Cond;
00265     } LibraryCalls[] = {
00266       // Double-precision floating-point arithmetic helper functions
00267       // RTABI chapter 4.1.2, Table 2
00268       { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00269       { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00270       { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00271       { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00272 
00273       // Double-precision floating-point comparison helper functions
00274       // RTABI chapter 4.1.2, Table 3
00275       { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
00276       { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
00277       { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
00278       { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
00279       { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
00280       { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
00281       { RTLIB::UO_F64,  "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
00282       { RTLIB::O_F64,   "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
00283 
00284       // Single-precision floating-point arithmetic helper functions
00285       // RTABI chapter 4.1.2, Table 4
00286       { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00287       { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00288       { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00289       { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00290 
00291       // Single-precision floating-point comparison helper functions
00292       // RTABI chapter 4.1.2, Table 5
00293       { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
00294       { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
00295       { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
00296       { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
00297       { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
00298       { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
00299       { RTLIB::UO_F32,  "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
00300       { RTLIB::O_F32,   "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
00301 
00302       // Floating-point to integer conversions.
00303       // RTABI chapter 4.1.2, Table 6
00304       { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00305       { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00306       { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00307       { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00308       { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00309       { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00310       { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00311       { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00312 
00313       // Conversions between floating types.
00314       // RTABI chapter 4.1.2, Table 7
00315       { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00316       { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00317       { RTLIB::FPEXT_F32_F64,   "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00318 
00319       // Integer to floating-point conversions.
00320       // RTABI chapter 4.1.2, Table 8
00321       { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00322       { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00323       { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00324       { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00325       { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00326       { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00327       { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00328       { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00329 
00330       // Long long helper functions
00331       // RTABI chapter 4.2, Table 9
00332       { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00333       { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00334       { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00335       { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00336 
00337       // Integer division functions
00338       // RTABI chapter 4.3.1
00339       { RTLIB::SDIV_I8,  "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00340       { RTLIB::SDIV_I16, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00341       { RTLIB::SDIV_I32, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00342       { RTLIB::SDIV_I64, "__aeabi_ldivmod",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00343       { RTLIB::UDIV_I8,  "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00344       { RTLIB::UDIV_I16, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00345       { RTLIB::UDIV_I32, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00346       { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00347 
00348       // Memory operations
00349       // RTABI chapter 4.3.4
00350       { RTLIB::MEMCPY,  "__aeabi_memcpy",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00351       { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00352       { RTLIB::MEMSET,  "__aeabi_memset",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00353     };
00354 
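    // Register every entry of the table above: install the AEABI helper name
    // and calling convention, and for the comparison helpers also the condition
    // code used to interpret the helper's integer return value.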
00355     for (const auto &LC : LibraryCalls) {
00356       setLibcallName(LC.Op, LC.Name);
00357       setLibcallCallingConv(LC.Op, LC.CC);
00358       if (LC.Cond != ISD::SETCC_INVALID)
00359         setCmpLibcallCC(LC.Op, LC.Cond);
00360     }
00361   }
00362 
00363   if (Subtarget->isTargetWindows()) {
00364     static const struct {
00365       const RTLIB::Libcall Op;
00366       const char * const Name;
00367       const CallingConv::ID CC;
00368     } LibraryCalls[] = {
00369       { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
00370       { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
00371       { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
00372       { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
00373       { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
00374       { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
00375       { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
00376       { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
00377     };
00378 
00379     for (const auto &LC : LibraryCalls) {
00380       setLibcallName(LC.Op, LC.Name);
00381       setLibcallCallingConv(LC.Op, LC.CC);
00382     }
00383   }
00384 
00385   // Use divmod compiler-rt calls for iOS 5.0 and later.
00386   if (Subtarget->getTargetTriple().isiOS() &&
00387       !Subtarget->getTargetTriple().isOSVersionLT(5, 0)) {
00388     setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
00389     setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
00390   }
00391 
00392   // The half <-> float conversion functions are always soft-float, but are
00393   // needed for some targets which use a hard-float calling convention by
00394   // default.
00395   if (Subtarget->isAAPCS_ABI()) {
00396     setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
00397     setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
00398     setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
00399   } else {
00400     setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
00401     setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
00402     setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
00403   }
00404 
00405   if (Subtarget->isThumb1Only())
00406     addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
00407   else
00408     addRegisterClass(MVT::i32, &ARM::GPRRegClass);
00409   if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
00410       !Subtarget->isThumb1Only()) {
00411     addRegisterClass(MVT::f32, &ARM::SPRRegClass);
00412     addRegisterClass(MVT::f64, &ARM::DPRRegClass);
00413   }
00414 
00415   for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
00416        VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
00417     for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
00418          InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
00419       setTruncStoreAction((MVT::SimpleValueType)VT,
00420                           (MVT::SimpleValueType)InnerVT, Expand);
00421     setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
00422     setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
00423     setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand);
00424 
00425     setOperationAction(ISD::MULHS, (MVT::SimpleValueType)VT, Expand);
00426     setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
00427     setOperationAction(ISD::MULHU, (MVT::SimpleValueType)VT, Expand);
00428     setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
00429 
00430     setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand);
00431   }
00432 
00433   setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
00434   setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
00435 
00436   if (Subtarget->hasNEON()) {
00437     addDRTypeForNEON(MVT::v2f32);
00438     addDRTypeForNEON(MVT::v8i8);
00439     addDRTypeForNEON(MVT::v4i16);
00440     addDRTypeForNEON(MVT::v2i32);
00441     addDRTypeForNEON(MVT::v1i64);
00442 
00443     addQRTypeForNEON(MVT::v4f32);
00444     addQRTypeForNEON(MVT::v2f64);
00445     addQRTypeForNEON(MVT::v16i8);
00446     addQRTypeForNEON(MVT::v8i16);
00447     addQRTypeForNEON(MVT::v4i32);
00448     addQRTypeForNEON(MVT::v2i64);
00449 
00450     // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
00451     // neither NEON nor VFP supports any arithmetic operations on it.
00452     // The same applies to v4f32, though keep in mind that vadd, vsub and vmul
00453     // are natively supported for v4f32.
00454     setOperationAction(ISD::FADD, MVT::v2f64, Expand);
00455     setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
00456     setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
00457     // FIXME: Code duplication: FDIV and FREM are expanded always, see
00458     // ARMTargetLowering::addTypeForNEON method for details.
00459     setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
00460     setOperationAction(ISD::FREM, MVT::v2f64, Expand);
00461     // FIXME: Create unittest.
00462     // In other words, find a case where "copysign" appears in the DAG with
00463     // vector operands.
00464     setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
00465     // FIXME: Code duplication: SETCC has custom operation action, see
00466     // ARMTargetLowering::addTypeForNEON method for details.
00467     setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
00468     // FIXME: Create unittest for FNEG and for FABS.
00469     setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
00470     setOperationAction(ISD::FABS, MVT::v2f64, Expand);
00471     setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
00472     setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
00473     setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
00474     setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
00475     setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
00476     setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
00477     setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
00478     setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
00479     setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
00480     setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
00481     // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
00482     setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
00483     setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
00484     setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
00485     setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
00486     setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
00487     setOperationAction(ISD::FMA, MVT::v2f64, Expand);
00488 
00489     setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
00490     setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
00491     setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
00492     setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
00493     setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
00494     setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
00495     setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
00496     setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
00497     setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
00498     setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
00499     setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
00500     setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
00501     setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
00502     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
00503     setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);
00504 
00505     // Expand these floating-point operations for v2f32 as well.
00506     setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
00507     setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
00508     setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
00509     setOperationAction(ISD::FPOWI, MVT::v2f32, Expand);
00510     setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
00511     setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
00512     setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
00513     setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
00514     setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
00515     setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
00516     setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
00517     setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
00518     setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
00519     setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
00520     setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);
00521 
00522     // Neon does not support some operations on v1i64 and v2i64 types.
00523     setOperationAction(ISD::MUL, MVT::v1i64, Expand);
00524     // Custom handling for some quad-vector types to detect VMULL.
00525     setOperationAction(ISD::MUL, MVT::v8i16, Custom);
00526     setOperationAction(ISD::MUL, MVT::v4i32, Custom);
00527     setOperationAction(ISD::MUL, MVT::v2i64, Custom);
00528     // Custom handling for some vector types to avoid expensive expansions
00529     setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
00530     setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
00531     setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
00532     setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
00533     setOperationAction(ISD::SETCC, MVT::v1i64, Expand);
00534     setOperationAction(ISD::SETCC, MVT::v2i64, Expand);
00535     // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
00536     // a destination type that is wider than the source, nor does it have
00537     // a FP_TO_[SU]INT instruction with a narrower destination than the
00538     // source.
00539     setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
00540     setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
00541     setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
00542     setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
00543 
00544     setOperationAction(ISD::FP_ROUND,   MVT::v2f32, Expand);
00545     setOperationAction(ISD::FP_EXTEND,  MVT::v2f64, Expand);
00546 
00547     // NEON does not have single instruction CTPOP for vectors with element
00548     // types wider than 8-bits.  However, custom lowering can leverage the
00549     // v8i8/v16i8 vcnt instruction.
00550     setOperationAction(ISD::CTPOP,      MVT::v2i32, Custom);
00551     setOperationAction(ISD::CTPOP,      MVT::v4i32, Custom);
00552     setOperationAction(ISD::CTPOP,      MVT::v4i16, Custom);
00553     setOperationAction(ISD::CTPOP,      MVT::v8i16, Custom);
00554 
00555     // NEON only has FMA instructions as of VFP4.
00556     if (!Subtarget->hasVFP4()) {
00557       setOperationAction(ISD::FMA, MVT::v2f32, Expand);
00558       setOperationAction(ISD::FMA, MVT::v4f32, Expand);
00559     }
00560 
00561     setTargetDAGCombine(ISD::INTRINSIC_VOID);
00562     setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
00563     setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
00564     setTargetDAGCombine(ISD::SHL);
00565     setTargetDAGCombine(ISD::SRL);
00566     setTargetDAGCombine(ISD::SRA);
00567     setTargetDAGCombine(ISD::SIGN_EXTEND);
00568     setTargetDAGCombine(ISD::ZERO_EXTEND);
00569     setTargetDAGCombine(ISD::ANY_EXTEND);
00570     setTargetDAGCombine(ISD::SELECT_CC);
00571     setTargetDAGCombine(ISD::BUILD_VECTOR);
00572     setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
00573     setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
00574     setTargetDAGCombine(ISD::STORE);
00575     setTargetDAGCombine(ISD::FP_TO_SINT);
00576     setTargetDAGCombine(ISD::FP_TO_UINT);
00577     setTargetDAGCombine(ISD::FDIV);
00578 
00579     // It is legal to extload from v4i8 to v4i16 or v4i32.
00580     MVT Tys[6] = {MVT::v8i8, MVT::v4i8, MVT::v2i8,
00581                   MVT::v4i16, MVT::v2i16,
00582                   MVT::v2i32};
00583     for (unsigned i = 0; i < 6; ++i) {
00584       setLoadExtAction(ISD::EXTLOAD, Tys[i], Legal);
00585       setLoadExtAction(ISD::ZEXTLOAD, Tys[i], Legal);
00586       setLoadExtAction(ISD::SEXTLOAD, Tys[i], Legal);
00587     }
00588   }
00589 
00590   // ARM and Thumb2 support UMLAL/SMLAL.
00591   if (!Subtarget->isThumb1Only())
00592     setTargetDAGCombine(ISD::ADDC);
00593 
00594   if (Subtarget->isFPOnlySP()) {
00595     // When targeting a floating-point unit with only single-precision
00596     // operations, f64 is legal for the few double-precision instructions
00597     // which are present. However, no double-precision operations other than
00598     // moves, loads and stores are provided by the hardware.
00599     setOperationAction(ISD::FADD,       MVT::f64, Expand);
00600     setOperationAction(ISD::FSUB,       MVT::f64, Expand);
00601     setOperationAction(ISD::FMUL,       MVT::f64, Expand);
00602     setOperationAction(ISD::FMA,        MVT::f64, Expand);
00603     setOperationAction(ISD::FDIV,       MVT::f64, Expand);
00604     setOperationAction(ISD::FREM,       MVT::f64, Expand);
00605     setOperationAction(ISD::FCOPYSIGN,  MVT::f64, Expand);
00606     setOperationAction(ISD::FGETSIGN,   MVT::f64, Expand);
00607     setOperationAction(ISD::FNEG,       MVT::f64, Expand);
00608     setOperationAction(ISD::FABS,       MVT::f64, Expand);
00609     setOperationAction(ISD::FSQRT,      MVT::f64, Expand);
00610     setOperationAction(ISD::FSIN,       MVT::f64, Expand);
00611     setOperationAction(ISD::FCOS,       MVT::f64, Expand);
00612     setOperationAction(ISD::FPOWI,      MVT::f64, Expand);
00613     setOperationAction(ISD::FPOW,       MVT::f64, Expand);
00614     setOperationAction(ISD::FLOG,       MVT::f64, Expand);
00615     setOperationAction(ISD::FLOG2,      MVT::f64, Expand);
00616     setOperationAction(ISD::FLOG10,     MVT::f64, Expand);
00617     setOperationAction(ISD::FEXP,       MVT::f64, Expand);
00618     setOperationAction(ISD::FEXP2,      MVT::f64, Expand);
00619     setOperationAction(ISD::FCEIL,      MVT::f64, Expand);
00620     setOperationAction(ISD::FTRUNC,     MVT::f64, Expand);
00621     setOperationAction(ISD::FRINT,      MVT::f64, Expand);
00622     setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
00623     setOperationAction(ISD::FFLOOR,     MVT::f64, Expand);
00624     setOperationAction(ISD::FP_ROUND,   MVT::f32, Custom);
00625     setOperationAction(ISD::FP_EXTEND,  MVT::f64, Custom);
00626   }
00627 
00628   computeRegisterProperties();
00629 
00630   // ARM does not have floating-point extending loads.
00631   setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
00632   setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);
00633 
00634   // ... or truncating stores
00635   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
00636   setTruncStoreAction(MVT::f32, MVT::f16, Expand);
00637   setTruncStoreAction(MVT::f64, MVT::f16, Expand);
00638 
00639   // ARM does not have i1 sign extending load.
00640   setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
00641 
00642   // ARM supports all 4 flavors of integer indexed load / store.
00643   if (!Subtarget->isThumb1Only()) {
00644     for (unsigned im = (unsigned)ISD::PRE_INC;
00645          im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
00646       setIndexedLoadAction(im,  MVT::i1,  Legal);
00647       setIndexedLoadAction(im,  MVT::i8,  Legal);
00648       setIndexedLoadAction(im,  MVT::i16, Legal);
00649       setIndexedLoadAction(im,  MVT::i32, Legal);
00650       setIndexedStoreAction(im, MVT::i1,  Legal);
00651       setIndexedStoreAction(im, MVT::i8,  Legal);
00652       setIndexedStoreAction(im, MVT::i16, Legal);
00653       setIndexedStoreAction(im, MVT::i32, Legal);
00654     }
00655   }
00656 
00657   setOperationAction(ISD::SADDO, MVT::i32, Custom);
00658   setOperationAction(ISD::UADDO, MVT::i32, Custom);
00659   setOperationAction(ISD::SSUBO, MVT::i32, Custom);
00660   setOperationAction(ISD::USUBO, MVT::i32, Custom);
00661 
00662   // i64 operation support.
00663   setOperationAction(ISD::MUL,     MVT::i64, Expand);
00664   setOperationAction(ISD::MULHU,   MVT::i32, Expand);
00665   if (Subtarget->isThumb1Only()) {
00666     setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
00667     setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
00668   }
00669   if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
00670       || (Subtarget->isThumb2() && !Subtarget->hasThumb2DSP()))
00671     setOperationAction(ISD::MULHS, MVT::i32, Expand);
00672 
00673   setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
00674   setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
00675   setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
00676   setOperationAction(ISD::SRL,       MVT::i64, Custom);
00677   setOperationAction(ISD::SRA,       MVT::i64, Custom);
00678 
00679   if (!Subtarget->isThumb1Only()) {
00680     // FIXME: We should do this for Thumb1 as well.
00681     setOperationAction(ISD::ADDC,    MVT::i32, Custom);
00682     setOperationAction(ISD::ADDE,    MVT::i32, Custom);
00683     setOperationAction(ISD::SUBC,    MVT::i32, Custom);
00684     setOperationAction(ISD::SUBE,    MVT::i32, Custom);
00685   }
00686 
00687   // ARM does not have ROTL.
00688   setOperationAction(ISD::ROTL,  MVT::i32, Expand);
00689   setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
00690   setOperationAction(ISD::CTPOP, MVT::i32, Expand);
00691   if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
00692     setOperationAction(ISD::CTLZ, MVT::i32, Expand);
00693 
00694   // These just redirect to CTTZ and CTLZ on ARM.
00695   setOperationAction(ISD::CTTZ_ZERO_UNDEF  , MVT::i32  , Expand);
00696   setOperationAction(ISD::CTLZ_ZERO_UNDEF  , MVT::i32  , Expand);
00697 
00698   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
00699 
00700   // BSWAP is only available on ARMv6 and later.
00701   if (!Subtarget->hasV6Ops())
00702     setOperationAction(ISD::BSWAP, MVT::i32, Expand);
00703 
00704   if (!(Subtarget->hasDivide() && Subtarget->isThumb2()) &&
00705       !(Subtarget->hasDivideInARMMode() && !Subtarget->isThumb())) {
00706     // These are expanded into libcalls if the cpu doesn't have HW divider.
00707     setOperationAction(ISD::SDIV,  MVT::i32, Expand);
00708     setOperationAction(ISD::UDIV,  MVT::i32, Expand);
00709   }
00710 
00711   // FIXME: Also set divmod for SREM on EABI
00712   setOperationAction(ISD::SREM,  MVT::i32, Expand);
00713   setOperationAction(ISD::UREM,  MVT::i32, Expand);
00714   // Register based DivRem for AEABI (RTABI 4.2)
00715   if (Subtarget->isTargetAEABI()) {
00716     setLibcallName(RTLIB::SDIVREM_I8,  "__aeabi_idivmod");
00717     setLibcallName(RTLIB::SDIVREM_I16, "__aeabi_idivmod");
00718     setLibcallName(RTLIB::SDIVREM_I32, "__aeabi_idivmod");
00719     setLibcallName(RTLIB::SDIVREM_I64, "__aeabi_ldivmod");
00720     setLibcallName(RTLIB::UDIVREM_I8,  "__aeabi_uidivmod");
00721     setLibcallName(RTLIB::UDIVREM_I16, "__aeabi_uidivmod");
00722     setLibcallName(RTLIB::UDIVREM_I32, "__aeabi_uidivmod");
00723     setLibcallName(RTLIB::UDIVREM_I64, "__aeabi_uldivmod");
00724 
00725     setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::ARM_AAPCS);
00726     setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::ARM_AAPCS);
00727     setLibcallCallingConv(RTLIB::SDIVREM_I32, CallingConv::ARM_AAPCS);
00728     setLibcallCallingConv(RTLIB::SDIVREM_I64, CallingConv::ARM_AAPCS);
00729     setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::ARM_AAPCS);
00730     setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::ARM_AAPCS);
00731     setLibcallCallingConv(RTLIB::UDIVREM_I32, CallingConv::ARM_AAPCS);
00732     setLibcallCallingConv(RTLIB::UDIVREM_I64, CallingConv::ARM_AAPCS);
00733 
00734     setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
00735     setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
00736   } else {
00737     setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
00738     setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
00739   }
00740 
00741   setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
00742   setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
00743   setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
00744   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
00745   setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
00746 
00747   setOperationAction(ISD::TRAP, MVT::Other, Legal);
00748 
00749   // Use the default implementation.
00750   setOperationAction(ISD::VASTART,            MVT::Other, Custom);
00751   setOperationAction(ISD::VAARG,              MVT::Other, Expand);
00752   setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
00753   setOperationAction(ISD::VAEND,              MVT::Other, Expand);
00754   setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
00755   setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
00756 
00757   if (!Subtarget->isTargetMachO()) {
00758     // Non-MachO platforms may return values in these registers via the
00759     // personality function.
00760     setExceptionPointerRegister(ARM::R0);
00761     setExceptionSelectorRegister(ARM::R1);
00762   }
00763 
00764   if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment())
00765     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
00766   else
00767     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
00768 
00769   // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
00770   // the default expansion. If we are targeting a single-threaded system,
00771   // mark them all for expansion so we can lower them later into their
00772   // non-atomic form.
00773   if (TM.Options.ThreadModel == ThreadModel::Single)
00774     setOperationAction(ISD::ATOMIC_FENCE,   MVT::Other, Expand);
00775   else if (Subtarget->hasAnyDataBarrier() && !Subtarget->isThumb1Only()) {
00776     // ATOMIC_FENCE needs custom lowering; the others should have been expanded
00777     // to ldrex/strex loops already.
00778     setOperationAction(ISD::ATOMIC_FENCE,     MVT::Other, Custom);
00779 
00780     // On v8, we have particularly efficient implementations of atomic fences
00781     // if they can be combined with nearby atomic loads and stores.
00782     if (!Subtarget->hasV8Ops()) {
00783       // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
00784       setInsertFencesForAtomic(true);
00785     }
00786   } else {
00787     // If there's anything we can use as a barrier, go through custom lowering
00788     // for ATOMIC_FENCE.
00789     setOperationAction(ISD::ATOMIC_FENCE,   MVT::Other,
00790                        Subtarget->hasAnyDataBarrier() ? Custom : Expand);
00791 
00792     // Set them all for expansion, which will force libcalls.
00793     setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Expand);
00794     setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Expand);
00795     setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Expand);
00796     setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Expand);
00797     setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Expand);
00798     setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Expand);
00799     setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Expand);
00800     setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
00801     setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
00802     setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
00803     setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
00804     setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
00805     // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
00806     // Unordered/Monotonic case.
00807     setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
00808     setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
00809   }
00810 
00811   setOperationAction(ISD::PREFETCH,         MVT::Other, Custom);
00812 
00813   // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
00814   if (!Subtarget->hasV6Ops()) {
00815     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
00816     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
00817   }
00818   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
00819 
00820   if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
00821       !Subtarget->isThumb1Only()) {
00822     // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
00823     // iff target supports vfp2.
00824     setOperationAction(ISD::BITCAST, MVT::i64, Custom);
00825     setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
00826   }
00827 
00828   // We want to custom lower some of our intrinsics.
00829   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
00830   if (Subtarget->isTargetDarwin()) {
00831     setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
00832     setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
00833     setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
00834   }
00835 
00836   setOperationAction(ISD::SETCC,     MVT::i32, Expand);
00837   setOperationAction(ISD::SETCC,     MVT::f32, Expand);
00838   setOperationAction(ISD::SETCC,     MVT::f64, Expand);
00839   setOperationAction(ISD::SELECT,    MVT::i32, Custom);
00840   setOperationAction(ISD::SELECT,    MVT::f32, Custom);
00841   setOperationAction(ISD::SELECT,    MVT::f64, Custom);
00842   setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
00843   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
00844   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
00845 
00846   setOperationAction(ISD::BRCOND,    MVT::Other, Expand);
00847   setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
00848   setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
00849   setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
00850   setOperationAction(ISD::BR_JT,     MVT::Other, Custom);
00851 
00852   // We don't support sin/cos/fmod/copysign/pow
00853   setOperationAction(ISD::FSIN,      MVT::f64, Expand);
00854   setOperationAction(ISD::FSIN,      MVT::f32, Expand);
00855   setOperationAction(ISD::FCOS,      MVT::f32, Expand);
00856   setOperationAction(ISD::FCOS,      MVT::f64, Expand);
00857   setOperationAction(ISD::FSINCOS,   MVT::f64, Expand);
00858   setOperationAction(ISD::FSINCOS,   MVT::f32, Expand);
00859   setOperationAction(ISD::FREM,      MVT::f64, Expand);
00860   setOperationAction(ISD::FREM,      MVT::f32, Expand);
00861   if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
00862       !Subtarget->isThumb1Only()) {
00863     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
00864     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
00865   }
00866   setOperationAction(ISD::FPOW,      MVT::f64, Expand);
00867   setOperationAction(ISD::FPOW,      MVT::f32, Expand);
00868 
00869   if (!Subtarget->hasVFP4()) {
00870     setOperationAction(ISD::FMA, MVT::f64, Expand);
00871     setOperationAction(ISD::FMA, MVT::f32, Expand);
00872   }
00873 
00874   // Various VFP goodness
00875   if (!TM.Options.UseSoftFloat && !Subtarget->isThumb1Only()) {
00876     // int <-> fp are custom expanded into bit_convert + ARMISD ops.
00877     if (Subtarget->hasVFP2()) {
00878       setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
00879       setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
00880       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
00881       setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
00882     }
00883 
00884     // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded.
00885     if (!Subtarget->hasFPARMv8() || Subtarget->isFPOnlySP()) {
00886       setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
00887       setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
00888     }
00889 
00890     // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
00891     if (!Subtarget->hasFP16()) {
00892       setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
00893       setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
00894     }
00895   }
00896 
00897   // Combine sin / cos into one node or libcall if possible.
00898   if (Subtarget->hasSinCos()) {
00899     setLibcallName(RTLIB::SINCOS_F32, "sincosf");
00900     setLibcallName(RTLIB::SINCOS_F64, "sincos");
00901     if (Subtarget->getTargetTriple().isiOS()) {
00902       // For iOS, we don't want the normal expansion of a libcall to
00903       // sincos. We want to issue a libcall to __sincos_stret.
00904       setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
00905       setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
00906     }
00907   }
00908 
00909   // FP-ARMv8 implements a lot of rounding-like FP operations.
00910   if (Subtarget->hasFPARMv8()) {
00911     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
00912     setOperationAction(ISD::FCEIL, MVT::f32, Legal);
00913     setOperationAction(ISD::FROUND, MVT::f32, Legal);
00914     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
00915     setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
00916     setOperationAction(ISD::FRINT, MVT::f32, Legal);
00917     if (!Subtarget->isFPOnlySP()) {
00918       setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
00919       setOperationAction(ISD::FCEIL, MVT::f64, Legal);
00920       setOperationAction(ISD::FROUND, MVT::f64, Legal);
00921       setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
00922       setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
00923       setOperationAction(ISD::FRINT, MVT::f64, Legal);
00924     }
00925   }
00926   // We have target-specific DAG combine patterns for the following nodes:
00927   // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
00928   setTargetDAGCombine(ISD::ADD);
00929   setTargetDAGCombine(ISD::SUB);
00930   setTargetDAGCombine(ISD::MUL);
00931   setTargetDAGCombine(ISD::AND);
00932   setTargetDAGCombine(ISD::OR);
00933   setTargetDAGCombine(ISD::XOR);
00934 
00935   if (Subtarget->hasV6Ops())
00936     setTargetDAGCombine(ISD::SRL);
00937 
00938   setStackPointerRegisterToSaveRestore(ARM::SP);
00939 
00940   if (TM.Options.UseSoftFloat || Subtarget->isThumb1Only() ||
00941       !Subtarget->hasVFP2())
00942     setSchedulingPreference(Sched::RegPressure);
00943   else
00944     setSchedulingPreference(Sched::Hybrid);
00945 
00946   //// temporary - rewrite interface to use type
00947   MaxStoresPerMemset = 8;
00948   MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
00949   MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
00950   MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 4 : 2;
00951   MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
00952   MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 4 : 2;
00953 
00954   // On ARM arguments smaller than 4 bytes are extended, so all arguments
00955   // are at least 4 bytes aligned.
00956   setMinStackArgumentAlignment(4);
00957 
00958   // Prefer likely predicted branches to selects on out-of-order cores.
00959   PredictableSelectIsExpensive = Subtarget->isLikeA9();
00960 
00961   setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
00962 }
00963 
00964 // FIXME: It might make sense to define the representative register class as the
00965 // nearest super-register that has a non-null superset. For example, DPR_VFP2 is
00966 // a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
00967 // SPR's representative would be DPR_VFP2. This should work well if register
00968 // pressure tracking were modified such that a register use would increment the
00969 // pressure of the register class's representative and all of its super
00970 // classes' representatives transitively. We have not implemented this because
00971 // of the difficulty prior to coalescing of modeling operand register classes
00972 // due to the common occurrence of cross class copies and subregister insertions
00973 // and extractions.
00974 std::pair<const TargetRegisterClass*, uint8_t>
00975 ARMTargetLowering::findRepresentativeClass(MVT VT) const{
00976   const TargetRegisterClass *RRC = nullptr;
00977   uint8_t Cost = 1;
00978   switch (VT.SimpleTy) {
00979   default:
00980     return TargetLowering::findRepresentativeClass(VT);
00981   // Use DPR as representative register class for all floating point
00982   // and vector types. Since there are 32 SPR registers and 32 DPR registers,
00983   // the cost is 1 for both f32 and f64.
00984   case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
00985   case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
00986     RRC = &ARM::DPRRegClass;
00987     // When NEON is used for SP, only half of the register file is available
00988     // because operations that define both SP and DP results will be constrained
00989     // to the VFP2 class (D0-D15). We currently model this constraint prior to
00990     // coalescing by double-counting the SP regs. See the FIXME above.
00991     if (Subtarget->useNEONForSinglePrecisionFP())
00992       Cost = 2;
00993     break;
00994   case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
00995   case MVT::v4f32: case MVT::v2f64:
00996     RRC = &ARM::DPRRegClass;
00997     Cost = 2;
00998     break;
00999   case MVT::v4i64:
01000     RRC = &ARM::DPRRegClass;
01001     Cost = 4;
01002     break;
01003   case MVT::v8i64:
01004     RRC = &ARM::DPRRegClass;
01005     Cost = 8;
01006     break;
01007   }
01008   return std::make_pair(RRC, Cost);
01009 }
01010 
01011 const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
01012   switch (Opcode) {
01013   default: return nullptr;
01014   case ARMISD::Wrapper:       return "ARMISD::Wrapper";
01015   case ARMISD::WrapperPIC:    return "ARMISD::WrapperPIC";
01016   case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
01017   case ARMISD::CALL:          return "ARMISD::CALL";
01018   case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
01019   case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
01020   case ARMISD::tCALL:         return "ARMISD::tCALL";
01021   case ARMISD::BRCOND:        return "ARMISD::BRCOND";
01022   case ARMISD::BR_JT:         return "ARMISD::BR_JT";
01023   case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
01024   case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
01025   case ARMISD::INTRET_FLAG:   return "ARMISD::INTRET_FLAG";
01026   case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
01027   case ARMISD::CMP:           return "ARMISD::CMP";
01028   case ARMISD::CMN:           return "ARMISD::CMN";
01029   case ARMISD::CMPZ:          return "ARMISD::CMPZ";
01030   case ARMISD::CMPFP:         return "ARMISD::CMPFP";
01031   case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
01032   case ARMISD::BCC_i64:       return "ARMISD::BCC_i64";
01033   case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
01034 
01035   case ARMISD::CMOV:          return "ARMISD::CMOV";
01036 
01037   case ARMISD::RBIT:          return "ARMISD::RBIT";
01038 
01039   case ARMISD::FTOSI:         return "ARMISD::FTOSI";
01040   case ARMISD::FTOUI:         return "ARMISD::FTOUI";
01041   case ARMISD::SITOF:         return "ARMISD::SITOF";
01042   case ARMISD::UITOF:         return "ARMISD::UITOF";
01043 
01044   case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
01045   case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
01046   case ARMISD::RRX:           return "ARMISD::RRX";
01047 
01048   case ARMISD::ADDC:          return "ARMISD::ADDC";
01049   case ARMISD::ADDE:          return "ARMISD::ADDE";
01050   case ARMISD::SUBC:          return "ARMISD::SUBC";
01051   case ARMISD::SUBE:          return "ARMISD::SUBE";
01052 
01053   case ARMISD::VMOVRRD:       return "ARMISD::VMOVRRD";
01054   case ARMISD::VMOVDRR:       return "ARMISD::VMOVDRR";
01055 
01056   case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
01057   case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP";
01058 
01059   case ARMISD::TC_RETURN:     return "ARMISD::TC_RETURN";
01060 
01061   case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";
01062 
01063   case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";
01064 
01065   case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";
01066 
01067   case ARMISD::PRELOAD:       return "ARMISD::PRELOAD";
01068 
01069   case ARMISD::WIN__CHKSTK:   return "ARMISD::WIN__CHKSTK";
01070 
01071   case ARMISD::VCEQ:          return "ARMISD::VCEQ";
01072   case ARMISD::VCEQZ:         return "ARMISD::VCEQZ";
01073   case ARMISD::VCGE:          return "ARMISD::VCGE";
01074   case ARMISD::VCGEZ:         return "ARMISD::VCGEZ";
01075   case ARMISD::VCLEZ:         return "ARMISD::VCLEZ";
01076   case ARMISD::VCGEU:         return "ARMISD::VCGEU";
01077   case ARMISD::VCGT:          return "ARMISD::VCGT";
01078   case ARMISD::VCGTZ:         return "ARMISD::VCGTZ";
01079   case ARMISD::VCLTZ:         return "ARMISD::VCLTZ";
01080   case ARMISD::VCGTU:         return "ARMISD::VCGTU";
01081   case ARMISD::VTST:          return "ARMISD::VTST";
01082 
01083   case ARMISD::VSHL:          return "ARMISD::VSHL";
01084   case ARMISD::VSHRs:         return "ARMISD::VSHRs";
01085   case ARMISD::VSHRu:         return "ARMISD::VSHRu";
01086   case ARMISD::VRSHRs:        return "ARMISD::VRSHRs";
01087   case ARMISD::VRSHRu:        return "ARMISD::VRSHRu";
01088   case ARMISD::VRSHRN:        return "ARMISD::VRSHRN";
01089   case ARMISD::VQSHLs:        return "ARMISD::VQSHLs";
01090   case ARMISD::VQSHLu:        return "ARMISD::VQSHLu";
01091   case ARMISD::VQSHLsu:       return "ARMISD::VQSHLsu";
01092   case ARMISD::VQSHRNs:       return "ARMISD::VQSHRNs";
01093   case ARMISD::VQSHRNu:       return "ARMISD::VQSHRNu";
01094   case ARMISD::VQSHRNsu:      return "ARMISD::VQSHRNsu";
01095   case ARMISD::VQRSHRNs:      return "ARMISD::VQRSHRNs";
01096   case ARMISD::VQRSHRNu:      return "ARMISD::VQRSHRNu";
01097   case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
01098   case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
01099   case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
01100   case ARMISD::VMOVIMM:       return "ARMISD::VMOVIMM";
01101   case ARMISD::VMVNIMM:       return "ARMISD::VMVNIMM";
01102   case ARMISD::VMOVFPIMM:     return "ARMISD::VMOVFPIMM";
01103   case ARMISD::VDUP:          return "ARMISD::VDUP";
01104   case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
01105   case ARMISD::VEXT:          return "ARMISD::VEXT";
01106   case ARMISD::VREV64:        return "ARMISD::VREV64";
01107   case ARMISD::VREV32:        return "ARMISD::VREV32";
01108   case ARMISD::VREV16:        return "ARMISD::VREV16";
01109   case ARMISD::VZIP:          return "ARMISD::VZIP";
01110   case ARMISD::VUZP:          return "ARMISD::VUZP";
01111   case ARMISD::VTRN:          return "ARMISD::VTRN";
01112   case ARMISD::VTBL1:         return "ARMISD::VTBL1";
01113   case ARMISD::VTBL2:         return "ARMISD::VTBL2";
01114   case ARMISD::VMULLs:        return "ARMISD::VMULLs";
01115   case ARMISD::VMULLu:        return "ARMISD::VMULLu";
01116   case ARMISD::UMLAL:         return "ARMISD::UMLAL";
01117   case ARMISD::SMLAL:         return "ARMISD::SMLAL";
01118   case ARMISD::BUILD_VECTOR:  return "ARMISD::BUILD_VECTOR";
01119   case ARMISD::FMAX:          return "ARMISD::FMAX";
01120   case ARMISD::FMIN:          return "ARMISD::FMIN";
01121   case ARMISD::VMAXNM:        return "ARMISD::VMAXNM";
01122   case ARMISD::VMINNM:        return "ARMISD::VMINNM";
01123   case ARMISD::BFI:           return "ARMISD::BFI";
01124   case ARMISD::VORRIMM:       return "ARMISD::VORRIMM";
01125   case ARMISD::VBICIMM:       return "ARMISD::VBICIMM";
01126   case ARMISD::VBSL:          return "ARMISD::VBSL";
01127   case ARMISD::VLD2DUP:       return "ARMISD::VLD2DUP";
01128   case ARMISD::VLD3DUP:       return "ARMISD::VLD3DUP";
01129   case ARMISD::VLD4DUP:       return "ARMISD::VLD4DUP";
01130   case ARMISD::VLD1_UPD:      return "ARMISD::VLD1_UPD";
01131   case ARMISD::VLD2_UPD:      return "ARMISD::VLD2_UPD";
01132   case ARMISD::VLD3_UPD:      return "ARMISD::VLD3_UPD";
01133   case ARMISD::VLD4_UPD:      return "ARMISD::VLD4_UPD";
01134   case ARMISD::VLD2LN_UPD:    return "ARMISD::VLD2LN_UPD";
01135   case ARMISD::VLD3LN_UPD:    return "ARMISD::VLD3LN_UPD";
01136   case ARMISD::VLD4LN_UPD:    return "ARMISD::VLD4LN_UPD";
01137   case ARMISD::VLD2DUP_UPD:   return "ARMISD::VLD2DUP_UPD";
01138   case ARMISD::VLD3DUP_UPD:   return "ARMISD::VLD3DUP_UPD";
01139   case ARMISD::VLD4DUP_UPD:   return "ARMISD::VLD4DUP_UPD";
01140   case ARMISD::VST1_UPD:      return "ARMISD::VST1_UPD";
01141   case ARMISD::VST2_UPD:      return "ARMISD::VST2_UPD";
01142   case ARMISD::VST3_UPD:      return "ARMISD::VST3_UPD";
01143   case ARMISD::VST4_UPD:      return "ARMISD::VST4_UPD";
01144   case ARMISD::VST2LN_UPD:    return "ARMISD::VST2LN_UPD";
01145   case ARMISD::VST3LN_UPD:    return "ARMISD::VST3LN_UPD";
01146   case ARMISD::VST4LN_UPD:    return "ARMISD::VST4LN_UPD";
01147   }
01148 }
01149 
01150 EVT ARMTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
01151   if (!VT.isVector()) return getPointerTy();
01152   return VT.changeVectorElementTypeToInteger();
01153 }
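// For example, a scalar setcc produces i32 (the pointer type on ARM), while a
// setcc on v4f32 operands produces v4i32, matching the all-ones/all-zeros lane
// masks generated by NEON compare instructions such as VCEQ and VCGT.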
01154 
01155 /// getRegClassFor - Return the register class that should be used for the
01156 /// specified value type.
01157 const TargetRegisterClass *ARMTargetLowering::getRegClassFor(MVT VT) const {
01158   // Map v4i64 to QQ registers but do not make the type legal. Similarly map
01159   // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
01160   // load / store 4 to 8 consecutive D registers.
01161   if (Subtarget->hasNEON()) {
01162     if (VT == MVT::v4i64)
01163       return &ARM::QQPRRegClass;
01164     if (VT == MVT::v8i64)
01165       return &ARM::QQQQPRRegClass;
01166   }
01167   return TargetLowering::getRegClassFor(VT);
01168 }
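// QQPR and QQQQPR are pseudo register classes spanning 4 and 8 consecutive D
// registers respectively; because v4i64 and v8i64 stay illegal, values of
// these types only appear as REG_SEQUENCE operands feeding multi-register
// NEON load/store instructions, never in general computation.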
01169 
01170 // Create a fast isel object.
01171 FastISel *
01172 ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
01173                                   const TargetLibraryInfo *libInfo) const {
01174   return ARM::createFastISel(funcInfo, libInfo);
01175 }
01176 
01177 /// getMaximalGlobalOffset - Returns the maximal possible offset which can
01178 /// be used for loads / stores from the global.
01179 unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
01180   return (Subtarget->isThumb1Only() ? 127 : 4095);
01181 }
01182 
01183 Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
01184   unsigned NumVals = N->getNumValues();
01185   if (!NumVals)
01186     return Sched::RegPressure;
01187 
01188   for (unsigned i = 0; i != NumVals; ++i) {
01189     EVT VT = N->getValueType(i);
01190     if (VT == MVT::Glue || VT == MVT::Other)
01191       continue;
01192     if (VT.isFloatingPoint() || VT.isVector())
01193       return Sched::ILP;
01194   }
01195 
01196   if (!N->isMachineOpcode())
01197     return Sched::RegPressure;
01198 
01199   // Loads are scheduled for latency even if the instruction itinerary
01200   // is not available.
01201   const TargetInstrInfo *TII =
01202       getTargetMachine().getSubtargetImpl()->getInstrInfo();
01203   const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
01204 
01205   if (MCID.getNumDefs() == 0)
01206     return Sched::RegPressure;
01207   if (!Itins->isEmpty() &&
01208       Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
01209     return Sched::ILP;
01210 
01211   return Sched::RegPressure;
01212 }
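// In short: nodes producing floating-point or vector values, and machine nodes
// whose first result takes more than two cycles according to the itinerary,
// are scheduled for ILP; everything else is scheduled to reduce register
// pressure.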
01213 
01214 //===----------------------------------------------------------------------===//
01215 // Lowering Code
01216 //===----------------------------------------------------------------------===//
01217 
01218 /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
01219 static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
01220   switch (CC) {
01221   default: llvm_unreachable("Unknown condition code!");
01222   case ISD::SETNE:  return ARMCC::NE;
01223   case ISD::SETEQ:  return ARMCC::EQ;
01224   case ISD::SETGT:  return ARMCC::GT;
01225   case ISD::SETGE:  return ARMCC::GE;
01226   case ISD::SETLT:  return ARMCC::LT;
01227   case ISD::SETLE:  return ARMCC::LE;
01228   case ISD::SETUGT: return ARMCC::HI;
01229   case ISD::SETUGE: return ARMCC::HS;
01230   case ISD::SETULT: return ARMCC::LO;
01231   case ISD::SETULE: return ARMCC::LS;
01232   }
01233 }
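// For example, the unsigned SETULT maps to LO (carry clear) while the signed
// SETLT maps to LT (N != V); both are evaluated against the flags set by the
// same CMP instruction.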
01234 
01235 /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
01236 static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
01237                         ARMCC::CondCodes &CondCode2) {
01238   CondCode2 = ARMCC::AL;
01239   switch (CC) {
01240   default: llvm_unreachable("Unknown FP condition!");
01241   case ISD::SETEQ:
01242   case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
01243   case ISD::SETGT:
01244   case ISD::SETOGT: CondCode = ARMCC::GT; break;
01245   case ISD::SETGE:
01246   case ISD::SETOGE: CondCode = ARMCC::GE; break;
01247   case ISD::SETOLT: CondCode = ARMCC::MI; break;
01248   case ISD::SETOLE: CondCode = ARMCC::LS; break;
01249   case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
01250   case ISD::SETO:   CondCode = ARMCC::VC; break;
01251   case ISD::SETUO:  CondCode = ARMCC::VS; break;
01252   case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
01253   case ISD::SETUGT: CondCode = ARMCC::HI; break;
01254   case ISD::SETUGE: CondCode = ARMCC::PL; break;
01255   case ISD::SETLT:
01256   case ISD::SETULT: CondCode = ARMCC::LT; break;
01257   case ISD::SETLE:
01258   case ISD::SETULE: CondCode = ARMCC::LE; break;
01259   case ISD::SETNE:
01260   case ISD::SETUNE: CondCode = ARMCC::NE; break;
01261   }
01262 }
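// Some FP predicates have no single ARM condition code, so a second condition
// is returned in CondCode2 and the caller emits two predicated operations.
// For example SETONE ("ordered and not equal") becomes MI followed by GT, and
// SETUEQ ("unordered or equal") becomes EQ followed by VS.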
01263 
01264 //===----------------------------------------------------------------------===//
01265 //                      Calling Convention Implementation
01266 //===----------------------------------------------------------------------===//
01267 
01268 #include "ARMGenCallingConv.inc"
01269 
01270 /// getEffectiveCallingConv - Get the effective calling convention, taking into
01271 /// account the presence of floating point hardware and calling convention
01272 /// limitations, such as support for variadic functions.
01273 CallingConv::ID
01274 ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
01275                                            bool isVarArg) const {
01276   switch (CC) {
01277   default:
01278     llvm_unreachable("Unsupported calling convention");
01279   case CallingConv::ARM_AAPCS:
01280   case CallingConv::ARM_APCS:
01281   case CallingConv::GHC:
01282     return CC;
01283   case CallingConv::ARM_AAPCS_VFP:
01284     return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
01285   case CallingConv::C:
01286     if (!Subtarget->isAAPCS_ABI())
01287       return CallingConv::ARM_APCS;
01288     else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() &&
01289              getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
01290              !isVarArg)
01291       return CallingConv::ARM_AAPCS_VFP;
01292     else
01293       return CallingConv::ARM_AAPCS;
01294   case CallingConv::Fast:
01295     if (!Subtarget->isAAPCS_ABI()) {
01296       if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
01297         return CallingConv::Fast;
01298       return CallingConv::ARM_APCS;
01299     } else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
01300       return CallingConv::ARM_AAPCS_VFP;
01301     else
01302       return CallingConv::ARM_AAPCS;
01303   }
01304 }
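// For example, a non-variadic C call on an AAPCS target with VFP2 and
// FloatABI::Hard is treated as ARM_AAPCS_VFP, so FP arguments travel in
// s/d registers; the same call made variadic falls back to ARM_AAPCS and
// passes everything in core registers or memory.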
01305 
01306 /// CCAssignFnForNode - Selects the correct CCAssignFn for the given
01307 /// CallingConvention.
01308 CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
01309                                                  bool Return,
01310                                                  bool isVarArg) const {
01311   switch (getEffectiveCallingConv(CC, isVarArg)) {
01312   default:
01313     llvm_unreachable("Unsupported calling convention");
01314   case CallingConv::ARM_APCS:
01315     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
01316   case CallingConv::ARM_AAPCS:
01317     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
01318   case CallingConv::ARM_AAPCS_VFP:
01319     return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
01320   case CallingConv::Fast:
01321     return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
01322   case CallingConv::GHC:
01323     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
01324   }
01325 }
01326 
01327 /// LowerCallResult - Lower the result values of a call into the
01328 /// appropriate copies out of appropriate physical registers.
01329 SDValue
01330 ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
01331                                    CallingConv::ID CallConv, bool isVarArg,
01332                                    const SmallVectorImpl<ISD::InputArg> &Ins,
01333                                    SDLoc dl, SelectionDAG &DAG,
01334                                    SmallVectorImpl<SDValue> &InVals,
01335                                    bool isThisReturn, SDValue ThisVal) const {
01336 
01337   // Assign locations to each value returned by this call.
01338   SmallVector<CCValAssign, 16> RVLocs;
01339   ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
01340                     *DAG.getContext(), Call);
01341   CCInfo.AnalyzeCallResult(Ins,
01342                            CCAssignFnForNode(CallConv, /* Return*/ true,
01343                                              isVarArg));
01344 
01345   // Copy all of the result registers out of their specified physreg.
01346   for (unsigned i = 0; i != RVLocs.size(); ++i) {
01347     CCValAssign VA = RVLocs[i];
01348 
01349     // Pass the 'this' value directly from the argument to the return value,
01350     // to avoid register unit interference.
01351     if (i == 0 && isThisReturn) {
01352       assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
01353              "unexpected return calling convention register assignment");
01354       InVals.push_back(ThisVal);
01355       continue;
01356     }
01357 
01358     SDValue Val;
01359     if (VA.needsCustom()) {
01360       // Handle f64 or half of a v2f64.
01361       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
01362                                       InFlag);
01363       Chain = Lo.getValue(1);
01364       InFlag = Lo.getValue(2);
01365       VA = RVLocs[++i]; // skip ahead to next loc
01366       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
01367                                       InFlag);
01368       Chain = Hi.getValue(1);
01369       InFlag = Hi.getValue(2);
01370       if (!Subtarget->isLittle())
01371         std::swap (Lo, Hi);
01372       Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
01373 
01374       if (VA.getLocVT() == MVT::v2f64) {
01375         SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
01376         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
01377                           DAG.getConstant(0, MVT::i32));
01378 
01379         VA = RVLocs[++i]; // skip ahead to next loc
01380         Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
01381         Chain = Lo.getValue(1);
01382         InFlag = Lo.getValue(2);
01383         VA = RVLocs[++i]; // skip ahead to next loc
01384         Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
01385         Chain = Hi.getValue(1);
01386         InFlag = Hi.getValue(2);
01387         if (!Subtarget->isLittle())
01388           std::swap (Lo, Hi);
01389         Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
01390         Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
01391                           DAG.getConstant(1, MVT::i32));
01392       }
01393     } else {
01394       Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
01395                                InFlag);
01396       Chain = Val.getValue(1);
01397       InFlag = Val.getValue(2);
01398     }
01399 
01400     switch (VA.getLocInfo()) {
01401     default: llvm_unreachable("Unknown loc info!");
01402     case CCValAssign::Full: break;
01403     case CCValAssign::BCvt:
01404       Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
01405       break;
01406     }
01407 
01408     InVals.push_back(Val);
01409   }
01410 
01411   return Chain;
01412 }
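// In the needsCustom() case above, an f64 returned under the soft-float AAPCS
// arrives as two i32 halves in consecutive core registers (e.g. r0/r1); the
// two CopyFromReg nodes read the halves and VMOVDRR rebuilds the f64, with the
// halves swapped first on big-endian targets. A v2f64 repeats this for each of
// its two f64 elements.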
01413 
01414 /// LowerMemOpCallTo - Store the argument to the stack.
01415 SDValue
01416 ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
01417                                     SDValue StackPtr, SDValue Arg,
01418                                     SDLoc dl, SelectionDAG &DAG,
01419                                     const CCValAssign &VA,
01420                                     ISD::ArgFlagsTy Flags) const {
01421   unsigned LocMemOffset = VA.getLocMemOffset();
01422   SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
01423   PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
01424   return DAG.getStore(Chain, dl, Arg, PtrOff,
01425                       MachinePointerInfo::getStack(LocMemOffset),
01426                       false, false, 0);
01427 }
01428 
01429 void ARMTargetLowering::PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
01430                                          SDValue Chain, SDValue &Arg,
01431                                          RegsToPassVector &RegsToPass,
01432                                          CCValAssign &VA, CCValAssign &NextVA,
01433                                          SDValue &StackPtr,
01434                                          SmallVectorImpl<SDValue> &MemOpChains,
01435                                          ISD::ArgFlagsTy Flags) const {
01436 
01437   SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
01438                               DAG.getVTList(MVT::i32, MVT::i32), Arg);
01439   unsigned id = Subtarget->isLittle() ? 0 : 1;
01440   RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));
01441 
01442   if (NextVA.isRegLoc())
01443     RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
01444   else {
01445     assert(NextVA.isMemLoc());
01446     if (!StackPtr.getNode())
01447       StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
01448 
01449     MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
01450                                            dl, DAG, NextVA,
01451                                            Flags));
01452   }
01453 }
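// VMOVRRD performs the opposite move: it splits an f64 held in a D register
// into two i32 values. The 'id' selection above decides which half lands in
// the lower-numbered core register, so big-endian targets pass the halves in
// the opposite order; if only one core register remains, the second half is
// stored to its stack slot via LowerMemOpCallTo.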
01454 
01455 /// LowerCall - Lower a call into a callseq_start <-
01456 /// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
01457 /// nodes.
01458 SDValue
01459 ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
01460                              SmallVectorImpl<SDValue> &InVals) const {
01461   SelectionDAG &DAG                     = CLI.DAG;
01462   SDLoc &dl                          = CLI.DL;
01463   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
01464   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
01465   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
01466   SDValue Chain                         = CLI.Chain;
01467   SDValue Callee                        = CLI.Callee;
01468   bool &isTailCall                      = CLI.IsTailCall;
01469   CallingConv::ID CallConv              = CLI.CallConv;
01470   bool doesNotRet                       = CLI.DoesNotReturn;
01471   bool isVarArg                         = CLI.IsVarArg;
01472 
01473   MachineFunction &MF = DAG.getMachineFunction();
01474   bool isStructRet    = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
01475   bool isThisReturn   = false;
01476   bool isSibCall      = false;
01477 
01478   // Disable tail calls if they're not supported.
01479   if (!Subtarget->supportsTailCall() || MF.getTarget().Options.DisableTailCalls)
01480     isTailCall = false;
01481 
01482   if (isTailCall) {
01483     // Check if it's really possible to do a tail call.
01484     isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
01485                     isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(),
01486                                                    Outs, OutVals, Ins, DAG);
01487     if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall())
01488       report_fatal_error("failed to perform tail call elimination on a call "
01489                          "site marked musttail");
01490     // We don't support GuaranteedTailCallOpt for ARM, only automatically
01491     // detected sibcalls.
01492     if (isTailCall) {
01493       ++NumTailCalls;
01494       isSibCall = true;
01495     }
01496   }
01497 
01498   // Analyze operands of the call, assigning locations to each operand.
01499   SmallVector<CCValAssign, 16> ArgLocs;
01500   ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
01501                     *DAG.getContext(), Call);
01502   CCInfo.AnalyzeCallOperands(Outs,
01503                              CCAssignFnForNode(CallConv, /* Return*/ false,
01504                                                isVarArg));
01505 
01506   // Get a count of how many bytes are to be pushed on the stack.
01507   unsigned NumBytes = CCInfo.getNextStackOffset();
01508 
01509   // For tail calls, memory operands are available in our caller's stack.
01510   if (isSibCall)
01511     NumBytes = 0;
01512 
01513   // Adjust the stack pointer for the new arguments...
01514   // These operations are automatically eliminated by the prolog/epilog pass
01515   if (!isSibCall)
01516     Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
01517                                  dl);
01518 
01519   SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
01520 
01521   RegsToPassVector RegsToPass;
01522   SmallVector<SDValue, 8> MemOpChains;
01523 
01524   // Walk the register/memloc assignments, inserting copies/loads.  In the case
01525   // of tail call optimization, arguments are handled later.
01526   for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
01527        i != e;
01528        ++i, ++realArgIdx) {
01529     CCValAssign &VA = ArgLocs[i];
01530     SDValue Arg = OutVals[realArgIdx];
01531     ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
01532     bool isByVal = Flags.isByVal();
01533 
01534     // Promote the value if needed.
01535     switch (VA.getLocInfo()) {
01536     default: llvm_unreachable("Unknown loc info!");
01537     case CCValAssign::Full: break;
01538     case CCValAssign::SExt:
01539       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
01540       break;
01541     case CCValAssign::ZExt:
01542       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
01543       break;
01544     case CCValAssign::AExt:
01545       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
01546       break;
01547     case CCValAssign::BCvt:
01548       Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
01549       break;
01550     }
01551 
01552     // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
01553     if (VA.needsCustom()) {
01554       if (VA.getLocVT() == MVT::v2f64) {
01555         SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
01556                                   DAG.getConstant(0, MVT::i32));
01557         SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
01558                                   DAG.getConstant(1, MVT::i32));
01559 
01560         PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
01561                          VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
01562 
01563         VA = ArgLocs[++i]; // skip ahead to next loc
01564         if (VA.isRegLoc()) {
01565           PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
01566                            VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
01567         } else {
01568           assert(VA.isMemLoc());
01569 
01570           MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
01571                                                  dl, DAG, VA, Flags));
01572         }
01573       } else {
01574         PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
01575                          StackPtr, MemOpChains, Flags);
01576       }
01577     } else if (VA.isRegLoc()) {
01578       if (realArgIdx == 0 && Flags.isReturned() && Outs[0].VT == MVT::i32) {
01579         assert(VA.getLocVT() == MVT::i32 &&
01580                "unexpected calling convention register assignment");
01581         assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
01582                "unexpected use of 'returned'");
01583         isThisReturn = true;
01584       }
01585       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
01586     } else if (isByVal) {
01587       assert(VA.isMemLoc());
01588       unsigned offset = 0;
01589 
01590       // True if this byval aggregate will be split between registers
01591       // and memory.
01592       unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
01593       unsigned CurByValIdx = CCInfo.getInRegsParamsProceed();
01594 
01595       if (CurByValIdx < ByValArgsCount) {
01596 
01597         unsigned RegBegin, RegEnd;
01598         CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
01599 
01600         EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
01601         unsigned int i, j;
01602         for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
01603           SDValue Const = DAG.getConstant(4*i, MVT::i32);
01604           SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
01605           SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
01606                                      MachinePointerInfo(),
01607                                      false, false, false,
01608                                      DAG.InferPtrAlignment(AddArg));
01609           MemOpChains.push_back(Load.getValue(1));
01610           RegsToPass.push_back(std::make_pair(j, Load));
01611         }
01612 
01613         // If the parameter size exceeds the register area, the "offset" value
01614         // helps us to compute the stack slot for the remaining part properly.
01615         offset = RegEnd - RegBegin;
01616 
01617         CCInfo.nextInRegsParam();
01618       }
01619 
01620       if (Flags.getByValSize() > 4*offset) {
01621         unsigned LocMemOffset = VA.getLocMemOffset();
01622         SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset);
01623         SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr,
01624                                   StkPtrOff);
01625         SDValue SrcOffset = DAG.getIntPtrConstant(4*offset);
01626         SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset);
01627         SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset,
01628                                            MVT::i32);
01629         SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), MVT::i32);
01630 
01631         SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
01632         SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
01633         MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
01634                                           Ops));
01635       }
01636     } else if (!isSibCall) {
01637       assert(VA.isMemLoc());
01638 
01639       MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
01640                                              dl, DAG, VA, Flags));
01641     }
01642   }
01643 
01644   if (!MemOpChains.empty())
01645     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
01646 
01647   // Build a sequence of copy-to-reg nodes chained together with token chain
01648   // and flag operands which copy the outgoing args into the appropriate regs.
01649   SDValue InFlag;
01650   // Tail call byval lowering might overwrite argument registers so in case of
01651   // tail call optimization the copies to registers are lowered later.
01652   if (!isTailCall)
01653     for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
01654       Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
01655                                RegsToPass[i].second, InFlag);
01656       InFlag = Chain.getValue(1);
01657     }
01658 
01659   // For tail calls lower the arguments to the 'real' stack slot.
01660   if (isTailCall) {
01661     // Force all the incoming stack arguments to be loaded from the stack
01662     // before any new outgoing arguments are stored to the stack, because the
01663     // outgoing stack slots may alias the incoming argument stack slots, and
01664     // the alias isn't otherwise explicit. This is slightly more conservative
01665     // than necessary, because it means that each store effectively depends
01666     // on every argument instead of just those arguments it would clobber.
01667 
01668     // Do not flag preceding copytoreg stuff together with the following stuff.
01669     InFlag = SDValue();
01670     for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
01671       Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
01672                                RegsToPass[i].second, InFlag);
01673       InFlag = Chain.getValue(1);
01674     }
01675     InFlag = SDValue();
01676   }
01677 
01678   // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
01679   // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
01680   // node so that legalize doesn't hack it.
01681   bool isDirect = false;
01682   bool isARMFunc = false;
01683   bool isLocalARMFunc = false;
01684   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
01685 
01686   if (EnableARMLongCalls) {
01687     assert((Subtarget->isTargetWindows() ||
01688             getTargetMachine().getRelocationModel() == Reloc::Static) &&
01689            "long-calls with non-static relocation model!");
01690     // Handle a global address or an external symbol. If it's not one of
01691     // those, the target's already in a register, so we don't need to do
01692     // anything extra.
01693     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
01694       const GlobalValue *GV = G->getGlobal();
01695       // Create a constant pool entry for the callee address
01696       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
01697       ARMConstantPoolValue *CPV =
01698         ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);
01699 
01700       // Get the address of the callee into a register
01701       SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
01702       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
01703       Callee = DAG.getLoad(getPointerTy(), dl,
01704                            DAG.getEntryNode(), CPAddr,
01705                            MachinePointerInfo::getConstantPool(),
01706                            false, false, false, 0);
01707     } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
01708       const char *Sym = S->getSymbol();
01709 
01710       // Create a constant pool entry for the callee address
01711       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
01712       ARMConstantPoolValue *CPV =
01713         ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
01714                                       ARMPCLabelIndex, 0);
01715       // Get the address of the callee into a register
01716       SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
01717       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
01718       Callee = DAG.getLoad(getPointerTy(), dl,
01719                            DAG.getEntryNode(), CPAddr,
01720                            MachinePointerInfo::getConstantPool(),
01721                            false, false, false, 0);
01722     }
01723   } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
01724     const GlobalValue *GV = G->getGlobal();
01725     isDirect = true;
01726     bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
01727     bool isStub = (isExt && Subtarget->isTargetMachO()) &&
01728                    getTargetMachine().getRelocationModel() != Reloc::Static;
01729     isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
01730     // ARM call to a local ARM function is predicable.
01731     isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking);
01732     // tBX takes a register source operand.
01733     if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
01734       assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
01735       Callee = DAG.getNode(ARMISD::WrapperPIC, dl, getPointerTy(),
01736                            DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
01737                                                       0, ARMII::MO_NONLAZY));
01738       Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
01739                            MachinePointerInfo::getGOT(), false, false, true, 0);
01740     } else if (Subtarget->isTargetCOFF()) {
01741       assert(Subtarget->isTargetWindows() &&
01742              "Windows is the only supported COFF target");
01743       unsigned TargetFlags = GV->hasDLLImportStorageClass()
01744                                  ? ARMII::MO_DLLIMPORT
01745                                  : ARMII::MO_NO_FLAG;
01746       Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), /*Offset=*/0,
01747                                           TargetFlags);
01748       if (GV->hasDLLImportStorageClass())
01749         Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
01750                              DAG.getNode(ARMISD::Wrapper, dl, getPointerTy(),
01751                                          Callee), MachinePointerInfo::getGOT(),
01752                              false, false, false, 0);
01753     } else {
01754       // On ELF targets for PIC code, direct calls should go through the PLT
01755       unsigned OpFlags = 0;
01756       if (Subtarget->isTargetELF() &&
01757           getTargetMachine().getRelocationModel() == Reloc::PIC_)
01758         OpFlags = ARMII::MO_PLT;
01759       Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
01760     }
01761   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
01762     isDirect = true;
01763     bool isStub = Subtarget->isTargetMachO() &&
01764                   getTargetMachine().getRelocationModel() != Reloc::Static;
01765     isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
01766     // tBX takes a register source operand.
01767     const char *Sym = S->getSymbol();
01768     if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
01769       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
01770       ARMConstantPoolValue *CPV =
01771         ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
01772                                       ARMPCLabelIndex, 4);
01773       SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
01774       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
01775       Callee = DAG.getLoad(getPointerTy(), dl,
01776                            DAG.getEntryNode(), CPAddr,
01777                            MachinePointerInfo::getConstantPool(),
01778                            false, false, false, 0);
01779       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
01780       Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
01781                            getPointerTy(), Callee, PICLabel);
01782     } else {
01783       unsigned OpFlags = 0;
01784       // On ELF targets for PIC code, direct calls should go through the PLT
01785       if (Subtarget->isTargetELF() &&
01786                   getTargetMachine().getRelocationModel() == Reloc::PIC_)
01787         OpFlags = ARMII::MO_PLT;
01788       Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags);
01789     }
01790   }
01791 
01792   // FIXME: handle tail calls differently.
01793   unsigned CallOpc;
01794   bool HasMinSizeAttr = MF.getFunction()->getAttributes().hasAttribute(
01795       AttributeSet::FunctionIndex, Attribute::MinSize);
01796   if (Subtarget->isThumb()) {
01797     if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
01798       CallOpc = ARMISD::CALL_NOLINK;
01799     else
01800       CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
01801   } else {
01802     if (!isDirect && !Subtarget->hasV5TOps())
01803       CallOpc = ARMISD::CALL_NOLINK;
01804     else if (doesNotRet && isDirect && Subtarget->hasRAS() &&
01805                // Emit regular call when code size is the priority
01806                !HasMinSizeAttr)
01807       // "mov lr, pc; b _foo" to avoid confusing the RSP
01808       CallOpc = ARMISD::CALL_NOLINK;
01809     else
01810       CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
01811   }
01812 
01813   std::vector<SDValue> Ops;
01814   Ops.push_back(Chain);
01815   Ops.push_back(Callee);
01816 
01817   // Add argument registers to the end of the list so that they are known live
01818   // into the call.
01819   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
01820     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
01821                                   RegsToPass[i].second.getValueType()));
01822 
01823   // Add a register mask operand representing the call-preserved registers.
01824   if (!isTailCall) {
01825     const uint32_t *Mask;
01826     const TargetRegisterInfo *TRI =
01827         getTargetMachine().getSubtargetImpl()->getRegisterInfo();
01828     const ARMBaseRegisterInfo *ARI = static_cast<const ARMBaseRegisterInfo*>(TRI);
01829     if (isThisReturn) {
01830       // For 'this' returns, use the R0-preserving mask if applicable
01831       Mask = ARI->getThisReturnPreservedMask(CallConv);
01832       if (!Mask) {
01833         // Set isThisReturn to false if the calling convention is not one that
01834         // allows 'returned' to be modeled in this way, so LowerCallResult does
01835         // not try to pass 'this' straight through
01836         isThisReturn = false;
01837         Mask = ARI->getCallPreservedMask(CallConv);
01838       }
01839     } else
01840       Mask = ARI->getCallPreservedMask(CallConv);
01841 
01842     assert(Mask && "Missing call preserved mask for calling convention");
01843     Ops.push_back(DAG.getRegisterMask(Mask));
01844   }
01845 
01846   if (InFlag.getNode())
01847     Ops.push_back(InFlag);
01848 
01849   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
01850   if (isTailCall)
01851     return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
01852 
01853   // Returns a chain and a flag for retval copy to use.
01854   Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
01855   InFlag = Chain.getValue(1);
01856 
01857   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
01858                              DAG.getIntPtrConstant(0, true), InFlag, dl);
01859   if (!Ins.empty())
01860     InFlag = Chain.getValue(1);
01861 
01862   // Handle result values, copying them out of physregs into vregs that we
01863   // return.
01864   return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
01865                          InVals, isThisReturn,
01866                          isThisReturn ? OutVals[0] : SDValue());
01867 }
01868 
01869 /// HandleByVal - Every parameter *after* a byval parameter is passed
01870 /// on the stack.  Remember the next parameter register to allocate,
01871 /// and then confiscate the rest of the parameter registers to ensure
01872 /// this.
01873 void
01874 ARMTargetLowering::HandleByVal(
01875     CCState *State, unsigned &size, unsigned Align) const {
01876   unsigned reg = State->AllocateReg(GPRArgRegs, 4);
01877   assert((State->getCallOrPrologue() == Prologue ||
01878           State->getCallOrPrologue() == Call) &&
01879          "unhandled ParmContext");
01880 
01881   if ((ARM::R0 <= reg) && (reg <= ARM::R3)) {
01882     if (Subtarget->isAAPCS_ABI() && Align > 4) {
01883       unsigned AlignInRegs = Align / 4;
01884       unsigned Waste = (ARM::R4 - reg) % AlignInRegs;
01885       for (unsigned i = 0; i < Waste; ++i)
01886         reg = State->AllocateReg(GPRArgRegs, 4);
01887     }
01888     if (reg != 0) {
01889       unsigned excess = 4 * (ARM::R4 - reg);
01890 
01891       // Special case when NSAA != SP and the parameter size is greater than
01892       // the size of all remaining GPR regs. In that case we can't split the
01893       // parameter; we must send it to the stack. We must also set NCRN to R4,
01894       // wasting all remaining registers.
01895       const unsigned NSAAOffset = State->getNextStackOffset();
01896       if (Subtarget->isAAPCS_ABI() && NSAAOffset != 0 && size > excess) {
01897         while (State->AllocateReg(GPRArgRegs, 4))
01898           ;
01899         return;
01900       }
01901 
01902       // The first register for the byval parameter is the first register that
01903       // wasn't allocated before this method call, so it is "reg".
01904       // If the parameter is small enough to fit in the range [reg, r4), then
01905       // the end (one past the last) register is reg + param-size-in-regs;
01906       // otherwise the parameter is split between registers and stack, and the
1907       // end register is r4.
01908       unsigned ByValRegBegin = reg;
01909       unsigned ByValRegEnd = (size < excess) ? reg + size/4 : (unsigned)ARM::R4;
01910       State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
01911       // Note that the first register was already allocated at the beginning of
01912       // the function; allocate the remaining registers we need.
01913       for (unsigned i = reg+1; i != ByValRegEnd; ++i)
01914         State->AllocateReg(GPRArgRegs, 4);
01915       // A byval parameter that is split between registers and memory needs its
01916       // size truncated here.
01917       // In the case where the entire structure fits in registers, we set the
01918       // size in memory to zero.
01919       if (size < excess)
01920         size = 0;
01921       else
01922         size -= excess;
01923     }
01924   }
01925 }
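// Worked example (assuming AAPCS, no over-alignment, NSAA == SP): for a
// 16-byte byval argument with r0 already holding an earlier argument, 'reg'
// is r1, so excess = 4 * (r4 - r1) = 12 bytes fit in r1-r3. Since size (16)
// is not less than excess, ByValRegEnd is r4, the remaining registers are
// confiscated, and 'size' is reduced to 4, the number of bytes that still go
// on the stack.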
01926 
01927 /// MatchingStackOffset - Return true if the given stack call argument is
01928 /// already available at the same (relative) position in the caller's
01929 /// incoming argument stack.
01930 static
01931 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
01932                          MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
01933                          const TargetInstrInfo *TII) {
01934   unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
01935   int FI = INT_MAX;
01936   if (Arg.getOpcode() == ISD::CopyFromReg) {
01937     unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
01938     if (!TargetRegisterInfo::isVirtualRegister(VR))
01939       return false;
01940     MachineInstr *Def = MRI->getVRegDef(VR);
01941     if (!Def)
01942       return false;
01943     if (!Flags.isByVal()) {
01944       if (!TII->isLoadFromStackSlot(Def, FI))
01945         return false;
01946     } else {
01947       return false;
01948     }
01949   } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
01950     if (Flags.isByVal())
01951       // ByVal argument is passed in as a pointer but it's now being
01952       // dereferenced. e.g.
01953       // define @foo(%struct.X* %A) {
01954       //   tail call @bar(%struct.X* byval %A)
01955       // }
01956       return false;
01957     SDValue Ptr = Ld->getBasePtr();
01958     FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
01959     if (!FINode)
01960       return false;
01961     FI = FINode->getIndex();
01962   } else
01963     return false;
01964 
01965   assert(FI != INT_MAX);
01966   if (!MFI->isFixedObjectIndex(FI))
01967     return false;
01968   return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
01969 }
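// In other words, a stack argument may be left untouched by a sibcall only if
// it is a direct reload of the caller's own fixed stack slot at the same
// offset and with the same size; anything else disqualifies the call in
// IsEligibleForTailCallOptimization below.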
01970 
01971 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
01972 /// for tail call optimization. Targets which want to do tail call
01973 /// optimization should implement this function.
01974 bool
01975 ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
01976                                                      CallingConv::ID CalleeCC,
01977                                                      bool isVarArg,
01978                                                      bool isCalleeStructRet,
01979                                                      bool isCallerStructRet,
01980                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
01981                                     const SmallVectorImpl<SDValue> &OutVals,
01982                                     const SmallVectorImpl<ISD::InputArg> &Ins,
01983                                                      SelectionDAG& DAG) const {
01984   const Function *CallerF = DAG.getMachineFunction().getFunction();
01985   CallingConv::ID CallerCC = CallerF->getCallingConv();
01986   bool CCMatch = CallerCC == CalleeCC;
01987 
01988   // Look for obvious safe cases to perform tail call optimization that do not
01989   // require ABI changes. This is what gcc calls sibcall.
01990 
01991   // Do not sibcall optimize vararg calls unless the call site is not passing
01992   // any arguments.
01993   if (isVarArg && !Outs.empty())
01994     return false;
01995 
01996   // Exception-handling functions need a special set of instructions to indicate
01997   // a return to the hardware. Tail-calling another function would probably
01998   // break this.
01999   if (CallerF->hasFnAttribute("interrupt"))
02000     return false;
02001 
02002   // Also avoid sibcall optimization if either caller or callee uses struct
02003   // return semantics.
02004   if (isCalleeStructRet || isCallerStructRet)
02005     return false;
02006 
02007   // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo::
02008   // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as
02009   // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation
02010   // support in the assembler and linker to be used. This would need to be
02011   // fixed to fully support tail calls in Thumb1.
02012   //
02013   // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take
02014   // LR.  This means if we need to reload LR, it takes an extra instruction,
02015   // which outweighs the value of the tail call; but here we don't know yet
02016   // whether LR is going to be used.  Probably the right approach is to
02017   // generate the tail call here and turn it back into CALL/RET in
02018   // emitEpilogue if LR is used.
02019 
02020   // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
02021   // but we need to make sure there are enough registers; the only valid
02022   // registers are the 4 used for parameters.  We don't currently do this
02023   // case.
02024   if (Subtarget->isThumb1Only())
02025     return false;
02026 
02027   // Externally-defined functions with weak linkage should not be
02028   // tail-called on ARM when the OS does not support dynamic
02029   // pre-emption of symbols, as the AAELF spec requires normal calls
02030   // to undefined weak functions to be replaced with a NOP or jump to the
02031   // next instruction. The behaviour of branch instructions in this
02032   // situation (as used for tail calls) is implementation-defined, so we
02033   // cannot rely on the linker replacing the tail call with a return.
02034   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
02035     const GlobalValue *GV = G->getGlobal();
02036     if (GV->hasExternalWeakLinkage())
02037       return false;
02038   }
02039 
02040   // If the calling conventions do not match, then we'd better make sure the
02041   // results are returned in the same way as what the caller expects.
02042   if (!CCMatch) {
02043     SmallVector<CCValAssign, 16> RVLocs1;
02044     ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
02045                        *DAG.getContext(), Call);
02046     CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));
02047 
02048     SmallVector<CCValAssign, 16> RVLocs2;
02049     ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
02050                        *DAG.getContext(), Call);
02051     CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));
02052 
02053     if (RVLocs1.size() != RVLocs2.size())
02054       return false;
02055     for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
02056       if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
02057         return false;
02058       if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
02059         return false;
02060       if (RVLocs1[i].isRegLoc()) {
02061         if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
02062           return false;
02063       } else {
02064         if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
02065           return false;
02066       }
02067     }
02068   }
02069 
02070   // If the caller's vararg or byval argument has been split between registers
02071   // and stack, do not perform a tail call, since part of the argument is in
02072   // the caller's local frame.
02073   const ARMFunctionInfo *AFI_Caller = DAG.getMachineFunction().
02074                                       getInfo<ARMFunctionInfo>();
02075   if (AFI_Caller->getArgRegsSaveSize())
02076     return false;
02077 
02078   // If the callee takes no arguments then go on to check the results of the
02079   // call.
02080   if (!Outs.empty()) {
02081     // Check if stack adjustment is needed. For now, do not do this if any
02082     // argument is passed on the stack.
02083     SmallVector<CCValAssign, 16> ArgLocs;
02084     ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
02085                       *DAG.getContext(), Call);
02086     CCInfo.AnalyzeCallOperands(Outs,
02087                                CCAssignFnForNode(CalleeCC, false, isVarArg));
02088     if (CCInfo.getNextStackOffset()) {
02089       MachineFunction &MF = DAG.getMachineFunction();
02090 
02091       // Check if the arguments are already laid out in the right way as
02092       // the caller's fixed stack objects.
02093       MachineFrameInfo *MFI = MF.getFrameInfo();
02094       const MachineRegisterInfo *MRI = &MF.getRegInfo();
02095       const TargetInstrInfo *TII =
02096           getTargetMachine().getSubtargetImpl()->getInstrInfo();
02097       for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
02098            i != e;
02099            ++i, ++realArgIdx) {
02100         CCValAssign &VA = ArgLocs[i];
02101         EVT RegVT = VA.getLocVT();
02102         SDValue Arg = OutVals[realArgIdx];
02103         ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
02104         if (VA.getLocInfo() == CCValAssign::Indirect)
02105           return false;
02106         if (VA.needsCustom()) {
02107           // f64 and vector types are split into multiple registers or
02108           // register/stack-slot combinations.  The types will not match
02109           // the registers; give up on memory f64 refs until we figure
02110           // out what to do about this.
02111           if (!VA.isRegLoc())
02112             return false;
02113           if (!ArgLocs[++i].isRegLoc())
02114             return false;
02115           if (RegVT == MVT::v2f64) {
02116             if (!ArgLocs[++i].isRegLoc())
02117               return false;
02118             if (!ArgLocs[++i].isRegLoc())
02119               return false;
02120           }
02121         } else if (!VA.isRegLoc()) {
02122           if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
02123                                    MFI, MRI, TII))
02124             return false;
02125         }
02126       }
02127     }
02128   }
02129 
02130   return true;
02131 }
02132 
02133 bool
02134 ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
02135                                   MachineFunction &MF, bool isVarArg,
02136                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
02137                                   LLVMContext &Context) const {
02138   SmallVector<CCValAssign, 16> RVLocs;
02139   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
02140   return CCInfo.CheckReturn(Outs, CCAssignFnForNode(CallConv, /*Return=*/true,
02141                                                     isVarArg));
02142 }
02143 
02144 static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
02145                                     SDLoc DL, SelectionDAG &DAG) {
02146   const MachineFunction &MF = DAG.getMachineFunction();
02147   const Function *F = MF.getFunction();
02148 
02149   StringRef IntKind = F->getFnAttribute("interrupt").getValueAsString();
02150 
02151   // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
02152   // version of the "preferred return address". These offsets affect the return
02153   // instruction if this is a return from PL1 without hypervisor extensions.
02154   //    IRQ/FIQ: +4     "subs pc, lr, #4"
02155   //    SWI:     0      "subs pc, lr, #0"
02156   //    ABORT:   +4     "subs pc, lr, #4"
02157   //    UNDEF:   +4/+2  "subs pc, lr, #0"
02158   // UNDEF varies depending on whether the exception came from ARM or Thumb
02159   // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.
02160 
02161   int64_t LROffset;
02162   if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
02163       IntKind == "ABORT")
02164     LROffset = 4;
02165   else if (IntKind == "SWI" || IntKind == "UNDEF")
02166     LROffset = 0;
02167   else
02168     report_fatal_error("Unsupported interrupt attribute. If present, value "
02169                        "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
02170 
02171   RetOps.insert(RetOps.begin() + 1, DAG.getConstant(LROffset, MVT::i32, false));
02172 
02173   return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
02174 }
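// So an IRQ or FIQ handler returns with "subs pc, lr, #4" and an SWI handler
// with "subs pc, lr, #0"; in both cases the SUBS-to-PC form also restores
// CPSR from SPSR as part of the exception return.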
02175 
02176 SDValue
02177 ARMTargetLowering::LowerReturn(SDValue Chain,
02178                                CallingConv::ID CallConv, bool isVarArg,
02179                                const SmallVectorImpl<ISD::OutputArg> &Outs,
02180                                const SmallVectorImpl<SDValue> &OutVals,
02181                                SDLoc dl, SelectionDAG &DAG) const {
02182 
02183   // CCValAssign - represent the assignment of the return value to a location.
02184   SmallVector<CCValAssign, 16> RVLocs;
02185 
02186   // CCState - Info about the registers and stack slots.
02187   ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
02188                     *DAG.getContext(), Call);
02189 
02190   // Analyze outgoing return values.
02191   CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true,
02192                                                isVarArg));
02193 
02194   SDValue Flag;
02195   SmallVector<SDValue, 4> RetOps;
02196   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
02197   bool isLittleEndian = Subtarget->isLittle();
02198 
02199   MachineFunction &MF = DAG.getMachineFunction();
02200   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02201   AFI->setReturnRegsCount(RVLocs.size());
02202 
02203   // Copy the result values into the output registers.
02204   for (unsigned i = 0, realRVLocIdx = 0;
02205        i != RVLocs.size();
02206        ++i, ++realRVLocIdx) {
02207     CCValAssign &VA = RVLocs[i];
02208     assert(VA.isRegLoc() && "Can only return in registers!");
02209 
02210     SDValue Arg = OutVals[realRVLocIdx];
02211 
02212     switch (VA.getLocInfo()) {
02213     default: llvm_unreachable("Unknown loc info!");
02214     case CCValAssign::Full: break;
02215     case CCValAssign::BCvt:
02216       Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
02217       break;
02218     }
02219 
02220     if (VA.needsCustom()) {
02221       if (VA.getLocVT() == MVT::v2f64) {
02222         // Extract the first half and return it in two registers.
02223         SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
02224                                    DAG.getConstant(0, MVT::i32));
02225         SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
02226                                        DAG.getVTList(MVT::i32, MVT::i32), Half);
02227 
02228         Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
02229                                  HalfGPRs.getValue(isLittleEndian ? 0 : 1),
02230                                  Flag);
02231         Flag = Chain.getValue(1);
02232         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
02233         VA = RVLocs[++i]; // skip ahead to next loc
02234         Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
02235                                  HalfGPRs.getValue(isLittleEndian ? 1 : 0),
02236                                  Flag);
02237         Flag = Chain.getValue(1);
02238         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
02239         VA = RVLocs[++i]; // skip ahead to next loc
02240 
02241         // Extract the 2nd half and fall through to handle it as an f64 value.
02242         Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
02243                           DAG.getConstant(1, MVT::i32));
02244       }
02245       // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
02246       // available.
02247       SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
02248                                   DAG.getVTList(MVT::i32, MVT::i32), Arg);
02249       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
02250                                fmrrd.getValue(isLittleEndian ? 0 : 1),
02251                                Flag);
02252       Flag = Chain.getValue(1);
02253       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
02254       VA = RVLocs[++i]; // skip ahead to next loc
02255       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
02256                                fmrrd.getValue(isLittleEndian ? 1 : 0),
02257                                Flag);
02258     } else
02259       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
02260 
02261     // Guarantee that all emitted copies are glued together,
02262     // so that nothing can be scheduled in between them.
02263     Flag = Chain.getValue(1);
02264     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
02265   }
02266 
02267   // Update chain and glue.
02268   RetOps[0] = Chain;
02269   if (Flag.getNode())
02270     RetOps.push_back(Flag);
02271 
02272   // CPUs which aren't M-class use a special sequence to return from
02273   // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
02274   // though we use "subs pc, lr, #N").
02275   //
02276   // M-class CPUs actually use a normal return sequence with a special
02277   // (hardware-provided) value in LR, so the normal code path works.
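  //
  // For example, a handler marked with the "IRQ" interrupt attribute is
  // typically returned from with "subs pc, lr, #4"; the exact immediate is
  // chosen by LowerInterruptReturn from the attribute's value.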
02278   if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt") &&
02279       !Subtarget->isMClass()) {
02280     if (Subtarget->isThumb1Only())
02281       report_fatal_error("interrupt attribute is not supported in Thumb1");
02282     return LowerInterruptReturn(RetOps, dl, DAG);
02283   }
02284 
02285   return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps);
02286 }
02287 
02288 bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
02289   if (N->getNumValues() != 1)
02290     return false;
02291   if (!N->hasNUsesOfValue(1, 0))
02292     return false;
02293 
02294   SDValue TCChain = Chain;
02295   SDNode *Copy = *N->use_begin();
02296   if (Copy->getOpcode() == ISD::CopyToReg) {
02297     // If the copy has a glue operand, we conservatively assume it isn't safe to
02298     // perform a tail call.
02299     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
02300       return false;
02301     TCChain = Copy->getOperand(0);
02302   } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
02303     SDNode *VMov = Copy;
02304     // f64 returned in a pair of GPRs.
02305     SmallPtrSet<SDNode*, 2> Copies;
02306     for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
02307          UI != UE; ++UI) {
02308       if (UI->getOpcode() != ISD::CopyToReg)
02309         return false;
02310       Copies.insert(*UI);
02311     }
02312     if (Copies.size() > 2)
02313       return false;
02314 
02315     for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
02316          UI != UE; ++UI) {
02317       SDValue UseChain = UI->getOperand(0);
02318       if (Copies.count(UseChain.getNode()))
02319         // Second CopyToReg
02320         Copy = *UI;
02321       else {
02322         // We are at the top of this chain.
02323         // If the copy has a glue operand, we conservatively assume it
02324         // isn't safe to perform a tail call.
02325         if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue)
02326           return false;
02327         // First CopyToReg
02328         TCChain = UseChain;
02329       }
02330     }
02331   } else if (Copy->getOpcode() == ISD::BITCAST) {
02332     // f32 returned in a single GPR.
02333     if (!Copy->hasOneUse())
02334       return false;
02335     Copy = *Copy->use_begin();
02336     if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
02337       return false;
02338     // If the copy has a glue operand, we conservatively assume it isn't safe to
02339     // perform a tail call.
02340     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
02341       return false;
02342     TCChain = Copy->getOperand(0);
02343   } else {
02344     return false;
02345   }
02346 
02347   bool HasRet = false;
02348   for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
02349        UI != UE; ++UI) {
02350     if (UI->getOpcode() != ARMISD::RET_FLAG &&
02351         UI->getOpcode() != ARMISD::INTRET_FLAG)
02352       return false;
02353     HasRet = true;
02354   }
02355 
02356   if (!HasRet)
02357     return false;
02358 
02359   Chain = TCChain;
02360   return true;
02361 }
02362 
02363 bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
02364   if (!Subtarget->supportsTailCall())
02365     return false;
02366 
02367   if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
02368     return false;
02369 
02370   return !Subtarget->isThumb1Only();
02371 }
02372 
02373 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
02374 // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
02375 // one of the above-mentioned nodes. It has to be wrapped because otherwise
02376 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
02377 // be used to form an addressing mode. These wrapped nodes will be selected
02378 // into MOVi.
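// For example, LowerConstantPool below returns
// (ARMISD::Wrapper (TargetConstantPool ...)) rather than the raw
// TargetConstantPool node itself.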
02379 static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
02380   EVT PtrVT = Op.getValueType();
02381   // FIXME there is no actual debug info here
02382   SDLoc dl(Op);
02383   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
02384   SDValue Res;
02385   if (CP->isMachineConstantPoolEntry())
02386     Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
02387                                     CP->getAlignment());
02388   else
02389     Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
02390                                     CP->getAlignment());
02391   return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
02392 }
02393 
02394 unsigned ARMTargetLowering::getJumpTableEncoding() const {
02395   return MachineJumpTableInfo::EK_Inline;
02396 }
02397 
02398 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
02399                                              SelectionDAG &DAG) const {
02400   MachineFunction &MF = DAG.getMachineFunction();
02401   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02402   unsigned ARMPCLabelIndex = 0;
02403   SDLoc DL(Op);
02404   EVT PtrVT = getPointerTy();
02405   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
02406   Reloc::Model RelocM = getTargetMachine().getRelocationModel();
02407   SDValue CPAddr;
02408   if (RelocM == Reloc::Static) {
02409     CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
02410   } else {
02411     unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
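    // (The PC adjustment differs because reading PC yields the instruction
    //  address plus 8 in ARM state but plus 4 in Thumb state.)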
02412     ARMPCLabelIndex = AFI->createPICLabelUId();
02413     ARMConstantPoolValue *CPV =
02414       ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
02415                                       ARMCP::CPBlockAddress, PCAdj);
02416     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02417   }
02418   CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
02419   SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr,
02420                                MachinePointerInfo::getConstantPool(),
02421                                false, false, false, 0);
02422   if (RelocM == Reloc::Static)
02423     return Result;
02424   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02425   return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
02426 }
02427 
02428 // Lower ISD::GlobalTLSAddress using the "general dynamic" model
02429 SDValue
02430 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
02431                                                  SelectionDAG &DAG) const {
02432   SDLoc dl(GA);
02433   EVT PtrVT = getPointerTy();
02434   unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
02435   MachineFunction &MF = DAG.getMachineFunction();
02436   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02437   unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
02438   ARMConstantPoolValue *CPV =
02439     ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
02440                                     ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
02441   SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02442   Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
02443   Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument,
02444                          MachinePointerInfo::getConstantPool(),
02445                          false, false, false, 0);
02446   SDValue Chain = Argument.getValue(1);
02447 
02448   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02449   Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
02450 
02451   // call __tls_get_addr.
02452   ArgListTy Args;
02453   ArgListEntry Entry;
02454   Entry.Node = Argument;
02455   Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
02456   Args.push_back(Entry);
02457 
02458   // FIXME: is there useful debug info available here?
02459   TargetLowering::CallLoweringInfo CLI(DAG);
02460   CLI.setDebugLoc(dl).setChain(Chain)
02461     .setCallee(CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
02462                DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args),
02463                0);
02464 
02465   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
02466   return CallResult.first;
02467 }
02468 
02469 // Lower ISD::GlobalTLSAddress using the "initial exec" or
02470 // "local exec" model.
02471 SDValue
02472 ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
02473                                         SelectionDAG &DAG,
02474                                         TLSModel::Model model) const {
02475   const GlobalValue *GV = GA->getGlobal();
02476   SDLoc dl(GA);
02477   SDValue Offset;
02478   SDValue Chain = DAG.getEntryNode();
02479   EVT PtrVT = getPointerTy();
02480   // Get the Thread Pointer
02481   SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
02482 
02483   if (model == TLSModel::InitialExec) {
02484     MachineFunction &MF = DAG.getMachineFunction();
02485     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02486     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
02487     // Initial exec model.
02488     unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
02489     ARMConstantPoolValue *CPV =
02490       ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
02491                                       ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
02492                                       true);
02493     Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02494     Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
02495     Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
02496                          MachinePointerInfo::getConstantPool(),
02497                          false, false, false, 0);
02498     Chain = Offset.getValue(1);
02499 
02500     SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02501     Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
02502 
02503     Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
02504                          MachinePointerInfo::getConstantPool(),
02505                          false, false, false, 0);
02506   } else {
02507     // local exec model
02508     assert(model == TLSModel::LocalExec);
02509     ARMConstantPoolValue *CPV =
02510       ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
02511     Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02512     Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
02513     Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
02514                          MachinePointerInfo::getConstantPool(),
02515                          false, false, false, 0);
02516   }
02517 
02518   // The address of the thread-local variable is the sum of the thread
02519   // pointer and the variable's offset.
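  //   address(var) = ThreadPointer + Offset
  // For the initial-exec model, Offset was loaded indirectly through its GOT
  // entry (GOTTPOFF); for local-exec it is the TPOFF constant loaded from the
  // constant pool.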
02520   return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
02521 }
02522 
02523 SDValue
02524 ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
02525   // TODO: implement the "local dynamic" model
02526   assert(Subtarget->isTargetELF() &&
02527          "TLS not implemented for non-ELF targets");
02528   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
02529 
02530   TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());
02531 
02532   switch (model) {
02533     case TLSModel::GeneralDynamic:
02534     case TLSModel::LocalDynamic:
02535       return LowerToTLSGeneralDynamicModel(GA, DAG);
02536     case TLSModel::InitialExec:
02537     case TLSModel::LocalExec:
02538       return LowerToTLSExecModels(GA, DAG, model);
02539   }
02540   llvm_unreachable("bogus TLS model");
02541 }
02542 
02543 SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
02544                                                  SelectionDAG &DAG) const {
02545   EVT PtrVT = getPointerTy();
02546   SDLoc dl(Op);
02547   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
02548   if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
02549     bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
02550     ARMConstantPoolValue *CPV =
02551       ARMConstantPoolConstant::Create(GV,
02552                                       UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);
02553     SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02554     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
02555     SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
02556                                  CPAddr,
02557                                  MachinePointerInfo::getConstantPool(),
02558                                  false, false, false, 0);
02559     SDValue Chain = Result.getValue(1);
02560     SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
02561     Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT);
02562     if (!UseGOTOFF)
02563       Result = DAG.getLoad(PtrVT, dl, Chain, Result,
02564                            MachinePointerInfo::getGOT(),
02565                            false, false, false, 0);
02566     return Result;
02567   }
02568 
02569   // If we have T2 ops, we can materialize the address directly via movt/movw
02570   // pair. This is always cheaper.
02571   if (Subtarget->useMovt(DAG.getMachineFunction())) {
02572     ++NumMovwMovt;
02573     // FIXME: Once remat is capable of dealing with instructions with register
02574     // operands, expand this into two nodes.
02575     return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
02576                        DAG.getTargetGlobalAddress(GV, dl, PtrVT));
02577   } else {
02578     SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
02579     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
02580     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
02581                        MachinePointerInfo::getConstantPool(),
02582                        false, false, false, 0);
02583   }
02584 }
02585 
02586 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
02587                                                     SelectionDAG &DAG) const {
02588   EVT PtrVT = getPointerTy();
02589   SDLoc dl(Op);
02590   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
02591   Reloc::Model RelocM = getTargetMachine().getRelocationModel();
02592 
02593   if (Subtarget->useMovt(DAG.getMachineFunction()))
02594     ++NumMovwMovt;
02595 
02596   // FIXME: Once remat is capable of dealing with instructions with register
02597   // operands, expand this into multiple nodes
02598   unsigned Wrapper =
02599       RelocM == Reloc::PIC_ ? ARMISD::WrapperPIC : ARMISD::Wrapper;
02600 
02601   SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
02602   SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);
02603 
02604   if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
02605     Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
02606                          MachinePointerInfo::getGOT(), false, false, false, 0);
02607   return Result;
02608 }
02609 
02610 SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
02611                                                      SelectionDAG &DAG) const {
02612   assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
02613   assert(Subtarget->useMovt(DAG.getMachineFunction()) &&
02614          "Windows on ARM expects to use movw/movt");
02615 
02616   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
02617   const ARMII::TOF TargetFlags =
02618     (GV->hasDLLImportStorageClass() ? ARMII::MO_DLLIMPORT : ARMII::MO_NO_FLAG);
02619   EVT PtrVT = getPointerTy();
02620   SDValue Result;
02621   SDLoc DL(Op);
02622 
02623   ++NumMovwMovt;
02624 
02625   // FIXME: Once remat is capable of dealing with instructions with register
02626   // operands, expand this into two nodes.
02627   Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
02628                        DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*Offset=*/0,
02629                                                   TargetFlags));
02630   if (GV->hasDLLImportStorageClass())
02631     Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
02632                          MachinePointerInfo::getGOT(), false, false, false, 0);
02633   return Result;
02634 }
02635 
02636 SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
02637                                                     SelectionDAG &DAG) const {
02638   assert(Subtarget->isTargetELF() &&
02639          "GLOBAL OFFSET TABLE not implemented for non-ELF targets");
02640   MachineFunction &MF = DAG.getMachineFunction();
02641   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02642   unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
02643   EVT PtrVT = getPointerTy();
02644   SDLoc dl(Op);
02645   unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
02646   ARMConstantPoolValue *CPV =
02647     ARMConstantPoolSymbol::Create(*DAG.getContext(), "_GLOBAL_OFFSET_TABLE_",
02648                                   ARMPCLabelIndex, PCAdj);
02649   SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02650   CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
02651   SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
02652                                MachinePointerInfo::getConstantPool(),
02653                                false, false, false, 0);
02654   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02655   return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
02656 }
02657 
02658 SDValue
02659 ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
02660   SDLoc dl(Op);
02661   SDValue Val = DAG.getConstant(0, MVT::i32);
02662   return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
02663                      DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
02664                      Op.getOperand(1), Val);
02665 }
02666 
02667 SDValue
02668 ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
02669   SDLoc dl(Op);
02670   return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
02671                      Op.getOperand(1), DAG.getConstant(0, MVT::i32));
02672 }
02673 
02674 SDValue
02675 ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
02676                                           const ARMSubtarget *Subtarget) const {
02677   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
02678   SDLoc dl(Op);
02679   switch (IntNo) {
02680   default: return SDValue();    // Don't custom lower most intrinsics.
02681   case Intrinsic::arm_rbit: {
02682     assert(Op.getOperand(1).getValueType() == MVT::i32 &&
02683            "RBIT intrinsic must have i32 type!");
02684     return DAG.getNode(ARMISD::RBIT, dl, MVT::i32, Op.getOperand(1));
02685   }
02686   case Intrinsic::arm_thread_pointer: {
02687     EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02688     return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
02689   }
02690   case Intrinsic::eh_sjlj_lsda: {
02691     MachineFunction &MF = DAG.getMachineFunction();
02692     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02693     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
02694     EVT PtrVT = getPointerTy();
02695     Reloc::Model RelocM = getTargetMachine().getRelocationModel();
02696     SDValue CPAddr;
02697     unsigned PCAdj = (RelocM != Reloc::PIC_)
02698       ? 0 : (Subtarget->isThumb() ? 4 : 8);
02699     ARMConstantPoolValue *CPV =
02700       ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex,
02701                                       ARMCP::CPLSDA, PCAdj);
02702     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02703     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
02704     SDValue Result =
02705       DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
02706                   MachinePointerInfo::getConstantPool(),
02707                   false, false, false, 0);
02708 
02709     if (RelocM == Reloc::PIC_) {
02710       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02711       Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
02712     }
02713     return Result;
02714   }
02715   case Intrinsic::arm_neon_vmulls:
02716   case Intrinsic::arm_neon_vmullu: {
02717     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
02718       ? ARMISD::VMULLs : ARMISD::VMULLu;
02719     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
02720                        Op.getOperand(1), Op.getOperand(2));
02721   }
02722   }
02723 }
02724 
02725 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
02726                                  const ARMSubtarget *Subtarget) {
02727   // FIXME: handle "fence singlethread" more efficiently.
02728   SDLoc dl(Op);
02729   if (!Subtarget->hasDataBarrier()) {
02730     // Some ARMv6 CPUs can support data barriers with an mcr instruction.
02731     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
02732     // here.
02733     assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
02734            "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
02735     return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
02736                        DAG.getConstant(0, MVT::i32));
02737   }
02738 
02739   ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
02740   AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
02741   ARM_MB::MemBOpt Domain = ARM_MB::ISH;
02742   if (Subtarget->isMClass()) {
02743     // Only a full system barrier exists in the M-class architectures.
02744     Domain = ARM_MB::SY;
02745   } else if (Subtarget->isSwift() && Ord == Release) {
02746     // Swift happens to implement ISHST barriers in a way that's compatible with
02747     // Release semantics but weaker than ISH so we'd be fools not to use
02748     // it. Beware: other processors probably don't!
02749     Domain = ARM_MB::ISHST;
02750   }
02751 
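  // The fence is emitted as an arm_dmb intrinsic with the chosen domain,
  // e.g. "dmb ish" in the common case above.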
02752   return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
02753                      DAG.getConstant(Intrinsic::arm_dmb, MVT::i32),
02754                      DAG.getConstant(Domain, MVT::i32));
02755 }
02756 
02757 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
02758                              const ARMSubtarget *Subtarget) {
02759   // ARM pre-v5TE and Thumb1 do not have preload instructions.
02760   if (!(Subtarget->isThumb2() ||
02761         (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
02762     // Just preserve the chain.
02763     return Op.getOperand(0);
02764 
02765   SDLoc dl(Op);
02766   unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
02767   if (!isRead &&
02768       (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
02769     // ARMv7 with MP extension has PLDW.
02770     return Op.getOperand(0);
02771 
02772   unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
02773   if (Subtarget->isThumb()) {
02774     // Invert the bits.
02775     isRead = ~isRead & 1;
02776     isData = ~isData & 1;
02777   }
02778 
02779   return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
02780                      Op.getOperand(1), DAG.getConstant(isRead, MVT::i32),
02781                      DAG.getConstant(isData, MVT::i32));
02782 }
02783 
02784 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
02785   MachineFunction &MF = DAG.getMachineFunction();
02786   ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
02787 
02788   // vastart just stores the address of the VarArgsFrameIndex slot into the
02789   // memory location argument.
02790   SDLoc dl(Op);
02791   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02792   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
02793   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
02794   return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
02795                       MachinePointerInfo(SV), false, false, 0);
02796 }
02797 
02798 SDValue
02799 ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
02800                                         SDValue &Root, SelectionDAG &DAG,
02801                                         SDLoc dl) const {
02802   MachineFunction &MF = DAG.getMachineFunction();
02803   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02804 
02805   const TargetRegisterClass *RC;
02806   if (AFI->isThumb1OnlyFunction())
02807     RC = &ARM::tGPRRegClass;
02808   else
02809     RC = &ARM::GPRRegClass;
02810 
02811   // Transform the arguments stored in physical registers into virtual ones.
02812   unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
02813   SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
02814 
02815   SDValue ArgValue2;
02816   if (NextVA.isMemLoc()) {
02817     MachineFrameInfo *MFI = MF.getFrameInfo();
02818     int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true);
02819 
02820     // Create load node to retrieve arguments from the stack.
02821     SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
02822     ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
02823                             MachinePointerInfo::getFixedStack(FI),
02824                             false, false, false, 0);
02825   } else {
02826     Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
02827     ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
02828   }
02829   if (!Subtarget->isLittle())
02830     std::swap (ArgValue, ArgValue2);
02831   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
02832 }
02833 
02834 void
02835 ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
02836                                   unsigned InRegsParamRecordIdx,
02837                                   unsigned ArgSize,
02838                                   unsigned &ArgRegsSize,
02839                                   unsigned &ArgRegsSaveSize)
02840   const {
02841   unsigned NumGPRs;
02842   if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
02843     unsigned RBegin, REnd;
02844     CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
02845     NumGPRs = REnd - RBegin;
02846   } else {
02847     unsigned int firstUnalloced;
02848     firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs,
02849                                                 sizeof(GPRArgRegs) /
02850                                                 sizeof(GPRArgRegs[0]));
02851     NumGPRs = (firstUnalloced <= 3) ? (4 - firstUnalloced) : 0;
02852   }
02853 
02854   unsigned Align = MF.getTarget()
02855                        .getSubtargetImpl()
02856                        ->getFrameLowering()
02857                        ->getStackAlignment();
02858   ArgRegsSize = NumGPRs * 4;
02859 
02860   // If the parameter is split between the stack and GPRs...
02861   if (NumGPRs && Align > 4 &&
02862       (ArgRegsSize < ArgSize ||
02863         InRegsParamRecordIdx >= CCInfo.getInRegsParamsCount())) {
02864     // Add padding for the part of the parameter recovered from GPRs.  For
02865     // example, if Align == 8, its last byte must be at address K*8 - 1.
02866     // We need this because the remaining (stack) part of the parameter is
02867     // stack-aligned, and the "GPRs head" must be attached to it without
02868     // gaps:
02869     // Stack:
02870     // |---- 8 bytes block ----| |---- 8 bytes block ----| |---- 8 bytes...
02871     // [ [padding] [GPRs head] ] [        Tail passed via stack       ....
02872     //
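    // Illustrative example (assuming nothing has been saved yet): with
    // Align == 8 and three GPRs left for this parameter, ArgRegsSize == 12,
    // OffsetToAlignment(12, 8) == 4, and ArgRegsSaveSize becomes 16.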
02873     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02874     unsigned Padding =
02875         OffsetToAlignment(ArgRegsSize + AFI->getArgRegsSaveSize(), Align);
02876     ArgRegsSaveSize = ArgRegsSize + Padding;
02877   } else
02878     // We don't need to extend the register save area for byval parameters
02879     // that are passed entirely in GPRs.
02880     ArgRegsSaveSize = ArgRegsSize;
02881 }
02882 
02883 // The remaining GPRs hold either the beginning of variable-argument
02884 // data, or the beginning of an aggregate passed by value (usually
02885 // byval).  Either way, we allocate stack slots adjacent to the data
02886 // provided by our caller, and store the unallocated registers there.
02887 // If this is a variadic function, the va_list pointer will begin with
02888 // these values; otherwise, this reassembles a (byval) structure that
02889 // was split between registers and memory.
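// For example, a 24-byte byval aggregate whose first word was assigned to R2
// has R2-R3 stored into a slot immediately below the 16 bytes the caller
// already placed on the stack, so the aggregate ends up contiguous in memory.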
02890 // Return: the frame index that the registers were stored into.
02891 int
02892 ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
02893                                   SDLoc dl, SDValue &Chain,
02894                                   const Value *OrigArg,
02895                                   unsigned InRegsParamRecordIdx,
02896                                   unsigned OffsetFromOrigArg,
02897                                   unsigned ArgOffset,
02898                                   unsigned ArgSize,
02899                                   bool ForceMutable,
02900                                   unsigned ByValStoreOffset,
02901                                   unsigned TotalArgRegsSaveSize) const {
02902 
02903   // Currently, two use cases are possible:
02904   // Case #1. A non-vararg function, and we meet the first byval parameter.
02905   //          Set up the first unallocated register as the first byval
02906   //          register and consume all remaining registers
02907   //          (these two actions are performed by the HandleByVal method).
02908   //          Then, here, we initialize the stack frame with
02909   //          "store-reg" instructions.
02910   // Case #2. A vararg function that doesn't contain byval parameters.
02911   //          The same: consume all remaining unallocated registers and
02912   //          initialize the stack frame.
02913 
02914   MachineFunction &MF = DAG.getMachineFunction();
02915   MachineFrameInfo *MFI = MF.getFrameInfo();
02916   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02917   unsigned firstRegToSaveIndex, lastRegToSaveIndex;
02918   unsigned RBegin, REnd;
02919   if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
02920     CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
02921     firstRegToSaveIndex = RBegin - ARM::R0;
02922     lastRegToSaveIndex = REnd - ARM::R0;
02923   } else {
02924     firstRegToSaveIndex = CCInfo.getFirstUnallocated
02925       (GPRArgRegs, array_lengthof(GPRArgRegs));
02926     lastRegToSaveIndex = 4;
02927   }
02928 
02929   unsigned ArgRegsSize, ArgRegsSaveSize;
02930   computeRegArea(CCInfo, MF, InRegsParamRecordIdx, ArgSize,
02931                  ArgRegsSize, ArgRegsSaveSize);
02932 
02933   // Store any byval regs to their spots on the stack so that they may be
02934   // loaded by dereferencing the formal parameter pointer or va_next.
02935   // Note: once the stack area for byval/varargs registers has been
02936   // initialized, it can't be initialized again.
02937   if (ArgRegsSaveSize) {
02938     unsigned Padding = ArgRegsSaveSize - ArgRegsSize;
02939 
02940     if (Padding) {
02941       assert(AFI->getStoredByValParamsPadding() == 0 &&
02942              "The only parameter may be padded.");
02943       AFI->setStoredByValParamsPadding(Padding);
02944     }
02945 
02946     int FrameIndex = MFI->CreateFixedObject(ArgRegsSaveSize,
02947                                             Padding +
02948                                               ByValStoreOffset -
02949                                               (int64_t)TotalArgRegsSaveSize,
02950                                             false);
02951     SDValue FIN = DAG.getFrameIndex(FrameIndex, getPointerTy());
02952     if (Padding) {
02953        MFI->CreateFixedObject(Padding,
02954                               ArgOffset + ByValStoreOffset -
02955                                 (int64_t)ArgRegsSaveSize,
02956                               false);
02957     }
02958 
02959     SmallVector<SDValue, 4> MemOps;
02960     for (unsigned i = 0; firstRegToSaveIndex < lastRegToSaveIndex;
02961          ++firstRegToSaveIndex, ++i) {
02962       const TargetRegisterClass *RC;
02963       if (AFI->isThumb1OnlyFunction())
02964         RC = &ARM::tGPRRegClass;
02965       else
02966         RC = &ARM::GPRRegClass;
02967 
02968       unsigned VReg = MF.addLiveIn(GPRArgRegs[firstRegToSaveIndex], RC);
02969       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
02970       SDValue Store =
02971         DAG.getStore(Val.getValue(1), dl, Val, FIN,
02972                      MachinePointerInfo(OrigArg, OffsetFromOrigArg + 4*i),
02973                      false, false, 0);
02974       MemOps.push_back(Store);
02975       FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
02976                         DAG.getConstant(4, getPointerTy()));
02977     }
02978 
02979     AFI->setArgRegsSaveSize(ArgRegsSaveSize + AFI->getArgRegsSaveSize());
02980 
02981     if (!MemOps.empty())
02982       Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
02983     return FrameIndex;
02984   } else {
02985     if (ArgSize == 0) {
02986       // We cannot allocate a zero-byte object for the first variadic argument,
02987       // so just make up a size.
02988       ArgSize = 4;
02989     }
02990     // This will point to the next argument passed via stack.
02991     return MFI->CreateFixedObject(
02992       ArgSize, ArgOffset, !ForceMutable);
02993   }
02994 }
02995 
02996 // Set up the stack frame that the va_list pointer will start from.
02997 void
02998 ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
02999                                         SDLoc dl, SDValue &Chain,
03000                                         unsigned ArgOffset,
03001                                         unsigned TotalArgRegsSaveSize,
03002                                         bool ForceMutable) const {
03003   MachineFunction &MF = DAG.getMachineFunction();
03004   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
03005 
03006   // Try to store any remaining integer argument regs
03007   // to their spots on the stack so that they may be loaded by dereferencing
03008   // the result of va_next.
03009   // If there are no regs to be stored, just point to the address after the
03010   // last argument passed via the stack.
03011   int FrameIndex =
03012     StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
03013                    CCInfo.getInRegsParamsCount(), 0, ArgOffset, 0, ForceMutable,
03014                    0, TotalArgRegsSaveSize);
03015 
03016   AFI->setVarArgsFrameIndex(FrameIndex);
03017 }
03018 
03019 SDValue
03020 ARMTargetLowering::LowerFormalArguments(SDValue Chain,
03021                                         CallingConv::ID CallConv, bool isVarArg,
03022                                         const SmallVectorImpl<ISD::InputArg>
03023                                           &Ins,
03024                                         SDLoc dl, SelectionDAG &DAG,
03025                                         SmallVectorImpl<SDValue> &InVals)
03026                                           const {
03027   MachineFunction &MF = DAG.getMachineFunction();
03028   MachineFrameInfo *MFI = MF.getFrameInfo();
03029 
03030   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
03031 
03032   // Assign locations to all of the incoming arguments.
03033   SmallVector<CCValAssign, 16> ArgLocs;
03034   ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
03035                     *DAG.getContext(), Prologue);
03036   CCInfo.AnalyzeFormalArguments(Ins,
03037                                 CCAssignFnForNode(CallConv, /* Return*/ false,
03038                                                   isVarArg));
03039 
03040   SmallVector<SDValue, 16> ArgValues;
03041   int lastInsIndex = -1;
03042   SDValue ArgValue;
03043   Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
03044   unsigned CurArgIdx = 0;
03045 
03046   // Initially ArgRegsSaveSize is zero.
03047   // Then we increase this value each time we meet a byval parameter.
03048   // We also increase this value for varargs functions.
03049   AFI->setArgRegsSaveSize(0);
03050 
03051   unsigned ByValStoreOffset = 0;
03052   unsigned TotalArgRegsSaveSize = 0;
03053   unsigned ArgRegsSaveSizeMaxAlign = 4;
03054 
03055   // Calculate the amount of stack space that we need to allocate to store
03056   // byval and variadic arguments that are passed in registers.
03057   // We need to know this before we allocate the first byval or variadic
03058   // argument, as they will be allocated a stack slot below the CFA (Canonical
03059   // Frame Address, the stack pointer at entry to the function).
03060   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
03061     CCValAssign &VA = ArgLocs[i];
03062     if (VA.isMemLoc()) {
03063       int index = VA.getValNo();
03064       if (index != lastInsIndex) {
03065         ISD::ArgFlagsTy Flags = Ins[index].Flags;
03066         if (Flags.isByVal()) {
03067           unsigned ExtraArgRegsSize;
03068           unsigned ExtraArgRegsSaveSize;
03069           computeRegArea(CCInfo, MF, CCInfo.getInRegsParamsProceed(),
03070                          Flags.getByValSize(),
03071                          ExtraArgRegsSize, ExtraArgRegsSaveSize);
03072 
03073           TotalArgRegsSaveSize += ExtraArgRegsSaveSize;
03074           if (Flags.getByValAlign() > ArgRegsSaveSizeMaxAlign)
03075               ArgRegsSaveSizeMaxAlign = Flags.getByValAlign();
03076           CCInfo.nextInRegsParam();
03077         }
03078         lastInsIndex = index;
03079       }
03080     }
03081   }
03082   CCInfo.rewindByValRegsInfo();
03083   lastInsIndex = -1;
03084   if (isVarArg && MFI->hasVAStart()) {
03085     unsigned ExtraArgRegsSize;
03086     unsigned ExtraArgRegsSaveSize;
03087     computeRegArea(CCInfo, MF, CCInfo.getInRegsParamsCount(), 0,
03088                    ExtraArgRegsSize, ExtraArgRegsSaveSize);
03089     TotalArgRegsSaveSize += ExtraArgRegsSaveSize;
03090   }
03091   // If the arg regs save area contains N-byte aligned values, the
03092   // bottom of it must be at least N-byte aligned.
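  // Only the four GPR argument registers (R0-R3, i.e. 16 bytes) can ever be
  // saved, so the rounded-up size is capped at 16 below.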
03093   TotalArgRegsSaveSize = RoundUpToAlignment(TotalArgRegsSaveSize, ArgRegsSaveSizeMaxAlign);
03094   TotalArgRegsSaveSize = std::min(TotalArgRegsSaveSize, 16U);
03095 
03096   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
03097     CCValAssign &VA = ArgLocs[i];
03098     std::advance(CurOrigArg, Ins[VA.getValNo()].OrigArgIndex - CurArgIdx);
03099     CurArgIdx = Ins[VA.getValNo()].OrigArgIndex;
03100     // Arguments stored in registers.
03101     if (VA.isRegLoc()) {
03102       EVT RegVT = VA.getLocVT();
03103 
03104       if (VA.needsCustom()) {
03105         // f64 and vector types are split up into multiple registers or
03106         // combinations of registers and stack slots.
03107         if (VA.getLocVT() == MVT::v2f64) {
03108           SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
03109                                                    Chain, DAG, dl);
03110           VA = ArgLocs[++i]; // skip ahead to next loc
03111           SDValue ArgValue2;
03112           if (VA.isMemLoc()) {
03113             int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true);
03114             SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
03115             ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
03116                                     MachinePointerInfo::getFixedStack(FI),
03117                                     false, false, false, 0);
03118           } else {
03119             ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
03120                                              Chain, DAG, dl);
03121           }
03122           ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
03123           ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
03124                                  ArgValue, ArgValue1, DAG.getIntPtrConstant(0));
03125           ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
03126                                  ArgValue, ArgValue2, DAG.getIntPtrConstant(1));
03127         } else
03128           ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
03129 
03130       } else {
03131         const TargetRegisterClass *RC;
03132 
03133         if (RegVT == MVT::f32)
03134           RC = &ARM::SPRRegClass;
03135         else if (RegVT == MVT::f64)
03136           RC = &ARM::DPRRegClass;
03137         else if (RegVT == MVT::v2f64)
03138           RC = &ARM::QPRRegClass;
03139         else if (RegVT == MVT::i32)
03140           RC = AFI->isThumb1OnlyFunction() ?
03141             (const TargetRegisterClass*)&ARM::tGPRRegClass :
03142             (const TargetRegisterClass*)&ARM::GPRRegClass;
03143         else
03144           llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
03145 
03146         // Transform the arguments in physical registers into virtual ones.
03147         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
03148         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
03149       }
03150 
03151       // If this is an 8 or 16-bit value, it is really passed promoted
03152       // to 32 bits.  Insert an assert[sz]ext to capture this, then
03153       // truncate to the right size.
03154       switch (VA.getLocInfo()) {
03155       default: llvm_unreachable("Unknown loc info!");
03156       case CCValAssign::Full: break;
03157       case CCValAssign::BCvt:
03158         ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
03159         break;
03160       case CCValAssign::SExt:
03161         ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
03162                                DAG.getValueType(VA.getValVT()));
03163         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
03164         break;
03165       case CCValAssign::ZExt:
03166         ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
03167                                DAG.getValueType(VA.getValVT()));
03168         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
03169         break;
03170       }
03171 
03172       InVals.push_back(ArgValue);
03173 
03174     } else { // VA.isRegLoc()
03175 
03176       // sanity check
03177       assert(VA.isMemLoc());
03178       assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
03179 
03180       int index = ArgLocs[i].getValNo();
03181 
03182       // Some Ins[] entries become multiple ArgLoc[] entries.
03183       // Process them only once.
03184       if (index != lastInsIndex)
03185         {
03186           ISD::ArgFlagsTy Flags = Ins[index].Flags;
03187           // FIXME: For now, all byval parameter objects are marked mutable.
03188           // This can be changed with more analysis.
03189           // In case of tail call optimization, mark all arguments mutable,
03190           // since they could be overwritten by the lowering of arguments in
03191           // the event of a tail call.
03192           if (Flags.isByVal()) {
03193             unsigned CurByValIndex = CCInfo.getInRegsParamsProceed();
03194 
03195             ByValStoreOffset = RoundUpToAlignment(ByValStoreOffset, Flags.getByValAlign());
03196             int FrameIndex = StoreByValRegs(
03197                 CCInfo, DAG, dl, Chain, CurOrigArg,
03198                 CurByValIndex,
03199                 Ins[VA.getValNo()].PartOffset,
03200                 VA.getLocMemOffset(),
03201                 Flags.getByValSize(),
03202                 true /*force mutable frames*/,
03203                 ByValStoreOffset,
03204                 TotalArgRegsSaveSize);
03205             ByValStoreOffset += Flags.getByValSize();
03206             ByValStoreOffset = std::min(ByValStoreOffset, 16U);
03207             InVals.push_back(DAG.getFrameIndex(FrameIndex, getPointerTy()));
03208             CCInfo.nextInRegsParam();
03209           } else {
03210             unsigned FIOffset = VA.getLocMemOffset();
03211             int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
03212                                             FIOffset, true);
03213 
03214             // Create load nodes to retrieve arguments from the stack.
03215             SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
03216             InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
03217                                          MachinePointerInfo::getFixedStack(FI),
03218                                          false, false, false, 0));
03219           }
03220           lastInsIndex = index;
03221         }
03222     }
03223   }
03224 
03225   // varargs
03226   if (isVarArg && MFI->hasVAStart())
03227     VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
03228                          CCInfo.getNextStackOffset(),
03229                          TotalArgRegsSaveSize);
03230 
03231   AFI->setArgumentStackSize(CCInfo.getNextStackOffset());
03232 
03233   return Chain;
03234 }
03235 
03236 /// isFloatingPointZero - Return true if this is +0.0.
03237 static bool isFloatingPointZero(SDValue Op) {
03238   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
03239     return CFP->getValueAPF().isPosZero();
03240   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
03241     // Maybe this has already been legalized into the constant pool?
03242     if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
03243       SDValue WrapperOp = Op.getOperand(1).getOperand(0);
03244       if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
03245         if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
03246           return CFP->getValueAPF().isPosZero();
03247     }
03248   }
03249   return false;
03250 }
03251 
03252 /// Returns the appropriate ARM CMP (cmp) and corresponding condition code for
03253 /// the given operands.
03254 SDValue
03255 ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
03256                              SDValue &ARMcc, SelectionDAG &DAG,
03257                              SDLoc dl) const {
03258   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
03259     unsigned C = RHSC->getZExtValue();
03260     if (!isLegalICmpImmediate(C)) {
03261       // Constant does not fit, try adjusting it by one?
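      // For example (ARM mode): 257 is not a valid cmp immediate, but
      // "x <s 257" is equivalent to "x <=s 256", and 256 (0x100) is a valid
      // rotated 8-bit immediate.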
03262       switch (CC) {
03263       default: break;
03264       case ISD::SETLT:
03265       case ISD::SETGE:
03266         if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
03267           CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
03268           RHS = DAG.getConstant(C-1, MVT::i32);
03269         }
03270         break;
03271       case ISD::SETULT:
03272       case ISD::SETUGE:
03273         if (C != 0 && isLegalICmpImmediate(C-1)) {
03274           CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
03275           RHS = DAG.getConstant(C-1, MVT::i32);
03276         }
03277         break;
03278       case ISD::SETLE:
03279       case ISD::SETGT:
03280         if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
03281           CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
03282           RHS = DAG.getConstant(C+1, MVT::i32);
03283         }
03284         break;
03285       case ISD::SETULE:
03286       case ISD::SETUGT:
03287         if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
03288           CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
03289           RHS = DAG.getConstant(C+1, MVT::i32);
03290         }
03291         break;
03292       }
03293     }
03294   }
03295 
03296   ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
03297   ARMISD::NodeType CompareType;
03298   switch (CondCode) {
03299   default:
03300     CompareType = ARMISD::CMP;
03301     break;
03302   case ARMCC::EQ:
03303   case ARMCC::NE:
03304     // Uses only Z Flag
03305     CompareType = ARMISD::CMPZ;
03306     break;
03307   }
03308   ARMcc = DAG.getConstant(CondCode, MVT::i32);
03309   return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
03310 }
03311 
03312 /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
03313 SDValue
03314 ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
03315                              SDLoc dl) const {
03316   assert(!Subtarget->isFPOnlySP() || RHS.getValueType() != MVT::f64);
03317   SDValue Cmp;
03318   if (!isFloatingPointZero(RHS))
03319     Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS);
03320   else
03321     Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS);
03322   return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
03323 }
03324 
03325 /// duplicateCmp - Glue values can have only one use, so this function
03326 /// duplicates a comparison node.
03327 SDValue
03328 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
03329   unsigned Opc = Cmp.getOpcode();
03330   SDLoc DL(Cmp);
03331   if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
03332     return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
03333 
03334   assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
03335   Cmp = Cmp.getOperand(0);
03336   Opc = Cmp.getOpcode();
03337   if (Opc == ARMISD::CMPFP)
03338     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
03339   else {
03340     assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
03341     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0));
03342   }
03343   return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
03344 }
03345 
03346 std::pair<SDValue, SDValue>
03347 ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
03348                                  SDValue &ARMcc) const {
03349   assert(Op.getValueType() == MVT::i32 &&  "Unsupported value type");
03350 
03351   SDValue Value, OverflowCmp;
03352   SDValue LHS = Op.getOperand(0);
03353   SDValue RHS = Op.getOperand(1);
03354 
03355 
03356   // FIXME: We are currently always generating CMPs because we don't support
03357   // generating CMN through the backend. This is not as good as the natural
03358   // CMP case because it causes a register dependency and cannot be folded
03359   // later.
03360 
03361   switch (Op.getOpcode()) {
03362   default:
03363     llvm_unreachable("Unknown overflow instruction!");
03364   case ISD::SADDO:
03365     ARMcc = DAG.getConstant(ARMCC::VC, MVT::i32);
03366     Value = DAG.getNode(ISD::ADD, SDLoc(Op), Op.getValueType(), LHS, RHS);
03367     OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, Value, LHS);
03368     break;
03369   case ISD::UADDO:
03370     ARMcc = DAG.getConstant(ARMCC::HS, MVT::i32);
03371     Value = DAG.getNode(ISD::ADD, SDLoc(Op), Op.getValueType(), LHS, RHS);
03372     OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, Value, LHS);
03373     break;
03374   case ISD::SSUBO:
03375     ARMcc = DAG.getConstant(ARMCC::VC, MVT::i32);
03376     Value = DAG.getNode(ISD::SUB, SDLoc(Op), Op.getValueType(), LHS, RHS);
03377     OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, LHS, RHS);
03378     break;
03379   case ISD::USUBO:
03380     ARMcc = DAG.getConstant(ARMCC::HS, MVT::i32);
03381     Value = DAG.getNode(ISD::SUB, SDLoc(Op), Op.getValueType(), LHS, RHS);
03382     OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, LHS, RHS);
03383     break;
03384   } // switch (...)
03385 
03386   return std::make_pair(Value, OverflowCmp);
03387 }
03388 
03389 
03390 SDValue
03391 ARMTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
03392   // Let legalize expand this if it isn't a legal type yet.
03393   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
03394     return SDValue();
03395 
03396   SDValue Value, OverflowCmp;
03397   SDValue ARMcc;
03398   std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
03399   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03400   // We use 0 and 1 as false and true values.
03401   SDValue TVal = DAG.getConstant(1, MVT::i32);
03402   SDValue FVal = DAG.getConstant(0, MVT::i32);
03403   EVT VT = Op.getValueType();
03404 
03405   SDValue Overflow = DAG.getNode(ARMISD::CMOV, SDLoc(Op), VT, TVal, FVal,
03406                                  ARMcc, CCR, OverflowCmp);
03407 
03408   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
03409   return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), VTs, Value, Overflow);
03410 }
03411 
03412 
03413 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
03414   SDValue Cond = Op.getOperand(0);
03415   SDValue SelectTrue = Op.getOperand(1);
03416   SDValue SelectFalse = Op.getOperand(2);
03417   SDLoc dl(Op);
03418   unsigned Opc = Cond.getOpcode();
03419 
03420   if (Cond.getResNo() == 1 &&
03421       (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
03422        Opc == ISD::USUBO)) {
03423     if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
03424       return SDValue();
03425 
03426     SDValue Value, OverflowCmp;
03427     SDValue ARMcc;
03428     std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
03429     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03430     EVT VT = Op.getValueType();
03431 
03432     return getCMOV(SDLoc(Op), VT, SelectTrue, SelectFalse, ARMcc, CCR,
03433                    OverflowCmp, DAG);
03434   }
03435 
03436   // Convert:
03437   //
03438   //   (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
03439   //   (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
03440   //
03441   if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
03442     const ConstantSDNode *CMOVTrue =
03443       dyn_cast<ConstantSDNode>(Cond.getOperand(0));
03444     const ConstantSDNode *CMOVFalse =
03445       dyn_cast<ConstantSDNode>(Cond.getOperand(1));
03446 
03447     if (CMOVTrue && CMOVFalse) {
03448       unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
03449       unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
03450 
03451       SDValue True;
03452       SDValue False;
03453       if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
03454         True = SelectTrue;
03455         False = SelectFalse;
03456       } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
03457         True = SelectFalse;
03458         False = SelectTrue;
03459       }
03460 
03461       if (True.getNode() && False.getNode()) {
03462         EVT VT = Op.getValueType();
03463         SDValue ARMcc = Cond.getOperand(2);
03464         SDValue CCR = Cond.getOperand(3);
03465         SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
03466         assert(True.getValueType() == VT);
03467         return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
03468       }
03469     }
03470   }
03471 
03472   // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
03473   // undefined bits before doing a full-word comparison with zero.
03474   Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond,
03475                      DAG.getConstant(1, Cond.getValueType()));
03476 
03477   return DAG.getSelectCC(dl, Cond,
03478                          DAG.getConstant(0, Cond.getValueType()),
03479                          SelectTrue, SelectFalse, ISD::SETNE);
03480 }
03481 
03482 static ISD::CondCode getInverseCCForVSEL(ISD::CondCode CC) {
03483   if (CC == ISD::SETNE)
03484     return ISD::SETEQ;
03485   return ISD::getSetCCInverse(CC, true);
03486 }
03487 
03488 static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
03489                                  bool &swpCmpOps, bool &swpVselOps) {
03490   // Start by selecting the GE condition code for opcodes that return true for
03491   // 'equality'
03492   if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE ||
03493       CC == ISD::SETULE)
03494     CondCode = ARMCC::GE;
03495 
03496   // and GT for opcodes that return false for 'equality'.
03497   else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT ||
03498            CC == ISD::SETULT)
03499     CondCode = ARMCC::GT;
03500 
03501   // Since we are constrained to GE/GT, if the opcode contains 'less', we need
03502   // to swap the compare operands.
03503   if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT ||
03504       CC == ISD::SETULT)
03505     swpCmpOps = true;
03506 
03507   // Both GT and GE are ordered comparisons, and return false for 'unordered'.
03508   // If we have an unordered opcode, we need to swap the operands to the VSEL
03509   // instruction (effectively negating the condition).
03510   //
03511   // This also has the effect of swapping which one of 'less' or 'greater'
03512   // returns true, so we also swap the compare operands. It also switches
03513   // whether we return true for 'equality', so we compensate by picking the
03514   // opposite condition code to our original choice.
03515   if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE ||
03516       CC == ISD::SETUGT) {
03517     swpCmpOps = !swpCmpOps;
03518     swpVselOps = !swpVselOps;
03519     CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT;
03520   }
03521 
03522   // 'ordered' is 'anything but unordered', so use the VS condition code and
03523   // swap the VSEL operands.
03524   if (CC == ISD::SETO) {
03525     CondCode = ARMCC::VS;
03526     swpVselOps = true;
03527   }
03528 
03529   // 'unordered or not equal' is 'anything but equal', so use the EQ condition
03530   // code and swap the VSEL operands.
03531   if (CC == ISD::SETUNE) {
03532     CondCode = ARMCC::EQ;
03533     swpVselOps = true;
03534   }
03535 }
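// Worked example: for CC == ISD::SETULT the code above first picks ARMCC::GT
// (ULT is false on equality) and requests a compare-operand swap because the
// opcode contains 'less'.  ULT is also an unordered comparison, so the final
// block then undoes the compare swap, requests a VSEL-operand swap instead,
// and flips GT to GE.  The net result, CondCode == ARMCC::GE with
// swpVselOps == true, makes the caller's VSEL pick its "false" operand when
// GE holds, i.e. its "true" operand exactly when the compare is less-than or
// unordered, which is ISD::SETULT.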
03536 
03537 SDValue ARMTargetLowering::getCMOV(SDLoc dl, EVT VT, SDValue FalseVal,
03538                                    SDValue TrueVal, SDValue ARMcc, SDValue CCR,
03539                                    SDValue Cmp, SelectionDAG &DAG) const {
03540   if (Subtarget->isFPOnlySP() && VT == MVT::f64) {
03541     FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl,
03542                            DAG.getVTList(MVT::i32, MVT::i32), FalseVal);
03543     TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl,
03544                           DAG.getVTList(MVT::i32, MVT::i32), TrueVal);
03545 
03546     SDValue TrueLow = TrueVal.getValue(0);
03547     SDValue TrueHigh = TrueVal.getValue(1);
03548     SDValue FalseLow = FalseVal.getValue(0);
03549     SDValue FalseHigh = FalseVal.getValue(1);
03550 
03551     SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow,
03552                               ARMcc, CCR, Cmp);
03553     SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh,
03554                                ARMcc, CCR, duplicateCmp(Cmp, DAG));
03555 
03556     return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High);
03557   } else {
03558     return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,
03559                        Cmp);
03560   }
03561 }
03562 
03563 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
03564   EVT VT = Op.getValueType();
03565   SDValue LHS = Op.getOperand(0);
03566   SDValue RHS = Op.getOperand(1);
03567   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
03568   SDValue TrueVal = Op.getOperand(2);
03569   SDValue FalseVal = Op.getOperand(3);
03570   SDLoc dl(Op);
03571 
03572   if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
03573     DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
03574                                                     dl);
03575 
03576     // If softenSetCCOperands only returned one value, we should compare it to
03577     // zero.
03578     if (!RHS.getNode()) {
03579       RHS = DAG.getConstant(0, LHS.getValueType());
03580       CC = ISD::SETNE;
03581     }
03582   }
03583 
03584   if (LHS.getValueType() == MVT::i32) {
03585     // Try to generate VSEL on ARMv8.
03586     // The VSEL instruction can't use all the usual ARM condition
03587     // codes: it only has two bits to select the condition code, so it's
03588     // constrained to use only GE, GT, VS and EQ.
03589     //
03590     // To implement all the various ISD::SETXXX opcodes, we sometimes need to
03591     // swap the operands of the previous compare instruction (effectively
03592     // inverting the compare condition, swapping 'less' and 'greater') and
03593     // sometimes need to swap the operands to the VSEL (which inverts the
03594     // condition in the sense of firing whenever the previous condition didn't)
03595     if (getSubtarget()->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
03596                                       TrueVal.getValueType() == MVT::f64)) {
03597       ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
03598       if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
03599           CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
03600         CC = getInverseCCForVSEL(CC);
03601         std::swap(TrueVal, FalseVal);
03602       }
03603     }
03604 
03605     SDValue ARMcc;
03606     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03607     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
03608     return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
03609   }
03610 
03611   ARMCC::CondCodes CondCode, CondCode2;
03612   FPCCToARMCC(CC, CondCode, CondCode2);
03613 
03614   // Try to generate VSEL on ARMv8.
03615   if (getSubtarget()->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
03616                                     TrueVal.getValueType() == MVT::f64)) {
03617     // We can select VMAXNM/VMINNM from a compare followed by a select with the
03618     // same operands, as follows:
03619     //   c = fcmp [ogt, olt, ugt, ult] a, b
03620     //   select c, a, b
03621     // We only do this in unsafe-fp-math, because signed zeros and NaNs are
03622     // handled differently than the original code sequence.
03623     if (getTargetMachine().Options.UnsafeFPMath && LHS == TrueVal &&
03624         RHS == FalseVal) {
03625       if (CC == ISD::SETOGT || CC == ISD::SETUGT)
03626         return DAG.getNode(ARMISD::VMAXNM, dl, VT, TrueVal, FalseVal);
03627       if (CC == ISD::SETOLT || CC == ISD::SETULT)
03628         return DAG.getNode(ARMISD::VMINNM, dl, VT, TrueVal, FalseVal);
03629     }
03630 
03631     bool swpCmpOps = false;
03632     bool swpVselOps = false;
03633     checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps);
03634 
03635     if (CondCode == ARMCC::GT || CondCode == ARMCC::GE ||
03636         CondCode == ARMCC::VS || CondCode == ARMCC::EQ) {
03637       if (swpCmpOps)
03638         std::swap(LHS, RHS);
03639       if (swpVselOps)
03640         std::swap(TrueVal, FalseVal);
03641     }
03642   }
03643 
03644   SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
03645   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
03646   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03647   SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
03648   if (CondCode2 != ARMCC::AL) {
03649     SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32);
03650     // FIXME: Needs another CMP because flag can have but one use.
03651     SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
03652     Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
03653   }
03654   return Result;
03655 }
03656 
03657 /// canChangeToInt - Given the fp compare operand, return true if it is suitable
03658 /// to morph to an integer compare sequence.
03659 static bool canChangeToInt(SDValue Op, bool &SeenZero,
03660                            const ARMSubtarget *Subtarget) {
03661   SDNode *N = Op.getNode();
03662   if (!N->hasOneUse())
03663     // Otherwise it requires moving the value from fp to integer registers.
03664     return false;
03665   if (!N->getNumValues())
03666     return false;
03667   EVT VT = Op.getValueType();
03668   if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
03669     // f32 case is generally profitable. f64 case only makes sense when vcmpe +
03670     // vmrs are very slow, e.g. cortex-a8.
03671     return false;
03672 
03673   if (isFloatingPointZero(Op)) {
03674     SeenZero = true;
03675     return true;
03676   }
03677   return ISD::isNormalLoad(N);
03678 }
03679 
03680 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
03681   if (isFloatingPointZero(Op))
03682     return DAG.getConstant(0, MVT::i32);
03683 
03684   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
03685     return DAG.getLoad(MVT::i32, SDLoc(Op),
03686                        Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
03687                        Ld->isVolatile(), Ld->isNonTemporal(),
03688                        Ld->isInvariant(), Ld->getAlignment());
03689 
03690   llvm_unreachable("Unknown VFP cmp argument!");
03691 }
03692 
03693 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
03694                            SDValue &RetVal1, SDValue &RetVal2) {
03695   if (isFloatingPointZero(Op)) {
03696     RetVal1 = DAG.getConstant(0, MVT::i32);
03697     RetVal2 = DAG.getConstant(0, MVT::i32);
03698     return;
03699   }
03700 
03701   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
03702     SDValue Ptr = Ld->getBasePtr();
03703     RetVal1 = DAG.getLoad(MVT::i32, SDLoc(Op),
03704                           Ld->getChain(), Ptr,
03705                           Ld->getPointerInfo(),
03706                           Ld->isVolatile(), Ld->isNonTemporal(),
03707                           Ld->isInvariant(), Ld->getAlignment());
03708 
03709     EVT PtrType = Ptr.getValueType();
03710     unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
03711     SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(Op),
03712                                  PtrType, Ptr, DAG.getConstant(4, PtrType));
03713     RetVal2 = DAG.getLoad(MVT::i32, SDLoc(Op),
03714                           Ld->getChain(), NewPtr,
03715                           Ld->getPointerInfo().getWithOffset(4),
03716                           Ld->isVolatile(), Ld->isNonTemporal(),
03717                           Ld->isInvariant(), NewAlign);
03718     return;
03719   }
03720 
03721   llvm_unreachable("Unknown VFP cmp argument!");
03722 }
03723 
03724 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
03725 /// f32 and even f64 comparisons to integer ones.
03726 SDValue
03727 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
03728   SDValue Chain = Op.getOperand(0);
03729   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
03730   SDValue LHS = Op.getOperand(2);
03731   SDValue RHS = Op.getOperand(3);
03732   SDValue Dest = Op.getOperand(4);
03733   SDLoc dl(Op);
03734 
03735   bool LHSSeenZero = false;
03736   bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
03737   bool RHSSeenZero = false;
03738   bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget);
03739   if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
03740     // If unsafe fp math optimization is enabled and there are no other uses of
03741     // the CMP operands, and the condition code is EQ or NE, we can optimize it
03742     // to an integer comparison.
03743     if (CC == ISD::SETOEQ)
03744       CC = ISD::SETEQ;
03745     else if (CC == ISD::SETUNE)
03746       CC = ISD::SETNE;
03747 
03748     SDValue Mask = DAG.getConstant(0x7fffffff, MVT::i32);
03749     SDValue ARMcc;
03750     if (LHS.getValueType() == MVT::f32) {
03751       LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
03752                         bitcastf32Toi32(LHS, DAG), Mask);
03753       RHS = DAG.getNode(ISD::AND, dl, MVT::i32,
03754                         bitcastf32Toi32(RHS, DAG), Mask);
03755       SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
03756       SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03757       return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
03758                          Chain, Dest, ARMcc, CCR, Cmp);
03759     }
03760 
03761     SDValue LHS1, LHS2;
03762     SDValue RHS1, RHS2;
03763     expandf64Toi32(LHS, DAG, LHS1, LHS2);
03764     expandf64Toi32(RHS, DAG, RHS1, RHS2);
03765     LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
03766     RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
03767     ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
03768     ARMcc = DAG.getConstant(CondCode, MVT::i32);
03769     SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
03770     SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
03771     return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
03772   }
03773 
03774   return SDValue();
03775 }
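// For example, under unsafe-fp-math the routine above turns
//   br (setoeq f32 %x, 0.0), %dest
// (with %x a single-use load) into an integer test: the f32 bits are
// reloaded as an i32, the sign bit is masked off with 0x7fffffff so that
// both +0.0 and -0.0 become 0, and an ordinary integer compare and branch
// is emitted instead of a vcmpe + vmrs sequence.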
03776 
03777 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
03778   SDValue Chain = Op.getOperand(0);
03779   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
03780   SDValue LHS = Op.getOperand(2);
03781   SDValue RHS = Op.getOperand(3);
03782   SDValue Dest = Op.getOperand(4);
03783   SDLoc dl(Op);
03784 
03785   if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
03786     DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
03787                                                     dl);
03788 
03789     // If softenSetCCOperands only returned one value, we should compare it to
03790     // zero.
03791     if (!RHS.getNode()) {
03792       RHS = DAG.getConstant(0, LHS.getValueType());
03793       CC = ISD::SETNE;
03794     }
03795   }
03796 
03797   if (LHS.getValueType() == MVT::i32) {
03798     SDValue ARMcc;
03799     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
03800     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03801     return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
03802                        Chain, Dest, ARMcc, CCR, Cmp);
03803   }
03804 
03805   assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
03806 
03807   if (getTargetMachine().Options.UnsafeFPMath &&
03808       (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
03809        CC == ISD::SETNE || CC == ISD::SETUNE)) {
03810     SDValue Result = OptimizeVFPBrcond(Op, DAG);
03811     if (Result.getNode())
03812       return Result;
03813   }
03814 
03815   ARMCC::CondCodes CondCode, CondCode2;
03816   FPCCToARMCC(CC, CondCode, CondCode2);
03817 
03818   SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
03819   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
03820   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03821   SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
03822   SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
03823   SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
03824   if (CondCode2 != ARMCC::AL) {
03825     ARMcc = DAG.getConstant(CondCode2, MVT::i32);
03826     SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
03827     Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
03828   }
03829   return Res;
03830 }
03831 
03832 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
03833   SDValue Chain = Op.getOperand(0);
03834   SDValue Table = Op.getOperand(1);
03835   SDValue Index = Op.getOperand(2);
03836   SDLoc dl(Op);
03837 
03838   EVT PTy = getPointerTy();
03839   JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
03840   ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
03841   SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
03842   SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
03843   Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId);
03844   Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy));
03845   SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
03846   if (Subtarget->isThumb2()) {
03847     // Thumb2 uses a two-level jump. That is, it jumps into the jump table
03848     // which does another jump to the destination. This also makes it easier
03849     // to translate it to TBB / TBH later.
03850     // FIXME: This might not work if the function is extremely large.
03851     return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
03852                        Addr, Op.getOperand(2), JTI, UId);
03853   }
03854   if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
03855     Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
03856                        MachinePointerInfo::getJumpTable(),
03857                        false, false, false, 0);
03858     Chain = Addr.getValue(1);
03859     Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
03860     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
03861   } else {
03862     Addr = DAG.getLoad(PTy, dl, Chain, Addr,
03863                        MachinePointerInfo::getJumpTable(),
03864                        false, false, false, 0);
03865     Chain = Addr.getValue(1);
03866     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
03867   }
03868 }
03869 
03870 static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
03871   EVT VT = Op.getValueType();
03872   SDLoc dl(Op);
03873 
03874   if (Op.getValueType().getVectorElementType() == MVT::i32) {
03875     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
03876       return Op;
03877     return DAG.UnrollVectorOp(Op.getNode());
03878   }
03879 
03880   assert(Op.getOperand(0).getValueType() == MVT::v4f32 &&
03881          "Invalid type for custom lowering!");
03882   if (VT != MVT::v4i16)
03883     return DAG.UnrollVectorOp(Op.getNode());
03884 
03885   Op = DAG.getNode(Op.getOpcode(), dl, MVT::v4i32, Op.getOperand(0));
03886   return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
03887 }
03888 
03889 SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
03890   EVT VT = Op.getValueType();
03891   if (VT.isVector())
03892     return LowerVectorFP_TO_INT(Op, DAG);
03893 
03894   if (Subtarget->isFPOnlySP() && Op.getOperand(0).getValueType() == MVT::f64) {
03895     RTLIB::Libcall LC;
03896     if (Op.getOpcode() == ISD::FP_TO_SINT)
03897       LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(),
03898                               Op.getValueType());
03899     else
03900       LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(),
03901                               Op.getValueType());
03902     return makeLibCall(DAG, LC, Op.getValueType(), &Op.getOperand(0), 1,
03903                        /*isSigned*/ false, SDLoc(Op)).first;
03904   }
03905 
03906   SDLoc dl(Op);
03907   unsigned Opc;
03908 
03909   switch (Op.getOpcode()) {
03910   default: llvm_unreachable("Invalid opcode!");
03911   case ISD::FP_TO_SINT:
03912     Opc = ARMISD::FTOSI;
03913     break;
03914   case ISD::FP_TO_UINT:
03915     Opc = ARMISD::FTOUI;
03916     break;
03917   }
03918   Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0));
03919   return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
03920 }
03921 
03922 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
03923   EVT VT = Op.getValueType();
03924   SDLoc dl(Op);
03925 
03926   if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
03927     if (VT.getVectorElementType() == MVT::f32)
03928       return Op;
03929     return DAG.UnrollVectorOp(Op.getNode());
03930   }
03931 
03932   assert(Op.getOperand(0).getValueType() == MVT::v4i16 &&
03933          "Invalid type for custom lowering!");
03934   if (VT != MVT::v4f32)
03935     return DAG.UnrollVectorOp(Op.getNode());
03936 
03937   unsigned CastOpc;
03938   unsigned Opc;
03939   switch (Op.getOpcode()) {
03940   default: llvm_unreachable("Invalid opcode!");
03941   case ISD::SINT_TO_FP:
03942     CastOpc = ISD::SIGN_EXTEND;
03943     Opc = ISD::SINT_TO_FP;
03944     break;
03945   case ISD::UINT_TO_FP:
03946     CastOpc = ISD::ZERO_EXTEND;
03947     Opc = ISD::UINT_TO_FP;
03948     break;
03949   }
03950 
03951   Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0));
03952   return DAG.getNode(Opc, dl, VT, Op);
03953 }
03954 
03955 SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const {
03956   EVT VT = Op.getValueType();
03957   if (VT.isVector())
03958     return LowerVectorINT_TO_FP(Op, DAG);
03959 
03960   if (Subtarget->isFPOnlySP() && Op.getValueType() == MVT::f64) {
03961     RTLIB::Libcall LC;
03962     if (Op.getOpcode() == ISD::SINT_TO_FP)
03963       LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(),
03964                               Op.getValueType());
03965     else
03966       LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(),
03967                               Op.getValueType());
03968     return makeLibCall(DAG, LC, Op.getValueType(), &Op.getOperand(0), 1,
03969                        /*isSigned*/ false, SDLoc(Op)).first;
03970   }
03971 
03972   SDLoc dl(Op);
03973   unsigned Opc;
03974 
03975   switch (Op.getOpcode()) {
03976   default: llvm_unreachable("Invalid opcode!");
03977   case ISD::SINT_TO_FP:
03978     Opc = ARMISD::SITOF;
03979     break;
03980   case ISD::UINT_TO_FP:
03981     Opc = ARMISD::UITOF;
03982     break;
03983   }
03984 
03985   Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0));
03986   return DAG.getNode(Opc, dl, VT, Op);
03987 }
03988 
03989 SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
03990   // Implement fcopysign with a fabs and a conditional fneg.
03991   SDValue Tmp0 = Op.getOperand(0);
03992   SDValue Tmp1 = Op.getOperand(1);
03993   SDLoc dl(Op);
03994   EVT VT = Op.getValueType();
03995   EVT SrcVT = Tmp1.getValueType();
03996   bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
03997     Tmp0.getOpcode() == ARMISD::VMOVDRR;
03998   bool UseNEON = !InGPR && Subtarget->hasNEON();
03999 
04000   if (UseNEON) {
04001     // Use VBSL to copy the sign bit.
04002     unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80);
04003     SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
04004                                DAG.getTargetConstant(EncodedVal, MVT::i32));
04005     EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
04006     if (VT == MVT::f64)
04007       Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT,
04008                          DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
04009                          DAG.getConstant(32, MVT::i32));
04010     else /*if (VT == MVT::f32)*/
04011       Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
04012     if (SrcVT == MVT::f32) {
04013       Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
04014       if (VT == MVT::f64)
04015         Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT,
04016                            DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
04017                            DAG.getConstant(32, MVT::i32));
04018     } else if (VT == MVT::f32)
04019       Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64,
04020                          DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
04021                          DAG.getConstant(32, MVT::i32));
04022     Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
04023     Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);
04024 
04025     SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff),
04026                                             MVT::i32);
04027     AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
04028     SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
04029                                   DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));
04030 
04031     SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
04032                               DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
04033                               DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
04034     if (VT == MVT::f32) {
04035       Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
04036       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
04037                         DAG.getConstant(0, MVT::i32));
04038     } else {
04039       Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
04040     }
04041 
04042     return Res;
04043   }
04044 
04045   // Bitcast operand 1 to i32.
04046   if (SrcVT == MVT::f64)
04047     Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
04048                        Tmp1).getValue(1);
04049   Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);
04050 
04051   // Or in the signbit with integer operations.
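  // For example, copysign(1.0f, -2.0f) clears the sign bit of 1.0f
  // (0x3f800000 & 0x7fffffff) and ORs in the sign bit of -2.0f
  // (0xc0000000 & 0x80000000), giving 0xbf800000, i.e. -1.0f.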
04052   SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32);
04053   SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32);
04054   Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
04055   if (VT == MVT::f32) {
04056     Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
04057                        DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
04058     return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
04059                        DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
04060   }
04061 
04062   // f64: Or the high part with signbit and then combine two parts.
04063   Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
04064                      Tmp0);
04065   SDValue Lo = Tmp0.getValue(0);
04066   SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
04067   Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
04068   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
04069 }
04070 
04071 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
04072   MachineFunction &MF = DAG.getMachineFunction();
04073   MachineFrameInfo *MFI = MF.getFrameInfo();
04074   MFI->setReturnAddressIsTaken(true);
04075 
04076   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
04077     return SDValue();
04078 
04079   EVT VT = Op.getValueType();
04080   SDLoc dl(Op);
04081   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
04082   if (Depth) {
04083     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
04084     SDValue Offset = DAG.getConstant(4, MVT::i32);
04085     return DAG.getLoad(VT, dl, DAG.getEntryNode(),
04086                        DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
04087                        MachinePointerInfo(), false, false, false, 0);
04088   }
04089 
04090   // Return LR, which contains the return address. Mark it an implicit live-in.
04091   unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
04092   return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
04093 }
04094 
04095 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
04096   const ARMBaseRegisterInfo &ARI =
04097     *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
04098   MachineFunction &MF = DAG.getMachineFunction();
04099   MachineFrameInfo *MFI = MF.getFrameInfo();
04100   MFI->setFrameAddressIsTaken(true);
04101 
04102   EVT VT = Op.getValueType();
04103   SDLoc dl(Op);  // FIXME probably not meaningful
04104   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
04105   unsigned FrameReg = ARI.getFrameRegister(MF);
04106   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
04107   while (Depth--)
04108     FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
04109                             MachinePointerInfo(),
04110                             false, false, false, 0);
04111   return FrameAddr;
04112 }
04113 
04114 // FIXME? Maybe this could be a TableGen attribute on some registers and
04115 // this table could be generated automatically from RegInfo.
04116 unsigned ARMTargetLowering::getRegisterByName(const char* RegName,
04117                                               EVT VT) const {
04118   unsigned Reg = StringSwitch<unsigned>(RegName)
04119                        .Case("sp", ARM::SP)
04120                        .Default(0);
04121   if (Reg)
04122     return Reg;
04123   report_fatal_error("Invalid register name global variable");
04124 }
04125 
04126 /// ExpandBITCAST - If the target supports VFP, this function is called to
04127 /// expand a bit convert where either the source or destination type is i64 to
04128 /// use a VMOVDRR or VMOVRRD node.  This should not be done when the non-i64
04129 /// operand type is illegal (e.g., v2f32 for a target that doesn't support
04130 /// vectors), since the legalizer won't know what to do with that.
04131 static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
04132   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
04133   SDLoc dl(N);
04134   SDValue Op = N->getOperand(0);
04135 
04136   // This function is only supposed to be called for i64 types, either as the
04137   // source or destination of the bit convert.
04138   EVT SrcVT = Op.getValueType();
04139   EVT DstVT = N->getValueType(0);
04140   assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
04141          "ExpandBITCAST called for non-i64 type");
04142 
04143   // Turn i64->f64 into VMOVDRR.
04144   if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
04145     SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
04146                              DAG.getConstant(0, MVT::i32));
04147     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
04148                              DAG.getConstant(1, MVT::i32));
04149     return DAG.getNode(ISD::BITCAST, dl, DstVT,
04150                        DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
04151   }
04152 
04153   // Turn f64->i64 into VMOVRRD.
04154   if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
04155     SDValue Cvt;
04156     if (TLI.isBigEndian() && SrcVT.isVector() &&
04157         SrcVT.getVectorNumElements() > 1)
04158       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
04159                         DAG.getVTList(MVT::i32, MVT::i32),
04160                         DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
04161     else
04162       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
04163                         DAG.getVTList(MVT::i32, MVT::i32), Op);
04164     // Merge the pieces into a single i64 value.
04165     return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
04166   }
04167 
04168   return SDValue();
04169 }
04170 
04171 /// getZeroVector - Returns a vector of specified type with all zero elements.
04172 /// Zero vectors are used to represent vector negation and in those cases
04173 /// will be implemented with the NEON VNEG instruction.  However, VNEG does
04174 /// not support i64 elements, so sometimes the zero vectors will need to be
04175 /// explicitly constructed.  Regardless, use a canonical VMOV to create the
04176 /// zero vector.
04177 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, SDLoc dl) {
04178   assert(VT.isVector() && "Expected a vector type");
04179   // The canonical modified immediate encoding of a zero vector is....0!
04180   SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32);
04181   EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
04182   SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
04183   return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
04184 }
04185 
04186 /// LowerShiftRightParts - Lower SRA_PARTS, which returns two
04187 /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
04188 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
04189                                                 SelectionDAG &DAG) const {
04190   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
04191   EVT VT = Op.getValueType();
04192   unsigned VTBits = VT.getSizeInBits();
04193   SDLoc dl(Op);
04194   SDValue ShOpLo = Op.getOperand(0);
04195   SDValue ShOpHi = Op.getOperand(1);
04196   SDValue ShAmt  = Op.getOperand(2);
04197   SDValue ARMcc;
04198   unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
04199 
04200   assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
04201 
04202   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
04203                                  DAG.getConstant(VTBits, MVT::i32), ShAmt);
04204   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
04205   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
04206                                    DAG.getConstant(VTBits, MVT::i32));
04207   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
04208   SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
04209   SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
04210 
04211   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
04212   SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
04213                           ARMcc, DAG, dl);
04214   SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
04215   SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc,
04216                            CCR, Cmp);
04217 
04218   SDValue Ops[2] = { Lo, Hi };
04219   return DAG.getMergeValues(Ops, dl);
04220 }
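// Worked example: an SRL_PARTS of the pair (Lo, Hi) by 8 computes
// ExtraShAmt = 8 - 32 = -24, so the CMOV keeps the "false" value
// (Lo >> 8) | (Hi << 24) for the low word and Hi >> 8 for the high word.
// With Lo = 0x11223344 and Hi = 0xaabbccdd that yields 0xdd112233 and
// 0x00aabbcc, i.e. 0xaabbccdd11223344 >> 8.  For shift amounts of 32 or
// more, ExtraShAmt is non-negative and the CMOV selects Hi >> (ShAmt - 32)
// for the low word instead.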
04221 
04222 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
04223 /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
04224 SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
04225                                                SelectionDAG &DAG) const {
04226   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
04227   EVT VT = Op.getValueType();
04228   unsigned VTBits = VT.getSizeInBits();
04229   SDLoc dl(Op);
04230   SDValue ShOpLo = Op.getOperand(0);
04231   SDValue ShOpHi = Op.getOperand(1);
04232   SDValue ShAmt  = Op.getOperand(2);
04233   SDValue ARMcc;
04234 
04235   assert(Op.getOpcode() == ISD::SHL_PARTS);
04236   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
04237                                  DAG.getConstant(VTBits, MVT::i32), ShAmt);
04238   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
04239   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
04240                                    DAG.getConstant(VTBits, MVT::i32));
04241   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
04242   SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
04243 
04244   SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
04245   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
04246   SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
04247                           ARMcc, DAG, dl);
04248   SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
04249   SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc,
04250                            CCR, Cmp);
04251 
04252   SDValue Ops[2] = { Lo, Hi };
04253   return DAG.getMergeValues(Ops, dl);
04254 }
04255 
04256 SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
04257                                             SelectionDAG &DAG) const {
04258   // The rounding mode is in bits 23:22 of the FPSCR.
04259   // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
04260   // The formula we use to implement this is (((FPSCR + (1 << 22)) >> 22) & 3)
04261   // so that the shift + and get folded into a bitfield extract.
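  // For example, if FPSCR holds RMode = 0b10 (round toward minus infinity),
  // then ((FPSCR + (1 << 22)) >> 22) & 3 adds one to the two-bit field and
  // yields (2 + 1) & 3 = 3, the FLT_ROUNDS value for "toward negative
  // infinity", matching the 2->3 entry of the mapping above.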
04262   SDLoc dl(Op);
04263   SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
04264                               DAG.getConstant(Intrinsic::arm_get_fpscr,
04265                                               MVT::i32));
04266   SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
04267                                   DAG.getConstant(1U << 22, MVT::i32));
04268   SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
04269                               DAG.getConstant(22, MVT::i32));
04270   return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
04271                      DAG.getConstant(3, MVT::i32));
04272 }
04273 
04274 static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
04275                          const ARMSubtarget *ST) {
04276   EVT VT = N->getValueType(0);
04277   SDLoc dl(N);
04278 
04279   if (!ST->hasV6T2Ops())
04280     return SDValue();
04281 
04282   SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0));
04283   return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
04284 }
04285 
04286 /// getCTPOP16BitCounts - Returns a v8i8/v16i8 vector containing the bit-count
04287 /// for each 16-bit element of the operand, repeated.  The basic idea is to
04288 /// leverage vcnt to get the 8-bit counts, gather and add the results.
04289 ///
04290 /// Trace for v4i16:
04291 /// input    = [v0    v1    v2    v3   ] (vi 16-bit element)
04292 /// cast: N0 = [w0 w1 w2 w3 w4 w5 w6 w7] (v0 = [w0 w1], wi 8-bit element)
04293 /// vcnt: N1 = [b0 b1 b2 b3 b4 b5 b6 b7] (bi = bit-count of 8-bit element wi)
04294 /// vrev: N2 = [b1 b0 b3 b2 b5 b4 b7 b6]
04295 ///            [b0 b1 b2 b3 b4 b5 b6 b7]
04296 ///           +[b1 b0 b3 b2 b5 b4 b7 b6]
04297 /// N3=N1+N2 = [k0 k0 k1 k1 k2 k2 k3 k3] (k0 = b0+b1 = bit-count of 16-bit v0,
04298 /// vuzp:    = [k0 k1 k2 k3 k0 k1 k2 k3]  each ki is 8-bits)
04299 static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG) {
04300   EVT VT = N->getValueType(0);
04301   SDLoc DL(N);
04302 
04303   EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
04304   SDValue N0 = DAG.getNode(ISD::BITCAST, DL, VT8Bit, N->getOperand(0));
04305   SDValue N1 = DAG.getNode(ISD::CTPOP, DL, VT8Bit, N0);
04306   SDValue N2 = DAG.getNode(ARMISD::VREV16, DL, VT8Bit, N1);
04307   SDValue N3 = DAG.getNode(ISD::ADD, DL, VT8Bit, N1, N2);
04308   return DAG.getNode(ARMISD::VUZP, DL, VT8Bit, N3, N3);
04309 }
04310 
04311 /// lowerCTPOP16BitElements - Returns a v4i16/v8i16 vector containing the
04312 /// bit-count for each 16-bit element from the operand.  We need slightly
04313 /// different sequencing for v4i16 and v8i16 to stay within NEON's available
04314 /// 64/128-bit registers.
04315 ///
04316 /// Trace for v4i16:
04317 /// input           = [v0    v1    v2    v3    ] (vi 16-bit element)
04318 /// v8i8: BitCounts = [k0 k1 k2 k3 k0 k1 k2 k3 ] (ki is the bit-count of vi)
04319 /// v8i16:Extended  = [k0    k1    k2    k3    k0    k1    k2    k3    ]
04320 /// v4i16:Extracted = [k0    k1    k2    k3    ]
04321 static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) {
04322   EVT VT = N->getValueType(0);
04323   SDLoc DL(N);
04324 
04325   SDValue BitCounts = getCTPOP16BitCounts(N, DAG);
04326   if (VT.is64BitVector()) {
04327     SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, BitCounts);
04328     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Extended,
04329                        DAG.getIntPtrConstant(0));
04330   } else {
04331     SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8,
04332                                     BitCounts, DAG.getIntPtrConstant(0));
04333     return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, Extracted);
04334   }
04335 }
04336 
04337 /// lowerCTPOP32BitElements - Returns a v2i32/v4i32 vector containing the
04338 /// bit-count for each 32-bit element from the operand.  The idea here is
04339 /// to split the vector into 16-bit elements, leverage the 16-bit count
04340 /// routine, and then combine the results.
04341 ///
04342 /// Trace for v2i32 (v4i32 similar with Extracted/Extended exchanged):
04343 /// input    = [v0    v1    ] (vi: 32-bit elements)
04344 /// Bitcast  = [w0 w1 w2 w3 ] (wi: 16-bit elements, v0 = [w0 w1])
04345 /// Counts16 = [k0 k1 k2 k3 ] (ki: 16-bit elements, bit-count of wi)
04346 /// vrev: N0 = [k1 k0 k3 k2 ]
04347 ///            [k0 k1 k2 k3 ]
04348 ///       N1 =+[k1 k0 k3 k2 ]
04349 ///            [k0 k2 k1 k3 ]
04350 ///       N2 =+[k1 k3 k0 k2 ]
04351 ///            [k0    k2    k1    k3    ]
04352 /// Extended =+[k1    k3    k0    k2    ]
04353 ///            [k0    k2    ]
04354 /// Extracted=+[k1    k3    ]
04355 ///
04356 static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG) {
04357   EVT VT = N->getValueType(0);
04358   SDLoc DL(N);
04359 
04360   EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16;
04361 
04362   SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, VT16Bit, N->getOperand(0));
04363   SDValue Counts16 = lowerCTPOP16BitElements(Bitcast.getNode(), DAG);
04364   SDValue N0 = DAG.getNode(ARMISD::VREV32, DL, VT16Bit, Counts16);
04365   SDValue N1 = DAG.getNode(ISD::ADD, DL, VT16Bit, Counts16, N0);
04366   SDValue N2 = DAG.getNode(ARMISD::VUZP, DL, VT16Bit, N1, N1);
04367 
04368   if (VT.is64BitVector()) {
04369     SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, N2);
04370     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Extended,
04371                        DAG.getIntPtrConstant(0));
04372   } else {
04373     SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, N2,
04374                                     DAG.getIntPtrConstant(0));
04375     return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, Extracted);
04376   }
04377 }
04378 
04379 static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
04380                           const ARMSubtarget *ST) {
04381   EVT VT = N->getValueType(0);
04382 
04383   assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
04384   assert((VT == MVT::v2i32 || VT == MVT::v4i32 ||
04385           VT == MVT::v4i16 || VT == MVT::v8i16) &&
04386          "Unexpected type for custom ctpop lowering");
04387 
04388   if (VT.getVectorElementType() == MVT::i32)
04389     return lowerCTPOP32BitElements(N, DAG);
04390   else
04391     return lowerCTPOP16BitElements(N, DAG);
04392 }
04393 
04394 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
04395                           const ARMSubtarget *ST) {
04396   EVT VT = N->getValueType(0);
04397   SDLoc dl(N);
04398 
04399   if (!VT.isVector())
04400     return SDValue();
04401 
04402   // Lower vector shifts on NEON to use VSHL.
04403   assert(ST->hasNEON() && "unexpected vector shift");
04404 
04405   // Left shifts translate directly to the vshiftu intrinsic.
04406   if (N->getOpcode() == ISD::SHL)
04407     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
04408                        DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
04409                        N->getOperand(0), N->getOperand(1));
04410 
04411   assert((N->getOpcode() == ISD::SRA ||
04412           N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
04413 
04414   // NEON uses the same intrinsics for both left and right shifts.  For
04415   // right shifts, the shift amounts are negative, so negate the vector of
04416   // shift amounts.
04417   EVT ShiftVT = N->getOperand(1).getValueType();
04418   SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
04419                                      getZeroVector(ShiftVT, DAG, dl),
04420                                      N->getOperand(1));
04421   Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
04422                              Intrinsic::arm_neon_vshifts :
04423                              Intrinsic::arm_neon_vshiftu);
04424   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
04425                      DAG.getConstant(vshiftInt, MVT::i32),
04426                      N->getOperand(0), NegatedCount);
04427 }
04428 
04429 static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
04430                                 const ARMSubtarget *ST) {
04431   EVT VT = N->getValueType(0);
04432   SDLoc dl(N);
04433 
04434   // We can get here for a node like i32 = ISD::SHL i32, i64
04435   if (VT != MVT::i64)
04436     return SDValue();
04437 
04438   assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
04439          "Unknown shift to lower!");
04440 
04441   // We only lower SRA, SRL of 1 here; all others use generic lowering.
04442   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
04443       cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
04444     return SDValue();
04445 
04446   // If we are in thumb mode, we don't have RRX.
04447   if (ST->isThumb1Only()) return SDValue();
04448 
04449   // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
04450   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
04451                            DAG.getConstant(0, MVT::i32));
04452   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
04453                            DAG.getConstant(1, MVT::i32));
04454 
04455   // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
04456   // captures the bit shifted out in the carry flag.
04457   unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
04458   Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi);
04459 
04460   // The low part is an ARMISD::RRX operand, which shifts the carry in.
04461   Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
04462 
04463   // Merge the pieces into a single i64 value.
04464   return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
04465 }
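// Worked example: a 64-bit logical shift right by one of 0x00000001_00000003
// runs SRL_FLAG on the high word, producing 0x00000000 and leaving the
// shifted-out low bit in the carry flag; the RRX on the low word then rotates
// that carry back in, producing (1 << 31) | (0x00000003 >> 1) = 0x80000001.
// The rebuilt pair 0x00000000_80000001 is the input shifted right by one.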
04466 
04467 static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
04468   SDValue TmpOp0, TmpOp1;
04469   bool Invert = false;
04470   bool Swap = false;
04471   unsigned Opc = 0;
04472 
04473   SDValue Op0 = Op.getOperand(0);
04474   SDValue Op1 = Op.getOperand(1);
04475   SDValue CC = Op.getOperand(2);
04476   EVT VT = Op.getValueType();
04477   ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
04478   SDLoc dl(Op);
04479 
04480   if (Op1.getValueType().isFloatingPoint()) {
04481     switch (SetCCOpcode) {
04482     default: llvm_unreachable("Illegal FP comparison");
04483     case ISD::SETUNE:
04484     case ISD::SETNE:  Invert = true; // Fallthrough
04485     case ISD::SETOEQ:
04486     case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
04487     case ISD::SETOLT:
04488     case ISD::SETLT: Swap = true; // Fallthrough
04489     case ISD::SETOGT:
04490     case ISD::SETGT:  Opc = ARMISD::VCGT; break;
04491     case ISD::SETOLE:
04492     case ISD::SETLE:  Swap = true; // Fallthrough
04493     case ISD::SETOGE:
04494     case ISD::SETGE: Opc = ARMISD::VCGE; break;
04495     case ISD::SETUGE: Swap = true; // Fallthrough
04496     case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
04497     case ISD::SETUGT: Swap = true; // Fallthrough
04498     case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
04499     case ISD::SETUEQ: Invert = true; // Fallthrough
04500     case ISD::SETONE:
04501       // Expand this to (OLT | OGT).
04502       TmpOp0 = Op0;
04503       TmpOp1 = Op1;
04504       Opc = ISD::OR;
04505       Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
04506       Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1);
04507       break;
04508     case ISD::SETUO: Invert = true; // Fallthrough
04509     case ISD::SETO:
04510       // Expand this to (OLT | OGE).
04511       TmpOp0 = Op0;
04512       TmpOp1 = Op1;
04513       Opc = ISD::OR;
04514       Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
04515       Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1);
04516       break;
04517     }
04518   } else {
04519     // Integer comparisons.
04520     switch (SetCCOpcode) {
04521     default: llvm_unreachable("Illegal integer comparison");
04522     case ISD::SETNE:  Invert = true;
04523     case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
04524     case ISD::SETLT:  Swap = true;
04525     case ISD::SETGT:  Opc = ARMISD::VCGT; break;
04526     case ISD::SETLE:  Swap = true;
04527     case ISD::SETGE:  Opc = ARMISD::VCGE; break;
04528     case ISD::SETULT: Swap = true;
04529     case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
04530     case ISD::SETULE: Swap = true;
04531     case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
04532     }
04533 
04534     // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
04535     if (Opc == ARMISD::VCEQ) {
04536 
04537       SDValue AndOp;
04538       if (ISD::isBuildVectorAllZeros(Op1.getNode()))
04539         AndOp = Op0;
04540       else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
04541         AndOp = Op1;
04542 
04543       // Ignore bitconvert.
04544       if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
04545         AndOp = AndOp.getOperand(0);
04546 
04547       if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
04548         Opc = ARMISD::VTST;
04549         Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0));
04550         Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1));
04551         Invert = !Invert;
04552       }
04553     }
04554   }
04555 
04556   if (Swap)
04557     std::swap(Op0, Op1);
04558 
04559   // If one of the operands is a constant vector zero, attempt to fold the
04560   // comparison to a specialized compare-against-zero form.
04561   SDValue SingleOp;
04562   if (ISD::isBuildVectorAllZeros(Op1.getNode()))
04563     SingleOp = Op0;
04564   else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
04565     if (Opc == ARMISD::VCGE)
04566       Opc = ARMISD::VCLEZ;
04567     else if (Opc == ARMISD::VCGT)
04568       Opc = ARMISD::VCLTZ;
04569     SingleOp = Op1;
04570   }
04571 
04572   SDValue Result;
04573   if (SingleOp.getNode()) {
04574     switch (Opc) {
04575     case ARMISD::VCEQ:
04576       Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break;
04577     case ARMISD::VCGE:
04578       Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break;
04579     case ARMISD::VCLEZ:
04580       Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break;
04581     case ARMISD::VCGT:
04582       Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break;
04583     case ARMISD::VCLTZ:
04584       Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break;
04585     default:
04586       Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
04587     }
04588   } else {
04589      Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
04590   }
04591 
04592   if (Invert)
04593     Result = DAG.getNOT(dl, Result, VT);
04594 
04595   return Result;
04596 }
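// For example, a v4f32 "setone a, b" (ordered and not equal) has no single
// NEON compare, so the code above expands it to (VCGT b, a) OR (VCGT a, b),
// which is true exactly when a < b or a > b with both operands ordered.  The
// unordered counterpart "setueq" takes the same expansion and then inverts
// the result via DAG.getNOT.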
04597 
04598 /// isNEONModifiedImm - Check if the specified splat value corresponds to a
04599 /// valid vector constant for a NEON instruction with a "modified immediate"
04600 /// operand (e.g., VMOV).  If so, return the encoded value.
04601 static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
04602                                  unsigned SplatBitSize, SelectionDAG &DAG,
04603                                  EVT &VT, bool is128Bits, NEONModImmType type) {
04604   unsigned OpCmode, Imm;
04605 
04606   // SplatBitSize is set to the smallest size that splats the vector, so a
04607   // zero vector will always have SplatBitSize == 8.  However, NEON modified
04608   // immediate instructions other than VMOV do not support the 8-bit encoding
04609   // of a zero vector, and the default encoding of zero is supposed to be the
04610   // 32-bit version.
04611   if (SplatBits == 0)
04612     SplatBitSize = 32;
04613 
04614   switch (SplatBitSize) {
04615   case 8:
04616     if (type != VMOVModImm)
04617       return SDValue();
04618     // Any 1-byte value is OK.  Op=0, Cmode=1110.
04619     assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
04620     OpCmode = 0xe;
04621     Imm = SplatBits;
04622     VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
04623     break;
04624 
04625   case 16:
04626     // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
04627     VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
04628     if ((SplatBits & ~0xff) == 0) {
04629       // Value = 0x00nn: Op=x, Cmode=100x.
04630       OpCmode = 0x8;
04631       Imm = SplatBits;
04632       break;
04633     }
04634     if ((SplatBits & ~0xff00) == 0) {
04635       // Value = 0xnn00: Op=x, Cmode=101x.
04636       OpCmode = 0xa;
04637       Imm = SplatBits >> 8;
04638       break;
04639     }
04640     return SDValue();
04641 
04642   case 32:
04643     // NEON's 32-bit VMOV supports splat values where:
04644     // * only one byte is nonzero, or
04645     // * the least significant byte is 0xff and the second byte is nonzero, or
04646     // * the least significant 2 bytes are 0xff and the third is nonzero.
04647     VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
04648     if ((SplatBits & ~0xff) == 0) {
04649       // Value = 0x000000nn: Op=x, Cmode=000x.
04650       OpCmode = 0;
04651       Imm = SplatBits;
04652       break;
04653     }
04654     if ((SplatBits & ~0xff00) == 0) {
04655       // Value = 0x0000nn00: Op=x, Cmode=001x.
04656       OpCmode = 0x2;
04657       Imm = SplatBits >> 8;
04658       break;
04659     }
04660     if ((SplatBits & ~0xff0000) == 0) {
04661       // Value = 0x00nn0000: Op=x, Cmode=010x.
04662       OpCmode = 0x4;
04663       Imm = SplatBits >> 16;
04664       break;
04665     }
04666     if ((SplatBits & ~0xff000000) == 0) {
04667       // Value = 0xnn000000: Op=x, Cmode=011x.
04668       OpCmode = 0x6;
04669       Imm = SplatBits >> 24;
04670       break;
04671     }
04672 
04673     // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
04674     if (type == OtherModImm) return SDValue();
04675 
04676     if ((SplatBits & ~0xffff) == 0 &&
04677         ((SplatBits | SplatUndef) & 0xff) == 0xff) {
04678       // Value = 0x0000nnff: Op=x, Cmode=1100.
04679       OpCmode = 0xc;
04680       Imm = SplatBits >> 8;
04681       break;
04682     }
04683 
04684     if ((SplatBits & ~0xffffff) == 0 &&
04685         ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
04686       // Value = 0x00nnffff: Op=x, Cmode=1101.
04687       OpCmode = 0xd;
04688       Imm = SplatBits >> 16;
04689       break;
04690     }
04691 
04692     // Note: there are a few 32-bit splat values (specifically: 00ffff00,
04693     // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
04694     // VMOV.I32.  A (very) minor optimization would be to replicate the value
04695     // and fall through here to test for a valid 64-bit splat.  But, then the
04696     // caller would also need to check and handle the change in size.
04697     return SDValue();
04698 
04699   case 64: {
04700     if (type != VMOVModImm)
04701       return SDValue();
04702     // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
04703     uint64_t BitMask = 0xff;
04704     uint64_t Val = 0;
04705     unsigned ImmMask = 1;
04706     Imm = 0;
04707     for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
04708       if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
04709         Val |= BitMask;
04710         Imm |= ImmMask;
04711       } else if ((SplatBits & BitMask) != 0) {
04712         return SDValue();
04713       }
04714       BitMask <<= 8;
04715       ImmMask <<= 1;
04716     }
04717 
04718     if (DAG.getTargetLoweringInfo().isBigEndian())
04719       // swap higher and lower 32 bit word
04720       Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4);
04721 
04722     // Op=1, Cmode=1110.
04723     OpCmode = 0x1e;
04724     VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
04725     break;
04726   }
04727 
04728   default:
04729     llvm_unreachable("unexpected size for isNEONModifiedImm");
04730   }
04731 
04732   unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm);
04733   return DAG.getTargetConstant(EncodedVal, MVT::i32);
04734 }
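// Worked example: a 64-bit (v4i16) splat of 0x4100 reaches the 16-bit case
// above with SplatBits == 0x4100, matches the "Value = 0xnn00" pattern, and
// is encoded as OpCmode = 0xa with Imm = 0x41, so the returned target
// constant is ARM_AM::createNEONModImm(0xa, 0x41).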
04735 
04736 SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
04737                                            const ARMSubtarget *ST) const {
04738   if (!ST->hasVFP3())
04739     return SDValue();
04740 
04741   bool IsDouble = Op.getValueType() == MVT::f64;
04742   ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op);
04743 
04744   // Use the default (constant pool) lowering for double constants when we have
04745   // an SP-only FPU
04746   if (IsDouble && Subtarget->isFPOnlySP())
04747     return SDValue();
04748 
04749   // Try splatting with a VMOV.f32...
04750   APFloat FPVal = CFP->getValueAPF();
04751   int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal);
04752 
04753   if (ImmVal != -1) {
04754     if (IsDouble || !ST->useNEONForSinglePrecisionFP()) {
04755       // We have code in place to select a valid ConstantFP already, no need to
04756       // do any mangling.
04757       return Op;
04758     }
04759 
04760     // It's a float and we are trying to use NEON operations where
04761     // possible. Lower it to a splat followed by an extract.
04762     SDLoc DL(Op);
04763     SDValue NewVal = DAG.getTargetConstant(ImmVal, MVT::i32);
04764     SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32,
04765                                       NewVal);
04766     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant,
04767                        DAG.getConstant(0, MVT::i32));
04768   }
04769 
04770   // The rest of our options are NEON-only; make sure that's allowed before
04771   // proceeding.
04772   if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP()))
04773     return SDValue();
04774 
04775   EVT VMovVT;
04776   uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();
04777 
04778   // It wouldn't really be worth bothering for doubles except for one very
04779   // important value whose halves do happen to match: 0.0. So make sure we
04780   // don't do anything stupid.
04781   if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
04782     return SDValue();
04783 
04784   // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too).
04785   SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, VMovVT,
04786                                      false, VMOVModImm);
04787   if (NewVal != SDValue()) {
04788     SDLoc DL(Op);
04789     SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT,
04790                                       NewVal);
04791     if (IsDouble)
04792       return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
04793 
04794     // It's a float: cast and extract a vector element.
04795     SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
04796                                        VecConstant);
04797     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
04798                        DAG.getConstant(0, MVT::i32));
04799   }
04800 
04801   // Finally, try a VMVN.i32
04802   NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, VMovVT,
04803                              false, VMVNModImm);
04804   if (NewVal != SDValue()) {
04805     SDLoc DL(Op);
04806     SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal);
04807 
04808     if (IsDouble)
04809       return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
04810 
04811     // It's a float: cast and extract a vector element.
04812     SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
04813                                        VecConstant);
04814     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
04815                        DAG.getConstant(0, MVT::i32));
04816   }
04817 
04818   return SDValue();
04819 }
04820 
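The VMOV.f32 path above relies on ARM_AM::getFP32Imm accepting only the VFPv3/NEON 8-bit floating-point immediate form, i.e. values of the shape (-1)^s * (1 + m/16) * 2^e with m in [0,15] and e in [-3,4]. The brute-force predicate below is a standalone sketch of that rule (isVFPImmediate is a made-up name), not the routine used here.

#include <cmath>

// Standalone sketch: true when F matches the VFPv3/NEON 8-bit FP immediate
// shape (-1)^s * (1 + m/16) * 2^e, with m in [0,15] and e in [-3,4].
static bool isVFPImmediate(float F) {
  if (!std::isfinite(F) || F == 0.0f)
    return false;
  float A = std::fabs(F);
  for (int E = -3; E <= 4; ++E)
    for (int M = 0; M <= 15; ++M)
      if (A == std::ldexp(1.0f + M / 16.0f, E))
        return true;
  return false;
}

// isVFPImmediate(1.0f) and isVFPImmediate(0.5f) hold; isVFPImmediate(0.1f) does
// not, so a 0.1f splat falls through to the VMOV.i32 / VMVN.i32 attempts above.
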
04821 // Check whether a VEXT instruction can handle the shuffle mask when the
04822 // vector sources of the shuffle are the same.
04823 static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
04824   unsigned NumElts = VT.getVectorNumElements();
04825 
04826   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
04827   if (M[0] < 0)
04828     return false;
04829 
04830   Imm = M[0];
04831 
04832   // If this is a VEXT shuffle, the immediate value is the index of the first
04833   // element.  The other shuffle indices must be the successive elements after
04834   // the first one.
04835   unsigned ExpectedElt = Imm;
04836   for (unsigned i = 1; i < NumElts; ++i) {
04837     // Increment the expected index.  If it wraps around, just follow it
04838     // back to index zero and keep going.
04839     ++ExpectedElt;
04840     if (ExpectedElt == NumElts)
04841       ExpectedElt = 0;
04842 
04843     if (M[i] < 0) continue; // ignore UNDEF indices
04844     if (ExpectedElt != static_cast<unsigned>(M[i]))
04845       return false;
04846   }
04847 
04848   return true;
04849 }
04850 
04851 
04852 static bool isVEXTMask(ArrayRef<int> M, EVT VT,
04853                        bool &ReverseVEXT, unsigned &Imm) {
04854   unsigned NumElts = VT.getVectorNumElements();
04855   ReverseVEXT = false;
04856 
04857   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
04858   if (M[0] < 0)
04859     return false;
04860 
04861   Imm = M[0];
04862 
04863   // If this is a VEXT shuffle, the immediate value is the index of the first
04864   // element.  The other shuffle indices must be the successive elements after
04865   // the first one.
04866   unsigned ExpectedElt = Imm;
04867   for (unsigned i = 1; i < NumElts; ++i) {
04868     // Increment the expected index.  If it wraps around, it may still be
04869     // a VEXT but the source vectors must be swapped.
04870     ExpectedElt += 1;
04871     if (ExpectedElt == NumElts * 2) {
04872       ExpectedElt = 0;
04873       ReverseVEXT = true;
04874     }
04875 
04876     if (M[i] < 0) continue; // ignore UNDEF indices
04877     if (ExpectedElt != static_cast<unsigned>(M[i]))
04878       return false;
04879   }
04880 
04881   // Adjust the index value if the source operands will be swapped.
04882   if (ReverseVEXT)
04883     Imm -= NumElts;
04884 
04885   return true;
04886 }
04887 
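Concretely, the two checkers above accept masks made of NumElts consecutive lane indices taken from the concatenated inputs. The standalone generator below (makeVEXTMask is a made-up helper, not part of this file) produces such masks.

#include <vector>

// Standalone sketch: the mask isVEXTMask accepts starts at lane Start and
// takes NumElts consecutive lanes of the concatenated <V1, V2> pair, wrapping
// at 2*NumElts.  (For isSingletonVEXTMask the wrap point is NumElts instead,
// since both operands are the same vector.)
static std::vector<int> makeVEXTMask(unsigned NumElts, unsigned Start) {
  std::vector<int> M(NumElts);
  for (unsigned i = 0; i != NumElts; ++i)
    M[i] = (Start + i) % (2 * NumElts);
  return M;
}

// makeVEXTMask(8, 2)  -> <2,3,4,5,6,7,8,9>   : VEXT with Imm = 2.
// makeVEXTMask(8, 10) -> <10,...,15,0,1>     : wraps past 2*NumElts, so the
//                                              operands are swapped and Imm = 10 - 8 = 2.
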
04888 /// isVREVMask - Check if a vector shuffle corresponds to a VREV
04889 /// instruction with the specified blocksize.  (The order of the elements
04890 /// within each block of the vector is reversed.)
04891 static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
04892   assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
04893          "Only possible block sizes for VREV are: 16, 32, 64");
04894 
04895   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04896   if (EltSz == 64)
04897     return false;
04898 
04899   unsigned NumElts = VT.getVectorNumElements();
04900   unsigned BlockElts = M[0] + 1;
04901   // If the first shuffle index is UNDEF, be optimistic.
04902   if (M[0] < 0)
04903     BlockElts = BlockSize / EltSz;
04904 
04905   if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
04906     return false;
04907 
04908   for (unsigned i = 0; i < NumElts; ++i) {
04909     if (M[i] < 0) continue; // ignore UNDEF indices
04910     if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
04911       return false;
04912   }
04913 
04914   return true;
04915 }
04916 
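For reference, the masks isVREVMask recognizes simply reverse the lanes inside each BlockSize-bit block. The standalone generator below (makeVREVMask is a made-up name) produces them; for v8i8 and BlockSize 32 the result is <3,2,1,0,7,6,5,4>, i.e. the VREV32.8 pattern.

#include <vector>

// Standalone sketch: lane i maps to the mirrored position within its block.
static std::vector<int> makeVREVMask(unsigned NumElts, unsigned EltSz,
                                     unsigned BlockSize) {
  unsigned BlockElts = BlockSize / EltSz;
  std::vector<int> M(NumElts);
  for (unsigned i = 0; i != NumElts; ++i)
    M[i] = (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts);
  return M;
}

// makeVREVMask(8, 8, 64) -> <7,6,5,4,3,2,1,0>  (VREV64.8)
// makeVREVMask(8, 8, 16) -> <1,0,3,2,5,4,7,6>  (VREV16.8)
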
04917 static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
04918   // We can handle <8 x i8> vector shuffles. If the index in the mask is out of
04919   // range, then 0 is placed into the resulting vector. So pretty much any mask
04920   // of 8 elements can work here.
04921   return VT == MVT::v8i8 && M.size() == 8;
04922 }
04923 
04924 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
04925   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04926   if (EltSz == 64)
04927     return false;
04928 
04929   unsigned NumElts = VT.getVectorNumElements();
04930   WhichResult = (M[0] == 0 ? 0 : 1);
04931   for (unsigned i = 0; i < NumElts; i += 2) {
04932     if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) ||
04933         (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult))
04934       return false;
04935   }
04936   return true;
04937 }
04938 
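To make the pattern above concrete: VTRN transposes the two operands in 2x2 element blocks, so each result interleaves the matching even or odd lanes of both inputs. A standalone generator (makeVTRNMask is made up for illustration):

#include <vector>

static std::vector<int> makeVTRNMask(unsigned NumElts, unsigned WhichResult) {
  std::vector<int> M(NumElts);
  for (unsigned i = 0; i < NumElts; i += 2) {
    M[i] = i + WhichResult;               // lane from the first operand
    M[i + 1] = i + NumElts + WhichResult; // matching lane from the second
  }
  return M;
}

// For v4i16: makeVTRNMask(4, 0) -> <0,4,2,6>, makeVTRNMask(4, 1) -> <1,5,3,7>.
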
04939 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
04940 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
04941 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
04942 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
04943   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04944   if (EltSz == 64)
04945     return false;
04946 
04947   unsigned NumElts = VT.getVectorNumElements();
04948   WhichResult = (M[0] == 0 ? 0 : 1);
04949   for (unsigned i = 0; i < NumElts; i += 2) {
04950     if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) ||
04951         (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult))
04952       return false;
04953   }
04954   return true;
04955 }
04956 
04957 static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
04958   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04959   if (EltSz == 64)
04960     return false;
04961 
04962   unsigned NumElts = VT.getVectorNumElements();
04963   WhichResult = (M[0] == 0 ? 0 : 1);
04964   for (unsigned i = 0; i != NumElts; ++i) {
04965     if (M[i] < 0) continue; // ignore UNDEF indices
04966     if ((unsigned) M[i] != 2 * i + WhichResult)
04967       return false;
04968   }
04969 
04970   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
04971   if (VT.is64BitVector() && EltSz == 32)
04972     return false;
04973 
04974   return true;
04975 }
04976 
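Similarly, VUZP de-interleaves the concatenation of the two operands: result 0 collects the even lanes and result 1 the odd lanes. A standalone sketch (makeVUZPMask is a made-up helper):

#include <vector>

static std::vector<int> makeVUZPMask(unsigned NumElts, unsigned WhichResult) {
  std::vector<int> M(NumElts);
  for (unsigned i = 0; i != NumElts; ++i)
    M[i] = 2 * i + WhichResult;  // every second lane of <V1, V2>
  return M;
}

// For v4i16: makeVUZPMask(4, 0) -> <0,2,4,6>, makeVUZPMask(4, 1) -> <1,3,5,7>.
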
04977 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
04978 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
04979 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
04980 static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
04981   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04982   if (EltSz == 64)
04983     return false;
04984 
04985   unsigned Half = VT.getVectorNumElements() / 2;
04986   WhichResult = (M[0] == 0 ? 0 : 1);
04987   for (unsigned j = 0; j != 2; ++j) {
04988     unsigned Idx = WhichResult;
04989     for (unsigned i = 0; i != Half; ++i) {
04990       int MIdx = M[i + j * Half];
04991       if (MIdx >= 0 && (unsigned) MIdx != Idx)
04992         return false;
04993       Idx += 2;
04994     }
04995   }
04996 
04997   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
04998   if (VT.is64BitVector() && EltSz == 32)
04999     return false;
05000 
05001   return true;
05002 }
05003 
05004 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
05005   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
05006   if (EltSz == 64)
05007     return false;
05008 
05009   unsigned NumElts = VT.getVectorNumElements();
05010   WhichResult = (M[0] == 0 ? 0 : 1);
05011   unsigned Idx = WhichResult * NumElts / 2;
05012   for (unsigned i = 0; i != NumElts; i += 2) {
05013     if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
05014         (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts))
05015       return false;
05016     Idx += 1;
05017   }
05018 
05019   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
05020   if (VT.is64BitVector() && EltSz == 32)
05021     return false;
05022 
05023   return true;
05024 }
05025 
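And VZIP interleaves corresponding halves of the two operands, which is what the Idx bookkeeping above encodes. A standalone sketch (makeVZIPMask is a made-up helper):

#include <vector>

static std::vector<int> makeVZIPMask(unsigned NumElts, unsigned WhichResult) {
  std::vector<int> M(NumElts);
  unsigned Idx = WhichResult * NumElts / 2;  // start in the low or high half
  for (unsigned i = 0; i != NumElts; i += 2) {
    M[i] = Idx;               // lane from the first operand
    M[i + 1] = Idx + NumElts; // same lane from the second operand
    ++Idx;
  }
  return M;
}

// For v4i16: makeVZIPMask(4, 0) -> <0,4,1,5>, makeVZIPMask(4, 1) -> <2,6,3,7>.
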
05026 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
05027 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
05028 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
05029 static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
05030   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
05031   if (EltSz == 64)
05032     return false;
05033 
05034   unsigned NumElts = VT.getVectorNumElements();
05035   WhichResult = (M[0] == 0 ? 0 : 1);
05036   unsigned Idx = WhichResult * NumElts / 2;
05037   for (unsigned i = 0; i != NumElts; i += 2) {
05038     if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
05039         (M[i+1] >= 0 && (unsigned) M[i+1] != Idx))
05040       return false;
05041     Idx += 1;
05042   }
05043 
05044   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
05045   if (VT.is64BitVector() && EltSz == 32)
05046     return false;
05047 
05048   return true;
05049 }
05050 
05051 /// \return true if this is a reverse operation on a vector.
05052 static bool isReverseMask(ArrayRef<int> M, EVT VT) {
05053   unsigned NumElts = VT.getVectorNumElements();
05054   // Make sure the mask has the right size.
05055   if (NumElts != M.size())
05056     return false;
05057 
05058   // Look for <15, ..., 3, -1, 1, 0>.
05059   for (unsigned i = 0; i != NumElts; ++i)
05060     if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
05061       return false;
05062 
05063   return true;
05064 }
05065 
05066 // If N is an integer constant that can be moved into a register in one
05067 // instruction, return an SDValue of such a constant (will become a MOV
05068 // instruction).  Otherwise return null.
05069 static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
05070                                      const ARMSubtarget *ST, SDLoc dl) {
05071   uint64_t Val;
05072   if (!isa<ConstantSDNode>(N))
05073     return SDValue();
05074   Val = cast<ConstantSDNode>(N)->getZExtValue();
05075 
05076   if (ST->isThumb1Only()) {
05077     if (Val <= 255 || ~Val <= 255)
05078       return DAG.getConstant(Val, MVT::i32);
05079   } else {
05080     if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
05081       return DAG.getConstant(Val, MVT::i32);
05082   }
05083   return SDValue();
05084 }
05085 
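The ARM-mode half of the test above defers to ARM_AM::getSOImmVal, which accepts exactly the "modified immediate" operands: an 8-bit value rotated right by an even amount within the 32-bit word. The standalone predicate below (isARMModifiedImm is a made-up name) is a sketch of that rule, not the routine used here.

#include <cstdint>

static bool isARMModifiedImm(uint32_t V) {
  for (unsigned Rot = 0; Rot < 32; Rot += 2) {
    // Undo a rotate-right-by-Rot encoding by rotating left by Rot.
    uint32_t Undone = Rot ? ((V << Rot) | (V >> (32 - Rot))) : V;
    if (Undone <= 0xff)
      return true;
  }
  return false;
}

// isARMModifiedImm(0xff000000) holds (0xff rotated right by 8), so such a splat
// can be VDUPed from a single MOV; 0x00012345 is not encodable (nor is its
// complement), so IsSingleInstrConstant returns a null SDValue for it.
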
05086 // If this is a case we can't handle, return null and let the default
05087 // expansion code take care of it.
05088 SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
05089                                              const ARMSubtarget *ST) const {
05090   BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
05091   SDLoc dl(Op);
05092   EVT VT = Op.getValueType();
05093 
05094   APInt SplatBits, SplatUndef;
05095   unsigned SplatBitSize;
05096   bool HasAnyUndefs;
05097   if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
05098     if (SplatBitSize <= 64) {
05099       // Check if an immediate VMOV works.
05100       EVT VmovVT;
05101       SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
05102                                       SplatUndef.getZExtValue(), SplatBitSize,
05103                                       DAG, VmovVT, VT.is128BitVector(),
05104                                       VMOVModImm);
05105       if (Val.getNode()) {
05106         SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
05107         return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
05108       }
05109 
05110       // Try an immediate VMVN.
05111       uint64_t NegatedImm = (~SplatBits).getZExtValue();
05112       Val = isNEONModifiedImm(NegatedImm,
05113                                       SplatUndef.getZExtValue(), SplatBitSize,
05114                                       DAG, VmovVT, VT.is128BitVector(),
05115                                       VMVNModImm);
05116       if (Val.getNode()) {
05117         SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
05118         return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
05119       }
05120 
05121       // Use vmov.f32 to materialize other v2f32 and v4f32 splats.
05122       if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
05123         int ImmVal = ARM_AM::getFP32Imm(SplatBits);
05124         if (ImmVal != -1) {
05125           SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32);
05126           return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
05127         }
05128       }
05129     }
05130   }
05131 
05132   // Scan through the operands to see if only one value is used.
05133   //
05134   // As an optimisation, even if more than one value is used it may be more
05135   // profitable to splat with one value and then change some lanes.
05136   //
05137   // Heuristically we decide to do this if the vector has a "dominant" value,
05138   // defined as splatted to more than half of the lanes.
05139   unsigned NumElts = VT.getVectorNumElements();
05140   bool isOnlyLowElement = true;
05141   bool usesOnlyOneValue = true;
05142   bool hasDominantValue = false;
05143   bool isConstant = true;
05144 
05145   // Map of the number of times a particular SDValue appears in the
05146   // element list.
05147   DenseMap<SDValue, unsigned> ValueCounts;
05148   SDValue Value;
05149   for (unsigned i = 0; i < NumElts; ++i) {
05150     SDValue V = Op.getOperand(i);
05151     if (V.getOpcode() == ISD::UNDEF)
05152       continue;
05153     if (i > 0)
05154       isOnlyLowElement = false;
05155     if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
05156       isConstant = false;
05157 
05158     ValueCounts.insert(std::make_pair(V, 0));
05159     unsigned &Count = ValueCounts[V];
05160 
05161     // Is this value dominant? (takes up more than half of the lanes)
05162     if (++Count > (NumElts / 2)) {
05163       hasDominantValue = true;
05164       Value = V;
05165     }
05166   }
05167   if (ValueCounts.size() != 1)
05168     usesOnlyOneValue = false;
05169   if (!Value.getNode() && ValueCounts.size() > 0)
05170     Value = ValueCounts.begin()->first;
05171 
05172   if (ValueCounts.size() == 0)
05173     return DAG.getUNDEF(VT);
05174 
05175   // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR,
05176   // so keep going rather than taking this shortcut when the value is a load.
05177   if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
05178     return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
05179 
05180   unsigned EltSize = VT.getVectorElementType().getSizeInBits();
05181 
05182   // Use VDUP for non-constant splats.  For f32 constant splats, reduce to
05183   // i32 and try again.
05184   if (hasDominantValue && EltSize <= 32) {
05185     if (!isConstant) {
05186       SDValue N;
05187 
05188       // If we are VDUPing a value that comes directly from a vector, that will
05189       // cause an unnecessary move to and from a GPR, where instead we could
05190       // just use VDUPLANE. We can only do this if the lane being extracted
05191       // is at a constant index, as the VDUP from lane instructions only have
05192       // constant-index forms.
05193       if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
05194           isa<ConstantSDNode>(Value->getOperand(1))) {
05195         // We need to create a new undef vector to use for the VDUPLANE if the
05196         // size of the vector from which we get the value is different from the
05197         // size of the vector that we need to create. We will insert the element
05198         // such that the register coalescer will remove unnecessary copies.
05199         if (VT != Value->getOperand(0).getValueType()) {
05200           ConstantSDNode *constIndex;
05201           constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1));
05202           assert(constIndex && "The index is not a constant!");
05203           unsigned index = constIndex->getAPIntValue().getLimitedValue() %
05204                              VT.getVectorNumElements();
05205           N =  DAG.getNode(ARMISD::VDUPLANE, dl, VT,
05206                  DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT),
05207                         Value, DAG.getConstant(index, MVT::i32)),
05208                            DAG.getConstant(index, MVT::i32));
05209         } else
05210           N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
05211                         Value->getOperand(0), Value->getOperand(1));
05212       } else
05213         N = DAG.getNode(ARMISD::VDUP, dl, VT, Value);
05214 
05215       if (!usesOnlyOneValue) {
05216         // The dominant value was splatted as 'N', but we now have to insert
05217         // all differing elements.
05218         for (unsigned I = 0; I < NumElts; ++I) {
05219           if (Op.getOperand(I) == Value)
05220             continue;
05221           SmallVector<SDValue, 3> Ops;
05222           Ops.push_back(N);
05223           Ops.push_back(Op.getOperand(I));
05224           Ops.push_back(DAG.getConstant(I, MVT::i32));
05225           N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops);
05226         }
05227       }
05228       return N;
05229     }
05230     if (VT.getVectorElementType().isFloatingPoint()) {
05231       SmallVector<SDValue, 8> Ops;
05232       for (unsigned i = 0; i < NumElts; ++i)
05233         Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
05234                                   Op.getOperand(i)));
05235       EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
05236       SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, Ops);
05237       Val = LowerBUILD_VECTOR(Val, DAG, ST);
05238       if (Val.getNode())
05239         return DAG.getNode(ISD::BITCAST, dl, VT, Val);
05240     }
05241     if (usesOnlyOneValue) {
05242       SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
05243       if (isConstant && Val.getNode())
05244         return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
05245     }
05246   }
05247 
05248   // If all elements are constants and the case above didn't get hit, fall back
05249   // to the default expansion, which will generate a load from the constant
05250   // pool.
05251   if (isConstant)
05252     return SDValue();
05253 
05254   // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
05255   if (NumElts >= 4) {
05256     SDValue shuffle = ReconstructShuffle(Op, DAG);
05257     if (shuffle != SDValue())
05258       return shuffle;
05259   }
05260 
05261   // Vectors with 32- or 64-bit elements can be built by directly assigning
05262   // the subregisters.  Lower it to an ARMISD::BUILD_VECTOR so the operands
05263   // will be legalized.
05264   if (EltSize >= 32) {
05265     // Do the expansion with floating-point types, since that is what the VFP
05266     // registers are defined to use, and since i64 is not legal.
05267     EVT EltVT = EVT::getFloatingPointVT(EltSize);
05268     EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
05269     SmallVector<SDValue, 8> Ops;
05270     for (unsigned i = 0; i < NumElts; ++i)
05271       Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
05272     SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
05273     return DAG.getNode(ISD::BITCAST, dl, VT, Val);
05274   }
05275 
05276   // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
05277   // know the default expansion would otherwise fall back on something even
05278   // worse. For a vector with one or two non-undef values, that's
05279   // scalar_to_vector for the elements followed by a shuffle (provided the
05280   // shuffle is valid for the target); for everything else it's element-by-
05281   // element materialization on the stack followed by a load.
05282   if (!isConstant && !usesOnlyOneValue) {
05283     SDValue Vec = DAG.getUNDEF(VT);
05284     for (unsigned i = 0 ; i < NumElts; ++i) {
05285       SDValue V = Op.getOperand(i);
05286       if (V.getOpcode() == ISD::UNDEF)
05287         continue;
05288       SDValue LaneIdx = DAG.getConstant(i, MVT::i32);
05289       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
05290     }
05291     return Vec;
05292   }
05293 
05294   return SDValue();
05295 }
05296 
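The counting loop above implements the heuristic described in its comment: a value is "dominant" once it occupies more than half of the lanes, and such a vector is built by splatting that value and then patching the differing lanes. A stripped-down sketch of just the counting step (hasDominantValue is a made-up helper operating on plain ints):

#include <map>
#include <vector>

static bool hasDominantValue(const std::vector<int> &Lanes, int &Dominant) {
  std::map<int, unsigned> Counts;
  for (int L : Lanes)
    if (++Counts[L] > Lanes.size() / 2) {  // strictly more than half the lanes
      Dominant = L;
      return true;
    }
  return false;
}

// For {7, 7, 7, 3} the dominant value is 7: VDUP 7, then INSERT_VECTOR_ELT of 3
// into lane 3.  For {7, 7, 3, 3} no value dominates and other lowerings apply.
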
05297 // Gather data to see if the operation can be modelled as a
05298 // shuffle in combination with VEXTs.
05299 SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
05300                                               SelectionDAG &DAG) const {
05301   SDLoc dl(Op);
05302   EVT VT = Op.getValueType();
05303   unsigned NumElts = VT.getVectorNumElements();
05304 
05305   SmallVector<SDValue, 2> SourceVecs;
05306   SmallVector<unsigned, 2> MinElts;
05307   SmallVector<unsigned, 2> MaxElts;
05308 
05309   for (unsigned i = 0; i < NumElts; ++i) {
05310     SDValue V = Op.getOperand(i);
05311     if (V.getOpcode() == ISD::UNDEF)
05312       continue;
05313     else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
05314       // A shuffle can only come from building a vector from various
05315       // elements of other vectors.
05316       return SDValue();
05317     } else if (V.getOperand(0).getValueType().getVectorElementType() !=
05318                VT.getVectorElementType()) {
05319       // This code doesn't know how to handle shuffles where the vector
05320       // element types do not match (this happens because type legalization
05321       // promotes the return type of EXTRACT_VECTOR_ELT).
05322       // FIXME: It might be appropriate to extend this code to handle
05323       // mismatched types.
05324       return SDValue();
05325     }
05326 
05327     // Record this extraction against the appropriate vector if possible...
05328     SDValue SourceVec = V.getOperand(0);
05329     // If the element number isn't a constant, we can't effectively
05330     // analyze what's going on.
05331     if (!isa<ConstantSDNode>(V.getOperand(1)))
05332       return SDValue();
05333     unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
05334     bool FoundSource = false;
05335     for (unsigned j = 0; j < SourceVecs.size(); ++j) {
05336       if (SourceVecs[j] == SourceVec) {
05337         if (MinElts[j] > EltNo)
05338           MinElts[j] = EltNo;
05339         if (MaxElts[j] < EltNo)
05340           MaxElts[j] = EltNo;
05341         FoundSource = true;
05342         break;
05343       }
05344     }
05345 
05346     // Or record a new source if not...
05347     if (!FoundSource) {
05348       SourceVecs.push_back(SourceVec);
05349       MinElts.push_back(EltNo);
05350       MaxElts.push_back(EltNo);
05351     }
05352   }
05353 
05354   // Currently we only do something sane when at most two source vectors
05355   // are involved.
05356   if (SourceVecs.size() > 2)
05357     return SDValue();
05358 
05359   SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT) };
05360   int VEXTOffsets[2] = {0, 0};
05361 
05362   // This loop extracts the usage patterns of the source vectors
05363   // and prepares appropriate SDValues for a shuffle if possible.
05364   for (unsigned i = 0; i < SourceVecs.size(); ++i) {
05365     if (SourceVecs[i].getValueType() == VT) {
05366       // No VEXT necessary
05367       ShuffleSrcs[i] = SourceVecs[i];
05368       VEXTOffsets[i] = 0;
05369       continue;
05370     } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) {
05371       // It probably isn't worth padding out a smaller vector just to
05372       // break it down again in a shuffle.
05373       return SDValue();
05374     }
05375 
05376     // Since only 64-bit and 128-bit vectors are legal on ARM and
05377     // we've eliminated the other cases...
05378     assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts &&
05379            "unexpected vector sizes in ReconstructShuffle");
05380 
05381     if (MaxElts[i] - MinElts[i] >= NumElts) {
05382       // Span too large for a VEXT to cope
05383       return SDValue();
05384     }
05385 
05386     if (MinElts[i] >= NumElts) {
05387       // The extraction can just take the second half
05388       VEXTOffsets[i] = NumElts;
05389       ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
05390                                    SourceVecs[i],
05391                                    DAG.getIntPtrConstant(NumElts));
05392     } else if (MaxElts[i] < NumElts) {
05393       // The extraction can just take the first half
05394       VEXTOffsets[i] = 0;
05395       ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
05396                                    SourceVecs[i],
05397                                    DAG.getIntPtrConstant(0));
05398     } else {
05399       // An actual VEXT is needed
05400       VEXTOffsets[i] = MinElts[i];
05401       SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
05402                                      SourceVecs[i],
05403                                      DAG.getIntPtrConstant(0));
05404       SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
05405                                      SourceVecs[i],
05406                                      DAG.getIntPtrConstant(NumElts));
05407       ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2,
05408                                    DAG.getConstant(VEXTOffsets[i], MVT::i32));
05409     }
05410   }
05411 
05412   SmallVector<int, 8> Mask;
05413 
05414   for (unsigned i = 0; i < NumElts; ++i) {
05415     SDValue Entry = Op.getOperand(i);
05416     if (Entry.getOpcode() == ISD::UNDEF) {
05417       Mask.push_back(-1);
05418       continue;
05419     }
05420 
05421     SDValue ExtractVec = Entry.getOperand(0);
05422     int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i)
05423                                           .getOperand(1))->getSExtValue();
05424     if (ExtractVec == SourceVecs[0]) {
05425       Mask.push_back(ExtractElt - VEXTOffsets[0]);
05426     } else {
05427       Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]);
05428     }
05429   }
05430 
05431   // Final check before we try to produce nonsense...
05432   if (isShuffleMaskLegal(Mask, VT))
05433     return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1],
05434                                 &Mask[0]);
05435 
05436   return SDValue();
05437 }
05438 
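Each double-width source in the loop above is narrowed in one of three ways, depending on where its used lanes sit. The sketch below (classifySource and SrcKind are made-up names, not part of this file) summarizes that decision:

enum class SrcKind { FirstHalf, SecondHalf, NeedVEXT, TooWide };

static SrcKind classifySource(unsigned MinElt, unsigned MaxElt, unsigned NumElts) {
  if (MaxElt - MinElt >= NumElts)
    return SrcKind::TooWide;     // span too large for a single VEXT result
  if (MinElt >= NumElts)
    return SrcKind::SecondHalf;  // EXTRACT_SUBVECTOR of the high half
  if (MaxElt < NumElts)
    return SrcKind::FirstHalf;   // EXTRACT_SUBVECTOR of the low half
  return SrcKind::NeedVEXT;      // straddles the halves: VEXT at offset MinElt
}

// classifySource(5, 7, 4) is SecondHalf; classifySource(3, 5, 4) is NeedVEXT
// with a VEXT offset of 3.
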
05439 /// isShuffleMaskLegal - Targets can use this to indicate that they only
05440 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
05441 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
05442 /// are assumed to be legal.
05443 bool
05444 ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
05445                                       EVT VT) const {
05446   if (VT.