ARMISelLowering.cpp
00001 //===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
00002 //
00003 //                     The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 //
00010 // This file defines the interfaces that ARM uses to lower LLVM code into a
00011 // selection DAG.
00012 //
00013 //===----------------------------------------------------------------------===//
00014 
00015 #include "ARMISelLowering.h"
00016 #include "ARMCallingConv.h"
00017 #include "ARMConstantPoolValue.h"
00018 #include "ARMMachineFunctionInfo.h"
00019 #include "ARMPerfectShuffle.h"
00020 #include "ARMSubtarget.h"
00021 #include "ARMTargetMachine.h"
00022 #include "ARMTargetObjectFile.h"
00023 #include "MCTargetDesc/ARMAddressingModes.h"
00024 #include "llvm/ADT/Statistic.h"
00025 #include "llvm/ADT/StringExtras.h"
00026 #include "llvm/ADT/StringSwitch.h"
00027 #include "llvm/CodeGen/CallingConvLower.h"
00028 #include "llvm/CodeGen/IntrinsicLowering.h"
00029 #include "llvm/CodeGen/MachineBasicBlock.h"
00030 #include "llvm/CodeGen/MachineFrameInfo.h"
00031 #include "llvm/CodeGen/MachineFunction.h"
00032 #include "llvm/CodeGen/MachineInstrBuilder.h"
00033 #include "llvm/CodeGen/MachineJumpTableInfo.h"
00034 #include "llvm/CodeGen/MachineModuleInfo.h"
00035 #include "llvm/CodeGen/MachineRegisterInfo.h"
00036 #include "llvm/CodeGen/SelectionDAG.h"
00037 #include "llvm/IR/CallingConv.h"
00038 #include "llvm/IR/Constants.h"
00039 #include "llvm/IR/Function.h"
00040 #include "llvm/IR/GlobalValue.h"
00041 #include "llvm/IR/IRBuilder.h"
00042 #include "llvm/IR/Instruction.h"
00043 #include "llvm/IR/Instructions.h"
00044 #include "llvm/IR/Intrinsics.h"
00045 #include "llvm/IR/Type.h"
00046 #include "llvm/MC/MCSectionMachO.h"
00047 #include "llvm/Support/CommandLine.h"
00048 #include "llvm/Support/Debug.h"
00049 #include "llvm/Support/ErrorHandling.h"
00050 #include "llvm/Support/MathExtras.h"
00051 #include "llvm/Target/TargetOptions.h"
00052 #include <utility>
00053 using namespace llvm;
00054 
00055 #define DEBUG_TYPE "arm-isel"
00056 
00057 STATISTIC(NumTailCalls, "Number of tail calls");
00058 STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
00059 STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
00060 
00061 cl::opt<bool>
00062 EnableARMLongCalls("arm-long-calls", cl::Hidden,
00063   cl::desc("Generate calls via indirect call instructions"),
00064   cl::init(false));
00065 
00066 static cl::opt<bool>
00067 ARMInterworking("arm-interworking", cl::Hidden,
00068   cl::desc("Enable / disable ARM interworking (for debugging only)"),
00069   cl::init(true));
00070 
00071 namespace {
00072   class ARMCCState : public CCState {
00073   public:
00074     ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
00075                SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
00076                ParmContext PC)
00077         : CCState(CC, isVarArg, MF, locs, C) {
00078       assert(((PC == Call) || (PC == Prologue)) &&
00079              "ARMCCState users must specify whether their context is call "
00080              "or prologue generation.");
00081       CallOrPrologue = PC;
00082     }
00083   };
00084 }
00085 
00086 // The APCS parameter registers.
00087 static const MCPhysReg GPRArgRegs[] = {
00088   ARM::R0, ARM::R1, ARM::R2, ARM::R3
00089 };
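// Note (illustrative, not part of the ABI tables themselves): under the ARM
// procedure call standards only r0-r3 carry word-sized arguments; anything
// further is passed on the stack. The lowering code below consults this list
// whenever it needs those argument GPRs, for example when spilling varargs
// registers or splitting byval arguments between registers and the stack.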
00090 
00091 void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
00092                                        MVT PromotedBitwiseVT) {
00093   if (VT != PromotedLdStVT) {
00094     setOperationAction(ISD::LOAD, VT, Promote);
00095     AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);
00096 
00097     setOperationAction(ISD::STORE, VT, Promote);
00098     AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
00099   }
00100 
00101   MVT ElemTy = VT.getVectorElementType();
00102   if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
00103     setOperationAction(ISD::SETCC, VT, Custom);
00104   setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
00105   setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
00106   if (ElemTy == MVT::i32) {
00107     setOperationAction(ISD::SINT_TO_FP, VT, Custom);
00108     setOperationAction(ISD::UINT_TO_FP, VT, Custom);
00109     setOperationAction(ISD::FP_TO_SINT, VT, Custom);
00110     setOperationAction(ISD::FP_TO_UINT, VT, Custom);
00111   } else {
00112     setOperationAction(ISD::SINT_TO_FP, VT, Expand);
00113     setOperationAction(ISD::UINT_TO_FP, VT, Expand);
00114     setOperationAction(ISD::FP_TO_SINT, VT, Expand);
00115     setOperationAction(ISD::FP_TO_UINT, VT, Expand);
00116   }
00117   setOperationAction(ISD::BUILD_VECTOR,      VT, Custom);
00118   setOperationAction(ISD::VECTOR_SHUFFLE,    VT, Custom);
00119   setOperationAction(ISD::CONCAT_VECTORS,    VT, Legal);
00120   setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
00121   setOperationAction(ISD::SELECT,            VT, Expand);
00122   setOperationAction(ISD::SELECT_CC,         VT, Expand);
00123   setOperationAction(ISD::VSELECT,           VT, Expand);
00124   setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
00125   if (VT.isInteger()) {
00126     setOperationAction(ISD::SHL, VT, Custom);
00127     setOperationAction(ISD::SRA, VT, Custom);
00128     setOperationAction(ISD::SRL, VT, Custom);
00129   }
00130 
00131   // Promote all bit-wise operations.
00132   if (VT.isInteger() && VT != PromotedBitwiseVT) {
00133     setOperationAction(ISD::AND, VT, Promote);
00134     AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
00135     setOperationAction(ISD::OR,  VT, Promote);
00136     AddPromotedToType (ISD::OR,  VT, PromotedBitwiseVT);
00137     setOperationAction(ISD::XOR, VT, Promote);
00138     AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
00139   }
00140 
00141   // Neon does not support vector divide/remainder operations.
00142   setOperationAction(ISD::SDIV, VT, Expand);
00143   setOperationAction(ISD::UDIV, VT, Expand);
00144   setOperationAction(ISD::FDIV, VT, Expand);
00145   setOperationAction(ISD::SREM, VT, Expand);
00146   setOperationAction(ISD::UREM, VT, Expand);
00147   setOperationAction(ISD::FREM, VT, Expand);
00148 }
00149 
00150 void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
00151   addRegisterClass(VT, &ARM::DPRRegClass);
00152   addTypeForNEON(VT, MVT::f64, MVT::v2i32);
00153 }
00154 
00155 void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
00156   addRegisterClass(VT, &ARM::DPairRegClass);
00157   addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
00158 }
00159 
00160 ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
00161                                      const ARMSubtarget &STI)
00162     : TargetLowering(TM), Subtarget(&STI) {
00163   RegInfo = Subtarget->getRegisterInfo();
00164   Itins = Subtarget->getInstrItineraryData();
00165 
00166   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
00167 
00168   if (Subtarget->isTargetMachO()) {
00169     // Uses VFP for Thumb libfuncs if available.
00170     if (Subtarget->isThumb() && Subtarget->hasVFP2() &&
00171         Subtarget->hasARMOps() && !TM.Options.UseSoftFloat) {
00172       // Single-precision floating-point arithmetic.
00173       setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
00174       setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
00175       setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
00176       setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");
00177 
00178       // Double-precision floating-point arithmetic.
00179       setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
00180       setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
00181       setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
00182       setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");
00183 
00184       // Single-precision comparisons.
00185       setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
00186       setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
00187       setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
00188       setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
00189       setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
00190       setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
00191       setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
00192       setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");
00193 
00194       setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
00195       setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
00196       setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
00197       setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
00198       setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
00199       setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
00200       setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
00201       setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);
00202 
00203       // Double-precision comparisons.
00204       setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
00205       setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
00206       setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
00207       setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
00208       setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
00209       setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
00210       setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
00211       setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");
00212 
00213       setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
00214       setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
00215       setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
00216       setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
00217       setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
00218       setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
00219       setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
00220       setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);
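      // How these comparison libcalls are consumed (illustrative): each
      // __*2vfp helper returns a nonzero value when its condition holds, and
      // setCmpLibcallCC records the condition code used to test the call's
      // result against zero. For example, OEQ_F64 maps to __eqdf2vfp with
      // SETNE, so "ordered and equal" becomes "__eqdf2vfp(a, b) != 0", while
      // O_F64 reuses __unorddf2vfp with SETEQ, so "ordered" becomes
      // "__unorddf2vfp(a, b) == 0".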
00221 
00222       // Floating-point to integer conversions.
00223       // i64 conversions are done via library routines even when generating VFP
00224       // instructions, so use the same ones.
00225       setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
00226       setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
00227       setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
00228       setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");
00229 
00230       // Conversions between floating types.
00231       setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
00232       setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");
00233 
00234       // Integer to floating-point conversions.
00235       // i64 conversions are done via library routines even when generating VFP
00236       // instructions, so use the same ones.
00237       // FIXME: There appears to be some naming inconsistency in ARM libgcc:
00238       // e.g., __floatunsidf vs. __floatunssidfvfp.
00239       setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
00240       setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
00241       setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
00242       setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
00243     }
00244   }
00245 
00246   // These libcalls are not available on 32-bit targets.
00247   setLibcallName(RTLIB::SHL_I128, nullptr);
00248   setLibcallName(RTLIB::SRL_I128, nullptr);
00249   setLibcallName(RTLIB::SRA_I128, nullptr);
00250 
00251   if (Subtarget->isAAPCS_ABI() && !Subtarget->isTargetMachO() &&
00252       !Subtarget->isTargetWindows()) {
00253     static const struct {
00254       const RTLIB::Libcall Op;
00255       const char * const Name;
00256       const CallingConv::ID CC;
00257       const ISD::CondCode Cond;
00258     } LibraryCalls[] = {
00259       // Double-precision floating-point arithmetic helper functions
00260       // RTABI chapter 4.1.2, Table 2
00261       { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00262       { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00263       { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00264       { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00265 
00266       // Double-precision floating-point comparison helper functions
00267       // RTABI chapter 4.1.2, Table 3
00268       { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
00269       { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
00270       { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
00271       { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
00272       { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
00273       { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
00274       { RTLIB::UO_F64,  "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
00275       { RTLIB::O_F64,   "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
00276 
00277       // Single-precision floating-point arithmetic helper functions
00278       // RTABI chapter 4.1.2, Table 4
00279       { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00280       { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00281       { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00282       { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00283 
00284       // Single-precision floating-point comparison helper functions
00285       // RTABI chapter 4.1.2, Table 5
00286       { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
00287       { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
00288       { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
00289       { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
00290       { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
00291       { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
00292       { RTLIB::UO_F32,  "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
00293       { RTLIB::O_F32,   "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
00294 
00295       // Floating-point to integer conversions.
00296       // RTABI chapter 4.1.2, Table 6
00297       { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00298       { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00299       { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00300       { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00301       { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00302       { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00303       { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00304       { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00305 
00306       // Conversions between floating types.
00307       // RTABI chapter 4.1.2, Table 7
00308       { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00309       { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00310       { RTLIB::FPEXT_F32_F64,   "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00311 
00312       // Integer to floating-point conversions.
00313       // RTABI chapter 4.1.2, Table 8
00314       { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00315       { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00316       { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00317       { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00318       { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00319       { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00320       { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00321       { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00322 
00323       // Long long helper functions
00324       // RTABI chapter 4.2, Table 9
00325       { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00326       { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00327       { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00328       { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00329 
00330       // Integer division functions
00331       // RTABI chapter 4.3.1
00332       { RTLIB::SDIV_I8,  "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00333       { RTLIB::SDIV_I16, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00334       { RTLIB::SDIV_I32, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00335       { RTLIB::SDIV_I64, "__aeabi_ldivmod",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00336       { RTLIB::UDIV_I8,  "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00337       { RTLIB::UDIV_I16, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00338       { RTLIB::UDIV_I32, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00339       { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00340 
00341       // Memory operations
00342       // RTABI chapter 4.3.4
00343       { RTLIB::MEMCPY,  "__aeabi_memcpy",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00344       { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00345       { RTLIB::MEMSET,  "__aeabi_memset",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00346     };
00347 
00348     for (const auto &LC : LibraryCalls) {
00349       setLibcallName(LC.Op, LC.Name);
00350       setLibcallCallingConv(LC.Op, LC.CC);
00351       if (LC.Cond != ISD::SETCC_INVALID)
00352         setCmpLibcallCC(LC.Op, LC.Cond);
00353     }
00354   }
00355 
00356   if (Subtarget->isTargetWindows()) {
00357     static const struct {
00358       const RTLIB::Libcall Op;
00359       const char * const Name;
00360       const CallingConv::ID CC;
00361     } LibraryCalls[] = {
00362       { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
00363       { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
00364       { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
00365       { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
00366       { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
00367       { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
00368       { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
00369       { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
00370     };
00371 
00372     for (const auto &LC : LibraryCalls) {
00373       setLibcallName(LC.Op, LC.Name);
00374       setLibcallCallingConv(LC.Op, LC.CC);
00375     }
00376   }
00377 
00378   // Use divmod compiler-rt calls for iOS 5.0 and later.
00379   if (Subtarget->getTargetTriple().isiOS() &&
00380       !Subtarget->getTargetTriple().isOSVersionLT(5, 0)) {
00381     setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
00382     setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
00383   }
00384 
00385   // The half <-> float conversion functions are always soft-float, but are
00386   // needed for some targets which use a hard-float calling convention by
00387   // default.
00388   if (Subtarget->isAAPCS_ABI()) {
00389     setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
00390     setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
00391     setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
00392   } else {
00393     setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
00394     setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
00395     setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
00396   }
00397 
00398   if (Subtarget->isThumb1Only())
00399     addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
00400   else
00401     addRegisterClass(MVT::i32, &ARM::GPRRegClass);
00402   if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
00403       !Subtarget->isThumb1Only()) {
00404     addRegisterClass(MVT::f32, &ARM::SPRRegClass);
00405     addRegisterClass(MVT::f64, &ARM::DPRRegClass);
00406   }
00407 
00408   for (MVT VT : MVT::vector_valuetypes()) {
00409     for (MVT InnerVT : MVT::vector_valuetypes()) {
00410       setTruncStoreAction(VT, InnerVT, Expand);
00411       setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
00412       setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
00413       setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
00414     }
00415 
00416     setOperationAction(ISD::MULHS, VT, Expand);
00417     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
00418     setOperationAction(ISD::MULHU, VT, Expand);
00419     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
00420 
00421     setOperationAction(ISD::BSWAP, VT, Expand);
00422   }
00423 
00424   setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
00425   setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
00426 
00427   if (Subtarget->hasNEON()) {
00428     addDRTypeForNEON(MVT::v2f32);
00429     addDRTypeForNEON(MVT::v8i8);
00430     addDRTypeForNEON(MVT::v4i16);
00431     addDRTypeForNEON(MVT::v2i32);
00432     addDRTypeForNEON(MVT::v1i64);
00433 
00434     addQRTypeForNEON(MVT::v4f32);
00435     addQRTypeForNEON(MVT::v2f64);
00436     addQRTypeForNEON(MVT::v16i8);
00437     addQRTypeForNEON(MVT::v8i16);
00438     addQRTypeForNEON(MVT::v4i32);
00439     addQRTypeForNEON(MVT::v2i64);
00440 
00441     // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
00442     // neither NEON nor VFP supports any arithmetic operations on it. The same
00443     // applies to v4f32, except that vadd, vsub and vmul are natively supported
00444     // for v4f32.
00445     setOperationAction(ISD::FADD, MVT::v2f64, Expand);
00446     setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
00447     setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
00448     // FIXME: Code duplication: FDIV and FREM are expanded always, see
00449     // ARMTargetLowering::addTypeForNEON method for details.
00450     setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
00451     setOperationAction(ISD::FREM, MVT::v2f64, Expand);
00452     // FIXME: Create unittest.
00453     // In other words, find a case where "copysign" appears in the DAG with
00454     // vector operands.
00455     setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
00456     // FIXME: Code duplication: SETCC has custom operation action, see
00457     // ARMTargetLowering::addTypeForNEON method for details.
00458     setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
00459     // FIXME: Create unittest for FNEG and for FABS.
00460     setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
00461     setOperationAction(ISD::FABS, MVT::v2f64, Expand);
00462     setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
00463     setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
00464     setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
00465     setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
00466     setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
00467     setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
00468     setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
00469     setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
00470     setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
00471     setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
00472     // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
00473     setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
00474     setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
00475     setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
00476     setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
00477     setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
00478     setOperationAction(ISD::FMA, MVT::v2f64, Expand);
00479 
00480     setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
00481     setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
00482     setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
00483     setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
00484     setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
00485     setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
00486     setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
00487     setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
00488     setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
00489     setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
00490     setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
00491     setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
00492     setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
00493     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
00494     setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);
00495 
00496     // Mark the v2f32 versions of these operations for expansion as well.
00497     setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
00498     setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
00499     setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
00500     setOperationAction(ISD::FPOWI, MVT::v2f32, Expand);
00501     setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
00502     setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
00503     setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
00504     setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
00505     setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
00506     setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
00507     setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
00508     setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
00509     setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
00510     setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
00511     setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);
00512 
00513     // Neon does not support some operations on v1i64 and v2i64 types.
00514     setOperationAction(ISD::MUL, MVT::v1i64, Expand);
00515     // Custom handling for some quad-vector types to detect VMULL.
00516     setOperationAction(ISD::MUL, MVT::v8i16, Custom);
00517     setOperationAction(ISD::MUL, MVT::v4i32, Custom);
00518     setOperationAction(ISD::MUL, MVT::v2i64, Custom);
00519     // Custom handling for some vector types to avoid expensive expansions
00520     setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
00521     setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
00522     setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
00523     setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
00524     setOperationAction(ISD::SETCC, MVT::v1i64, Expand);
00525     setOperationAction(ISD::SETCC, MVT::v2i64, Expand);
00526     // NEON does not have single-instruction SINT_TO_FP and UINT_TO_FP with
00527     // a destination type that is wider than the source, nor does it have an
00528     // FP_TO_[SU]INT instruction with a destination that is narrower than the
00529     // source.
00530     setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
00531     setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
00532     setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
00533     setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
00534 
00535     setOperationAction(ISD::FP_ROUND,   MVT::v2f32, Expand);
00536     setOperationAction(ISD::FP_EXTEND,  MVT::v2f64, Expand);
00537 
00538     // NEON does not have a single-instruction CTPOP for vectors with element
00539     // types wider than 8 bits.  However, custom lowering can leverage the
00540     // v8i8/v16i8 vcnt instruction.
00541     setOperationAction(ISD::CTPOP,      MVT::v2i32, Custom);
00542     setOperationAction(ISD::CTPOP,      MVT::v4i32, Custom);
00543     setOperationAction(ISD::CTPOP,      MVT::v4i16, Custom);
00544     setOperationAction(ISD::CTPOP,      MVT::v8i16, Custom);
00545 
00546     // NEON only has FMA instructions as of VFP4.
00547     if (!Subtarget->hasVFP4()) {
00548       setOperationAction(ISD::FMA, MVT::v2f32, Expand);
00549       setOperationAction(ISD::FMA, MVT::v4f32, Expand);
00550     }
00551 
00552     setTargetDAGCombine(ISD::INTRINSIC_VOID);
00553     setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
00554     setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
00555     setTargetDAGCombine(ISD::SHL);
00556     setTargetDAGCombine(ISD::SRL);
00557     setTargetDAGCombine(ISD::SRA);
00558     setTargetDAGCombine(ISD::SIGN_EXTEND);
00559     setTargetDAGCombine(ISD::ZERO_EXTEND);
00560     setTargetDAGCombine(ISD::ANY_EXTEND);
00561     setTargetDAGCombine(ISD::SELECT_CC);
00562     setTargetDAGCombine(ISD::BUILD_VECTOR);
00563     setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
00564     setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
00565     setTargetDAGCombine(ISD::STORE);
00566     setTargetDAGCombine(ISD::FP_TO_SINT);
00567     setTargetDAGCombine(ISD::FP_TO_UINT);
00568     setTargetDAGCombine(ISD::FDIV);
00569     setTargetDAGCombine(ISD::LOAD);
00570 
00571     // It is legal to extload from v4i8 to v4i16 or v4i32.
00572     MVT Tys[6] = {MVT::v8i8, MVT::v4i8, MVT::v2i8,
00573                   MVT::v4i16, MVT::v2i16,
00574                   MVT::v2i32};
00575     for (unsigned i = 0; i < 6; ++i) {
00576       for (MVT VT : MVT::integer_vector_valuetypes()) {
00577         setLoadExtAction(ISD::EXTLOAD, VT, Tys[i], Legal);
00578         setLoadExtAction(ISD::ZEXTLOAD, VT, Tys[i], Legal);
00579         setLoadExtAction(ISD::SEXTLOAD, VT, Tys[i], Legal);
00580       }
00581     }
00582   }
00583 
00584   // ARM and Thumb2 support UMLAL/SMLAL.
00585   if (!Subtarget->isThumb1Only())
00586     setTargetDAGCombine(ISD::ADDC);
00587 
00588   if (Subtarget->isFPOnlySP()) {
00589     // When targeting a floating-point unit with only single-precision
00590     // operations, f64 is legal for the few double-precision instructions which
00591     // are present. However, no double-precision operations other than moves,
00592     // loads and stores are provided by the hardware.
00593     setOperationAction(ISD::FADD,       MVT::f64, Expand);
00594     setOperationAction(ISD::FSUB,       MVT::f64, Expand);
00595     setOperationAction(ISD::FMUL,       MVT::f64, Expand);
00596     setOperationAction(ISD::FMA,        MVT::f64, Expand);
00597     setOperationAction(ISD::FDIV,       MVT::f64, Expand);
00598     setOperationAction(ISD::FREM,       MVT::f64, Expand);
00599     setOperationAction(ISD::FCOPYSIGN,  MVT::f64, Expand);
00600     setOperationAction(ISD::FGETSIGN,   MVT::f64, Expand);
00601     setOperationAction(ISD::FNEG,       MVT::f64, Expand);
00602     setOperationAction(ISD::FABS,       MVT::f64, Expand);
00603     setOperationAction(ISD::FSQRT,      MVT::f64, Expand);
00604     setOperationAction(ISD::FSIN,       MVT::f64, Expand);
00605     setOperationAction(ISD::FCOS,       MVT::f64, Expand);
00606     setOperationAction(ISD::FPOWI,      MVT::f64, Expand);
00607     setOperationAction(ISD::FPOW,       MVT::f64, Expand);
00608     setOperationAction(ISD::FLOG,       MVT::f64, Expand);
00609     setOperationAction(ISD::FLOG2,      MVT::f64, Expand);
00610     setOperationAction(ISD::FLOG10,     MVT::f64, Expand);
00611     setOperationAction(ISD::FEXP,       MVT::f64, Expand);
00612     setOperationAction(ISD::FEXP2,      MVT::f64, Expand);
00613     setOperationAction(ISD::FCEIL,      MVT::f64, Expand);
00614     setOperationAction(ISD::FTRUNC,     MVT::f64, Expand);
00615     setOperationAction(ISD::FRINT,      MVT::f64, Expand);
00616     setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
00617     setOperationAction(ISD::FFLOOR,     MVT::f64, Expand);
00618     setOperationAction(ISD::FP_ROUND,   MVT::f32, Custom);
00619     setOperationAction(ISD::FP_EXTEND,  MVT::f64, Custom);
00620   }
00621 
00622   computeRegisterProperties(Subtarget->getRegisterInfo());
00623 
00624   // ARM does not have floating-point extending loads.
00625   for (MVT VT : MVT::fp_valuetypes()) {
00626     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
00627     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
00628   }
00629 
00630   // ... or truncating stores
00631   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
00632   setTruncStoreAction(MVT::f32, MVT::f16, Expand);
00633   setTruncStoreAction(MVT::f64, MVT::f16, Expand);
00634 
00635   // ARM does not have i1 sign-extending loads.
00636   for (MVT VT : MVT::integer_valuetypes())
00637     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
00638 
00639   // ARM supports all 4 flavors of integer indexed load / store.
00640   if (!Subtarget->isThumb1Only()) {
00641     for (unsigned im = (unsigned)ISD::PRE_INC;
00642          im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
00643       setIndexedLoadAction(im,  MVT::i1,  Legal);
00644       setIndexedLoadAction(im,  MVT::i8,  Legal);
00645       setIndexedLoadAction(im,  MVT::i16, Legal);
00646       setIndexedLoadAction(im,  MVT::i32, Legal);
00647       setIndexedStoreAction(im, MVT::i1,  Legal);
00648       setIndexedStoreAction(im, MVT::i8,  Legal);
00649       setIndexedStoreAction(im, MVT::i16, Legal);
00650       setIndexedStoreAction(im, MVT::i32, Legal);
00651     }
00652   }
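  // Illustrative example of the indexed forms marked Legal above: a
  // pre-indexed load corresponds to "ldr r0, [r1, #4]!" (base register updated
  // before the access) and a post-indexed load to "ldr r0, [r1], #4" (base
  // updated after the access); the analogous addressing modes exist for stores.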
00653 
00654   setOperationAction(ISD::SADDO, MVT::i32, Custom);
00655   setOperationAction(ISD::UADDO, MVT::i32, Custom);
00656   setOperationAction(ISD::SSUBO, MVT::i32, Custom);
00657   setOperationAction(ISD::USUBO, MVT::i32, Custom);
00658 
00659   // i64 operation support.
00660   setOperationAction(ISD::MUL,     MVT::i64, Expand);
00661   setOperationAction(ISD::MULHU,   MVT::i32, Expand);
00662   if (Subtarget->isThumb1Only()) {
00663     setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
00664     setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
00665   }
00666   if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
00667       || (Subtarget->isThumb2() && !Subtarget->hasThumb2DSP()))
00668     setOperationAction(ISD::MULHS, MVT::i32, Expand);
00669 
00670   setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
00671   setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
00672   setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
00673   setOperationAction(ISD::SRL,       MVT::i64, Custom);
00674   setOperationAction(ISD::SRA,       MVT::i64, Custom);
00675 
00676   if (!Subtarget->isThumb1Only()) {
00677     // FIXME: We should do this for Thumb1 as well.
00678     setOperationAction(ISD::ADDC,    MVT::i32, Custom);
00679     setOperationAction(ISD::ADDE,    MVT::i32, Custom);
00680     setOperationAction(ISD::SUBC,    MVT::i32, Custom);
00681     setOperationAction(ISD::SUBE,    MVT::i32, Custom);
00682   }
00683 
00684   // ARM does not have ROTL.
00685   setOperationAction(ISD::ROTL,  MVT::i32, Expand);
00686   setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
00687   setOperationAction(ISD::CTPOP, MVT::i32, Expand);
00688   if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
00689     setOperationAction(ISD::CTLZ, MVT::i32, Expand);
00690 
00691   // These just redirect to CTTZ and CTLZ on ARM.
00692   setOperationAction(ISD::CTTZ_ZERO_UNDEF  , MVT::i32  , Expand);
00693   setOperationAction(ISD::CTLZ_ZERO_UNDEF  , MVT::i32  , Expand);
00694 
00695   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
00696 
00697   // Only ARMv6 has BSWAP.
00698   if (!Subtarget->hasV6Ops())
00699     setOperationAction(ISD::BSWAP, MVT::i32, Expand);
00700 
00701   if (!(Subtarget->hasDivide() && Subtarget->isThumb2()) &&
00702       !(Subtarget->hasDivideInARMMode() && !Subtarget->isThumb())) {
00703     // These are expanded into libcalls if the CPU doesn't have a hardware divider.
00704     setOperationAction(ISD::SDIV,  MVT::i32, Expand);
00705     setOperationAction(ISD::UDIV,  MVT::i32, Expand);
00706   }
00707 
00708   // FIXME: Also set divmod for SREM on EABI
00709   setOperationAction(ISD::SREM,  MVT::i32, Expand);
00710   setOperationAction(ISD::UREM,  MVT::i32, Expand);
00711   // Register based DivRem for AEABI (RTABI 4.2)
00712   if (Subtarget->isTargetAEABI()) {
00713     setLibcallName(RTLIB::SDIVREM_I8,  "__aeabi_idivmod");
00714     setLibcallName(RTLIB::SDIVREM_I16, "__aeabi_idivmod");
00715     setLibcallName(RTLIB::SDIVREM_I32, "__aeabi_idivmod");
00716     setLibcallName(RTLIB::SDIVREM_I64, "__aeabi_ldivmod");
00717     setLibcallName(RTLIB::UDIVREM_I8,  "__aeabi_uidivmod");
00718     setLibcallName(RTLIB::UDIVREM_I16, "__aeabi_uidivmod");
00719     setLibcallName(RTLIB::UDIVREM_I32, "__aeabi_uidivmod");
00720     setLibcallName(RTLIB::UDIVREM_I64, "__aeabi_uldivmod");
00721 
00722     setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::ARM_AAPCS);
00723     setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::ARM_AAPCS);
00724     setLibcallCallingConv(RTLIB::SDIVREM_I32, CallingConv::ARM_AAPCS);
00725     setLibcallCallingConv(RTLIB::SDIVREM_I64, CallingConv::ARM_AAPCS);
00726     setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::ARM_AAPCS);
00727     setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::ARM_AAPCS);
00728     setLibcallCallingConv(RTLIB::UDIVREM_I32, CallingConv::ARM_AAPCS);
00729     setLibcallCallingConv(RTLIB::UDIVREM_I64, CallingConv::ARM_AAPCS);
00730 
00731     setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
00732     setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
00733   } else {
00734     setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
00735     setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
00736   }
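  // Note on the AEABI path above (illustrative): __aeabi_idivmod and
  // __aeabi_uidivmod return the quotient in r0 and the remainder in r1, so
  // custom-lowering SDIVREM/UDIVREM to a single RTABI call yields both results
  // at once instead of issuing separate division and remainder libcalls.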
00737 
00738   setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
00739   setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
00740   setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
00741   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
00742   setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
00743 
00744   setOperationAction(ISD::TRAP, MVT::Other, Legal);
00745 
00746   // Use the default implementation.
00747   setOperationAction(ISD::VASTART,            MVT::Other, Custom);
00748   setOperationAction(ISD::VAARG,              MVT::Other, Expand);
00749   setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
00750   setOperationAction(ISD::VAEND,              MVT::Other, Expand);
00751   setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
00752   setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
00753 
00754   if (!Subtarget->isTargetMachO()) {
00755     // Non-MachO platforms may return values in these registers via the
00756     // personality function.
00757     setExceptionPointerRegister(ARM::R0);
00758     setExceptionSelectorRegister(ARM::R1);
00759   }
00760 
00761   if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment())
00762     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
00763   else
00764     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
00765 
00766   // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
00767   // the default expansion. If we are targeting a single-threaded system,
00768   // then set them all to Expand so we can lower them later into their
00769   // non-atomic form.
00770   if (TM.Options.ThreadModel == ThreadModel::Single)
00771     setOperationAction(ISD::ATOMIC_FENCE,   MVT::Other, Expand);
00772   else if (Subtarget->hasAnyDataBarrier() && !Subtarget->isThumb1Only()) {
00773     // ATOMIC_FENCE needs custom lowering; the others should have been expanded
00774     // to ldrex/strex loops already.
00775     setOperationAction(ISD::ATOMIC_FENCE,     MVT::Other, Custom);
00776 
00777     // On v8, we have particularly efficient implementations of atomic fences
00778     // if they can be combined with nearby atomic loads and stores.
00779     if (!Subtarget->hasV8Ops()) {
00780       // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
00781       setInsertFencesForAtomic(true);
00782     }
00783   } else {
00784     // If there's anything we can use as a barrier, go through custom lowering
00785     // for ATOMIC_FENCE.
00786     setOperationAction(ISD::ATOMIC_FENCE,   MVT::Other,
00787                        Subtarget->hasAnyDataBarrier() ? Custom : Expand);
00788 
00789     // Set them all for expansion, which will force libcalls.
00790     setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Expand);
00791     setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Expand);
00792     setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Expand);
00793     setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Expand);
00794     setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Expand);
00795     setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Expand);
00796     setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Expand);
00797     setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
00798     setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
00799     setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
00800     setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
00801     setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
00802     // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
00803     // Unordered/Monotonic case.
00804     setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
00805     setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
00806   }
00807 
00808   setOperationAction(ISD::PREFETCH,         MVT::Other, Custom);
00809 
00810   // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
00811   if (!Subtarget->hasV6Ops()) {
00812     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
00813     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
00814   }
00815   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
00816 
00817   if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
00818       !Subtarget->isThumb1Only()) {
00819     // Turn f64 -> i64 into VMOVRRD and i64 -> f64 into VMOVDRR
00820     // iff the target supports VFP2.
00821     setOperationAction(ISD::BITCAST, MVT::i64, Custom);
00822     setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
00823   }
00824 
00825   // We want to custom lower some of our intrinsics.
00826   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
00827   if (Subtarget->isTargetDarwin()) {
00828     setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
00829     setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
00830     setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
00831   }
00832 
00833   setOperationAction(ISD::SETCC,     MVT::i32, Expand);
00834   setOperationAction(ISD::SETCC,     MVT::f32, Expand);
00835   setOperationAction(ISD::SETCC,     MVT::f64, Expand);
00836   setOperationAction(ISD::SELECT,    MVT::i32, Custom);
00837   setOperationAction(ISD::SELECT,    MVT::f32, Custom);
00838   setOperationAction(ISD::SELECT,    MVT::f64, Custom);
00839   setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
00840   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
00841   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
00842 
00843   setOperationAction(ISD::BRCOND,    MVT::Other, Expand);
00844   setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
00845   setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
00846   setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
00847   setOperationAction(ISD::BR_JT,     MVT::Other, Custom);
00848 
00849   // We don't support sin/cos/fmod/copysign/pow
00850   setOperationAction(ISD::FSIN,      MVT::f64, Expand);
00851   setOperationAction(ISD::FSIN,      MVT::f32, Expand);
00852   setOperationAction(ISD::FCOS,      MVT::f32, Expand);
00853   setOperationAction(ISD::FCOS,      MVT::f64, Expand);
00854   setOperationAction(ISD::FSINCOS,   MVT::f64, Expand);
00855   setOperationAction(ISD::FSINCOS,   MVT::f32, Expand);
00856   setOperationAction(ISD::FREM,      MVT::f64, Expand);
00857   setOperationAction(ISD::FREM,      MVT::f32, Expand);
00858   if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
00859       !Subtarget->isThumb1Only()) {
00860     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
00861     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
00862   }
00863   setOperationAction(ISD::FPOW,      MVT::f64, Expand);
00864   setOperationAction(ISD::FPOW,      MVT::f32, Expand);
00865 
00866   if (!Subtarget->hasVFP4()) {
00867     setOperationAction(ISD::FMA, MVT::f64, Expand);
00868     setOperationAction(ISD::FMA, MVT::f32, Expand);
00869   }
00870 
00871   // Various VFP goodness
00872   if (!TM.Options.UseSoftFloat && !Subtarget->isThumb1Only()) {
00873     // int <-> fp are custom expanded into bit_convert + ARMISD ops.
00874     if (Subtarget->hasVFP2()) {
00875       setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
00876       setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
00877       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
00878       setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
00879     }
00880 
00881     // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded.
00882     if (!Subtarget->hasFPARMv8() || Subtarget->isFPOnlySP()) {
00883       setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
00884       setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
00885     }
00886 
00887     // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
00888     if (!Subtarget->hasFP16()) {
00889       setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
00890       setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
00891     }
00892   }
00893 
00894   // Combine sin / cos into one node or libcall if possible.
00895   if (Subtarget->hasSinCos()) {
00896     setLibcallName(RTLIB::SINCOS_F32, "sincosf");
00897     setLibcallName(RTLIB::SINCOS_F64, "sincos");
00898     if (Subtarget->getTargetTriple().isiOS()) {
00899       // For iOS, we don't want the normal expansion of a libcall to
00900       // sincos. We want to issue a libcall to __sincos_stret instead.
00901       setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
00902       setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
00903     }
00904   }
00905 
00906   // FP-ARMv8 implements a lot of rounding-like FP operations.
00907   if (Subtarget->hasFPARMv8()) {
00908     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
00909     setOperationAction(ISD::FCEIL, MVT::f32, Legal);
00910     setOperationAction(ISD::FROUND, MVT::f32, Legal);
00911     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
00912     setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
00913     setOperationAction(ISD::FRINT, MVT::f32, Legal);
00914     if (!Subtarget->isFPOnlySP()) {
00915       setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
00916       setOperationAction(ISD::FCEIL, MVT::f64, Legal);
00917       setOperationAction(ISD::FROUND, MVT::f64, Legal);
00918       setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
00919       setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
00920       setOperationAction(ISD::FRINT, MVT::f64, Legal);
00921     }
00922   }
00923   // We have target-specific dag combine patterns for the following nodes:
00924   // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
00925   setTargetDAGCombine(ISD::ADD);
00926   setTargetDAGCombine(ISD::SUB);
00927   setTargetDAGCombine(ISD::MUL);
00928   setTargetDAGCombine(ISD::AND);
00929   setTargetDAGCombine(ISD::OR);
00930   setTargetDAGCombine(ISD::XOR);
00931 
00932   if (Subtarget->hasV6Ops())
00933     setTargetDAGCombine(ISD::SRL);
00934 
00935   setStackPointerRegisterToSaveRestore(ARM::SP);
00936 
00937   if (TM.Options.UseSoftFloat || Subtarget->isThumb1Only() ||
00938       !Subtarget->hasVFP2())
00939     setSchedulingPreference(Sched::RegPressure);
00940   else
00941     setSchedulingPreference(Sched::Hybrid);
00942 
00943   //// temporary - rewrite interface to use type
00944   MaxStoresPerMemset = 8;
00945   MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
00946   MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
00947   MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 4 : 2;
00948   MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
00949   MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 4 : 2;
00950 
00951   // On ARM, arguments smaller than 4 bytes are extended, so all arguments
00952   // are at least 4-byte aligned.
00953   setMinStackArgumentAlignment(4);
00954 
00955   // Prefer likely predicted branches to selects on out-of-order cores.
00956   PredictableSelectIsExpensive = Subtarget->isLikeA9();
00957 
00958   setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
00959 }
00960 
00961 // FIXME: It might make sense to define the representative register class as the
00962 // nearest super-register that has a non-null superset. For example, DPR_VFP2 is
00963 // a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
00964 // SPR's representative would be DPR_VFP2. This should work well if register
00965 // pressure tracking were modified such that a register use would increment the
00966 // pressure of the register class's representative and all of its super
00967 // classes' representatives transitively. We have not implemented this because
00968 // of the difficulty prior to coalescing of modeling operand register classes
00969 // due to the common occurrence of cross class copies and subregister insertions
00970 // and extractions.
00971 std::pair<const TargetRegisterClass *, uint8_t>
00972 ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
00973                                            MVT VT) const {
00974   const TargetRegisterClass *RRC = nullptr;
00975   uint8_t Cost = 1;
00976   switch (VT.SimpleTy) {
00977   default:
00978     return TargetLowering::findRepresentativeClass(TRI, VT);
00979   // Use DPR as the representative register class for all floating-point
00980   // and vector types. Since there are 32 SPR registers and 32 DPR registers,
00981   // the cost is 1 for both f32 and f64.
00982   case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
00983   case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
00984     RRC = &ARM::DPRRegClass;
00985     // When NEON is used for SP, only half of the register file is available
00986     // because operations that define both SP and DP results will be constrained
00987     // to the VFP2 class (D0-D15). We currently model this constraint prior to
00988     // coalescing by double-counting the SP regs. See the FIXME above.
00989     if (Subtarget->useNEONForSinglePrecisionFP())
00990       Cost = 2;
00991     break;
00992   case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
00993   case MVT::v4f32: case MVT::v2f64:
00994     RRC = &ARM::DPRRegClass;
00995     Cost = 2;
00996     break;
00997   case MVT::v4i64:
00998     RRC = &ARM::DPRRegClass;
00999     Cost = 4;
01000     break;
01001   case MVT::v8i64:
01002     RRC = &ARM::DPRRegClass;
01003     Cost = 8;
01004     break;
01005   }
01006   return std::make_pair(RRC, Cost);
01007 }
01008 
01009 const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
01010   switch (Opcode) {
01011   default: return nullptr;
01012   case ARMISD::Wrapper:       return "ARMISD::Wrapper";
01013   case ARMISD::WrapperPIC:    return "ARMISD::WrapperPIC";
01014   case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
01015   case ARMISD::CALL:          return "ARMISD::CALL";
01016   case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
01017   case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
01018   case ARMISD::tCALL:         return "ARMISD::tCALL";
01019   case ARMISD::BRCOND:        return "ARMISD::BRCOND";
01020   case ARMISD::BR_JT:         return "ARMISD::BR_JT";
01021   case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
01022   case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
01023   case ARMISD::INTRET_FLAG:   return "ARMISD::INTRET_FLAG";
01024   case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
01025   case ARMISD::CMP:           return "ARMISD::CMP";
01026   case ARMISD::CMN:           return "ARMISD::CMN";
01027   case ARMISD::CMPZ:          return "ARMISD::CMPZ";
01028   case ARMISD::CMPFP:         return "ARMISD::CMPFP";
01029   case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
01030   case ARMISD::BCC_i64:       return "ARMISD::BCC_i64";
01031   case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
01032 
01033   case ARMISD::CMOV:          return "ARMISD::CMOV";
01034 
01035   case ARMISD::RBIT:          return "ARMISD::RBIT";
01036 
01037   case ARMISD::FTOSI:         return "ARMISD::FTOSI";
01038   case ARMISD::FTOUI:         return "ARMISD::FTOUI";
01039   case ARMISD::SITOF:         return "ARMISD::SITOF";
01040   case ARMISD::UITOF:         return "ARMISD::UITOF";
01041 
01042   case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
01043   case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
01044   case ARMISD::RRX:           return "ARMISD::RRX";
01045 
01046   case ARMISD::ADDC:          return "ARMISD::ADDC";
01047   case ARMISD::ADDE:          return "ARMISD::ADDE";
01048   case ARMISD::SUBC:          return "ARMISD::SUBC";
01049   case ARMISD::SUBE:          return "ARMISD::SUBE";
01050 
01051   case ARMISD::VMOVRRD:       return "ARMISD::VMOVRRD";
01052   case ARMISD::VMOVDRR:       return "ARMISD::VMOVDRR";
01053 
01054   case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
01055   case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP";
01056 
01057   case ARMISD::TC_RETURN:     return "ARMISD::TC_RETURN";
01058 
01059   case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";
01060 
01061   case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";
01062 
01063   case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";
01064 
01065   case ARMISD::PRELOAD:       return "ARMISD::PRELOAD";
01066 
01067   case ARMISD::WIN__CHKSTK:   return "ARMISD::WIN__CHKSTK";
01068 
01069   case ARMISD::VCEQ:          return "ARMISD::VCEQ";
01070   case ARMISD::VCEQZ:         return "ARMISD::VCEQZ";
01071   case ARMISD::VCGE:          return "ARMISD::VCGE";
01072   case ARMISD::VCGEZ:         return "ARMISD::VCGEZ";
01073   case ARMISD::VCLEZ:         return "ARMISD::VCLEZ";
01074   case ARMISD::VCGEU:         return "ARMISD::VCGEU";
01075   case ARMISD::VCGT:          return "ARMISD::VCGT";
01076   case ARMISD::VCGTZ:         return "ARMISD::VCGTZ";
01077   case ARMISD::VCLTZ:         return "ARMISD::VCLTZ";
01078   case ARMISD::VCGTU:         return "ARMISD::VCGTU";
01079   case ARMISD::VTST:          return "ARMISD::VTST";
01080 
01081   case ARMISD::VSHL:          return "ARMISD::VSHL";
01082   case ARMISD::VSHRs:         return "ARMISD::VSHRs";
01083   case ARMISD::VSHRu:         return "ARMISD::VSHRu";
01084   case ARMISD::VRSHRs:        return "ARMISD::VRSHRs";
01085   case ARMISD::VRSHRu:        return "ARMISD::VRSHRu";
01086   case ARMISD::VRSHRN:        return "ARMISD::VRSHRN";
01087   case ARMISD::VQSHLs:        return "ARMISD::VQSHLs";
01088   case ARMISD::VQSHLu:        return "ARMISD::VQSHLu";
01089   case ARMISD::VQSHLsu:       return "ARMISD::VQSHLsu";
01090   case ARMISD::VQSHRNs:       return "ARMISD::VQSHRNs";
01091   case ARMISD::VQSHRNu:       return "ARMISD::VQSHRNu";
01092   case ARMISD::VQSHRNsu:      return "ARMISD::VQSHRNsu";
01093   case ARMISD::VQRSHRNs:      return "ARMISD::VQRSHRNs";
01094   case ARMISD::VQRSHRNu:      return "ARMISD::VQRSHRNu";
01095   case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
01096   case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
01097   case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
01098   case ARMISD::VMOVIMM:       return "ARMISD::VMOVIMM";
01099   case ARMISD::VMVNIMM:       return "ARMISD::VMVNIMM";
01100   case ARMISD::VMOVFPIMM:     return "ARMISD::VMOVFPIMM";
01101   case ARMISD::VDUP:          return "ARMISD::VDUP";
01102   case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
01103   case ARMISD::VEXT:          return "ARMISD::VEXT";
01104   case ARMISD::VREV64:        return "ARMISD::VREV64";
01105   case ARMISD::VREV32:        return "ARMISD::VREV32";
01106   case ARMISD::VREV16:        return "ARMISD::VREV16";
01107   case ARMISD::VZIP:          return "ARMISD::VZIP";
01108   case ARMISD::VUZP:          return "ARMISD::VUZP";
01109   case ARMISD::VTRN:          return "ARMISD::VTRN";
01110   case ARMISD::VTBL1:         return "ARMISD::VTBL1";
01111   case ARMISD::VTBL2:         return "ARMISD::VTBL2";
01112   case ARMISD::VMULLs:        return "ARMISD::VMULLs";
01113   case ARMISD::VMULLu:        return "ARMISD::VMULLu";
01114   case ARMISD::UMLAL:         return "ARMISD::UMLAL";
01115   case ARMISD::SMLAL:         return "ARMISD::SMLAL";
01116   case ARMISD::BUILD_VECTOR:  return "ARMISD::BUILD_VECTOR";
01117   case ARMISD::FMAX:          return "ARMISD::FMAX";
01118   case ARMISD::FMIN:          return "ARMISD::FMIN";
01119   case ARMISD::VMAXNM:        return "ARMISD::VMAXNM";
01120   case ARMISD::VMINNM:        return "ARMISD::VMINNM";
01121   case ARMISD::BFI:           return "ARMISD::BFI";
01122   case ARMISD::VORRIMM:       return "ARMISD::VORRIMM";
01123   case ARMISD::VBICIMM:       return "ARMISD::VBICIMM";
01124   case ARMISD::VBSL:          return "ARMISD::VBSL";
01125   case ARMISD::VLD2DUP:       return "ARMISD::VLD2DUP";
01126   case ARMISD::VLD3DUP:       return "ARMISD::VLD3DUP";
01127   case ARMISD::VLD4DUP:       return "ARMISD::VLD4DUP";
01128   case ARMISD::VLD1_UPD:      return "ARMISD::VLD1_UPD";
01129   case ARMISD::VLD2_UPD:      return "ARMISD::VLD2_UPD";
01130   case ARMISD::VLD3_UPD:      return "ARMISD::VLD3_UPD";
01131   case ARMISD::VLD4_UPD:      return "ARMISD::VLD4_UPD";
01132   case ARMISD::VLD2LN_UPD:    return "ARMISD::VLD2LN_UPD";
01133   case ARMISD::VLD3LN_UPD:    return "ARMISD::VLD3LN_UPD";
01134   case ARMISD::VLD4LN_UPD:    return "ARMISD::VLD4LN_UPD";
01135   case ARMISD::VLD2DUP_UPD:   return "ARMISD::VLD2DUP_UPD";
01136   case ARMISD::VLD3DUP_UPD:   return "ARMISD::VLD3DUP_UPD";
01137   case ARMISD::VLD4DUP_UPD:   return "ARMISD::VLD4DUP_UPD";
01138   case ARMISD::VST1_UPD:      return "ARMISD::VST1_UPD";
01139   case ARMISD::VST2_UPD:      return "ARMISD::VST2_UPD";
01140   case ARMISD::VST3_UPD:      return "ARMISD::VST3_UPD";
01141   case ARMISD::VST4_UPD:      return "ARMISD::VST4_UPD";
01142   case ARMISD::VST2LN_UPD:    return "ARMISD::VST2LN_UPD";
01143   case ARMISD::VST3LN_UPD:    return "ARMISD::VST3LN_UPD";
01144   case ARMISD::VST4LN_UPD:    return "ARMISD::VST4LN_UPD";
01145   }
01146 }
01147 
01148 EVT ARMTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
01149   if (!VT.isVector()) return getPointerTy();
01150   return VT.changeVectorElementTypeToInteger();
01151 }
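// For example, getSetCCResultType maps a setcc on v4f32 operands to a v4i32
// result (an all-ones or all-zeros mask per lane), while a scalar setcc
// yields an i32 result of pointer width.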
01152 
01153 /// getRegClassFor - Return the register class that should be used for the
01154 /// specified value type.
01155 const TargetRegisterClass *ARMTargetLowering::getRegClassFor(MVT VT) const {
01156   // Map v4i64 to QQ registers but do not make the type legal. Similarly map
01157   // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
01158   // load / store 4 to 8 consecutive D registers.
01159   if (Subtarget->hasNEON()) {
01160     if (VT == MVT::v4i64)
01161       return &ARM::QQPRRegClass;
01162     if (VT == MVT::v8i64)
01163       return &ARM::QQQQPRRegClass;
01164   }
01165   return TargetLowering::getRegClassFor(VT);
01166 }
01167 
01168 // Create a fast isel object.
01169 FastISel *
01170 ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
01171                                   const TargetLibraryInfo *libInfo) const {
01172   return ARM::createFastISel(funcInfo, libInfo);
01173 }
01174 
01175 Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
01176   unsigned NumVals = N->getNumValues();
01177   if (!NumVals)
01178     return Sched::RegPressure;
01179 
01180   for (unsigned i = 0; i != NumVals; ++i) {
01181     EVT VT = N->getValueType(i);
01182     if (VT == MVT::Glue || VT == MVT::Other)
01183       continue;
01184     if (VT.isFloatingPoint() || VT.isVector())
01185       return Sched::ILP;
01186   }
01187 
01188   if (!N->isMachineOpcode())
01189     return Sched::RegPressure;
01190 
01191   // Loads are scheduled for latency even if the instruction itinerary
01192   // is not available.
01193   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
01194   const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
01195 
01196   if (MCID.getNumDefs() == 0)
01197     return Sched::RegPressure;
01198   if (!Itins->isEmpty() &&
01199       Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
01200     return Sched::ILP;
01201 
01202   return Sched::RegPressure;
01203 }
01204 
01205 //===----------------------------------------------------------------------===//
01206 // Lowering Code
01207 //===----------------------------------------------------------------------===//
01208 
01209 /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
01210 static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
01211   switch (CC) {
01212   default: llvm_unreachable("Unknown condition code!");
01213   case ISD::SETNE:  return ARMCC::NE;
01214   case ISD::SETEQ:  return ARMCC::EQ;
01215   case ISD::SETGT:  return ARMCC::GT;
01216   case ISD::SETGE:  return ARMCC::GE;
01217   case ISD::SETLT:  return ARMCC::LT;
01218   case ISD::SETLE:  return ARMCC::LE;
01219   case ISD::SETUGT: return ARMCC::HI;
01220   case ISD::SETUGE: return ARMCC::HS;
01221   case ISD::SETULT: return ARMCC::LO;
01222   case ISD::SETULE: return ARMCC::LS;
01223   }
01224 }
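// For example, an unsigned "less than" comparison (ISD::SETULT) maps to
// ARMCC::LO, so the instruction consuming the CMP is predicated on the
// unsigned "lo" condition rather than the signed "lt" condition.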
01225 
01226 /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
01227 static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
01228                         ARMCC::CondCodes &CondCode2) {
01229   CondCode2 = ARMCC::AL;
01230   switch (CC) {
01231   default: llvm_unreachable("Unknown FP condition!");
01232   case ISD::SETEQ:
01233   case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
01234   case ISD::SETGT:
01235   case ISD::SETOGT: CondCode = ARMCC::GT; break;
01236   case ISD::SETGE:
01237   case ISD::SETOGE: CondCode = ARMCC::GE; break;
01238   case ISD::SETOLT: CondCode = ARMCC::MI; break;
01239   case ISD::SETOLE: CondCode = ARMCC::LS; break;
01240   case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
01241   case ISD::SETO:   CondCode = ARMCC::VC; break;
01242   case ISD::SETUO:  CondCode = ARMCC::VS; break;
01243   case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
01244   case ISD::SETUGT: CondCode = ARMCC::HI; break;
01245   case ISD::SETUGE: CondCode = ARMCC::PL; break;
01246   case ISD::SETLT:
01247   case ISD::SETULT: CondCode = ARMCC::LT; break;
01248   case ISD::SETLE:
01249   case ISD::SETULE: CondCode = ARMCC::LE; break;
01250   case ISD::SETNE:
01251   case ISD::SETUNE: CondCode = ARMCC::NE; break;
01252   }
01253 }
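// Note that some FP comparisons need two ARM conditions. For example,
// ISD::SETONE (ordered and not equal) only holds when either MI (less than)
// or GT (greater than) is true after the FP compare, so callers that see
// CondCode2 != ARMCC::AL typically emit a second predicated operation.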
01254 
01255 //===----------------------------------------------------------------------===//
01256 //                      Calling Convention Implementation
01257 //===----------------------------------------------------------------------===//
01258 
01259 #include "ARMGenCallingConv.inc"
01260 
01261 /// getEffectiveCallingConv - Get the effective calling convention, taking into
01262 /// account presence of floating point hardware and calling convention
01263 /// limitations, such as support for variadic functions.
01264 CallingConv::ID
01265 ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
01266                                            bool isVarArg) const {
01267   switch (CC) {
01268   default:
01269     llvm_unreachable("Unsupported calling convention");
01270   case CallingConv::ARM_AAPCS:
01271   case CallingConv::ARM_APCS:
01272   case CallingConv::GHC:
01273     return CC;
01274   case CallingConv::ARM_AAPCS_VFP:
01275     return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
01276   case CallingConv::C:
01277     if (!Subtarget->isAAPCS_ABI())
01278       return CallingConv::ARM_APCS;
01279     else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() &&
01280              getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
01281              !isVarArg)
01282       return CallingConv::ARM_AAPCS_VFP;
01283     else
01284       return CallingConv::ARM_AAPCS;
01285   case CallingConv::Fast:
01286     if (!Subtarget->isAAPCS_ABI()) {
01287       if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
01288         return CallingConv::Fast;
01289       return CallingConv::ARM_APCS;
01290     } else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
01291       return CallingConv::ARM_AAPCS_VFP;
01292     else
01293       return CallingConv::ARM_AAPCS;
01294   }
01295 }
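// For example, a non-variadic C call on an AAPCS target with VFP2, a
// hard-float ABI (FloatABI::Hard) and not in Thumb1 mode is treated as
// ARM_AAPCS_VFP (FP arguments in s/d registers); the same call made variadic
// falls back to ARM_AAPCS, passing everything in core registers and on the
// stack.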
01296 
01297 /// CCAssignFnForNode - Selects the correct CCAssignFn for the given
01298 /// CallingConvention.
01299 CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
01300                                                  bool Return,
01301                                                  bool isVarArg) const {
01302   switch (getEffectiveCallingConv(CC, isVarArg)) {
01303   default:
01304     llvm_unreachable("Unsupported calling convention");
01305   case CallingConv::ARM_APCS:
01306     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
01307   case CallingConv::ARM_AAPCS:
01308     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
01309   case CallingConv::ARM_AAPCS_VFP:
01310     return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
01311   case CallingConv::Fast:
01312     return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
01313   case CallingConv::GHC:
01314     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
01315   }
01316 }
01317 
01318 /// LowerCallResult - Lower the result values of a call into the
01319 /// appropriate copies out of appropriate physical registers.
01320 SDValue
01321 ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
01322                                    CallingConv::ID CallConv, bool isVarArg,
01323                                    const SmallVectorImpl<ISD::InputArg> &Ins,
01324                                    SDLoc dl, SelectionDAG &DAG,
01325                                    SmallVectorImpl<SDValue> &InVals,
01326                                    bool isThisReturn, SDValue ThisVal) const {
01327 
01328   // Assign locations to each value returned by this call.
01329   SmallVector<CCValAssign, 16> RVLocs;
01330   ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
01331                     *DAG.getContext(), Call);
01332   CCInfo.AnalyzeCallResult(Ins,
01333                            CCAssignFnForNode(CallConv, /* Return*/ true,
01334                                              isVarArg));
01335 
01336   // Copy all of the result registers out of their specified physreg.
01337   for (unsigned i = 0; i != RVLocs.size(); ++i) {
01338     CCValAssign VA = RVLocs[i];
01339 
01340     // Pass the 'this' value directly from the argument to the return value,
01341     // to avoid register unit interference.
01342     if (i == 0 && isThisReturn) {
01343       assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
01344              "unexpected return calling convention register assignment");
01345       InVals.push_back(ThisVal);
01346       continue;
01347     }
01348 
01349     SDValue Val;
01350     if (VA.needsCustom()) {
01351       // Handle f64 or half of a v2f64.
01352       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
01353                                       InFlag);
01354       Chain = Lo.getValue(1);
01355       InFlag = Lo.getValue(2);
01356       VA = RVLocs[++i]; // skip ahead to next loc
01357       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
01358                                       InFlag);
01359       Chain = Hi.getValue(1);
01360       InFlag = Hi.getValue(2);
01361       if (!Subtarget->isLittle())
01362         std::swap (Lo, Hi);
01363       Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
01364 
01365       if (VA.getLocVT() == MVT::v2f64) {
01366         SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
01367         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
01368                           DAG.getConstant(0, MVT::i32));
01369 
01370         VA = RVLocs[++i]; // skip ahead to next loc
01371         Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
01372         Chain = Lo.getValue(1);
01373         InFlag = Lo.getValue(2);
01374         VA = RVLocs[++i]; // skip ahead to next loc
01375         Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
01376         Chain = Hi.getValue(1);
01377         InFlag = Hi.getValue(2);
01378         if (!Subtarget->isLittle())
01379           std::swap (Lo, Hi);
01380         Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
01381         Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
01382                           DAG.getConstant(1, MVT::i32));
01383       }
01384     } else {
01385       Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
01386                                InFlag);
01387       Chain = Val.getValue(1);
01388       InFlag = Val.getValue(2);
01389     }
01390 
01391     switch (VA.getLocInfo()) {
01392     default: llvm_unreachable("Unknown loc info!");
01393     case CCValAssign::Full: break;
01394     case CCValAssign::BCvt:
01395       Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
01396       break;
01397     }
01398 
01399     InVals.push_back(Val);
01400   }
01401 
01402   return Chain;
01403 }
01404 
01405 /// LowerMemOpCallTo - Store the argument to the stack.
01406 SDValue
01407 ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
01408                                     SDValue StackPtr, SDValue Arg,
01409                                     SDLoc dl, SelectionDAG &DAG,
01410                                     const CCValAssign &VA,
01411                                     ISD::ArgFlagsTy Flags) const {
01412   unsigned LocMemOffset = VA.getLocMemOffset();
01413   SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
01414   PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
01415   return DAG.getStore(Chain, dl, Arg, PtrOff,
01416                       MachinePointerInfo::getStack(LocMemOffset),
01417                       false, false, 0);
01418 }
01419 
01420 void ARMTargetLowering::PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
01421                                          SDValue Chain, SDValue &Arg,
01422                                          RegsToPassVector &RegsToPass,
01423                                          CCValAssign &VA, CCValAssign &NextVA,
01424                                          SDValue &StackPtr,
01425                                          SmallVectorImpl<SDValue> &MemOpChains,
01426                                          ISD::ArgFlagsTy Flags) const {
01427 
01428   SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
01429                               DAG.getVTList(MVT::i32, MVT::i32), Arg);
01430   unsigned id = Subtarget->isLittle() ? 0 : 1;
01431   RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));
01432 
01433   if (NextVA.isRegLoc())
01434     RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
01435   else {
01436     assert(NextVA.isMemLoc());
01437     if (!StackPtr.getNode())
01438       StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
01439 
01440     MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
01441                                            dl, DAG, NextVA,
01442                                            Flags));
01443   }
01444 }
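// Illustrative effect: an f64 argument assigned to r0/r1 is split by
// ARMISD::VMOVRRD into two i32 halves; on little-endian targets the low word
// goes in the lower-numbered register, and on big-endian targets the halves
// are swapped (see the "id" selection above). If only one register remains,
// the other half is stored to the outgoing argument area instead.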
01445 
01446 /// LowerCall - Lower a call into a callseq_start <-
01447 /// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
01448 /// nodes.
01449 SDValue
01450 ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
01451                              SmallVectorImpl<SDValue> &InVals) const {
01452   SelectionDAG &DAG                     = CLI.DAG;
01453   SDLoc &dl                          = CLI.DL;
01454   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
01455   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
01456   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
01457   SDValue Chain                         = CLI.Chain;
01458   SDValue Callee                        = CLI.Callee;
01459   bool &isTailCall                      = CLI.IsTailCall;
01460   CallingConv::ID CallConv              = CLI.CallConv;
01461   bool doesNotRet                       = CLI.DoesNotReturn;
01462   bool isVarArg                         = CLI.IsVarArg;
01463 
01464   MachineFunction &MF = DAG.getMachineFunction();
01465   bool isStructRet    = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
01466   bool isThisReturn   = false;
01467   bool isSibCall      = false;
01468 
01469   // Disable tail calls if they're not supported.
01470   if (!Subtarget->supportsTailCall() || MF.getTarget().Options.DisableTailCalls)
01471     isTailCall = false;
01472 
01473   if (isTailCall) {
01474     // Check if it's really possible to do a tail call.
01475     isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
01476                     isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(),
01477                                                    Outs, OutVals, Ins, DAG);
01478     if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall())
01479       report_fatal_error("failed to perform tail call elimination on a call "
01480                          "site marked musttail");
01481     // We don't support GuaranteedTailCallOpt for ARM, only automatically
01482     // detected sibcalls.
01483     if (isTailCall) {
01484       ++NumTailCalls;
01485       isSibCall = true;
01486     }
01487   }
01488 
01489   // Analyze operands of the call, assigning locations to each operand.
01490   SmallVector<CCValAssign, 16> ArgLocs;
01491   ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
01492                     *DAG.getContext(), Call);
01493   CCInfo.AnalyzeCallOperands(Outs,
01494                              CCAssignFnForNode(CallConv, /* Return*/ false,
01495                                                isVarArg));
01496 
01497   // Get a count of how many bytes are to be pushed on the stack.
01498   unsigned NumBytes = CCInfo.getNextStackOffset();
01499 
01500   // For tail calls, memory operands are available in our caller's stack.
01501   if (isSibCall)
01502     NumBytes = 0;
01503 
01504   // Adjust the stack pointer for the new arguments...
01505   // These operations are automatically eliminated by the prolog/epilog pass
01506   if (!isSibCall)
01507     Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
01508                                  dl);
01509 
01510   SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
01511 
01512   RegsToPassVector RegsToPass;
01513   SmallVector<SDValue, 8> MemOpChains;
01514 
01515   // Walk the register/memloc assignments, inserting copies/loads.  In the case
01516   // of tail call optimization, arguments are handled later.
01517   for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
01518        i != e;
01519        ++i, ++realArgIdx) {
01520     CCValAssign &VA = ArgLocs[i];
01521     SDValue Arg = OutVals[realArgIdx];
01522     ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
01523     bool isByVal = Flags.isByVal();
01524 
01525     // Promote the value if needed.
01526     switch (VA.getLocInfo()) {
01527     default: llvm_unreachable("Unknown loc info!");
01528     case CCValAssign::Full: break;
01529     case CCValAssign::SExt:
01530       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
01531       break;
01532     case CCValAssign::ZExt:
01533       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
01534       break;
01535     case CCValAssign::AExt:
01536       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
01537       break;
01538     case CCValAssign::BCvt:
01539       Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
01540       break;
01541     }
01542 
01543     // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
01544     if (VA.needsCustom()) {
01545       if (VA.getLocVT() == MVT::v2f64) {
01546         SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
01547                                   DAG.getConstant(0, MVT::i32));
01548         SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
01549                                   DAG.getConstant(1, MVT::i32));
01550 
01551         PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
01552                          VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
01553 
01554         VA = ArgLocs[++i]; // skip ahead to next loc
01555         if (VA.isRegLoc()) {
01556           PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
01557                            VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
01558         } else {
01559           assert(VA.isMemLoc());
01560 
01561           MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
01562                                                  dl, DAG, VA, Flags));
01563         }
01564       } else {
01565         PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
01566                          StackPtr, MemOpChains, Flags);
01567       }
01568     } else if (VA.isRegLoc()) {
01569       if (realArgIdx == 0 && Flags.isReturned() && Outs[0].VT == MVT::i32) {
01570         assert(VA.getLocVT() == MVT::i32 &&
01571                "unexpected calling convention register assignment");
01572         assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
01573                "unexpected use of 'returned'");
01574         isThisReturn = true;
01575       }
01576       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
01577     } else if (isByVal) {
01578       assert(VA.isMemLoc());
01579       unsigned offset = 0;
01580 
01581       // True if this byval aggregate will be split between registers
01582       // and memory.
01583       unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
01584       unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();
01585 
01586       if (CurByValIdx < ByValArgsCount) {
01587 
01588         unsigned RegBegin, RegEnd;
01589         CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
01590 
01591         EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
01592         unsigned int i, j;
01593         for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
01594           SDValue Const = DAG.getConstant(4*i, MVT::i32);
01595           SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
01596           SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
01597                                      MachinePointerInfo(),
01598                                      false, false, false,
01599                                      DAG.InferPtrAlignment(AddArg));
01600           MemOpChains.push_back(Load.getValue(1));
01601           RegsToPass.push_back(std::make_pair(j, Load));
01602         }
01603 
01604         // If the parameter size exceeds the register area, the "offset"
01605         // value helps us compute the stack slot for the remaining part.
01606         offset = RegEnd - RegBegin;
01607 
01608         CCInfo.nextInRegsParam();
01609       }
01610 
01611       if (Flags.getByValSize() > 4*offset) {
01612         unsigned LocMemOffset = VA.getLocMemOffset();
01613         SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset);
01614         SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr,
01615                                   StkPtrOff);
01616         SDValue SrcOffset = DAG.getIntPtrConstant(4*offset);
01617         SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset);
01618         SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset,
01619                                            MVT::i32);
01620         SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), MVT::i32);
01621 
01622         SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
01623         SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
01624         MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
01625                                           Ops));
01626       }
01627     } else if (!isSibCall) {
01628       assert(VA.isMemLoc());
01629 
01630       MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
01631                                              dl, DAG, VA, Flags));
01632     }
01633   }
01634 
01635   if (!MemOpChains.empty())
01636     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
01637 
01638   // Build a sequence of copy-to-reg nodes chained together with token chain
01639   // and flag operands which copy the outgoing args into the appropriate regs.
01640   SDValue InFlag;
01641   // Tail call byval lowering might overwrite argument registers so in case of
01642   // tail call optimization the copies to registers are lowered later.
01643   if (!isTailCall)
01644     for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
01645       Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
01646                                RegsToPass[i].second, InFlag);
01647       InFlag = Chain.getValue(1);
01648     }
01649 
01650   // For tail calls lower the arguments to the 'real' stack slot.
01651   if (isTailCall) {
01652     // Force all the incoming stack arguments to be loaded from the stack
01653     // before any new outgoing arguments are stored to the stack, because the
01654     // outgoing stack slots may alias the incoming argument stack slots, and
01655     // the alias isn't otherwise explicit. This is slightly more conservative
01656     // than necessary, because it means that each store effectively depends
01657     // on every argument instead of just those arguments it would clobber.
01658 
01659     // Do not flag preceding copytoreg stuff together with the following stuff.
01660     InFlag = SDValue();
01661     for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
01662       Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
01663                                RegsToPass[i].second, InFlag);
01664       InFlag = Chain.getValue(1);
01665     }
01666     InFlag = SDValue();
01667   }
01668 
01669   // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
01670   // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
01671   // node so that legalize doesn't hack it.
01672   bool isDirect = false;
01673   bool isARMFunc = false;
01674   bool isLocalARMFunc = false;
01675   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
01676 
01677   if (EnableARMLongCalls) {
01678     assert((Subtarget->isTargetWindows() ||
01679             getTargetMachine().getRelocationModel() == Reloc::Static) &&
01680            "long-calls with non-static relocation model!");
01681     // Handle a global address or an external symbol. If it's not one of
01682     // those, the target's already in a register, so we don't need to do
01683     // anything extra.
01684     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
01685       const GlobalValue *GV = G->getGlobal();
01686       // Create a constant pool entry for the callee address
01687       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
01688       ARMConstantPoolValue *CPV =
01689         ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);
01690 
01691       // Get the address of the callee into a register
01692       SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
01693       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
01694       Callee = DAG.getLoad(getPointerTy(), dl,
01695                            DAG.getEntryNode(), CPAddr,
01696                            MachinePointerInfo::getConstantPool(),
01697                            false, false, false, 0);
01698     } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
01699       const char *Sym = S->getSymbol();
01700 
01701       // Create a constant pool entry for the callee address
01702       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
01703       ARMConstantPoolValue *CPV =
01704         ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
01705                                       ARMPCLabelIndex, 0);
01706       // Get the address of the callee into a register
01707       SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
01708       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
01709       Callee = DAG.getLoad(getPointerTy(), dl,
01710                            DAG.getEntryNode(), CPAddr,
01711                            MachinePointerInfo::getConstantPool(),
01712                            false, false, false, 0);
01713     }
01714   } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
01715     const GlobalValue *GV = G->getGlobal();
01716     isDirect = true;
01717     bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
01718     bool isStub = (isExt && Subtarget->isTargetMachO()) &&
01719                    getTargetMachine().getRelocationModel() != Reloc::Static;
01720     isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
01721     // ARM call to a local ARM function is predicable.
01722     isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking);
01723     // tBX takes a register source operand.
01724     if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
01725       assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
01726       Callee = DAG.getNode(ARMISD::WrapperPIC, dl, getPointerTy(),
01727                            DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
01728                                                       0, ARMII::MO_NONLAZY));
01729       Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
01730                            MachinePointerInfo::getGOT(), false, false, true, 0);
01731     } else if (Subtarget->isTargetCOFF()) {
01732       assert(Subtarget->isTargetWindows() &&
01733              "Windows is the only supported COFF target");
01734       unsigned TargetFlags = GV->hasDLLImportStorageClass()
01735                                  ? ARMII::MO_DLLIMPORT
01736                                  : ARMII::MO_NO_FLAG;
01737       Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), /*Offset=*/0,
01738                                           TargetFlags);
01739       if (GV->hasDLLImportStorageClass())
01740         Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
01741                              DAG.getNode(ARMISD::Wrapper, dl, getPointerTy(),
01742                                          Callee), MachinePointerInfo::getGOT(),
01743                              false, false, false, 0);
01744     } else {
01745       // On ELF targets for PIC code, direct calls should go through the PLT
01746       unsigned OpFlags = 0;
01747       if (Subtarget->isTargetELF() &&
01748           getTargetMachine().getRelocationModel() == Reloc::PIC_)
01749         OpFlags = ARMII::MO_PLT;
01750       Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
01751     }
01752   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
01753     isDirect = true;
01754     bool isStub = Subtarget->isTargetMachO() &&
01755                   getTargetMachine().getRelocationModel() != Reloc::Static;
01756     isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
01757     // tBX takes a register source operand.
01758     const char *Sym = S->getSymbol();
01759     if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
01760       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
01761       ARMConstantPoolValue *CPV =
01762         ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
01763                                       ARMPCLabelIndex, 4);
01764       SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
01765       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
01766       Callee = DAG.getLoad(getPointerTy(), dl,
01767                            DAG.getEntryNode(), CPAddr,
01768                            MachinePointerInfo::getConstantPool(),
01769                            false, false, false, 0);
01770       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
01771       Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
01772                            getPointerTy(), Callee, PICLabel);
01773     } else {
01774       unsigned OpFlags = 0;
01775       // On ELF targets for PIC code, direct calls should go through the PLT
01776       if (Subtarget->isTargetELF() &&
01777                   getTargetMachine().getRelocationModel() == Reloc::PIC_)
01778         OpFlags = ARMII::MO_PLT;
01779       Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags);
01780     }
01781   }
01782 
01783   // FIXME: handle tail calls differently.
01784   unsigned CallOpc;
01785   bool HasMinSizeAttr = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
01786   if (Subtarget->isThumb()) {
01787     if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
01788       CallOpc = ARMISD::CALL_NOLINK;
01789     else
01790       CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
01791   } else {
01792     if (!isDirect && !Subtarget->hasV5TOps())
01793       CallOpc = ARMISD::CALL_NOLINK;
01794     else if (doesNotRet && isDirect && Subtarget->hasRAS() &&
01795                // Emit regular call when code size is the priority
01796                !HasMinSizeAttr)
01797       // "mov lr, pc; b _foo" to avoid confusing the RSP
01798       CallOpc = ARMISD::CALL_NOLINK;
01799     else
01800       CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
01801   }
01802 
01803   std::vector<SDValue> Ops;
01804   Ops.push_back(Chain);
01805   Ops.push_back(Callee);
01806 
01807   // Add argument registers to the end of the list so that they are known live
01808   // into the call.
01809   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
01810     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
01811                                   RegsToPass[i].second.getValueType()));
01812 
01813   // Add a register mask operand representing the call-preserved registers.
01814   if (!isTailCall) {
01815     const uint32_t *Mask;
01816     const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
01817     if (isThisReturn) {
01818       // For 'this' returns, use the R0-preserving mask if applicable
01819       Mask = ARI->getThisReturnPreservedMask(CallConv);
01820       if (!Mask) {
01821         // Set isThisReturn to false if the calling convention is not one that
01822         // allows 'returned' to be modeled in this way, so LowerCallResult does
01823         // not try to pass 'this' straight through
01824         isThisReturn = false;
01825         Mask = ARI->getCallPreservedMask(CallConv);
01826       }
01827     } else
01828       Mask = ARI->getCallPreservedMask(CallConv);
01829 
01830     assert(Mask && "Missing call preserved mask for calling convention");
01831     Ops.push_back(DAG.getRegisterMask(Mask));
01832   }
01833 
01834   if (InFlag.getNode())
01835     Ops.push_back(InFlag);
01836 
01837   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
01838   if (isTailCall)
01839     return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
01840 
01841   // Returns a chain and a flag for retval copy to use.
01842   Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
01843   InFlag = Chain.getValue(1);
01844 
01845   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
01846                              DAG.getIntPtrConstant(0, true), InFlag, dl);
01847   if (!Ins.empty())
01848     InFlag = Chain.getValue(1);
01849 
01850   // Handle result values, copying them out of physregs into vregs that we
01851   // return.
01852   return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
01853                          InVals, isThisReturn,
01854                          isThisReturn ? OutVals[0] : SDValue());
01855 }
01856 
01857 /// HandleByVal - Every parameter *after* a byval parameter is passed
01858 /// on the stack.  Remember the next parameter register to allocate,
01859 /// and then confiscate the rest of the parameter registers to ensure
01860 /// this.
01861 void
01862 ARMTargetLowering::HandleByVal(
01863     CCState *State, unsigned &size, unsigned Align) const {
01864   unsigned reg = State->AllocateReg(GPRArgRegs);
01865   assert((State->getCallOrPrologue() == Prologue ||
01866           State->getCallOrPrologue() == Call) &&
01867          "unhandled ParmContext");
01868 
01869   if ((ARM::R0 <= reg) && (reg <= ARM::R3)) {
01870     if (Subtarget->isAAPCS_ABI() && Align > 4) {
01871       unsigned AlignInRegs = Align / 4;
01872       unsigned Waste = (ARM::R4 - reg) % AlignInRegs;
01873       for (unsigned i = 0; i < Waste; ++i)
01874         reg = State->AllocateReg(GPRArgRegs);
01875     }
01876     if (reg != 0) {
01877       unsigned excess = 4 * (ARM::R4 - reg);
01878 
01879       // Special case when NSAA != SP and the parameter size is greater than
01880       // the size of all remaining GPR registers. In that case we cannot split
01881       // the parameter; we must send it all to the stack. We also must set
01882       // NCRN to R4, so all remaining registers are wasted.
01883       const unsigned NSAAOffset = State->getNextStackOffset();
01884       if (Subtarget->isAAPCS_ABI() && NSAAOffset != 0 && size > excess) {
01885         while (State->AllocateReg(GPRArgRegs))
01886           ;
01887         return;
01888       }
01889 
01890       // The first register for the byval parameter is the first register
01891       // that wasn't allocated before this method call, i.e. "reg".
01892       // If the parameter is small enough to be saved in the range [reg, r4),
01893       // the end (one past the last) register is reg + param-size-in-regs;
01894       // otherwise the parameter is split between registers and the stack,
01895       // and the end register is r4 in that case.
01896       unsigned ByValRegBegin = reg;
01897       unsigned ByValRegEnd = (size < excess) ? reg + size/4 : (unsigned)ARM::R4;
01898       State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
01899       // Note, the first register was already allocated at the start of this
01900       // method, so allocate only the remaining registers we need.
01901       for (unsigned i = reg+1; i != ByValRegEnd; ++i)
01902         State->AllocateReg(GPRArgRegs);
01903       // A byval parameter that is split between registers and memory needs its
01904       // size truncated here.
01905       // In the case where the entire structure fits in registers, we set the
01906       // size in memory to zero.
01907       if (size < excess)
01908         size = 0;
01909       else
01910         size -= excess;
01911     }
01912   }
01913 }
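// Worked example (assuming nothing has been placed on the stack yet): with r1
// as the first free register, "excess" is 12 bytes. A 12-byte byval therefore
// occupies r1-r3 and its in-memory size is set to 0, while a 20-byte byval
// occupies r1-r3 for its first 12 bytes and leaves size = 8 for the stack.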
01914 
01915 /// MatchingStackOffset - Return true if the given stack call argument is
01916 /// already available in the same position (relatively) of the caller's
01917 /// incoming argument stack.
01918 static
01919 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
01920                          MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
01921                          const TargetInstrInfo *TII) {
01922   unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
01923   int FI = INT_MAX;
01924   if (Arg.getOpcode() == ISD::CopyFromReg) {
01925     unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
01926     if (!TargetRegisterInfo::isVirtualRegister(VR))
01927       return false;
01928     MachineInstr *Def = MRI->getVRegDef(VR);
01929     if (!Def)
01930       return false;
01931     if (!Flags.isByVal()) {
01932       if (!TII->isLoadFromStackSlot(Def, FI))
01933         return false;
01934     } else {
01935       return false;
01936     }
01937   } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
01938     if (Flags.isByVal())
01939       // ByVal argument is passed in as a pointer but it's now being
01940       // dereferenced. e.g.
01941       // define @foo(%struct.X* %A) {
01942       //   tail call @bar(%struct.X* byval %A)
01943       // }
01944       return false;
01945     SDValue Ptr = Ld->getBasePtr();
01946     FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
01947     if (!FINode)
01948       return false;
01949     FI = FINode->getIndex();
01950   } else
01951     return false;
01952 
01953   assert(FI != INT_MAX);
01954   if (!MFI->isFixedObjectIndex(FI))
01955     return false;
01956   return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
01957 }
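// For example, if a sibcall simply forwards an incoming i32 stack argument,
// the outgoing value is a load from a fixed frame index whose offset and size
// match the outgoing slot, so no copy is needed and the check succeeds.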
01958 
01959 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
01960 /// for tail call optimization. Targets which want to do tail call
01961 /// optimization should implement this function.
01962 bool
01963 ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
01964                                                      CallingConv::ID CalleeCC,
01965                                                      bool isVarArg,
01966                                                      bool isCalleeStructRet,
01967                                                      bool isCallerStructRet,
01968                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
01969                                     const SmallVectorImpl<SDValue> &OutVals,
01970                                     const SmallVectorImpl<ISD::InputArg> &Ins,
01971                                                      SelectionDAG& DAG) const {
01972   const Function *CallerF = DAG.getMachineFunction().getFunction();
01973   CallingConv::ID CallerCC = CallerF->getCallingConv();
01974   bool CCMatch = CallerCC == CalleeCC;
01975 
01976   // Look for obvious safe cases to perform tail call optimization that do not
01977   // require ABI changes. This is what gcc calls sibcall.
01978 
01979   // Do not sibcall optimize vararg calls unless the call site is not passing
01980   // any arguments.
01981   if (isVarArg && !Outs.empty())
01982     return false;
01983 
01984   // Exception-handling functions need a special set of instructions to indicate
01985   // a return to the hardware. Tail-calling another function would probably
01986   // break this.
01987   if (CallerF->hasFnAttribute("interrupt"))
01988     return false;
01989 
01990   // Also avoid sibcall optimization if either caller or callee uses struct
01991   // return semantics.
01992   if (isCalleeStructRet || isCallerStructRet)
01993     return false;
01994 
01995   // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo::
01996   // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as
01997   // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation
01998   // support in the assembler and linker to be used. This would need to be
01999   // fixed to fully support tail calls in Thumb1.
02000   //
02001   // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take
02002   // LR.  This means if we need to reload LR, it takes an extra instruction,
02003   // which outweighs the value of the tail call; but here we don't know yet
02004   // whether LR is going to be used.  Probably the right approach is to
02005   // generate the tail call here and turn it back into CALL/RET in
02006   // emitEpilogue if LR is used.
02007 
02008   // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
02009   // but we need to make sure there are enough registers; the only valid
02010   // registers are the 4 used for parameters.  We don't currently do this
02011   // case.
02012   if (Subtarget->isThumb1Only())
02013     return false;
02014 
02015   // Externally-defined functions with weak linkage should not be
02016   // tail-called on ARM when the OS does not support dynamic
02017   // pre-emption of symbols, as the AAELF spec requires normal calls
02018   // to undefined weak functions to be replaced with a NOP or jump to the
02019   // next instruction. The behaviour of branch instructions in this
02020   // situation (as used for tail calls) is implementation-defined, so we
02021   // cannot rely on the linker replacing the tail call with a return.
02022   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
02023     const GlobalValue *GV = G->getGlobal();
02024     const Triple TT(getTargetMachine().getTargetTriple());
02025     if (GV->hasExternalWeakLinkage() &&
02026         (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
02027       return false;
02028   }
02029 
02030   // If the calling conventions do not match, then we'd better make sure the
02031   // results are returned in the same way as what the caller expects.
02032   if (!CCMatch) {
02033     SmallVector<CCValAssign, 16> RVLocs1;
02034     ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
02035                        *DAG.getContext(), Call);
02036     CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));
02037 
02038     SmallVector<CCValAssign, 16> RVLocs2;
02039     ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
02040                        *DAG.getContext(), Call);
02041     CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));
02042 
02043     if (RVLocs1.size() != RVLocs2.size())
02044       return false;
02045     for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
02046       if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
02047         return false;
02048       if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
02049         return false;
02050       if (RVLocs1[i].isRegLoc()) {
02051         if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
02052           return false;
02053       } else {
02054         if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
02055           return false;
02056       }
02057     }
02058   }
02059 
02060   // If Caller's vararg or byval argument has been split between registers and
02061   // stack, do not perform tail call, since part of the argument is in caller's
02062   // local frame.
02063   const ARMFunctionInfo *AFI_Caller = DAG.getMachineFunction().
02064                                       getInfo<ARMFunctionInfo>();
02065   if (AFI_Caller->getArgRegsSaveSize())
02066     return false;
02067 
02068   // If the callee takes no arguments then go on to check the results of the
02069   // call.
02070   if (!Outs.empty()) {
02071     // Check if stack adjustment is needed. For now, do not do this if any
02072     // argument is passed on the stack.
02073     SmallVector<CCValAssign, 16> ArgLocs;
02074     ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
02075                       *DAG.getContext(), Call);
02076     CCInfo.AnalyzeCallOperands(Outs,
02077                                CCAssignFnForNode(CalleeCC, false, isVarArg));
02078     if (CCInfo.getNextStackOffset()) {
02079       MachineFunction &MF = DAG.getMachineFunction();
02080 
02081       // Check if the arguments are already laid out in the right way as
02082       // the caller's fixed stack objects.
02083       MachineFrameInfo *MFI = MF.getFrameInfo();
02084       const MachineRegisterInfo *MRI = &MF.getRegInfo();
02085       const TargetInstrInfo *TII = Subtarget->getInstrInfo();
02086       for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
02087            i != e;
02088            ++i, ++realArgIdx) {
02089         CCValAssign &VA = ArgLocs[i];
02090         EVT RegVT = VA.getLocVT();
02091         SDValue Arg = OutVals[realArgIdx];
02092         ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
02093         if (VA.getLocInfo() == CCValAssign::Indirect)
02094           return false;
02095         if (VA.needsCustom()) {
02096           // f64 and vector types are split into multiple registers or
02097           // register/stack-slot combinations.  The types will not match
02098           // the registers; give up on memory f64 refs until we figure
02099           // out what to do about this.
02100           if (!VA.isRegLoc())
02101             return false;
02102           if (!ArgLocs[++i].isRegLoc())
02103             return false;
02104           if (RegVT == MVT::v2f64) {
02105             if (!ArgLocs[++i].isRegLoc())
02106               return false;
02107             if (!ArgLocs[++i].isRegLoc())
02108               return false;
02109           }
02110         } else if (!VA.isRegLoc()) {
02111           if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
02112                                    MFI, MRI, TII))
02113             return false;
02114         }
02115       }
02116     }
02117   }
02118 
02119   return true;
02120 }
02121 
02122 bool
02123 ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
02124                                   MachineFunction &MF, bool isVarArg,
02125                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
02126                                   LLVMContext &Context) const {
02127   SmallVector<CCValAssign, 16> RVLocs;
02128   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
02129   return CCInfo.CheckReturn(Outs, CCAssignFnForNode(CallConv, /*Return=*/true,
02130                                                     isVarArg));
02131 }
02132 
02133 static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
02134                                     SDLoc DL, SelectionDAG &DAG) {
02135   const MachineFunction &MF = DAG.getMachineFunction();
02136   const Function *F = MF.getFunction();
02137 
02138   StringRef IntKind = F->getFnAttribute("interrupt").getValueAsString();
02139 
02140   // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
02141   // version of the "preferred return address". These offsets affect the return
02142   // instruction if this is a return from PL1 without hypervisor extensions.
02143   //    IRQ/FIQ: +4     "subs pc, lr, #4"
02144   //    SWI:     0      "subs pc, lr, #0"
02145   //    ABORT:   +4     "subs pc, lr, #4"
02146   //    UNDEF:   +4/+2  "subs pc, lr, #0"
02147   // UNDEF varies depending on whether the exception came from ARM or Thumb
02148   // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.
02149 
02150   int64_t LROffset;
02151   if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
02152       IntKind == "ABORT")
02153     LROffset = 4;
02154   else if (IntKind == "SWI" || IntKind == "UNDEF")
02155     LROffset = 0;
02156   else
02157     report_fatal_error("Unsupported interrupt attribute. If present, value "
02158                        "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
02159 
02160   RetOps.insert(RetOps.begin() + 1, DAG.getConstant(LROffset, MVT::i32, false));
02161 
02162   return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
02163 }
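// For example, a function marked with the "interrupt"="IRQ" attribute returns
// with "subs pc, lr, #4" (the LROffset of 4 is inserted as operand 1 of the
// ARMISD::INTRET_FLAG node), restoring the pre-exception PC and CPSR.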
02164 
02165 SDValue
02166 ARMTargetLowering::LowerReturn(SDValue Chain,
02167                                CallingConv::ID CallConv, bool isVarArg,
02168                                const SmallVectorImpl<ISD::OutputArg> &Outs,
02169                                const SmallVectorImpl<SDValue> &OutVals,
02170                                SDLoc dl, SelectionDAG &DAG) const {
02171 
02172   // CCValAssign - represent the assignment of the return value to a location.
02173   SmallVector<CCValAssign, 16> RVLocs;
02174 
02175   // CCState - Info about the registers and stack slots.
02176   ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
02177                     *DAG.getContext(), Call);
02178 
02179   // Analyze outgoing return values.
02180   CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true,
02181                                                isVarArg));
02182 
02183   SDValue Flag;
02184   SmallVector<SDValue, 4> RetOps;
02185   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
02186   bool isLittleEndian = Subtarget->isLittle();
02187 
02188   MachineFunction &MF = DAG.getMachineFunction();
02189   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02190   AFI->setReturnRegsCount(RVLocs.size());
02191 
02192   // Copy the result values into the output registers.
02193   for (unsigned i = 0, realRVLocIdx = 0;
02194        i != RVLocs.size();
02195        ++i, ++realRVLocIdx) {
02196     CCValAssign &VA = RVLocs[i];
02197     assert(VA.isRegLoc() && "Can only return in registers!");
02198 
02199     SDValue Arg = OutVals[realRVLocIdx];
02200 
02201     switch (VA.getLocInfo()) {
02202     default: llvm_unreachable("Unknown loc info!");
02203     case CCValAssign::Full: break;
02204     case CCValAssign::BCvt:
02205       Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
02206       break;
02207     }
02208 
02209     if (VA.needsCustom()) {
02210       if (VA.getLocVT() == MVT::v2f64) {
02211         // Extract the first half and return it in two registers.
02212         SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
02213                                    DAG.getConstant(0, MVT::i32));
02214         SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
02215                                        DAG.getVTList(MVT::i32, MVT::i32), Half);
02216 
02217         Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
02218                                  HalfGPRs.getValue(isLittleEndian ? 0 : 1),
02219                                  Flag);
02220         Flag = Chain.getValue(1);
02221         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
02222         VA = RVLocs[++i]; // skip ahead to next loc
02223         Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
02224                                  HalfGPRs.getValue(isLittleEndian ? 1 : 0),
02225                                  Flag);
02226         Flag = Chain.getValue(1);
02227         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
02228         VA = RVLocs[++i]; // skip ahead to next loc
02229 
02230         // Extract the 2nd half and fall through to handle it as an f64 value.
02231         Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
02232                           DAG.getConstant(1, MVT::i32));
02233       }
02234       // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
02235       // available.
02236       SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
02237                                   DAG.getVTList(MVT::i32, MVT::i32), Arg);
02238       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
02239                                fmrrd.getValue(isLittleEndian ? 0 : 1),
02240                                Flag);
02241       Flag = Chain.getValue(1);
02242       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
02243       VA = RVLocs[++i]; // skip ahead to next loc
02244       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
02245                                fmrrd.getValue(isLittleEndian ? 1 : 0),
02246                                Flag);
02247     } else
02248       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
02249 
02250     // Guarantee that all emitted copies are stuck together with nothing in
02251     // between, so the scheduler cannot reorder or interleave them.
02252     Flag = Chain.getValue(1);
02253     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
02254   }
02255 
02256   // Update chain and glue.
02257   RetOps[0] = Chain;
02258   if (Flag.getNode())
02259     RetOps.push_back(Flag);
02260 
02261   // CPUs which aren't M-class use a special sequence to return from
02262   // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
02263   // though we use "subs pc, lr, #N").
02264   //
02265   // M-class CPUs actually use a normal return sequence with a special
02266   // (hardware-provided) value in LR, so the normal code path works.
02267   if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt") &&
02268       !Subtarget->isMClass()) {
02269     if (Subtarget->isThumb1Only())
02270       report_fatal_error("interrupt attribute is not supported in Thumb1");
02271     return LowerInterruptReturn(RetOps, dl, DAG);
02272   }
02273 
02274   return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps);
02275 }
02276 
02277 bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
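        // This hook is queried (e.g. when checking whether a libcall sits in tail
        // position) to verify that the only use of N, possibly through a VMOVRRD
        // or BITCAST, is a copy into the return-value registers of a RET_FLAG.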
02278   if (N->getNumValues() != 1)
02279     return false;
02280   if (!N->hasNUsesOfValue(1, 0))
02281     return false;
02282 
02283   SDValue TCChain = Chain;
02284   SDNode *Copy = *N->use_begin();
02285   if (Copy->getOpcode() == ISD::CopyToReg) {
02286     // If the copy has a glue operand, we conservatively assume it isn't safe to
02287     // perform a tail call.
02288     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
02289       return false;
02290     TCChain = Copy->getOperand(0);
02291   } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
02292     SDNode *VMov = Copy;
02293     // f64 returned in a pair of GPRs.
02294     SmallPtrSet<SDNode*, 2> Copies;
02295     for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
02296          UI != UE; ++UI) {
02297       if (UI->getOpcode() != ISD::CopyToReg)
02298         return false;
02299       Copies.insert(*UI);
02300     }
02301     if (Copies.size() > 2)
02302       return false;
02303 
02304     for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
02305          UI != UE; ++UI) {
02306       SDValue UseChain = UI->getOperand(0);
02307       if (Copies.count(UseChain.getNode()))
02308         // Second CopyToReg
02309         Copy = *UI;
02310       else {
02311         // We are at the top of this chain.
02312         // If the copy has a glue operand, we conservatively assume it
02313         // isn't safe to perform a tail call.
02314         if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue)
02315           return false;
02316         // First CopyToReg
02317         TCChain = UseChain;
02318       }
02319     }
02320   } else if (Copy->getOpcode() == ISD::BITCAST) {
02321     // f32 returned in a single GPR.
02322     if (!Copy->hasOneUse())
02323       return false;
02324     Copy = *Copy->use_begin();
02325     if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
02326       return false;
02327     // If the copy has a glue operand, we conservatively assume it isn't safe to
02328     // perform a tail call.
02329     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
02330       return false;
02331     TCChain = Copy->getOperand(0);
02332   } else {
02333     return false;
02334   }
02335 
02336   bool HasRet = false;
02337   for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
02338        UI != UE; ++UI) {
02339     if (UI->getOpcode() != ARMISD::RET_FLAG &&
02340         UI->getOpcode() != ARMISD::INTRET_FLAG)
02341       return false;
02342     HasRet = true;
02343   }
02344 
02345   if (!HasRet)
02346     return false;
02347 
02348   Chain = TCChain;
02349   return true;
02350 }
02351 
02352 bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
02353   if (!Subtarget->supportsTailCall())
02354     return false;
02355 
02356   if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
02357     return false;
02358 
02359   return !Subtarget->isThumb1Only();
02360 }
02361 
02362 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
02363 // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
02364 // one of the above-mentioned nodes. It has to be wrapped because otherwise
02365 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
02366 // be used to form an addressing mode. These wrapped nodes will be selected
02367 // into MOVi.
02368 static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
02369   EVT PtrVT = Op.getValueType();
02370   // FIXME there is no actual debug info here
02371   SDLoc dl(Op);
02372   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
02373   SDValue Res;
02374   if (CP->isMachineConstantPoolEntry())
02375     Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
02376                                     CP->getAlignment());
02377   else
02378     Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
02379                                     CP->getAlignment());
02380   return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
02381 }
02382 
02383 unsigned ARMTargetLowering::getJumpTableEncoding() const {
02384   return MachineJumpTableInfo::EK_Inline;
02385 }
02386 
02387 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
02388                                              SelectionDAG &DAG) const {
02389   MachineFunction &MF = DAG.getMachineFunction();
02390   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02391   unsigned ARMPCLabelIndex = 0;
02392   SDLoc DL(Op);
02393   EVT PtrVT = getPointerTy();
02394   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
02395   Reloc::Model RelocM = getTargetMachine().getRelocationModel();
02396   SDValue CPAddr;
02397   if (RelocM == Reloc::Static) {
02398     CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
02399   } else {
02400     unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
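          // PC-relative offsets must account for the way the PC reads during
          // execution: the address of the current instruction plus 8 in ARM mode,
          // or plus 4 in Thumb mode.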
02401     ARMPCLabelIndex = AFI->createPICLabelUId();
02402     ARMConstantPoolValue *CPV =
02403       ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
02404                                       ARMCP::CPBlockAddress, PCAdj);
02405     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02406   }
02407   CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
02408   SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr,
02409                                MachinePointerInfo::getConstantPool(),
02410                                false, false, false, 0);
02411   if (RelocM == Reloc::Static)
02412     return Result;
02413   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02414   return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
02415 }
02416 
02417 // Lower ISD::GlobalTLSAddress using the "general dynamic" model
02418 SDValue
02419 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
02420                                                  SelectionDAG &DAG) const {
02421   SDLoc dl(GA);
02422   EVT PtrVT = getPointerTy();
02423   unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
02424   MachineFunction &MF = DAG.getMachineFunction();
02425   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02426   unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
02427   ARMConstantPoolValue *CPV =
02428     ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
02429                                     ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
02430   SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02431   Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
02432   Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument,
02433                          MachinePointerInfo::getConstantPool(),
02434                          false, false, false, 0);
02435   SDValue Chain = Argument.getValue(1);
02436 
02437   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02438   Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
02439 
02440   // call __tls_get_addr.
02441   ArgListTy Args;
02442   ArgListEntry Entry;
02443   Entry.Node = Argument;
02444   Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
02445   Args.push_back(Entry);
02446 
02447   // FIXME: is there useful debug info available here?
02448   TargetLowering::CallLoweringInfo CLI(DAG);
02449   CLI.setDebugLoc(dl).setChain(Chain)
02450     .setCallee(CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
02451                DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args),
02452                0);
02453 
02454   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
02455   return CallResult.first;
02456 }
02457 
02458 // Lower ISD::GlobalTLSAddress using the "initial exec" or
02459 // "local exec" model.
02460 SDValue
02461 ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
02462                                         SelectionDAG &DAG,
02463                                         TLSModel::Model model) const {
02464   const GlobalValue *GV = GA->getGlobal();
02465   SDLoc dl(GA);
02466   SDValue Offset;
02467   SDValue Chain = DAG.getEntryNode();
02468   EVT PtrVT = getPointerTy();
02469   // Get the Thread Pointer
02470   SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
02471 
02472   if (model == TLSModel::InitialExec) {
02473     MachineFunction &MF = DAG.getMachineFunction();
02474     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02475     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
02476     // Initial exec model.
02477     unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
02478     ARMConstantPoolValue *CPV =
02479       ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
02480                                       ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
02481                                       true);
02482     Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02483     Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
02484     Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
02485                          MachinePointerInfo::getConstantPool(),
02486                          false, false, false, 0);
02487     Chain = Offset.getValue(1);
02488 
02489     SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02490     Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
02491 
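          // The constant-pool load plus PIC_ADD above computed the address of the
          // GOT entry; this second load reads the thread-pointer offset stored
          // there.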
02492     Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
02493                          MachinePointerInfo::getConstantPool(),
02494                          false, false, false, 0);
02495   } else {
02496     // local exec model
02497     assert(model == TLSModel::LocalExec);
02498     ARMConstantPoolValue *CPV =
02499       ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
02500     Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02501     Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
02502     Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
02503                          MachinePointerInfo::getConstantPool(),
02504                          false, false, false, 0);
02505   }
02506 
02507   // The address of the thread local variable is the add of the thread
02508   // pointer with the offset of the variable.
02509   return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
02510 }
02511 
02512 SDValue
02513 ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
02514   // TODO: implement the "local dynamic" model
02515   assert(Subtarget->isTargetELF() &&
02516          "TLS not implemented for non-ELF targets");
02517   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
02518 
02519   TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());
02520 
02521   switch (model) {
02522     case TLSModel::GeneralDynamic:
02523     case TLSModel::LocalDynamic:
02524       return LowerToTLSGeneralDynamicModel(GA, DAG);
02525     case TLSModel::InitialExec:
02526     case TLSModel::LocalExec:
02527       return LowerToTLSExecModels(GA, DAG, model);
02528   }
02529   llvm_unreachable("bogus TLS model");
02530 }
02531 
02532 SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
02533                                                  SelectionDAG &DAG) const {
02534   EVT PtrVT = getPointerTy();
02535   SDLoc dl(Op);
02536   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
02537   if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
02538     bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
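          // Local or hidden symbols can be addressed relative to the GOT base
          // (GOTOFF) without an extra load; other symbols go through a GOT entry,
          // which is loaded below when !UseGOTOFF.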
02539     ARMConstantPoolValue *CPV =
02540       ARMConstantPoolConstant::Create(GV,
02541                                       UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);
02542     SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02543     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
02544     SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
02545                                  CPAddr,
02546                                  MachinePointerInfo::getConstantPool(),
02547                                  false, false, false, 0);
02548     SDValue Chain = Result.getValue(1);
02549     SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
02550     Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT);
02551     if (!UseGOTOFF)
02552       Result = DAG.getLoad(PtrVT, dl, Chain, Result,
02553                            MachinePointerInfo::getGOT(),
02554                            false, false, false, 0);
02555     return Result;
02556   }
02557 
02558   // If we have T2 ops, we can materialize the address directly via movt/movw
02559   // pair. This is always cheaper.
02560   if (Subtarget->useMovt(DAG.getMachineFunction())) {
02561     ++NumMovwMovt;
02562     // FIXME: Once remat is capable of dealing with instructions with register
02563     // operands, expand this into two nodes.
02564     return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
02565                        DAG.getTargetGlobalAddress(GV, dl, PtrVT));
02566   } else {
02567     SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
02568     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
02569     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
02570                        MachinePointerInfo::getConstantPool(),
02571                        false, false, false, 0);
02572   }
02573 }
02574 
02575 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
02576                                                     SelectionDAG &DAG) const {
02577   EVT PtrVT = getPointerTy();
02578   SDLoc dl(Op);
02579   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
02580   Reloc::Model RelocM = getTargetMachine().getRelocationModel();
02581 
02582   if (Subtarget->useMovt(DAG.getMachineFunction()))
02583     ++NumMovwMovt;
02584 
02585   // FIXME: Once remat is capable of dealing with instructions with register
02586   // operands, expand this into multiple nodes
02587   unsigned Wrapper =
02588       RelocM == Reloc::PIC_ ? ARMISD::WrapperPIC : ARMISD::Wrapper;
02589 
02590   SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
02591   SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);
02592 
02593   if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
02594     Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
02595                          MachinePointerInfo::getGOT(), false, false, false, 0);
02596   return Result;
02597 }
02598 
02599 SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
02600                                                      SelectionDAG &DAG) const {
02601   assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
02602   assert(Subtarget->useMovt(DAG.getMachineFunction()) &&
02603          "Windows on ARM expects to use movw/movt");
02604 
02605   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
02606   const ARMII::TOF TargetFlags =
02607     (GV->hasDLLImportStorageClass() ? ARMII::MO_DLLIMPORT : ARMII::MO_NO_FLAG);
02608   EVT PtrVT = getPointerTy();
02609   SDValue Result;
02610   SDLoc DL(Op);
02611 
02612   ++NumMovwMovt;
02613 
02614   // FIXME: Once remat is capable of dealing with instructions with register
02615   // operands, expand this into two nodes.
02616   Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
02617                        DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*Offset=*/0,
02618                                                   TargetFlags));
02619   if (GV->hasDLLImportStorageClass())
02620     Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
02621                          MachinePointerInfo::getGOT(), false, false, false, 0);
02622   return Result;
02623 }
02624 
02625 SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
02626                                                     SelectionDAG &DAG) const {
02627   assert(Subtarget->isTargetELF() &&
02628          "GLOBAL OFFSET TABLE not implemented for non-ELF targets");
02629   MachineFunction &MF = DAG.getMachineFunction();
02630   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02631   unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
02632   EVT PtrVT = getPointerTy();
02633   SDLoc dl(Op);
02634   unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
02635   ARMConstantPoolValue *CPV =
02636     ARMConstantPoolSymbol::Create(*DAG.getContext(), "_GLOBAL_OFFSET_TABLE_",
02637                                   ARMPCLabelIndex, PCAdj);
02638   SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02639   CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
02640   SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
02641                                MachinePointerInfo::getConstantPool(),
02642                                false, false, false, 0);
02643   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02644   return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
02645 }
02646 
02647 SDValue
02648 ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
02649   SDLoc dl(Op);
02650   SDValue Val = DAG.getConstant(0, MVT::i32);
02651   return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
02652                      DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
02653                      Op.getOperand(1), Val);
02654 }
02655 
02656 SDValue
02657 ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
02658   SDLoc dl(Op);
02659   return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
02660                      Op.getOperand(1), DAG.getConstant(0, MVT::i32));
02661 }
02662 
02663 SDValue
02664 ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
02665                                           const ARMSubtarget *Subtarget) const {
02666   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
02667   SDLoc dl(Op);
02668   switch (IntNo) {
02669   default: return SDValue();    // Don't custom lower most intrinsics.
02670   case Intrinsic::arm_rbit: {
02671     assert(Op.getOperand(1).getValueType() == MVT::i32 &&
02672            "RBIT intrinsic must have i32 type!");
02673     return DAG.getNode(ARMISD::RBIT, dl, MVT::i32, Op.getOperand(1));
02674   }
02675   case Intrinsic::arm_thread_pointer: {
02676     EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02677     return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
02678   }
02679   case Intrinsic::eh_sjlj_lsda: {
02680     MachineFunction &MF = DAG.getMachineFunction();
02681     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02682     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
02683     EVT PtrVT = getPointerTy();
02684     Reloc::Model RelocM = getTargetMachine().getRelocationModel();
02685     SDValue CPAddr;
02686     unsigned PCAdj = (RelocM != Reloc::PIC_)
02687       ? 0 : (Subtarget->isThumb() ? 4 : 8);
02688     ARMConstantPoolValue *CPV =
02689       ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex,
02690                                       ARMCP::CPLSDA, PCAdj);
02691     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02692     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
02693     SDValue Result =
02694       DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
02695                   MachinePointerInfo::getConstantPool(),
02696                   false, false, false, 0);
02697 
02698     if (RelocM == Reloc::PIC_) {
02699       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02700       Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
02701     }
02702     return Result;
02703   }
02704   case Intrinsic::arm_neon_vmulls:
02705   case Intrinsic::arm_neon_vmullu: {
02706     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
02707       ? ARMISD::VMULLs : ARMISD::VMULLu;
02708     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
02709                        Op.getOperand(1), Op.getOperand(2));
02710   }
02711   }
02712 }
02713 
02714 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
02715                                  const ARMSubtarget *Subtarget) {
02716   // FIXME: handle "fence singlethread" more efficiently.
02717   SDLoc dl(Op);
02718   if (!Subtarget->hasDataBarrier()) {
02719     // Some ARMv6 cpus can support data barriers with an mcr instruction.
02720     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
02721     // here.
02722     assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
02723            "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
02724     return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
02725                        DAG.getConstant(0, MVT::i32));
02726   }
02727 
02728   ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
02729   AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
02730   ARM_MB::MemBOpt Domain = ARM_MB::ISH;
02731   if (Subtarget->isMClass()) {
02732     // Only a full system barrier exists in the M-class architectures.
02733     Domain = ARM_MB::SY;
02734   } else if (Subtarget->isSwift() && Ord == Release) {
02735     // Swift happens to implement ISHST barriers in a way that's compatible with
02736     // Release semantics but weaker than ISH so we'd be fools not to use
02737     // it. Beware: other processors probably don't!
02738     Domain = ARM_MB::ISHST;
02739   }
02740 
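        // Lower the fence to the arm_dmb intrinsic carrying the chosen domain;
        // this is ultimately selected to a DMB instruction with that option
        // (SY, ISH or ISHST).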
02741   return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
02742                      DAG.getConstant(Intrinsic::arm_dmb, MVT::i32),
02743                      DAG.getConstant(Domain, MVT::i32));
02744 }
02745 
02746 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
02747                              const ARMSubtarget *Subtarget) {
02748   // Neither ARM prior to v5TE nor Thumb1 has preload instructions.
02749   if (!(Subtarget->isThumb2() ||
02750         (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
02751     // Just preserve the chain.
02752     return Op.getOperand(0);
02753 
02754   SDLoc dl(Op);
02755   unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
02756   if (!isRead &&
02757       (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
02758     // ARMv7 with MP extension has PLDW.
02759     return Op.getOperand(0);
02760 
02761   unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
02762   if (Subtarget->isThumb()) {
02763     // Invert the bits.
02764     isRead = ~isRead & 1;
02765     isData = ~isData & 1;
02766   }
02767 
02768   return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
02769                      Op.getOperand(1), DAG.getConstant(isRead, MVT::i32),
02770                      DAG.getConstant(isData, MVT::i32));
02771 }
02772 
02773 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
02774   MachineFunction &MF = DAG.getMachineFunction();
02775   ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
02776 
02777   // vastart just stores the address of the VarArgsFrameIndex slot into the
02778   // memory location argument.
02779   SDLoc dl(Op);
02780   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02781   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
02782   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
02783   return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
02784                       MachinePointerInfo(SV), false, false, 0);
02785 }
02786 
02787 SDValue
02788 ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
02789                                         SDValue &Root, SelectionDAG &DAG,
02790                                         SDLoc dl) const {
02791   MachineFunction &MF = DAG.getMachineFunction();
02792   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02793 
02794   const TargetRegisterClass *RC;
02795   if (AFI->isThumb1OnlyFunction())
02796     RC = &ARM::tGPRRegClass;
02797   else
02798     RC = &ARM::GPRRegClass;
02799 
02800   // Transform the arguments stored in physical registers into virtual ones.
02801   unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
02802   SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
02803 
02804   SDValue ArgValue2;
02805   if (NextVA.isMemLoc()) {
02806     MachineFrameInfo *MFI = MF.getFrameInfo();
02807     int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true);
02808 
02809     // Create load node to retrieve arguments from the stack.
02810     SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
02811     ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
02812                             MachinePointerInfo::getFixedStack(FI),
02813                             false, false, false, 0);
02814   } else {
02815     Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
02816     ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
02817   }
02818   if (!Subtarget->isLittle())
02819     std::swap (ArgValue, ArgValue2);
02820   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
02821 }
02822 
02823 void
02824 ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
02825                                   unsigned InRegsParamRecordIdx,
02826                                   unsigned ArgSize,
02827                                   unsigned &ArgRegsSize,
02828                                   unsigned &ArgRegsSaveSize)
02829   const {
02830   unsigned NumGPRs;
02831   if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
02832     unsigned RBegin, REnd;
02833     CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
02834     NumGPRs = REnd - RBegin;
02835   } else {
02836     unsigned int firstUnalloced;
02837     firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs);
02838     NumGPRs = (firstUnalloced <= 3) ? (4 - firstUnalloced) : 0;
02839   }
02840 
02841   unsigned Align = Subtarget->getFrameLowering()->getStackAlignment();
02842   ArgRegsSize = NumGPRs * 4;
02843 
02844   // If the parameter is split between the stack and GPRs...
02845   if (NumGPRs && Align > 4 &&
02846       (ArgRegsSize < ArgSize ||
02847         InRegsParamRecordIdx >= CCInfo.getInRegsParamsCount())) {
02848     // Add padding for the part of the parameter recovered from GPRs.  For
02849     // example, if Align == 8, its last byte must be at address K*8 - 1.
02850     // This is needed because the remaining (stack) part of the parameter
02851     // is stack-aligned, and we need to "attach" the "GPRs head" to it
02852     // without gaps:
02853     // Stack:
02854     // |---- 8 bytes block ----| |---- 8 bytes block ----| |---- 8 bytes...
02855     // [ [padding] [GPRs head] ] [        Tail passed via stack       ....
02856     //
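          // For instance (hypothetical values): with Align == 8, three saved GPRs
          // (ArgRegsSize == 12) and no previously saved arg regs, the padding is
          // OffsetToAlignment(12, 8) == 4, giving ArgRegsSaveSize == 16.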
02857     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02858     unsigned Padding =
02859         OffsetToAlignment(ArgRegsSize + AFI->getArgRegsSaveSize(), Align);
02860     ArgRegsSaveSize = ArgRegsSize + Padding;
02861   } else
02862     // We don't need to extend regs save size for byval parameters if they
02863     // are passed via GPRs only.
02864     ArgRegsSaveSize = ArgRegsSize;
02865 }
02866 
02867 // The remaining GPRs hold either the beginning of variable-argument
02868 // data, or the beginning of an aggregate passed by value (usually
02869 // byval).  Either way, we allocate stack slots adjacent to the data
02870 // provided by our caller, and store the unallocated registers there.
02871 // If this is a variadic function, the va_list pointer will begin with
02872 // these values; otherwise, this reassembles a (byval) structure that
02873 // was split between registers and memory.
02874 // Return: The frame index that the registers were stored into.
02875 int
02876 ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
02877                                   SDLoc dl, SDValue &Chain,
02878                                   const Value *OrigArg,
02879                                   unsigned InRegsParamRecordIdx,
02880                                   unsigned OffsetFromOrigArg,
02881                                   unsigned ArgOffset,
02882                                   unsigned ArgSize,
02883                                   bool ForceMutable,
02884                                   unsigned ByValStoreOffset,
02885                                   unsigned TotalArgRegsSaveSize) const {
02886 
02887   // Currently, two use cases are possible:
02888   // Case #1. Non-varargs function, and we meet the first byval parameter.
02889   //          Set up the first unallocated register as the first byval
02890   //          register; eat all remaining registers
02891   //          (these two actions are performed by the HandleByVal method).
02892   //          Then, here, we initialize the stack frame with
02893   //          "store-reg" instructions.
02894   // Case #2. Varargs function that doesn't contain byval parameters.
02895   //          The same: eat all remaining unallocated registers and
02896   //          initialize the stack frame.
02897 
02898   MachineFunction &MF = DAG.getMachineFunction();
02899   MachineFrameInfo *MFI = MF.getFrameInfo();
02900   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02901   unsigned firstRegToSaveIndex, lastRegToSaveIndex;
02902   unsigned RBegin, REnd;
02903   if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
02904     CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
02905     firstRegToSaveIndex = RBegin - ARM::R0;
02906     lastRegToSaveIndex = REnd - ARM::R0;
02907   } else {
02908     firstRegToSaveIndex = CCInfo.getFirstUnallocated(GPRArgRegs);
02909     lastRegToSaveIndex = 4;
02910   }
02911 
02912   unsigned ArgRegsSize, ArgRegsSaveSize;
02913   computeRegArea(CCInfo, MF, InRegsParamRecordIdx, ArgSize,
02914                  ArgRegsSize, ArgRegsSaveSize);
02915 
02916   // Store any byval regs to their spots on the stack so that they may be
02917   // loaded by dereferencing the result of the formal parameter pointer or
02918   // va_next. Note: once the stack area for byval/varargs registers has been
02919   // initialized, it can't be initialized again.
02920   if (ArgRegsSaveSize) {
02921     unsigned Padding = ArgRegsSaveSize - ArgRegsSize;
02922 
02923     if (Padding) {
02924       assert(AFI->getStoredByValParamsPadding() == 0 &&
02925              "The only parameter may be padded.");
02926       AFI->setStoredByValParamsPadding(Padding);
02927     }
02928 
02929     int FrameIndex = MFI->CreateFixedObject(ArgRegsSaveSize,
02930                                             Padding +
02931                                               ByValStoreOffset -
02932                                               (int64_t)TotalArgRegsSaveSize,
02933                                             false);
02934     SDValue FIN = DAG.getFrameIndex(FrameIndex, getPointerTy());
02935     if (Padding) {
02936        MFI->CreateFixedObject(Padding,
02937                               ArgOffset + ByValStoreOffset -
02938                                 (int64_t)ArgRegsSaveSize,
02939                               false);
02940     }
02941 
02942     SmallVector<SDValue, 4> MemOps;
02943     for (unsigned i = 0; firstRegToSaveIndex < lastRegToSaveIndex;
02944          ++firstRegToSaveIndex, ++i) {
02945       const TargetRegisterClass *RC;
02946       if (AFI->isThumb1OnlyFunction())
02947         RC = &ARM::tGPRRegClass;
02948       else
02949         RC = &ARM::GPRRegClass;
02950 
02951       unsigned VReg = MF.addLiveIn(GPRArgRegs[firstRegToSaveIndex], RC);
02952       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
02953       SDValue Store =
02954         DAG.getStore(Val.getValue(1), dl, Val, FIN,
02955                      MachinePointerInfo(OrigArg, OffsetFromOrigArg + 4*i),
02956                      false, false, 0);
02957       MemOps.push_back(Store);
02958       FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
02959                         DAG.getConstant(4, getPointerTy()));
02960     }
02961 
02962     AFI->setArgRegsSaveSize(ArgRegsSaveSize + AFI->getArgRegsSaveSize());
02963 
02964     if (!MemOps.empty())
02965       Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
02966     return FrameIndex;
02967   } else {
02968     if (ArgSize == 0) {
02969       // We cannot allocate a zero-byte object for the first variadic argument,
02970       // so just make up a size.
02971       ArgSize = 4;
02972     }
02973     // This will point to the next argument passed via stack.
02974     return MFI->CreateFixedObject(
02975       ArgSize, ArgOffset, !ForceMutable);
02976   }
02977 }
02978 
02979 // Set up the stack frame that the va_list pointer will start from.
02980 void
02981 ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
02982                                         SDLoc dl, SDValue &Chain,
02983                                         unsigned ArgOffset,
02984                                         unsigned TotalArgRegsSaveSize,
02985                                         bool ForceMutable) const {
02986   MachineFunction &MF = DAG.getMachineFunction();
02987   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02988 
02989   // Try to store any remaining integer argument regs
02990   // to their spots on the stack so that they may be loaded by dereferencing
02991   // the result of va_next.
02992   // If there are no regs to be stored, just point past the last
02993   // argument passed via the stack.
02994   int FrameIndex =
02995     StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
02996                    CCInfo.getInRegsParamsCount(), 0, ArgOffset, 0, ForceMutable,
02997                    0, TotalArgRegsSaveSize);
02998 
02999   AFI->setVarArgsFrameIndex(FrameIndex);
03000 }
03001 
03002 SDValue
03003 ARMTargetLowering::LowerFormalArguments(SDValue Chain,
03004                                         CallingConv::ID CallConv, bool isVarArg,
03005                                         const SmallVectorImpl<ISD::InputArg>
03006                                           &Ins,
03007                                         SDLoc dl, SelectionDAG &DAG,
03008                                         SmallVectorImpl<SDValue> &InVals)
03009                                           const {
03010   MachineFunction &MF = DAG.getMachineFunction();
03011   MachineFrameInfo *MFI = MF.getFrameInfo();
03012 
03013   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
03014 
03015   // Assign locations to all of the incoming arguments.
03016   SmallVector<CCValAssign, 16> ArgLocs;
03017   ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
03018                     *DAG.getContext(), Prologue);
03019   CCInfo.AnalyzeFormalArguments(Ins,
03020                                 CCAssignFnForNode(CallConv, /* Return*/ false,
03021                                                   isVarArg));
03022 
03023   SmallVector<SDValue, 16> ArgValues;
03024   int lastInsIndex = -1;
03025   SDValue ArgValue;
03026   Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
03027   unsigned CurArgIdx = 0;
03028 
03029   // Initially ArgRegsSaveSize is zero.
03030   // Then we increase this value each time we meet a byval parameter.
03031   // We also increase this value in the case of a varargs function.
03032   AFI->setArgRegsSaveSize(0);
03033 
03034   unsigned ByValStoreOffset = 0;
03035   unsigned TotalArgRegsSaveSize = 0;
03036   unsigned ArgRegsSaveSizeMaxAlign = 4;
03037 
03038   // Calculate the amount of stack space that we need to allocate to store
03039   // byval and variadic arguments that are passed in registers.
03040   // We need to know this before we allocate the first byval or variadic
03041   // argument, as they will be allocated a stack slot below the CFA (Canonical
03042   // Frame Address, the stack pointer at entry to the function).
03043   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
03044     CCValAssign &VA = ArgLocs[i];
03045     if (VA.isMemLoc()) {
03046       int index = VA.getValNo();
03047       if (index != lastInsIndex) {
03048         ISD::ArgFlagsTy Flags = Ins[index].Flags;
03049         if (Flags.isByVal()) {
03050           unsigned ExtraArgRegsSize;
03051           unsigned ExtraArgRegsSaveSize;
03052           computeRegArea(CCInfo, MF, CCInfo.getInRegsParamsProcessed(),
03053                          Flags.getByValSize(),
03054                          ExtraArgRegsSize, ExtraArgRegsSaveSize);
03055 
03056           TotalArgRegsSaveSize += ExtraArgRegsSaveSize;
03057           if (Flags.getByValAlign() > ArgRegsSaveSizeMaxAlign)
03058               ArgRegsSaveSizeMaxAlign = Flags.getByValAlign();
03059           CCInfo.nextInRegsParam();
03060         }
03061         lastInsIndex = index;
03062       }
03063     }
03064   }
03065   CCInfo.rewindByValRegsInfo();
03066   lastInsIndex = -1;
03067   if (isVarArg && MFI->hasVAStart()) {
03068     unsigned ExtraArgRegsSize;
03069     unsigned ExtraArgRegsSaveSize;
03070     computeRegArea(CCInfo, MF, CCInfo.getInRegsParamsCount(), 0,
03071                    ExtraArgRegsSize, ExtraArgRegsSaveSize);
03072     TotalArgRegsSaveSize += ExtraArgRegsSaveSize;
03073   }
03074   // If the arg regs save area contains N-byte aligned values, the
03075   // bottom of it must be at least N-byte aligned.
03076   TotalArgRegsSaveSize = RoundUpToAlignment(TotalArgRegsSaveSize, ArgRegsSaveSizeMaxAlign);
03077   TotalArgRegsSaveSize = std::min(TotalArgRegsSaveSize, 16U);
03078 
03079   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
03080     CCValAssign &VA = ArgLocs[i];
03081     if (Ins[VA.getValNo()].isOrigArg()) {
03082       std::advance(CurOrigArg,
03083                    Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
03084       CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
03085     }
03086     // Arguments stored in registers.
03087     if (VA.isRegLoc()) {
03088       EVT RegVT = VA.getLocVT();
03089 
03090       if (VA.needsCustom()) {
03091         // f64 and vector types are split up into multiple registers or
03092         // combinations of registers and stack slots.
03093         if (VA.getLocVT() == MVT::v2f64) {
03094           SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
03095                                                    Chain, DAG, dl);
03096           VA = ArgLocs[++i]; // skip ahead to next loc
03097           SDValue ArgValue2;
03098           if (VA.isMemLoc()) {
03099             int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true);
03100             SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
03101             ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
03102                                     MachinePointerInfo::getFixedStack(FI),
03103                                     false, false, false, 0);
03104           } else {
03105             ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
03106                                              Chain, DAG, dl);
03107           }
03108           ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
03109           ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
03110                                  ArgValue, ArgValue1, DAG.getIntPtrConstant(0));
03111           ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
03112                                  ArgValue, ArgValue2, DAG.getIntPtrConstant(1));
03113         } else
03114           ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
03115 
03116       } else {
03117         const TargetRegisterClass *RC;
03118 
03119         if (RegVT == MVT::f32)
03120           RC = &ARM::SPRRegClass;
03121         else if (RegVT == MVT::f64)
03122           RC = &ARM::DPRRegClass;
03123         else if (RegVT == MVT::v2f64)
03124           RC = &ARM::QPRRegClass;
03125         else if (RegVT == MVT::i32)
03126           RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass
03127                                            : &ARM::GPRRegClass;
03128         else
03129           llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
03130 
03131         // Transform the arguments in physical registers into virtual ones.
03132         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
03133         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
03134       }
03135 
03136       // If this is an 8 or 16-bit value, it is really passed promoted
03137       // to 32 bits.  Insert an assert[sz]ext to capture this, then
03138       // truncate to the right size.
03139       switch (VA.getLocInfo()) {
03140       default: llvm_unreachable("Unknown loc info!");
03141       case CCValAssign::Full: break;
03142       case CCValAssign::BCvt:
03143         ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
03144         break;
03145       case CCValAssign::SExt:
03146         ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
03147                                DAG.getValueType(VA.getValVT()));
03148         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
03149         break;
03150       case CCValAssign::ZExt:
03151         ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
03152                                DAG.getValueType(VA.getValVT()));
03153         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
03154         break;
03155       }
03156 
03157       InVals.push_back(ArgValue);
03158 
03159     } else { // VA.isRegLoc()
03160 
03161       // sanity check
03162       assert(VA.isMemLoc());
03163       assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
03164 
03165       int index = VA.getValNo();
03166 
03167       // Some Ins[] entries become multiple ArgLoc[] entries.
03168       // Process them only once.
03169       if (index != lastInsIndex)
03170         {
03171           ISD::ArgFlagsTy Flags = Ins[index].Flags;
03172           // FIXME: For now, all byval parameter objects are marked mutable.
03173           // This can be changed with more analysis.
03174           // In case of tail call optimization, mark all arguments mutable,
03175           // since they could be overwritten by the lowering of arguments for
03176           // a tail call.
03177           if (Flags.isByVal()) {
03178             assert(Ins[index].isOrigArg() &&
03179                    "Byval arguments cannot be implicit");
03180             unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();
03181 
03182             ByValStoreOffset = RoundUpToAlignment(ByValStoreOffset, Flags.getByValAlign());
03183             int FrameIndex = StoreByValRegs(
03184                 CCInfo, DAG, dl, Chain, CurOrigArg,
03185                 CurByValIndex,
03186                 Ins[VA.getValNo()].PartOffset,
03187                 VA.getLocMemOffset(),
03188                 Flags.getByValSize(),
03189                 true /*force mutable frames*/,
03190                 ByValStoreOffset,
03191                 TotalArgRegsSaveSize);
03192             ByValStoreOffset += Flags.getByValSize();
03193             ByValStoreOffset = std::min(ByValStoreOffset, 16U);
03194             InVals.push_back(DAG.getFrameIndex(FrameIndex, getPointerTy()));
03195             CCInfo.nextInRegsParam();
03196           } else {
03197             unsigned FIOffset = VA.getLocMemOffset();
03198             int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
03199                                             FIOffset, true);
03200 
03201             // Create load nodes to retrieve arguments from the stack.
03202             SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
03203             InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
03204                                          MachinePointerInfo::getFixedStack(FI),
03205                                          false, false, false, 0));
03206           }
03207           lastInsIndex = index;
03208         }
03209     }
03210   }
03211 
03212   // varargs
03213   if (isVarArg && MFI->hasVAStart())
03214     VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
03215                          CCInfo.getNextStackOffset(),
03216                          TotalArgRegsSaveSize);
03217 
03218   AFI->setArgumentStackSize(CCInfo.getNextStackOffset());
03219 
03220   return Chain;
03221 }
03222 
03223 /// isFloatingPointZero - Return true if this is +0.0.
03224 static bool isFloatingPointZero(SDValue Op) {
03225   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
03226     return CFP->getValueAPF().isPosZero();
03227   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
03228     // Maybe this has already been legalized into the constant pool?
03229     if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
03230       SDValue WrapperOp = Op.getOperand(1).getOperand(0);
03231       if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
03232         if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
03233           return CFP->getValueAPF().isPosZero();
03234     }
03235   } else if (Op->getOpcode() == ISD::BITCAST &&
03236              Op->getValueType(0) == MVT::f64) {
03237     // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64)
03238     // created by LowerConstantFP().
03239     SDValue BitcastOp = Op->getOperand(0);
03240     if (BitcastOp->getOpcode() == ARMISD::VMOVIMM) {
03241       SDValue MoveOp = BitcastOp->getOperand(0);
03242       if (MoveOp->getOpcode() == ISD::TargetConstant &&
03243           cast<ConstantSDNode>(MoveOp)->getZExtValue() == 0) {
03244         return true;
03245       }
03246     }
03247   }
03248   return false;
03249 }
03250 
03251 /// Returns an appropriate ARM CMP (cmp) and corresponding condition code for
03252 /// the given operands.
03253 SDValue
03254 ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
03255                              SDValue &ARMcc, SelectionDAG &DAG,
03256                              SDLoc dl) const {
03257   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
03258     unsigned C = RHSC->getZExtValue();
03259     if (!isLegalICmpImmediate(C)) {
03260       // Constant does not fit, try adjusting it by one?
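            // For example, assuming 0x101 is not a legal compare immediate while
            // 0x100 is, "x < 0x101" (SETLT) can be rewritten as "x <= 0x100"
            // (SETLE).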
03261       switch (CC) {
03262       default: break;
03263       case ISD::SETLT:
03264       case ISD::SETGE:
03265         if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
03266           CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
03267           RHS = DAG.getConstant(C-1, MVT::i32);
03268         }
03269         break;
03270       case ISD::SETULT:
03271       case ISD::SETUGE:
03272         if (C != 0 && isLegalICmpImmediate(C-1)) {
03273           CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
03274           RHS = DAG.getConstant(C-1, MVT::i32);
03275         }
03276         break;
03277       case ISD::SETLE:
03278       case ISD::SETGT:
03279         if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
03280           CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
03281           RHS = DAG.getConstant(C+1, MVT::i32);
03282         }
03283         break;
03284       case ISD::SETULE:
03285       case ISD::SETUGT:
03286         if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
03287           CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
03288           RHS = DAG.getConstant(C+1, MVT::i32);
03289         }
03290         break;
03291       }
03292     }
03293   }
03294 
03295   ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
03296   ARMISD::NodeType CompareType;
03297   switch (CondCode) {
03298   default:
03299     CompareType = ARMISD::CMP;
03300     break;
03301   case ARMCC::EQ:
03302   case ARMCC::NE:
03303     // Uses only Z Flag
03304     CompareType = ARMISD::CMPZ;
03305     break;
03306   }
03307   ARMcc = DAG.getConstant(CondCode, MVT::i32);
03308   return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
03309 }
03310 
03311 /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
03312 SDValue
03313 ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
03314                              SDLoc dl) const {
03315   assert(!Subtarget->isFPOnlySP() || RHS.getValueType() != MVT::f64);
03316   SDValue Cmp;
03317   if (!isFloatingPointZero(RHS))
03318     Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS);
03319   else
03320     Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS);
03321   return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
03322 }
03323 
03324 /// duplicateCmp - Glue values can have only one use, so this function
03325 /// duplicates a comparison node.
03326 SDValue
03327 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
03328   unsigned Opc = Cmp.getOpcode();
03329   SDLoc DL(Cmp);
03330   if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
03331     return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
03332 
03333   assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
03334   Cmp = Cmp.getOperand(0);
03335   Opc = Cmp.getOpcode();
03336   if (Opc == ARMISD::CMPFP)
03337     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
03338   else {
03339     assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
03340     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0));
03341   }
03342   return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
03343 }
03344 
03345 std::pair<SDValue, SDValue>
03346 ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
03347                                  SDValue &ARMcc) const {
03348   assert(Op.getValueType() == MVT::i32 &&  "Unsupported value type");
03349 
03350   SDValue Value, OverflowCmp;
03351   SDValue LHS = Op.getOperand(0);
03352   SDValue RHS = Op.getOperand(1);
03353 
03354 
03355   // FIXME: We are currently always generating CMPs because we don't support
03356   // generating CMN through the backend. This is not as good as the natural
03357   // CMP case because it causes a register dependency and cannot be folded
03358   // later.
03359 
03360   switch (Op.getOpcode()) {
03361   default:
03362     llvm_unreachable("Unknown overflow instruction!");
03363   case ISD::SADDO:
03364     ARMcc = DAG.getConstant(ARMCC::VC, MVT::i32);
03365     Value = DAG.getNode(ISD::ADD, SDLoc(Op), Op.getValueType(), LHS, RHS);
03366     OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, Value, LHS);
03367     break;
03368   case ISD::UADDO:
03369     ARMcc = DAG.getConstant(ARMCC::HS, MVT::i32);
03370     Value = DAG.getNode(ISD::ADD, SDLoc(Op), Op.getValueType(), LHS, RHS);
03371     OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, Value, LHS);
03372     break;
03373   case ISD::SSUBO:
03374     ARMcc = DAG.getConstant(ARMCC::VC, MVT::i32);
03375     Value = DAG.getNode(ISD::SUB, SDLoc(Op), Op.getValueType(), LHS, RHS);
03376     OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, LHS, RHS);
03377     break;
03378   case ISD::USUBO:
03379     ARMcc = DAG.getConstant(ARMCC::HS, MVT::i32);
03380     Value = DAG.getNode(ISD::SUB, SDLoc(Op), Op.getValueType(), LHS, RHS);
03381     OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, LHS, RHS);
03382     break;
03383   } // switch (...)
03384 
03385   return std::make_pair(Value, OverflowCmp);
03386 }
03387 
03388 
03389 SDValue
03390 ARMTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
03391   // Let legalize expand this if it isn't a legal type yet.
03392   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
03393     return SDValue();
03394 
03395   SDValue Value, OverflowCmp;
03396   SDValue ARMcc;
03397   std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
03398   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03399   // We use 0 and 1 as false and true values.
03400   SDValue TVal = DAG.getConstant(1, MVT::i32);
03401   SDValue FVal = DAG.getConstant(0, MVT::i32);
03402   EVT VT = Op.getValueType();
03403 
03404   SDValue Overflow = DAG.getNode(ARMISD::CMOV, SDLoc(Op), VT, TVal, FVal,
03405                                  ARMcc, CCR, OverflowCmp);
03406 
03407   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
03408   return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), VTs, Value, Overflow);
03409 }
03410 
03411 
03412 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
03413   SDValue Cond = Op.getOperand(0);
03414   SDValue SelectTrue = Op.getOperand(1);
03415   SDValue SelectFalse = Op.getOperand(2);
03416   SDLoc dl(Op);
03417   unsigned Opc = Cond.getOpcode();
03418 
03419   if (Cond.getResNo() == 1 &&
03420       (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
03421        Opc == ISD::USUBO)) {
03422     if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
03423       return SDValue();
03424 
03425     SDValue Value, OverflowCmp;
03426     SDValue ARMcc;
03427     std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
03428     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03429     EVT VT = Op.getValueType();
03430 
03431     return getCMOV(SDLoc(Op), VT, SelectTrue, SelectFalse, ARMcc, CCR,
03432                    OverflowCmp, DAG);
03433   }
03434 
03435   // Convert:
03436   //
03437   //   (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
03438   //   (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
03439   //
03440   if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
03441     const ConstantSDNode *CMOVTrue =
03442       dyn_cast<ConstantSDNode>(Cond.getOperand(0));
03443     const ConstantSDNode *CMOVFalse =
03444       dyn_cast<ConstantSDNode>(Cond.getOperand(1));
03445 
03446     if (CMOVTrue && CMOVFalse) {
03447       unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
03448       unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
03449 
03450       SDValue True;
03451       SDValue False;
03452       if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
03453         True = SelectTrue;
03454         False = SelectFalse;
03455       } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
03456         True = SelectFalse;
03457         False = SelectTrue;
03458       }
03459 
03460       if (True.getNode() && False.getNode()) {
03461         EVT VT = Op.getValueType();
03462         SDValue ARMcc = Cond.getOperand(2);
03463         SDValue CCR = Cond.getOperand(3);
03464         SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
03465         assert(True.getValueType() == VT);
03466         return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
03467       }
03468     }
03469   }
03470 
03471   // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
03472   // undefined bits before doing a full-word comparison with zero.
03473   Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond,
03474                      DAG.getConstant(1, Cond.getValueType()));
03475 
03476   return DAG.getSelectCC(dl, Cond,
03477                          DAG.getConstant(0, Cond.getValueType()),
03478                          SelectTrue, SelectFalse, ISD::SETNE);
03479 }
03480 
03481 static ISD::CondCode getInverseCCForVSEL(ISD::CondCode CC) {
03482   if (CC == ISD::SETNE)
03483     return ISD::SETEQ;
03484   return ISD::getSetCCInverse(CC, true);
03485 }
03486 
03487 static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
03488                                  bool &swpCmpOps, bool &swpVselOps) {
03489   // Start by selecting the GE condition code for opcodes that return true for
03490   // 'equality'
03491   if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE ||
03492       CC == ISD::SETULE)
03493     CondCode = ARMCC::GE;
03494 
03495   // and GT for opcodes that return false for 'equality'.
03496   else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT ||
03497            CC == ISD::SETULT)
03498     CondCode = ARMCC::GT;
03499 
03500   // Since we are constrained to GE/GT, if the opcode contains 'less', we need
03501   // to swap the compare operands.
03502   if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT ||
03503       CC == ISD::SETULT)
03504     swpCmpOps = true;
03505 
03506   // Both GT and GE are ordered comparisons, and return false for 'unordered'.
03507   // If we have an unordered opcode, we need to swap the operands to the VSEL
03508   // instruction (effectively negating the condition).
03509   //
03510   // This also has the effect of swapping which one of 'less' or 'greater'
03511   // returns true, so we also swap the compare operands. It also switches
03512   // whether we return true for 'equality', so we compensate by picking the
03513   // opposite condition code to our original choice.
03514   if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE ||
03515       CC == ISD::SETUGT) {
03516     swpCmpOps = !swpCmpOps;
03517     swpVselOps = !swpVselOps;
03518     CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT;
03519   }
03520 
03521   // 'ordered' is 'anything but unordered', so use the VS condition code and
03522   // swap the VSEL operands.
03523   if (CC == ISD::SETO) {
03524     CondCode = ARMCC::VS;
03525     swpVselOps = true;
03526   }
03527 
03528   // 'unordered or not equal' is 'anything but equal', so use the EQ condition
03529   // code and swap the VSEL operands.
03530   if (CC == ISD::SETUNE) {
03531     CondCode = ARMCC::EQ;
03532     swpVselOps = true;
03533   }
03534 }
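
// Worked example: SETUGE ("unordered or greater-or-equal").  The first block
// above picks GE; the unordered block then swaps the compare operands, swaps
// the VSEL operands, and flips GE to GT.  The result is an ordered compare of
// (b, a) with a GT-predicated VSEL whose operands are exchanged: the original
// "true" value is chosen exactly when the ordered test b > a fails, i.e. when
// a >= b or the operands are unordered -- which is SETUGE.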
03535 
03536 SDValue ARMTargetLowering::getCMOV(SDLoc dl, EVT VT, SDValue FalseVal,
03537                                    SDValue TrueVal, SDValue ARMcc, SDValue CCR,
03538                                    SDValue Cmp, SelectionDAG &DAG) const {
03539   if (Subtarget->isFPOnlySP() && VT == MVT::f64) {
03540     FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl,
03541                            DAG.getVTList(MVT::i32, MVT::i32), FalseVal);
03542     TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl,
03543                           DAG.getVTList(MVT::i32, MVT::i32), TrueVal);
03544 
03545     SDValue TrueLow = TrueVal.getValue(0);
03546     SDValue TrueHigh = TrueVal.getValue(1);
03547     SDValue FalseLow = FalseVal.getValue(0);
03548     SDValue FalseHigh = FalseVal.getValue(1);
03549 
03550     SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow,
03551                               ARMcc, CCR, Cmp);
03552     SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh,
03553                                ARMcc, CCR, duplicateCmp(Cmp, DAG));
03554 
03555     return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High);
03556   } else {
03557     return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,
03558                        Cmp);
03559   }
03560 }
03561 
03562 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
03563   EVT VT = Op.getValueType();
03564   SDValue LHS = Op.getOperand(0);
03565   SDValue RHS = Op.getOperand(1);
03566   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
03567   SDValue TrueVal = Op.getOperand(2);
03568   SDValue FalseVal = Op.getOperand(3);
03569   SDLoc dl(Op);
03570 
03571   if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
03572     DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
03573                                                     dl);
03574 
03575     // If softenSetCCOperands only returned one value, we should compare it to
03576     // zero.
03577     if (!RHS.getNode()) {
03578       RHS = DAG.getConstant(0, LHS.getValueType());
03579       CC = ISD::SETNE;
03580     }
03581   }
03582 
03583   if (LHS.getValueType() == MVT::i32) {
03584     // Try to generate VSEL on ARMv8.
03585     // The VSEL instruction can't use all the usual ARM condition
03586     // codes: it only has two bits to select the condition code, so it's
03587     // constrained to use only GE, GT, VS and EQ.
03588     //
03589     // To implement all the various ISD::SETXXX opcodes, we sometimes need to
03590     // swap the operands of the previous compare instruction (effectively
03591     // inverting the compare condition, swapping 'less' and 'greater') and
03592     // sometimes need to swap the operands to the VSEL (which inverts the
03593     // condition in the sense of firing whenever the previous condition didn't)
03594     if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
03595                                     TrueVal.getValueType() == MVT::f64)) {
03596       ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
03597       if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
03598           CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
03599         CC = getInverseCCForVSEL(CC);
03600         std::swap(TrueVal, FalseVal);
03601       }
03602     }
03603 
03604     SDValue ARMcc;
03605     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03606     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
03607     return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
03608   }
03609 
03610   ARMCC::CondCodes CondCode, CondCode2;
03611   FPCCToARMCC(CC, CondCode, CondCode2);
03612 
03613   // Try to generate VSEL on ARMv8.
03614   if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
03615                                   TrueVal.getValueType() == MVT::f64)) {
03616     // We can select VMAXNM/VMINNM from a compare followed by a select with the
03617     // same operands, as follows:
03618     //   c = fcmp [ogt, olt, ugt, ult] a, b
03619     //   select c, a, b
03620     // We only do this in unsafe-fp-math, because signed zeros and NaNs are
03621     // handled differently than the original code sequence.
03622     if (getTargetMachine().Options.UnsafeFPMath) {
03623       if (LHS == TrueVal && RHS == FalseVal) {
03624         if (CC == ISD::SETOGT || CC == ISD::SETUGT)
03625           return DAG.getNode(ARMISD::VMAXNM, dl, VT, TrueVal, FalseVal);
03626         if (CC == ISD::SETOLT || CC == ISD::SETULT)
03627           return DAG.getNode(ARMISD::VMINNM, dl, VT, TrueVal, FalseVal);
03628       } else if (LHS == FalseVal && RHS == TrueVal) {
03629         if (CC == ISD::SETOLT || CC == ISD::SETULT)
03630           return DAG.getNode(ARMISD::VMAXNM, dl, VT, TrueVal, FalseVal);
03631         if (CC == ISD::SETOGT || CC == ISD::SETUGT)
03632           return DAG.getNode(ARMISD::VMINNM, dl, VT, TrueVal, FalseVal);
03633       }
03634     }
03635 
03636     bool swpCmpOps = false;
03637     bool swpVselOps = false;
03638     checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps);
03639 
03640     if (CondCode == ARMCC::GT || CondCode == ARMCC::GE ||
03641         CondCode == ARMCC::VS || CondCode == ARMCC::EQ) {
03642       if (swpCmpOps)
03643         std::swap(LHS, RHS);
03644       if (swpVselOps)
03645         std::swap(TrueVal, FalseVal);
03646     }
03647   }
03648 
03649   SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
03650   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
03651   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03652   SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
03653   if (CondCode2 != ARMCC::AL) {
03654     SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32);
03655     // FIXME: Needs another CMP because flag can have but one use.
03656     SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
03657     Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
03658   }
03659   return Result;
03660 }
03661 
03662 /// canChangeToInt - Given the fp compare operand, return true if it is suitable
03663 /// to morph to an integer compare sequence.
03664 static bool canChangeToInt(SDValue Op, bool &SeenZero,
03665                            const ARMSubtarget *Subtarget) {
03666   SDNode *N = Op.getNode();
03667   if (!N->hasOneUse())
03668     // Otherwise it requires moving the value from fp to integer registers.
03669     return false;
03670   if (!N->getNumValues())
03671     return false;
03672   EVT VT = Op.getValueType();
03673   if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
03674     // f32 case is generally profitable. f64 case only makes sense when vcmpe +
03675     // vmrs are very slow, e.g. cortex-a8.
03676     return false;
03677 
03678   if (isFloatingPointZero(Op)) {
03679     SeenZero = true;
03680     return true;
03681   }
03682   return ISD::isNormalLoad(N);
03683 }
03684 
03685 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
03686   if (isFloatingPointZero(Op))
03687     return DAG.getConstant(0, MVT::i32);
03688 
03689   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
03690     return DAG.getLoad(MVT::i32, SDLoc(Op),
03691                        Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
03692                        Ld->isVolatile(), Ld->isNonTemporal(),
03693                        Ld->isInvariant(), Ld->getAlignment());
03694 
03695   llvm_unreachable("Unknown VFP cmp argument!");
03696 }
03697 
03698 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
03699                            SDValue &RetVal1, SDValue &RetVal2) {
03700   if (isFloatingPointZero(Op)) {
03701     RetVal1 = DAG.getConstant(0, MVT::i32);
03702     RetVal2 = DAG.getConstant(0, MVT::i32);
03703     return;
03704   }
03705 
03706   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
03707     SDValue Ptr = Ld->getBasePtr();
03708     RetVal1 = DAG.getLoad(MVT::i32, SDLoc(Op),
03709                           Ld->getChain(), Ptr,
03710                           Ld->getPointerInfo(),
03711                           Ld->isVolatile(), Ld->isNonTemporal(),
03712                           Ld->isInvariant(), Ld->getAlignment());
03713 
03714     EVT PtrType = Ptr.getValueType();
03715     unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
03716     SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(Op),
03717                                  PtrType, Ptr, DAG.getConstant(4, PtrType));
03718     RetVal2 = DAG.getLoad(MVT::i32, SDLoc(Op),
03719                           Ld->getChain(), NewPtr,
03720                           Ld->getPointerInfo().getWithOffset(4),
03721                           Ld->isVolatile(), Ld->isNonTemporal(),
03722                           Ld->isInvariant(), NewAlign);
03723     return;
03724   }
03725 
03726   llvm_unreachable("Unknown VFP cmp argument!");
03727 }
03728 
03729 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
03730 /// f32 and even f64 comparisons to integer ones.
03731 SDValue
03732 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
03733   SDValue Chain = Op.getOperand(0);
03734   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
03735   SDValue LHS = Op.getOperand(2);
03736   SDValue RHS = Op.getOperand(3);
03737   SDValue Dest = Op.getOperand(4);
03738   SDLoc dl(Op);
03739 
03740   bool LHSSeenZero = false;
03741   bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
03742   bool RHSSeenZero = false;
03743   bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget);
03744   if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
03745     // If unsafe fp math optimization is enabled and there are no other uses of
03746     // the CMP operands, and the condition code is EQ or NE, we can optimize it
03747     // to an integer comparison.
03748     if (CC == ISD::SETOEQ)
03749       CC = ISD::SETEQ;
03750     else if (CC == ISD::SETUNE)
03751       CC = ISD::SETNE;
03752 
03753     SDValue Mask = DAG.getConstant(0x7fffffff, MVT::i32);
03754     SDValue ARMcc;
03755     if (LHS.getValueType() == MVT::f32) {
03756       LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
03757                         bitcastf32Toi32(LHS, DAG), Mask);
03758       RHS = DAG.getNode(ISD::AND, dl, MVT::i32,
03759                         bitcastf32Toi32(RHS, DAG), Mask);
03760       SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
03761       SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03762       return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
03763                          Chain, Dest, ARMcc, CCR, Cmp);
03764     }
03765 
03766     SDValue LHS1, LHS2;
03767     SDValue RHS1, RHS2;
03768     expandf64Toi32(LHS, DAG, LHS1, LHS2);
03769     expandf64Toi32(RHS, DAG, RHS1, RHS2);
03770     LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
03771     RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
03772     ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
03773     ARMcc = DAG.getConstant(CondCode, MVT::i32);
03774     SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
03775     SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
03776     return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
03777   }
03778 
03779   return SDValue();
03780 }
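
// Worked example of the transformation above: with unsafe-fp-math, an
// equality branch against 0.0f is turned into an integer test on the
// magnitude bits.  Masking with 0x7fffffff makes +0.0 (0x00000000) and -0.0
// (0x80000000) compare equal, so "x == 0.0f" becomes
// "(bits(x) & 0x7fffffff) == 0" on core registers, avoiding a VCMPE + VMRS.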
03781 
03782 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
03783   SDValue Chain = Op.getOperand(0);
03784   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
03785   SDValue LHS = Op.getOperand(2);
03786   SDValue RHS = Op.getOperand(3);
03787   SDValue Dest = Op.getOperand(4);
03788   SDLoc dl(Op);
03789 
03790   if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
03791     DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
03792                                                     dl);
03793 
03794     // If softenSetCCOperands only returned one value, we should compare it to
03795     // zero.
03796     if (!RHS.getNode()) {
03797       RHS = DAG.getConstant(0, LHS.getValueType());
03798       CC = ISD::SETNE;
03799     }
03800   }
03801 
03802   if (LHS.getValueType() == MVT::i32) {
03803     SDValue ARMcc;
03804     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
03805     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03806     return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
03807                        Chain, Dest, ARMcc, CCR, Cmp);
03808   }
03809 
03810   assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
03811 
03812   if (getTargetMachine().Options.UnsafeFPMath &&
03813       (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
03814        CC == ISD::SETNE || CC == ISD::SETUNE)) {
03815     SDValue Result = OptimizeVFPBrcond(Op, DAG);
03816     if (Result.getNode())
03817       return Result;
03818   }
03819 
03820   ARMCC::CondCodes CondCode, CondCode2;
03821   FPCCToARMCC(CC, CondCode, CondCode2);
03822 
03823   SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
03824   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
03825   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03826   SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
03827   SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
03828   SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
03829   if (CondCode2 != ARMCC::AL) {
03830     ARMcc = DAG.getConstant(CondCode2, MVT::i32);
03831     SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
03832     Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
03833   }
03834   return Res;
03835 }
03836 
03837 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
03838   SDValue Chain = Op.getOperand(0);
03839   SDValue Table = Op.getOperand(1);
03840   SDValue Index = Op.getOperand(2);
03841   SDLoc dl(Op);
03842 
03843   EVT PTy = getPointerTy();
03844   JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
03845   ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
03846   SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
03847   SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
03848   Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId);
03849   Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy));
03850   SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
03851   if (Subtarget->isThumb2()) {
03852     // Thumb2 uses a two-level jump. That is, it jumps into the jump table
03853     // which does another jump to the destination. This also makes it easier
03854     // to translate it to TBB / TBH later.
03855     // FIXME: This might not work if the function is extremely large.
03856     return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
03857                        Addr, Op.getOperand(2), JTI, UId);
03858   }
03859   if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
03860     Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
03861                        MachinePointerInfo::getJumpTable(),
03862                        false, false, false, 0);
03863     Chain = Addr.getValue(1);
03864     Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
03865     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
03866   } else {
03867     Addr = DAG.getLoad(PTy, dl, Chain, Addr,
03868                        MachinePointerInfo::getJumpTable(),
03869                        false, false, false, 0);
03870     Chain = Addr.getValue(1);
03871     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
03872   }
03873 }
03874 
03875 static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
03876   EVT VT = Op.getValueType();
03877   SDLoc dl(Op);
03878 
03879   if (Op.getValueType().getVectorElementType() == MVT::i32) {
03880     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
03881       return Op;
03882     return DAG.UnrollVectorOp(Op.getNode());
03883   }
03884 
03885   assert(Op.getOperand(0).getValueType() == MVT::v4f32 &&
03886          "Invalid type for custom lowering!");
03887   if (VT != MVT::v4i16)
03888     return DAG.UnrollVectorOp(Op.getNode());
03889 
03890   Op = DAG.getNode(Op.getOpcode(), dl, MVT::v4i32, Op.getOperand(0));
03891   return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
03892 }
03893 
03894 SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
03895   EVT VT = Op.getValueType();
03896   if (VT.isVector())
03897     return LowerVectorFP_TO_INT(Op, DAG);
03898 
03899   if (Subtarget->isFPOnlySP() && Op.getOperand(0).getValueType() == MVT::f64) {
03900     RTLIB::Libcall LC;
03901     if (Op.getOpcode() == ISD::FP_TO_SINT)
03902       LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(),
03903                               Op.getValueType());
03904     else
03905       LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(),
03906                               Op.getValueType());
03907     return makeLibCall(DAG, LC, Op.getValueType(), &Op.getOperand(0), 1,
03908                        /*isSigned*/ false, SDLoc(Op)).first;
03909   }
03910 
03911   SDLoc dl(Op);
03912   unsigned Opc;
03913 
03914   switch (Op.getOpcode()) {
03915   default: llvm_unreachable("Invalid opcode!");
03916   case ISD::FP_TO_SINT:
03917     Opc = ARMISD::FTOSI;
03918     break;
03919   case ISD::FP_TO_UINT:
03920     Opc = ARMISD::FTOUI;
03921     break;
03922   }
03923   Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0));
03924   return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
03925 }
03926 
03927 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
03928   EVT VT = Op.getValueType();
03929   SDLoc dl(Op);
03930 
03931   if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
03932     if (VT.getVectorElementType() == MVT::f32)
03933       return Op;
03934     return DAG.UnrollVectorOp(Op.getNode());
03935   }
03936 
03937   assert(Op.getOperand(0).getValueType() == MVT::v4i16 &&
03938          "Invalid type for custom lowering!");
03939   if (VT != MVT::v4f32)
03940     return DAG.UnrollVectorOp(Op.getNode());
03941 
03942   unsigned CastOpc;
03943   unsigned Opc;
03944   switch (Op.getOpcode()) {
03945   default: llvm_unreachable("Invalid opcode!");
03946   case ISD::SINT_TO_FP:
03947     CastOpc = ISD::SIGN_EXTEND;
03948     Opc = ISD::SINT_TO_FP;
03949     break;
03950   case ISD::UINT_TO_FP:
03951     CastOpc = ISD::ZERO_EXTEND;
03952     Opc = ISD::UINT_TO_FP;
03953     break;
03954   }
03955 
03956   Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0));
03957   return DAG.getNode(Opc, dl, VT, Op);
03958 }
03959 
03960 SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const {
03961   EVT VT = Op.getValueType();
03962   if (VT.isVector())
03963     return LowerVectorINT_TO_FP(Op, DAG);
03964 
03965   if (Subtarget->isFPOnlySP() && Op.getValueType() == MVT::f64) {
03966     RTLIB::Libcall LC;
03967     if (Op.getOpcode() == ISD::SINT_TO_FP)
03968       LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(),
03969                               Op.getValueType());
03970     else
03971       LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(),
03972                               Op.getValueType());
03973     return makeLibCall(DAG, LC, Op.getValueType(), &Op.getOperand(0), 1,
03974                        /*isSigned*/ false, SDLoc(Op)).first;
03975   }
03976 
03977   SDLoc dl(Op);
03978   unsigned Opc;
03979 
03980   switch (Op.getOpcode()) {
03981   default: llvm_unreachable("Invalid opcode!");
03982   case ISD::SINT_TO_FP:
03983     Opc = ARMISD::SITOF;
03984     break;
03985   case ISD::UINT_TO_FP:
03986     Opc = ARMISD::UITOF;
03987     break;
03988   }
03989 
03990   Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0));
03991   return DAG.getNode(Opc, dl, VT, Op);
03992 }
03993 
03994 SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
03995   // Implement fcopysign with a fabs and a conditional fneg.
03996   SDValue Tmp0 = Op.getOperand(0);
03997   SDValue Tmp1 = Op.getOperand(1);
03998   SDLoc dl(Op);
03999   EVT VT = Op.getValueType();
04000   EVT SrcVT = Tmp1.getValueType();
04001   bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
04002     Tmp0.getOpcode() == ARMISD::VMOVDRR;
04003   bool UseNEON = !InGPR && Subtarget->hasNEON();
04004 
04005   if (UseNEON) {
04006     // Use VBSL to copy the sign bit.
04007     unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80);
04008     SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
04009                                DAG.getTargetConstant(EncodedVal, MVT::i32));
04010     EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
04011     if (VT == MVT::f64)
04012       Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT,
04013                          DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
04014                          DAG.getConstant(32, MVT::i32));
04015     else /*if (VT == MVT::f32)*/
04016       Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
04017     if (SrcVT == MVT::f32) {
04018       Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
04019       if (VT == MVT::f64)
04020         Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT,
04021                            DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
04022                            DAG.getConstant(32, MVT::i32));
04023     } else if (VT == MVT::f32)
04024       Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64,
04025                          DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
04026                          DAG.getConstant(32, MVT::i32));
04027     Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
04028     Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);
04029 
04030     SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff),
04031                                             MVT::i32);
04032     AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
04033     SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
04034                                   DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));
04035 
04036     SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
04037                               DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
04038                               DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
04039     if (VT == MVT::f32) {
04040       Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
04041       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
04042                         DAG.getConstant(0, MVT::i32));
04043     } else {
04044       Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
04045     }
04046 
04047     return Res;
04048   }
04049 
04050   // Bitcast operand 1 to i32.
04051   if (SrcVT == MVT::f64)
04052     Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
04053                        Tmp1).getValue(1);
04054   Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);
04055 
04056   // Or in the signbit with integer operations.
04057   SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32);
04058   SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32);
04059   Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
04060   if (VT == MVT::f32) {
04061     Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
04062                        DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
04063     return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
04064                        DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
04065   }
04066 
04067   // f64: Or the high part with signbit and then combine two parts.
04068   Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
04069                      Tmp0);
04070   SDValue Lo = Tmp0.getValue(0);
04071   SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
04072   Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
04073   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
04074 }
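
// Worked example of the integer path above: copysign(2.0f, -1.0f).  The sign
// bit of -1.0f (0xBF800000 & 0x80000000) is OR'd into the magnitude bits of
// 2.0f (0x40000000 & 0x7fffffff), giving 0xC0000000, i.e. -2.0f.  For f64,
// only the high word (which carries the sign bit) is rewritten this way and
// is then recombined with the untouched low word via VMOVDRR.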
04075 
04076 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
04077   MachineFunction &MF = DAG.getMachineFunction();
04078   MachineFrameInfo *MFI = MF.getFrameInfo();
04079   MFI->setReturnAddressIsTaken(true);
04080 
04081   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
04082     return SDValue();
04083 
04084   EVT VT = Op.getValueType();
04085   SDLoc dl(Op);
04086   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
04087   if (Depth) {
04088     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
04089     SDValue Offset = DAG.getConstant(4, MVT::i32);
04090     return DAG.getLoad(VT, dl, DAG.getEntryNode(),
04091                        DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
04092                        MachinePointerInfo(), false, false, false, 0);
04093   }
04094 
04095   // Return LR, which contains the return address. Mark it an implicit live-in.
04096   unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
04097   return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
04098 }
04099 
04100 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
04101   const ARMBaseRegisterInfo &ARI =
04102     *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
04103   MachineFunction &MF = DAG.getMachineFunction();
04104   MachineFrameInfo *MFI = MF.getFrameInfo();
04105   MFI->setFrameAddressIsTaken(true);
04106 
04107   EVT VT = Op.getValueType();
04108   SDLoc dl(Op);  // FIXME probably not meaningful
04109   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
04110   unsigned FrameReg = ARI.getFrameRegister(MF);
04111   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
04112   while (Depth--)
04113     FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
04114                             MachinePointerInfo(),
04115                             false, false, false, 0);
04116   return FrameAddr;
04117 }
04118 
04119 // FIXME? Maybe this could be a TableGen attribute on some registers and
04120 // this table could be generated automatically from RegInfo.
04121 unsigned ARMTargetLowering::getRegisterByName(const char* RegName,
04122                                               EVT VT) const {
04123   unsigned Reg = StringSwitch<unsigned>(RegName)
04124                        .Case("sp", ARM::SP)
04125                        .Default(0);
04126   if (Reg)
04127     return Reg;
04128   report_fatal_error("Invalid register name global variable");
04129 }
04130 
04131 /// ExpandBITCAST - If the target supports VFP, this function is called to
04132 /// expand a bit convert where either the source or destination type is i64 to
04133 /// use a VMOVDRR or VMOVRRD node.  This should not be done when the non-i64
04134 /// operand type is illegal (e.g., v2f32 for a target that doesn't support
04135 /// vectors), since the legalizer won't know what to do with that.
04136 static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
04137   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
04138   SDLoc dl(N);
04139   SDValue Op = N->getOperand(0);
04140 
04141   // This function is only supposed to be called for i64 types, either as the
04142   // source or destination of the bit convert.
04143   EVT SrcVT = Op.getValueType();
04144   EVT DstVT = N->getValueType(0);
04145   assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
04146          "ExpandBITCAST called for non-i64 type");
04147 
04148   // Turn i64->f64 into VMOVDRR.
04149   if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
04150     SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
04151                              DAG.getConstant(0, MVT::i32));
04152     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
04153                              DAG.getConstant(1, MVT::i32));
04154     return DAG.getNode(ISD::BITCAST, dl, DstVT,
04155                        DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
04156   }
04157 
04158   // Turn f64->i64 into VMOVRRD.
04159   if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
04160     SDValue Cvt;
04161     if (TLI.isBigEndian() && SrcVT.isVector() &&
04162         SrcVT.getVectorNumElements() > 1)
04163       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
04164                         DAG.getVTList(MVT::i32, MVT::i32),
04165                         DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
04166     else
04167       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
04168                         DAG.getVTList(MVT::i32, MVT::i32), Op);
04169     // Merge the pieces into a single i64 value.
04170     return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
04171   }
04172 
04173   return SDValue();
04174 }
04175 
04176 /// getZeroVector - Returns a vector of specified type with all zero elements.
04177 /// Zero vectors are used to represent vector negation and in those cases
04178 /// will be implemented with the NEON VNEG instruction.  However, VNEG does
04179 /// not support i64 elements, so sometimes the zero vectors will need to be
04180 /// explicitly constructed.  Regardless, use a canonical VMOV to create the
04181 /// zero vector.
04182 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, SDLoc dl) {
04183   assert(VT.isVector() && "Expected a vector type");
04184   // The canonical modified immediate encoding of a zero vector is....0!
04185   SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32);
04186   EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
04187   SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
04188   return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
04189 }
04190 
04191 /// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
04192 /// i32 values and take a 2 x i32 value to shift plus a shift amount.
04193 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
04194                                                 SelectionDAG &DAG) const {
04195   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
04196   EVT VT = Op.getValueType();
04197   unsigned VTBits = VT.getSizeInBits();
04198   SDLoc dl(Op);
04199   SDValue ShOpLo = Op.getOperand(0);
04200   SDValue ShOpHi = Op.getOperand(1);
04201   SDValue ShAmt  = Op.getOperand(2);
04202   SDValue ARMcc;
04203   unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
04204 
04205   assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
04206 
04207   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
04208                                  DAG.getConstant(VTBits, MVT::i32), ShAmt);
04209   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
04210   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
04211                                    DAG.getConstant(VTBits, MVT::i32));
04212   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
04213   SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
04214   SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
04215 
04216   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
04217   SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
04218                           ARMcc, DAG, dl);
04219   SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
04220   SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc,
04221                            CCR, Cmp);
04222 
04223   SDValue Ops[2] = { Lo, Hi };
04224   return DAG.getMergeValues(Ops, dl);
04225 }
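
// Worked example: SRL_PARTS with VTBits = 32.  For ShAmt = 4, ExtraShAmt is
// negative, so the CMOV keeps FalseVal: Lo = (lo >> 4) | (hi << 28) and
// Hi = hi >> 4.  For ShAmt = 40, ExtraShAmt = 8 is non-negative, so the CMOV
// selects TrueVal and the low word is simply hi >> 8.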
04226 
04227 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
04228 /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
04229 SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
04230                                                SelectionDAG &DAG) const {
04231   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
04232   EVT VT = Op.getValueType();
04233   unsigned VTBits = VT.getSizeInBits();
04234   SDLoc dl(Op);
04235   SDValue ShOpLo = Op.getOperand(0);
04236   SDValue ShOpHi = Op.getOperand(1);
04237   SDValue ShAmt  = Op.getOperand(2);
04238   SDValue ARMcc;
04239 
04240   assert(Op.getOpcode() == ISD::SHL_PARTS);
04241   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
04242                                  DAG.getConstant(VTBits, MVT::i32), ShAmt);
04243   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
04244   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
04245                                    DAG.getConstant(VTBits, MVT::i32));
04246   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
04247   SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
04248 
04249   SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
04250   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
04251   SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
04252                           ARMcc, DAG, dl);
04253   SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
04254   SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc,
04255                            CCR, Cmp);
04256 
04257   SDValue Ops[2] = { Lo, Hi };
04258   return DAG.getMergeValues(Ops, dl);
04259 }
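
// Worked example: SHL_PARTS with VTBits = 32 and ShAmt = 4.  ExtraShAmt is
// negative, so the CMOV keeps FalseVal: Hi = (hi << 4) | (lo >> 28) and
// Lo = lo << 4.  For ShAmt = 40, ExtraShAmt = 8 is non-negative, so the high
// word becomes lo << 8 instead.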
04260 
04261 SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
04262                                             SelectionDAG &DAG) const {
04263   // The rounding mode is in bits 23:22 of the FPSCR.
04264   // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0.
04265   // The formula we use to implement this is (((FPSCR + (1 << 22)) >> 22) & 3)
04266   // so that the shift + and get folded into a bitfield extract.
04267   SDLoc dl(Op);
04268   SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
04269                               DAG.getConstant(Intrinsic::arm_get_fpscr,
04270                                               MVT::i32));
04271   SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
04272                                   DAG.getConstant(1U << 22, MVT::i32));
04273   SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
04274                               DAG.getConstant(22, MVT::i32));
04275   return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
04276                      DAG.getConstant(3, MVT::i32));
04277 }
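
// Worked example of the formula above: ARM encodes "round towards zero" as
// RMode = 3 (FPSCR bits 23:22 = 0b11), which FLT_ROUNDS represents as 0.
// Adding (1 << 22) carries out of the two-bit field, leaving bits 23:22 at
// 0b00, and the shift-and-mask then extracts 0.  Likewise RMode = 0 (round to
// nearest) yields 1, RMode = 1 (towards +infinity) yields 2, and RMode = 2
// (towards -infinity) yields 3.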
04278 
04279 static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
04280                          const ARMSubtarget *ST) {
04281   EVT VT = N->getValueType(0);
04282   SDLoc dl(N);
04283 
04284   if (!ST->hasV6T2Ops())
04285     return SDValue();
04286 
04287   SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0));
04288   return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
04289 }
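
// Worked example: cttz(x) == ctlz(rbit(x)) because RBIT moves bit i to
// bit 31 - i.  For x = 8 (only bit 3 set), RBIT leaves only bit 28 set and
// CLZ of that is 3, which is exactly the number of trailing zeros of 8.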
04290 
04291 /// getCTPOP16BitCounts - Returns a v8i8/v16i8 vector containing the bit-count
04292 /// for each 16-bit element of the operand, repeated.  The basic idea is to
04293 /// leverage vcnt to get the 8-bit counts, then gather and add the results.
04294 ///
04295 /// Trace for v4i16:
04296 /// input    = [v0    v1    v2    v3   ] (vi 16-bit element)
04297 /// cast: N0 = [w0 w1 w2 w3 w4 w5 w6 w7] (v0 = [w0 w1], wi 8-bit element)
04298 /// vcnt: N1 = [b0 b1 b2 b3 b4 b5 b6 b7] (bi = bit-count of 8-bit element wi)
04299 /// vrev: N2 = [b1 b0 b3 b2 b5 b4 b7 b6]
04300 ///            [b0 b1 b2 b3 b4 b5 b6 b7]
04301 ///           +[b1 b0 b3 b2 b5 b4 b7 b6]
04302 /// N3=N1+N2 = [k0 k0 k1 k1 k2 k2 k3 k3] (k0 = b0+b1 = bit-count of 16-bit v0,
04303 /// vuzp:    = [k0 k1 k2 k3 k0 k1 k2 k3]  each ki is 8-bits)
04304 static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG) {
04305   EVT VT = N->getValueType(0);
04306   SDLoc DL(N);
04307 
04308   EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
04309   SDValue N0 = DAG.getNode(ISD::BITCAST, DL, VT8Bit, N->getOperand(0));
04310   SDValue N1 = DAG.getNode(ISD::CTPOP, DL, VT8Bit, N0);
04311   SDValue N2 = DAG.getNode(ARMISD::VREV16, DL, VT8Bit, N1);
04312   SDValue N3 = DAG.getNode(ISD::ADD, DL, VT8Bit, N1, N2);
04313   return DAG.getNode(ARMISD::VUZP, DL, VT8Bit, N3, N3);
04314 }
04315 
04316 /// lowerCTPOP16BitElements - Returns a v4i16/v8i16 vector containing the
04317 /// bit-count for each 16-bit element from the operand.  We need slightly
04318 /// different sequencing for v4i16 and v8i16 to stay within NEON's available
04319 /// 64/128-bit registers.
04320 ///
04321 /// Trace for v4i16:
04322 /// input           = [v0    v1    v2    v3    ] (vi 16-bit element)
04323 /// v8i8: BitCounts = [k0 k1 k2 k3 k0 k1 k2 k3 ] (ki is the bit-count of vi)
04324 /// v8i16:Extended  = [k0    k1    k2    k3    k0    k1    k2    k3    ]
04325 /// v4i16:Extracted = [k0    k1    k2    k3    ]
04326 static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) {
04327   EVT VT = N->getValueType(0);
04328   SDLoc DL(N);
04329 
04330   SDValue BitCounts = getCTPOP16BitCounts(N, DAG);
04331   if (VT.is64BitVector()) {
04332     SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, BitCounts);
04333     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Extended,
04334                        DAG.getIntPtrConstant(0));
04335   } else {
04336     SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8,
04337                                     BitCounts, DAG.getIntPtrConstant(0));
04338     return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, Extracted);
04339   }
04340 }
04341 
04342 /// lowerCTPOP32BitElements - Returns a v2i32/v4i32 vector containing the
04343 /// bit-count for each 32-bit element from the operand.  The idea here is
04344 /// to split the vector into 16-bit elements, leverage the 16-bit count
04345 /// routine, and then combine the results.
04346 ///
04347 /// Trace for v2i32 (v4i32 similar with Extracted/Extended exchanged):
04348 /// input    = [v0    v1    ] (vi: 32-bit elements)
04349 /// Bitcast  = [w0 w1 w2 w3 ] (wi: 16-bit elements, v0 = [w0 w1])
04350 /// Counts16 = [k0 k1 k2 k3 ] (ki: 16-bit elements, bit-count of wi)
04351 /// vrev: N0 = [k1 k0 k3 k2 ]
04352 ///            [k0 k1 k2 k3 ]
04353 ///       N1 =+[k1 k0 k3 k2 ]
04354 ///            [k0 k2 k1 k3 ]
04355 ///       N2 =+[k1 k3 k0 k2 ]
04356 ///            [k0    k2    k1    k3    ]
04357 /// Extended =+[k1    k3    k0    k2    ]
04358 ///            [k0    k2    ]
04359 /// Extracted=+[k1    k3    ]
04360 ///
04361 static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG) {
04362   EVT VT = N->getValueType(0);
04363   SDLoc DL(N);
04364 
04365   EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16;
04366 
04367   SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, VT16Bit, N->getOperand(0));
04368   SDValue Counts16 = lowerCTPOP16BitElements(Bitcast.getNode(), DAG);
04369   SDValue N0 = DAG.getNode(ARMISD::VREV32, DL, VT16Bit, Counts16);
04370   SDValue N1 = DAG.getNode(ISD::ADD, DL, VT16Bit, Counts16, N0);
04371   SDValue N2 = DAG.getNode(ARMISD::VUZP, DL, VT16Bit, N1, N1);
04372 
04373   if (VT.is64BitVector()) {
04374     SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, N2);
04375     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Extended,
04376                        DAG.getIntPtrConstant(0));
04377   } else {
04378     SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, N2,
04379                                     DAG.getIntPtrConstant(0));
04380     return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, Extracted);
04381   }
04382 }
04383 
04384 static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
04385                           const ARMSubtarget *ST) {
04386   EVT VT = N->getValueType(0);
04387 
04388   assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
04389   assert((VT == MVT::v2i32 || VT == MVT::v4i32 ||
04390           VT == MVT::v4i16 || VT == MVT::v8i16) &&
04391          "Unexpected type for custom ctpop lowering");
04392 
04393   if (VT.getVectorElementType() == MVT::i32)
04394     return lowerCTPOP32BitElements(N, DAG);
04395   else
04396     return lowerCTPOP16BitElements(N, DAG);
04397 }
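
// Worked example of the lowering above: for a v2i32 element 0x00FF00FF,
// VCNT.8 gives per-byte counts [8 0 8 0] (least significant byte first);
// getCTPOP16BitCounts folds these with VREV16 + VADD + VUZP into 16-bit
// counts [8 8], and lowerCTPOP32BitElements repeats the trick with VREV32 to
// produce the final 32-bit count of 16.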
04398 
04399 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
04400                           const ARMSubtarget *ST) {
04401   EVT VT = N->getValueType(0);
04402   SDLoc dl(N);
04403 
04404   if (!VT.isVector())
04405     return SDValue();
04406 
04407   // Lower vector shifts on NEON to use VSHL.
04408   assert(ST->hasNEON() && "unexpected vector shift");
04409 
04410   // Left shifts translate directly to the vshiftu intrinsic.
04411   if (N->getOpcode() == ISD::SHL)
04412     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
04413                        DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
04414                        N->getOperand(0), N->getOperand(1));
04415 
04416   assert((N->getOpcode() == ISD::SRA ||
04417           N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
04418 
04419   // NEON uses the same intrinsics for both left and right shifts.  For
04420   // right shifts, the shift amounts are negative, so negate the vector of
04421   // shift amounts.
04422   EVT ShiftVT = N->getOperand(1).getValueType();
04423   SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
04424                                      getZeroVector(ShiftVT, DAG, dl),
04425                                      N->getOperand(1));
04426   Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
04427                              Intrinsic::arm_neon_vshifts :
04428                              Intrinsic::arm_neon_vshiftu);
04429   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
04430                      DAG.getConstant(vshiftInt, MVT::i32),
04431                      N->getOperand(0), NegatedCount);
04432 }
04433 
04434 static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
04435                                 const ARMSubtarget *ST) {
04436   EVT VT = N->getValueType(0);
04437   SDLoc dl(N);
04438 
04439   // We can get here for a node like i32 = ISD::SHL i32, i64
04440   if (VT != MVT::i64)
04441     return SDValue();
04442 
04443   assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
04444          "Unknown shift to lower!");
04445 
04446   // We only lower SRA and SRL by 1 here; all others use the generic lowering.
04447   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
04448       cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
04449     return SDValue();
04450 
04451   // If we are in thumb mode, we don't have RRX.
04452   if (ST->isThumb1Only()) return SDValue();
04453 
04454   // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
04455   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
04456                            DAG.getConstant(0, MVT::i32));
04457   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
04458                            DAG.getConstant(1, MVT::i32));
04459 
04460   // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
04461   // captures the bit shifted out in the carry flag.
04462   unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
04463   Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi);
04464 
04465   // The low part is an ARMISD::RRX operand, which shifts the carry in.
04466   Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
04467 
04468   // Merge the pieces into a single i64 value.
04469   return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
04470 }
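
// Worked example: lowering i64 0x0000000100000000 >> 1.  The flag-setting
// shift moves the high word 0x00000001 down to 0 and deposits the bit shifted
// out in the carry flag; RRX then rotates that carry into the top of the low
// word, turning 0x00000000 into 0x80000000, so the result is
// 0x0000000080000000 as expected.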
04471 
04472 static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
04473   SDValue TmpOp0, TmpOp1;
04474   bool Invert = false;
04475   bool Swap = false;
04476   unsigned Opc = 0;
04477 
04478   SDValue Op0 = Op.getOperand(0);
04479   SDValue Op1 = Op.getOperand(1);
04480   SDValue CC = Op.getOperand(2);
04481   EVT CmpVT = Op0.getValueType().changeVectorElementTypeToInteger();
04482   EVT VT = Op.getValueType();
04483   ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
04484   SDLoc dl(Op);
04485 
04486   if (Op1.getValueType().isFloatingPoint()) {
04487     switch (SetCCOpcode) {
04488     default: llvm_unreachable("Illegal FP comparison");
04489     case ISD::SETUNE:
04490     case ISD::SETNE:  Invert = true; // Fallthrough
04491     case ISD::SETOEQ:
04492     case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
04493     case ISD::SETOLT:
04494     case ISD::SETLT: Swap = true; // Fallthrough
04495     case ISD::SETOGT:
04496     case ISD::SETGT:  Opc = ARMISD::VCGT; break;
04497     case ISD::SETOLE:
04498     case ISD::SETLE:  Swap = true; // Fallthrough
04499     case ISD::SETOGE:
04500     case ISD::SETGE: Opc = ARMISD::VCGE; break;
04501     case ISD::SETUGE: Swap = true; // Fallthrough
04502     case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
04503     case ISD::SETUGT: Swap = true; // Fallthrough
04504     case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
04505     case ISD::SETUEQ: Invert = true; // Fallthrough
04506     case ISD::SETONE:
04507       // Expand this to (OLT | OGT).
04508       TmpOp0 = Op0;
04509       TmpOp1 = Op1;
04510       Opc = ISD::OR;
04511       Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
04512       Op1 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp0, TmpOp1);
04513       break;
04514     case ISD::SETUO: Invert = true; // Fallthrough
04515     case ISD::SETO:
04516       // Expand this to (OLT | OGE).
04517       TmpOp0 = Op0;
04518       TmpOp1 = Op1;
04519       Opc = ISD::OR;
04520       Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
04521       Op1 = DAG.getNode(ARMISD::VCGE, dl, CmpVT, TmpOp0, TmpOp1);
04522       break;
04523     }
04524   } else {
04525     // Integer comparisons.
04526     switch (SetCCOpcode) {
04527     default: llvm_unreachable("Illegal integer comparison");
04528     case ISD::SETNE:  Invert = true;
04529     case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
04530     case ISD::SETLT:  Swap = true;
04531     case ISD::SETGT:  Opc = ARMISD::VCGT; break;
04532     case ISD::SETLE:  Swap = true;
04533     case ISD::SETGE:  Opc = ARMISD::VCGE; break;
04534     case ISD::SETULT: Swap = true;
04535     case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
04536     case ISD::SETULE: Swap = true;
04537     case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
04538     }
04539 
04540     // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
04541     if (Opc == ARMISD::VCEQ) {
04542 
04543       SDValue AndOp;
04544       if (ISD::isBuildVectorAllZeros(Op1.getNode()))
04545         AndOp = Op0;
04546       else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
04547         AndOp = Op1;
04548 
04549       // Ignore bitconvert.
04550       if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
04551         AndOp = AndOp.getOperand(0);
04552 
04553       if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
04554         Opc = ARMISD::VTST;
04555         Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0));
04556         Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1));
04557         Invert = !Invert;
04558       }
04559     }
04560   }
04561 
04562   if (Swap)
04563     std::swap(Op0, Op1);
04564 
04565   // If one of the operands is a constant vector zero, attempt to fold the
04566   // comparison to a specialized compare-against-zero form.
04567   SDValue SingleOp;
04568   if (ISD::isBuildVectorAllZeros(Op1.getNode()))
04569     SingleOp = Op0;
04570   else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
04571     if (Opc == ARMISD::VCGE)
04572       Opc = ARMISD::VCLEZ;
04573     else if (Opc == ARMISD::VCGT)
04574       Opc = ARMISD::VCLTZ;
04575     SingleOp = Op1;
04576   }
04577 
04578   SDValue Result;
04579   if (SingleOp.getNode()) {
04580     switch (Opc) {
04581     case ARMISD::VCEQ:
04582       Result = DAG.getNode(ARMISD::VCEQZ, dl, CmpVT, SingleOp); break;
04583     case ARMISD::VCGE:
04584       Result = DAG.getNode(ARMISD::VCGEZ, dl, CmpVT, SingleOp); break;
04585     case ARMISD::VCLEZ:
04586       Result = DAG.getNode(ARMISD::VCLEZ, dl, CmpVT, SingleOp); break;
04587     case ARMISD::VCGT:
04588       Result = DAG.getNode(ARMISD::VCGTZ, dl, CmpVT, SingleOp); break;
04589     case ARMISD::VCLTZ:
04590       Result = DAG.getNode(ARMISD::VCLTZ, dl, CmpVT, SingleOp); break;
04591     default:
04592       Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
04593     }
04594   } else {
04595      Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
04596   }
04597 
04598   Result = DAG.getSExtOrTrunc(Result, dl, VT);
04599 
04600   if (Invert)
04601     Result = DAG.getNOT(dl, Result, VT);
04602 
04603   return Result;
04604 }
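
// Worked example of the Invert/Swap handling above: NEON's FP compares are
// all ordered predicates (false on NaN), so a v4f32 SETULE is built as
// VCGT(a, b) followed by a vector NOT -- !(a > b) is exactly "a <= b or
// unordered".  SETOLT, by contrast, just swaps the operands and uses
// VCGT(b, a) directly.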
04605 
04606 /// isNEONModifiedImm - Check if the specified splat value corresponds to a
04607 /// valid vector constant for a NEON instruction with a "modified immediate"
04608 /// operand (e.g., VMOV).  If so, return the encoded value.
04609 static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
04610                                  unsigned SplatBitSize, SelectionDAG &DAG,
04611                                  EVT &VT, bool is128Bits, NEONModImmType type) {
04612   unsigned OpCmode, Imm;
04613 
04614   // SplatBitSize is set to the smallest size that splats the vector, so a
04615   // zero vector will always have SplatBitSize == 8.  However, NEON modified
04616   // immediate instructions other than VMOV do not support the 8-bit encoding
04617   // of a zero vector, and the default encoding of zero is supposed to be the
04618   // 32-bit version.
04619   if (SplatBits == 0)
04620     SplatBitSize = 32;
04621 
04622   switch (SplatBitSize) {
04623   case 8:
04624     if (type != VMOVModImm)
04625       return SDValue();
04626     // Any 1-byte value is OK.  Op=0, Cmode=1110.
04627     assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
04628     OpCmode = 0xe;
04629     Imm = SplatBits;
04630     VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
04631     break;
04632 
04633   case 16:
04634     // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
04635     VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
04636     if ((SplatBits & ~0xff) == 0) {
04637       // Value = 0x00nn: Op=x, Cmode=100x.
04638       OpCmode = 0x8;
04639       Imm = SplatBits;
04640       break;
04641     }
04642     if ((SplatBits & ~0xff00) == 0) {
04643       // Value = 0xnn00: Op=x, Cmode=101x.
04644       OpCmode = 0xa;
04645       Imm = SplatBits >> 8;
04646       break;
04647     }
04648     return SDValue();
04649 
04650   case 32:
04651     // NEON's 32-bit VMOV supports splat values where:
04652     // * only one byte is nonzero, or
04653     // * the least significant byte is 0xff and the second byte is nonzero, or
04654     // * the least significant 2 bytes are 0xff and the third is nonzero.
04655     VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
04656     if ((SplatBits & ~0xff) == 0) {
04657       // Value = 0x000000nn: Op=x, Cmode=000x.
04658       OpCmode = 0;
04659       Imm = SplatBits;
04660       break;
04661     }
04662     if ((SplatBits & ~0xff00) == 0) {
04663       // Value = 0x0000nn00: Op=x, Cmode=001x.
04664       OpCmode = 0x2;
04665       Imm = SplatBits >> 8;
04666       break;
04667     }
04668     if ((SplatBits & ~0xff0000) == 0) {
04669       // Value = 0x00nn0000: Op=x, Cmode=010x.
04670       OpCmode = 0x4;
04671       Imm = SplatBits >> 16;
04672       break;
04673     }
04674     if ((SplatBits & ~0xff000000) == 0) {
04675       // Value = 0xnn000000: Op=x, Cmode=011x.
04676       OpCmode = 0x6;
04677       Imm = SplatBits >> 24;
04678       break;
04679     }
04680 
04681     // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
04682     if (type == OtherModImm) return SDValue();
04683 
04684     if ((SplatBits & ~0xffff) == 0 &&
04685         ((SplatBits | SplatUndef) & 0xff) == 0xff) {
04686       // Value = 0x0000nnff: Op=x, Cmode=1100.
04687       OpCmode = 0xc;
04688       Imm = SplatBits >> 8;
04689       break;
04690     }
04691 
04692     if ((SplatBits & ~0xffffff) == 0 &&
04693         ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
04694       // Value = 0x00nnffff: Op=x, Cmode=1101.
04695       OpCmode = 0xd;
04696       Imm = SplatBits >> 16;
04697       break;
04698     }
04699 
04700     // Note: there are a few 32-bit splat values (specifically: 00ffff00,
04701     // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
04702     // VMOV.I32.  A (very) minor optimization would be to replicate the value
04703     // and fall through here to test for a valid 64-bit splat.  But, then the
04704     // caller would also need to check and handle the change in size.
04705     return SDValue();
04706 
04707   case 64: {
04708     if (type != VMOVModImm)
04709       return SDValue();
04710     // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
04711     uint64_t BitMask = 0xff;
04712     uint64_t Val = 0;
04713     unsigned ImmMask = 1;
04714     Imm = 0;
04715     for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
04716       if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
04717         Val |= BitMask;
04718         Imm |= ImmMask;
04719       } else if ((SplatBits & BitMask) != 0) {
04720         return SDValue();
04721       }
04722       BitMask <<= 8;
04723       ImmMask <<= 1;
04724     }
04725 
04726     if (DAG.getTargetLoweringInfo().isBigEndian())
04727       // Swap the higher and lower 32-bit words.
04728       Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4);
04729 
04730     // Op=1, Cmode=1110.
04731     OpCmode = 0x1e;
04732     VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
04733     break;
04734   }
04735 
04736   default:
04737     llvm_unreachable("unexpected size for isNEONModifiedImm");
04738   }
04739 
04740   unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm);
04741   return DAG.getTargetConstant(EncodedVal, MVT::i32);
04742 }
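
// Worked example: a v2i32 splat of 0x00004500 matches the "0x0000nn00" case
// above and is encoded as Op=0, Cmode=0b0010 with Imm = 0x45; the instruction
// itself shifts the byte back into position.  In the 64-bit case, a splat of
// 0x00ff00ff00ff00ff sets one Imm bit per all-ones byte, giving
// Imm = 0b01010101.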
04743 
04744 SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
04745                                            const ARMSubtarget *ST) const {
04746   if (!ST->hasVFP3())
04747     return SDValue();
04748 
04749   bool IsDouble = Op.getValueType() == MVT::f64;
04750   ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op);
04751 
04752   // Use the default (constant pool) lowering for double constants when we have
04753   // an SP-only FPU
04754   if (IsDouble && Subtarget->isFPOnlySP())
04755     return SDValue();
04756 
04757   // Try splatting with a VMOV.f32...
04758   APFloat FPVal = CFP->getValueAPF();
04759   int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal);
04760 
04761   if (ImmVal != -1) {
04762     if (IsDouble || !ST->useNEONForSinglePrecisionFP()) {
04763       // We have code in place to select a valid ConstantFP already, no need to
04764       // do any mangling.
04765       return Op;
04766     }
04767 
04768     // It's a float and we are trying to use NEON operations where
04769     // possible. Lower it to a splat followed by an extract.
04770     SDLoc DL(Op);
04771     SDValue NewVal = DAG.getTargetConstant(ImmVal, MVT::i32);
04772     SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32,
04773                                       NewVal);
04774     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant,
04775                        DAG.getConstant(0, MVT::i32));
04776   }
04777 
04778   // The rest of our options are NEON only; make sure that's allowed before
04779   // proceeding.
04780   if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP()))
04781     return SDValue();
04782 
04783   EVT VMovVT;
04784   uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();
04785 
04786   // It wouldn't really be worth bothering for doubles except for one very
04787   // important value, which does happen to match: 0.0. So make sure we don't do
04788   // anything stupid.
04789   if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
04790     return SDValue();
04791 
04792   // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too).
04793   SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, VMovVT,
04794                                      false, VMOVModImm);
04795   if (NewVal != SDValue()) {
04796     SDLoc DL(Op);
04797     SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT,
04798                                       NewVal);
04799     if (IsDouble)
04800       return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
04801 
04802     // It's a float: cast and extract a vector element.
04803     SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
04804                                        VecConstant);
04805     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
04806                        DAG.getConstant(0, MVT::i32));
04807   }
04808 
04809   // Finally, try a VMVN.i32
04810   NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, VMovVT,
04811                              false, VMVNModImm);
04812   if (NewVal != SDValue()) {
04813     SDLoc DL(Op);
04814     SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal);
04815 
04816     if (IsDouble)
04817       return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
04818 
04819     // It's a float: cast and extract a vector element.
04820     SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
04821                                        VecConstant);
04822     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
04823                        DAG.getConstant(0, MVT::i32));
04824   }
04825 
04826   return SDValue();
04827 }
04828 
04829 // Check whether a VEXT instruction can handle the shuffle mask when the
04830 // vector sources of the shuffle are the same.
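      // For example, for <4 x i32> the mask <1, 2, 3, 0> is a singleton VEXT
      // with Imm = 1.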
04831 static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
04832   unsigned NumElts = VT.getVectorNumElements();
04833 
04834   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
04835   if (M[0] < 0)
04836     return false;
04837 
04838   Imm = M[0];
04839 
04840   // If this is a VEXT shuffle, the immediate value is the index of the first
04841   // element.  The other shuffle indices must be the successive elements after
04842   // the first one.
04843   unsigned ExpectedElt = Imm;
04844   for (unsigned i = 1; i < NumElts; ++i) {
04845     // Increment the expected index.  If it wraps around, just follow it
04846     // back to index zero and keep going.
04847     ++ExpectedElt;
04848     if (ExpectedElt == NumElts)
04849       ExpectedElt = 0;
04850 
04851     if (M[i] < 0) continue; // ignore UNDEF indices
04852     if (ExpectedElt != static_cast<unsigned>(M[i]))
04853       return false;
04854   }
04855 
04856   return true;
04857 }
04858 
04859 
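      /// isVEXTMask - Check if a vector shuffle corresponds to a VEXT (vector
      /// extract) of the two source operands, possibly with the operands
      /// swapped.  On success, Imm is set to the extraction offset; e.g., for
      /// <4 x i32> the mask <2, 3, 4, 5> is a VEXT with Imm = 2.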
04860 static bool isVEXTMask(ArrayRef<int> M, EVT VT,
04861                        bool &ReverseVEXT, unsigned &Imm) {
04862   unsigned NumElts = VT.getVectorNumElements();
04863   ReverseVEXT = false;
04864 
04865   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
04866   if (M[0] < 0)
04867     return false;
04868 
04869   Imm = M[0];
04870 
04871   // If this is a VEXT shuffle, the immediate value is the index of the first
04872   // element.  The other shuffle indices must be the successive elements after
04873   // the first one.
04874   unsigned ExpectedElt = Imm;
04875   for (unsigned i = 1; i < NumElts; ++i) {
04876     // Increment the expected index.  If it wraps around, it may still be
04877     // a VEXT but the source vectors must be swapped.
04878     ExpectedElt += 1;
04879     if (ExpectedElt == NumElts * 2) {
04880       ExpectedElt = 0;
04881       ReverseVEXT = true;
04882     }
04883 
04884     if (M[i] < 0) continue; // ignore UNDEF indices
04885     if (ExpectedElt != static_cast<unsigned>(M[i]))
04886       return false;
04887   }
04888 
04889   // Adjust the index value if the source operands will be swapped.
04890   if (ReverseVEXT)
04891     Imm -= NumElts;
04892 
04893   return true;
04894 }
04895 
04896 /// isVREVMask - Check if a vector shuffle corresponds to a VREV
04897 /// instruction with the specified blocksize.  (The order of the elements
04898 /// within each block of the vector is reversed.)
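      /// For example, VREV64.32 on a <4 x i32> vector uses the mask <1, 0, 3, 2>.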
04899 static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
04900   assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
04901          "Only possible block sizes for VREV are: 16, 32, 64");
04902 
04903   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04904   if (EltSz == 64)
04905     return false;
04906 
04907   unsigned NumElts = VT.getVectorNumElements();
04908   unsigned BlockElts = M[0] + 1;
04909   // If the first shuffle index is UNDEF, be optimistic.
04910   if (M[0] < 0)
04911     BlockElts = BlockSize / EltSz;
04912 
04913   if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
04914     return false;
04915 
04916   for (unsigned i = 0; i < NumElts; ++i) {
04917     if (M[i] < 0) continue; // ignore UNDEF indices
04918     if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
04919       return false;
04920   }
04921 
04922   return true;
04923 }
04924 
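      /// isVTBLMask - Check if a vector shuffle can be implemented with a VTBL
      /// (NEON table-lookup) instruction.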
04925 static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
04926   // We can handle <8 x i8> vector shuffles. If the index in the mask is out of
04927   // range, then 0 is placed into the resulting vector. So pretty much any mask
04928   // of 8 elements can work here.
04929   return VT == MVT::v8i8 && M.size() == 8;
04930 }
04931 
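      /// isVTRNMask - Check if a vector shuffle corresponds to a VTRN (vector
      /// transpose) instruction.  WhichResult selects between the two results;
      /// e.g., for <4 x i32> the masks are <0, 4, 2, 6> and <1, 5, 3, 7>.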
04932 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
04933   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04934   if (EltSz == 64)
04935     return false;
04936 
04937   unsigned NumElts = VT.getVectorNumElements();
04938   WhichResult = (M[0] == 0 ? 0 : 1);
04939   for (unsigned i = 0; i < NumElts; i += 2) {
04940     if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) ||
04941         (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult))
04942       return false;
04943   }
04944   return true;
04945 }
04946 
04947 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
04948 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
04949 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
04950 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
04951   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04952   if (EltSz == 64)
04953     return false;
04954 
04955   unsigned NumElts = VT.getVectorNumElements();
04956   WhichResult = (M[0] == 0 ? 0 : 1);
04957   for (unsigned i = 0; i < NumElts; i += 2) {
04958     if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) ||
04959         (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult))
04960       return false;
04961   }
04962   return true;
04963 }
04964 
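      /// isVUZPMask - Check if a vector shuffle corresponds to a VUZP (vector
      /// unzip) instruction.  WhichResult selects between the two results;
      /// e.g., for <4 x i32> the masks are <0, 2, 4, 6> and <1, 3, 5, 7>.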
04965 static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
04966   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04967   if (EltSz == 64)
04968     return false;
04969 
04970   unsigned NumElts = VT.getVectorNumElements();
04971   WhichResult = (M[0] == 0 ? 0 : 1);
04972   for (unsigned i = 0; i != NumElts; ++i) {
04973     if (M[i] < 0) continue; // ignore UNDEF indices
04974     if ((unsigned) M[i] != 2 * i + WhichResult)
04975       return false;
04976   }
04977 
04978   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
04979   if (VT.is64BitVector() && EltSz == 32)
04980     return false;
04981 
04982   return true;
04983 }
04984 
04985 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
04986 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
04987 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
04988 static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
04989   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04990   if (EltSz == 64)
04991     return false;
04992 
04993   unsigned Half = VT.getVectorNumElements() / 2;
04994   WhichResult = (M[0] == 0 ? 0 : 1);
04995   for (unsigned j = 0; j != 2; ++j) {
04996     unsigned Idx = WhichResult;
04997     for (unsigned i = 0; i != Half; ++i) {
04998       int MIdx = M[i + j * Half];
04999       if (MIdx >= 0 && (unsigned) MIdx != Idx)
05000         return false;
05001       Idx += 2;
05002     }
05003   }
05004 
05005   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
05006   if (VT.is64BitVector() && EltSz == 32)
05007     return false;
05008 
05009   return true;
05010 }
05011 
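      /// isVZIPMask - Check if a vector shuffle corresponds to a VZIP (vector
      /// zip/interleave) instruction.  WhichResult selects between the two
      /// results; e.g., for <4 x i32> the masks are <0, 4, 1, 5> and <2, 6, 3, 7>.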
05012 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
05013   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
05014   if (EltSz == 64)
05015     return false;
05016 
05017   unsigned NumElts = VT.getVectorNumElements();
05018   WhichResult = (M[0] == 0 ? 0 : 1);
05019   unsigned Idx = WhichResult * NumElts / 2;
05020   for (unsigned i = 0; i != NumElts; i += 2) {
05021     if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
05022         (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts))
05023       return false;
05024     Idx += 1;
05025   }
05026 
05027   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
05028   if (VT.is64BitVector() && EltSz == 32)
05029     return false;
05030 
05031   return true;
05032 }
05033 
05034 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
05035 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
05036 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
05037 static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
05038   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
05039   if (EltSz == 64)
05040     return false;
05041 
05042   unsigned NumElts = VT.getVectorNumElements();
05043   WhichResult = (M[0] == 0 ? 0 : 1);
05044   unsigned Idx = WhichResult * NumElts / 2;
05045   for (unsigned i = 0; i != NumElts; i += 2) {
05046     if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
05047         (M[i+1] >= 0 && (unsigned) M[i+1] != Idx))
05048       return false;
05049     Idx += 1;
05050   }
05051 
05052   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
05053   if (VT.is64BitVector() && EltSz == 32)
05054     return false;
05055 
05056   return true;
05057 }
05058 
05059 /// \return true if this is a reverse operation on a vector.
05060 static bool isReverseMask(ArrayRef<int> M, EVT VT) {
05061   unsigned NumElts = VT.getVectorNumElements();
05062   // Make sure the mask has the right size.
05063   if (NumElts != M.size())
05064       return false;
05065 
05066   // Look for <15, ..., 3, -1, 1, 0>.
05067   for (unsigned i = 0; i != NumElts; ++i)
05068     if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
05069       return false;
05070 
05071   return true;
05072 }
05073 
05074 // If N is an integer constant that can be moved into a register in one
05075 // instruction, return an SDValue of such a constant (will become a MOV
05076 // instruction).  Otherwise return null.
05077 static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
05078                                      const ARMSubtarget *ST, SDLoc dl) {
05079   uint64_t Val;
05080   if (!isa<ConstantSDNode>(N))
05081     return SDValue();
05082   Val = cast<ConstantSDNode>(N)->getZExtValue();
05083 
05084   if (ST->isThumb1Only()) {
05085     if (Val <= 255 || ~Val <= 255)
05086       return DAG.getConstant(Val, MVT::i32);
05087   } else {
05088     if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
05089       return DAG.getConstant(Val, MVT::i32);
05090   }
05091   return SDValue();
05092 }
05093 
05094 // If this is a case we can't handle, return null and let the default
05095 // expansion code take care of it.
05096 SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
05097                                              const ARMSubtarget *ST) const {
05098   BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
05099   SDLoc dl(Op);
05100   EVT VT = Op.getValueType();
05101 
05102   APInt SplatBits, SplatUndef;
05103   unsigned SplatBitSize;
05104   bool HasAnyUndefs;
05105   if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
05106     if (SplatBitSize <= 64) {
05107       // Check if an immediate VMOV works.
05108       EVT VmovVT;
05109       SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
05110                                       SplatUndef.getZExtValue(), SplatBitSize,
05111                                       DAG, VmovVT, VT.is128BitVector(),
05112                                       VMOVModImm);
05113       if (Val.getNode()) {
05114         SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
05115         return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
05116       }
05117 
05118       // Try an immediate VMVN.
05119       uint64_t NegatedImm = (~SplatBits).getZExtValue();
05120       Val = isNEONModifiedImm(NegatedImm,
05121                                       SplatUndef.getZExtValue(), SplatBitSize,
05122                                       DAG, VmovVT, VT.is128BitVector(),
05123                                       VMVNModImm);
05124       if (Val.getNode()) {
05125         SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
05126         return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
05127       }
05128 
05129       // Use vmov.f32 to materialize other v2f32 and v4f32 splats.
05130       if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
05131         int ImmVal = ARM_AM::getFP32Imm(SplatBits);
05132         if (ImmVal != -1) {
05133           SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32);
05134           return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
05135         }
05136       }
05137     }
05138   }
05139 
05140   // Scan through the operands to see if only one value is used.
05141   //
05142   // As an optimisation, even if more than one value is used it may be more
05143   // profitable to splat with one value and then change some lanes.
05144   //
05145   // Heuristically we decide to do this if the vector has a "dominant" value,
05146   // defined as splatted to more than half of the lanes.
05147   unsigned NumElts = VT.getVectorNumElements();
05148   bool isOnlyLowElement = true;
05149   bool usesOnlyOneValue = true;
05150   bool hasDominantValue = false;
05151   bool isConstant = true;
05152 
05153   // Map of the number of times a particular SDValue appears in the
05154   // element list.
05155   DenseMap<SDValue, unsigned> ValueCounts;
05156   SDValue Value;
05157   for (unsigned i = 0; i < NumElts; ++i) {
05158     SDValue V = Op.getOperand(i);
05159     if (V.getOpcode() == ISD::UNDEF)
05160       continue;
05161     if (i > 0)
05162       isOnlyLowElement = false;
05163     if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
05164       isConstant = false;
05165 
05166     ValueCounts.insert(std::make_pair(V, 0));
05167     unsigned &Count = ValueCounts[V];
05168 
05169     // Is this value dominant? (takes up more than half of the lanes)
05170     if (++Count > (NumElts / 2)) {
05171       hasDominantValue = true;
05172       Value = V;
05173     }
05174   }
05175   if (ValueCounts.size() != 1)
05176     usesOnlyOneValue = false;
05177   if (!Value.getNode() && ValueCounts.size() > 0)
05178     Value = ValueCounts.begin()->first;
05179 
05180   if (ValueCounts.size() == 0)
05181     return DAG.getUNDEF(VT);
05182 
05183   // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR.
05184   // Keep going if we hit this case.
05185   if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
05186     return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
05187 
05188   unsigned EltSize = VT.getVectorElementType().getSizeInBits();
05189 
05190   // Use VDUP for non-constant splats.  For f32 constant splats, reduce to
05191   // i32 and try again.
05192   if (hasDominantValue && EltSize <= 32) {
05193     if (!isConstant) {
05194       SDValue N;
05195 
05196       // If we are VDUPing a value that comes directly from a vector, that will
05197       // cause an unnecessary move to and from a GPR, where instead we could
05198       // just use VDUPLANE. We can only do this if the lane being extracted
05199       // is at a constant index, as the VDUP from lane instructions only have
05200       // constant-index forms.
05201       if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
05202           isa<ConstantSDNode>(Value->getOperand(1))) {
05203         // We need to create a new undef vector to use for the VDUPLANE if the
05204         // size of the vector from which we get the value is different than the
05205         // size of the vector that we need to create. We will insert the element
05206         // such that the register coalescer will remove unnecessary copies.
05207         if (VT != Value->getOperand(0).getValueType()) {
05208           ConstantSDNode *constIndex;
05209           constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1));
05210           assert(constIndex && "The index is not a constant!");
05211           unsigned index = constIndex->getAPIntValue().getLimitedValue() %
05212                              VT.getVectorNumElements();
05213           N =  DAG.getNode(ARMISD::VDUPLANE, dl, VT,
05214                  DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT),
05215                         Value, DAG.getConstant(index, MVT::i32)),
05216                            DAG.getConstant(index, MVT::i32));
05217         } else
05218           N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
05219                         Value->getOperand(0), Value->getOperand(1));
05220       } else
05221         N = DAG.getNode(ARMISD::VDUP, dl, VT, Value);
05222 
05223       if (!usesOnlyOneValue) {
05224         // The dominant value was splatted as 'N', but we now have to insert
05225         // all differing elements.
05226         for (unsigned I = 0; I < NumElts; ++I) {
05227           if (Op.getOperand(I) == Value)
05228             continue;
05229           SmallVector<SDValue, 3> Ops;
05230           Ops.push_back(N);
05231           Ops.push_back(Op.getOperand(I));
05232           Ops.push_back(DAG.getConstant(I, MVT::i32));
05233           N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops);
05234         }
05235       }
05236       return N;
05237     }
05238     if (VT.getVectorElementType().isFloatingPoint()) {
05239       SmallVector<SDValue, 8> Ops;
05240       for (unsigned i = 0; i < NumElts; ++i)
05241         Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
05242                                   Op.getOperand(i)));
05243       EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
05244       SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, Ops);
05245       Val = LowerBUILD_VECTOR(Val, DAG, ST);
05246       if (Val.getNode())
05247         return DAG.getNode(ISD::BITCAST, dl, VT, Val);
05248     }
05249     if (usesOnlyOneValue) {
05250       SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
05251       if (isConstant && Val.getNode())
05252         return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
05253     }
05254   }
05255 
05256   // If all elements are constants and the case above didn't get hit, fall back
05257   // to the default expansion, which will generate a load from the constant
05258   // pool.
05259   if (isConstant)
05260     return SDValue();
05261 
05262   // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
05263   if (NumElts >= 4) {
05264     SDValue shuffle = ReconstructShuffle(Op, DAG);
05265     if (shuffle != SDValue())
05266       return shuffle;
05267   }
05268 
05269   // Vectors with 32- or 64-bit elements can be built by directly assigning
05270   // the subregisters.  Lower it to an ARMISD::BUILD_VECTOR so the operands
05271   // will be legalized.
05272   if (EltSize >= 32) {
05273     // Do the expansion with floating-point types, since that is what the VFP
05274     // registers are defined to use, and since i64 is not legal.
05275     EVT EltVT = EVT::getFloatingPointVT(EltSize);
05276     EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
05277     SmallVector<SDValue, 8> Ops;
05278     for (unsigned i = 0; i < NumElts; ++i)
05279       Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
05280     SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
05281     return DAG.getNode(ISD::BITCAST, dl, VT, Val);
05282   }
05283 
05284   // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
05285   // know the default expansion would otherwise fall back on something even
05286   // worse. For a vector with one or two non-undef values, that fallback is
05287   // scalar_to_vector for the elements followed by a shuffle (provided the
05288   // shuffle is valid for the target); for everything else, it is
05289   // materialization element by element on the stack followed by a load.
05290   if (!isConstant && !usesOnlyOneValue) {
05291     SDValue Vec = DAG.getUNDEF(VT);
05292     for (unsigned i = 0 ; i < NumElts; ++i) {
05293       SDValue V = Op.getOperand(i);
05294       if (V.getOpcode() == ISD::UNDEF)
05295         continue;
05296       SDValue LaneIdx = DAG.getConstant(i, MVT::i32);
05297       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
05298     }
05299     return Vec;
05300   }
05301 
05302   return SDValue();
05303 }
05304 
05305 // Gather data to see if the operation can be modelled as a
05306 // shuffle in combination with VEXTs.
05307 SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
05308                                               SelectionDAG &DAG) const {
05309   SDLoc dl(Op);
05310   EVT VT = Op.getValueType();
05311   unsigned NumElts = VT.getVectorNumElements();
05312 
05313   SmallVector<SDValue, 2> SourceVecs;
05314   SmallVector<unsigned, 2> MinElts;
05315   SmallVector<unsigned, 2> MaxElts;
05316 
05317   for (unsigned i = 0; i < NumElts; ++i) {
05318     SDValue V = Op.getOperand(i);
05319     if (V.getOpcode() == ISD::UNDEF)
05320       continue;
05321     else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
05322       // A shuffle can only come from building a vector from various
05323       // elements of other vectors.
05324       return SDValue();
05325     } else if (V.getOperand(0).getValueType().getVectorElementType() !=
05326                VT.getVectorElementType()) {
05327       // This code doesn't know how to handle shuffles where the vector
05328       // element types do not match (this happens because type legalization
05329       // promotes the return type of EXTRACT_VECTOR_ELT).
05330       // FIXME: It might be appropriate to extend this code to handle
05331       // mismatched types.
05332       return SDValue();
05333     }
05334 
05335     // Record this extraction against the appropriate vector if possible...
05336     SDValue SourceVec = V.getOperand(0);
05337     // If the element number isn't a constant, we can't effectively
05338     // analyze what's going on.
05339     if (!isa<ConstantSDNode>(V.getOperand(1)))
05340       return SDValue();
05341     unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
05342     bool FoundSource = false;
05343     for (unsigned j = 0; j < SourceVecs.size(); ++j) {
05344       if (SourceVecs[j] == SourceVec) {
05345         if (MinElts[j] > EltNo)
05346           MinElts[j] = EltNo;
05347         if (MaxElts[j] < EltNo)
05348           MaxElts[j] = EltNo;
05349         FoundSource = true;
05350         break;
05351       }
05352     }
05353 
05354     // Or record a new source if not...
05355     if (!FoundSource) {
05356       SourceVecs.push_back(SourceVec);
05357       MinElts.push_back(EltNo);
05358       MaxElts.push_back(EltNo);
05359     }
05360   }
05361 
05362   // Currently we only do something sane when at most two source vectors
05363   // are involved.
05364   if (SourceVecs.size() > 2)
05365     return SDValue();
05366 
05367   SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT) };
05368   int VEXTOffsets[2] = {0, 0};
05369 
05370   // This loop extracts the usage patterns of the source vectors
05371   // and prepares appropriate SDValues for a shuffle if possible.
05372   for (unsigned i = 0; i < SourceVecs.size(); ++i) {
05373     if (SourceVecs[i].getValueType() == VT) {
05374       // No VEXT necessary
05375       ShuffleSrcs[i] = SourceVecs[i];
05376       VEXTOffsets[i] = 0;
05377       continue;
05378     } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) {
05379       // It probably isn't worth padding out a smaller vector just to
05380       // break it down again in a shuffle.
05381       return SDValue();
05382     }
05383 
05384     // Since only 64-bit and 128-bit vectors are legal on ARM and
05385     // we've eliminated the other cases...
05386     assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts &&
05387            "unexpected vector sizes in ReconstructShuffle");
05388 
05389     if (MaxElts[i] - MinElts[i] >= NumElts) {
05390       // Span too large for a VEXT to cope with.
05391       return SDValue();
05392     }
05393 
05394     if (MinElts[i] >= NumElts) {
05395       // The extraction can just take the second half
05396       VEXTOffsets[i] = NumElts;
05397       ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
05398                                    SourceVecs[i],
05399                                    DAG.getIntPtrConstant(NumElts));
05400     } else if (MaxElts[i] < NumElts) {
05401       // The extraction can just take the first half
05402       VEXTOffsets[i] = 0;
05403       ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
05404                                    SourceVecs[i],
05405                                    DAG.getIntPtrConstant(0));
05406     } else {
05407       // An actual VEXT is needed
05408       VEXTOffsets[i] = MinElts[i];
05409       SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
05410                                      SourceVecs[i],
05411                                      DAG.getIntPtrConstant(0));
05412       SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
05413                                      SourceVecs[i],
05414                                      DAG.getIntPtrConstant(NumElts));
05415       ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2,
05416                                    DAG.getConstant(VEXTOffsets[i], MVT::i32));
05417     }
05418   }
05419 
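        // Build the final shuffle mask in terms of the two prepared sources,
        // rebasing each extracted element index by the VEXT offset of its
        // source vector.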
05420   SmallVector<int, 8> Mask;
05421 
05422   for (unsigned i = 0; i < NumElts; ++i) {
05423     SDValue Entry = Op.getOperand(i);
05424     if (Entry.getOpcode() == ISD::UNDEF) {
05425       Mask.push_back(-1);
05426       continue;
05427     }
05428 
05429     SDValue ExtractVec = Entry.getOperand(0);
05430     int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i)
05431                                           .getOperand(1))->getSExtValue();
05432     if (ExtractVec == SourceVecs[0]) {
05433       Mask.push_back(ExtractElt - VEXTOffset