// Source: LLVM mainline -- ARMISelLowering.cpp (exported from the doxygen
// source browser; embedded "000NN" prefixes on later lines are extraction
// artifacts of that export, not part of the original source).
//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

00015 #include "ARMISelLowering.h"
00016 #include "ARMCallingConv.h"
00017 #include "ARMConstantPoolValue.h"
00018 #include "ARMMachineFunctionInfo.h"
00019 #include "ARMPerfectShuffle.h"
00020 #include "ARMSubtarget.h"
00021 #include "ARMTargetMachine.h"
00022 #include "ARMTargetObjectFile.h"
00023 #include "MCTargetDesc/ARMAddressingModes.h"
00024 #include "llvm/ADT/Statistic.h"
00025 #include "llvm/ADT/StringExtras.h"
00026 #include "llvm/ADT/StringSwitch.h"
00027 #include "llvm/CodeGen/CallingConvLower.h"
00028 #include "llvm/CodeGen/IntrinsicLowering.h"
00029 #include "llvm/CodeGen/MachineBasicBlock.h"
00030 #include "llvm/CodeGen/MachineFrameInfo.h"
00031 #include "llvm/CodeGen/MachineFunction.h"
00032 #include "llvm/CodeGen/MachineInstrBuilder.h"
00033 #include "llvm/CodeGen/MachineJumpTableInfo.h"
00034 #include "llvm/CodeGen/MachineModuleInfo.h"
00035 #include "llvm/CodeGen/MachineRegisterInfo.h"
00036 #include "llvm/CodeGen/SelectionDAG.h"
00037 #include "llvm/IR/CallingConv.h"
00038 #include "llvm/IR/Constants.h"
00039 #include "llvm/IR/Function.h"
00040 #include "llvm/IR/GlobalValue.h"
00041 #include "llvm/IR/IRBuilder.h"
00042 #include "llvm/IR/Instruction.h"
00043 #include "llvm/IR/Instructions.h"
00044 #include "llvm/IR/IntrinsicInst.h"
00045 #include "llvm/IR/Intrinsics.h"
00046 #include "llvm/IR/Type.h"
00047 #include "llvm/MC/MCSectionMachO.h"
00048 #include "llvm/Support/CommandLine.h"
00049 #include "llvm/Support/Debug.h"
00050 #include "llvm/Support/ErrorHandling.h"
00051 #include "llvm/Support/MathExtras.h"
00052 #include "llvm/Support/raw_ostream.h"
00053 #include "llvm/Target/TargetOptions.h"
00054 #include <utility>
00055 using namespace llvm;
00056 
00057 #define DEBUG_TYPE "arm-isel"
00058 
00059 STATISTIC(NumTailCalls, "Number of tail calls");
00060 STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
00061 STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
00062 
00063 cl::opt<bool>
00064 EnableARMLongCalls("arm-long-calls", cl::Hidden,
00065   cl::desc("Generate calls via indirect call instructions"),
00066   cl::init(false));
00067 
00068 static cl::opt<bool>
00069 ARMInterworking("arm-interworking", cl::Hidden,
00070   cl::desc("Enable / disable ARM interworking (for debugging only)"),
00071   cl::init(true));
00072 
00073 namespace {
00074   class ARMCCState : public CCState {
00075   public:
00076     ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
00077                SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
00078                ParmContext PC)
00079         : CCState(CC, isVarArg, MF, locs, C) {
00080       assert(((PC == Call) || (PC == Prologue)) &&
00081              "ARMCCState users must specify whether their context is call"
00082              "or prologue generation.");
00083       CallOrPrologue = PC;
00084     }
00085   };
00086 }
00087 
00088 // The APCS parameter registers.
00089 static const MCPhysReg GPRArgRegs[] = {
00090   ARM::R0, ARM::R1, ARM::R2, ARM::R3
00091 };
00092 
00093 void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
00094                                        MVT PromotedBitwiseVT) {
00095   if (VT != PromotedLdStVT) {
00096     setOperationAction(ISD::LOAD, VT, Promote);
00097     AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);
00098 
00099     setOperationAction(ISD::STORE, VT, Promote);
00100     AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
00101   }
00102 
00103   MVT ElemTy = VT.getVectorElementType();
00104   if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
00105     setOperationAction(ISD::SETCC, VT, Custom);
00106   setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
00107   setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
00108   if (ElemTy == MVT::i32) {
00109     setOperationAction(ISD::SINT_TO_FP, VT, Custom);
00110     setOperationAction(ISD::UINT_TO_FP, VT, Custom);
00111     setOperationAction(ISD::FP_TO_SINT, VT, Custom);
00112     setOperationAction(ISD::FP_TO_UINT, VT, Custom);
00113   } else {
00114     setOperationAction(ISD::SINT_TO_FP, VT, Expand);
00115     setOperationAction(ISD::UINT_TO_FP, VT, Expand);
00116     setOperationAction(ISD::FP_TO_SINT, VT, Expand);
00117     setOperationAction(ISD::FP_TO_UINT, VT, Expand);
00118   }
00119   setOperationAction(ISD::BUILD_VECTOR,      VT, Custom);
00120   setOperationAction(ISD::VECTOR_SHUFFLE,    VT, Custom);
00121   setOperationAction(ISD::CONCAT_VECTORS,    VT, Legal);
00122   setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
00123   setOperationAction(ISD::SELECT,            VT, Expand);
00124   setOperationAction(ISD::SELECT_CC,         VT, Expand);
00125   setOperationAction(ISD::VSELECT,           VT, Expand);
00126   setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
00127   if (VT.isInteger()) {
00128     setOperationAction(ISD::SHL, VT, Custom);
00129     setOperationAction(ISD::SRA, VT, Custom);
00130     setOperationAction(ISD::SRL, VT, Custom);
00131   }
00132 
00133   // Promote all bit-wise operations.
00134   if (VT.isInteger() && VT != PromotedBitwiseVT) {
00135     setOperationAction(ISD::AND, VT, Promote);
00136     AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
00137     setOperationAction(ISD::OR,  VT, Promote);
00138     AddPromotedToType (ISD::OR,  VT, PromotedBitwiseVT);
00139     setOperationAction(ISD::XOR, VT, Promote);
00140     AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
00141   }
00142 
00143   // Neon does not support vector divide/remainder operations.
00144   setOperationAction(ISD::SDIV, VT, Expand);
00145   setOperationAction(ISD::UDIV, VT, Expand);
00146   setOperationAction(ISD::FDIV, VT, Expand);
00147   setOperationAction(ISD::SREM, VT, Expand);
00148   setOperationAction(ISD::UREM, VT, Expand);
00149   setOperationAction(ISD::FREM, VT, Expand);
00150 }
00151 
00152 void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
00153   addRegisterClass(VT, &ARM::DPRRegClass);
00154   addTypeForNEON(VT, MVT::f64, MVT::v2i32);
00155 }
00156 
00157 void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
00158   addRegisterClass(VT, &ARM::DPairRegClass);
00159   addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
00160 }
00161 
00162 ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
00163                                      const ARMSubtarget &STI)
00164     : TargetLowering(TM), Subtarget(&STI) {
00165   RegInfo = Subtarget->getRegisterInfo();
00166   Itins = Subtarget->getInstrItineraryData();
00167 
00168   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
00169 
00170   if (Subtarget->isTargetMachO()) {
00171     // Uses VFP for Thumb libfuncs if available.
00172     if (Subtarget->isThumb() && Subtarget->hasVFP2() &&
00173         Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
00174       // Single-precision floating-point arithmetic.
00175       setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
00176       setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
00177       setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
00178       setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");
00179 
00180       // Double-precision floating-point arithmetic.
00181       setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
00182       setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
00183       setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
00184       setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");
00185 
00186       // Single-precision comparisons.
00187       setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
00188       setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
00189       setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
00190       setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
00191       setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
00192       setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
00193       setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
00194       setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");
00195 
00196       setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
00197       setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
00198       setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
00199       setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
00200       setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
00201       setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
00202       setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
00203       setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);
00204 
00205       // Double-precision comparisons.
00206       setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
00207       setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
00208       setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
00209       setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
00210       setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
00211       setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
00212       setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
00213       setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");
00214 
00215       setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
00216       setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
00217       setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
00218       setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
00219       setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
00220       setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
00221       setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
00222       setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);
00223 
00224       // Floating-point to integer conversions.
00225       // i64 conversions are done via library routines even when generating VFP
00226       // instructions, so use the same ones.
00227       setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
00228       setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
00229       setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
00230       setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");
00231 
00232       // Conversions between floating types.
00233       setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
00234       setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");
00235 
00236       // Integer to floating-point conversions.
00237       // i64 conversions are done via library routines even when generating VFP
00238       // instructions, so use the same ones.
00239       // FIXME: There appears to be some naming inconsistency in ARM libgcc:
00240       // e.g., __floatunsidf vs. __floatunssidfvfp.
00241       setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
00242       setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
00243       setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
00244       setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
00245     }
00246   }
00247 
00248   // These libcalls are not available in 32-bit.
00249   setLibcallName(RTLIB::SHL_I128, nullptr);
00250   setLibcallName(RTLIB::SRL_I128, nullptr);
00251   setLibcallName(RTLIB::SRA_I128, nullptr);
00252 
00253   if (Subtarget->isAAPCS_ABI() && !Subtarget->isTargetMachO() &&
00254       !Subtarget->isTargetWindows()) {
00255     static const struct {
00256       const RTLIB::Libcall Op;
00257       const char * const Name;
00258       const CallingConv::ID CC;
00259       const ISD::CondCode Cond;
00260     } LibraryCalls[] = {
00261       // Double-precision floating-point arithmetic helper functions
00262       // RTABI chapter 4.1.2, Table 2
00263       { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00264       { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00265       { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00266       { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00267 
00268       // Double-precision floating-point comparison helper functions
00269       // RTABI chapter 4.1.2, Table 3
00270       { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
00271       { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
00272       { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
00273       { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
00274       { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
00275       { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
00276       { RTLIB::UO_F64,  "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
00277       { RTLIB::O_F64,   "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
00278 
00279       // Single-precision floating-point arithmetic helper functions
00280       // RTABI chapter 4.1.2, Table 4
00281       { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00282       { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00283       { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00284       { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00285 
00286       // Single-precision floating-point comparison helper functions
00287       // RTABI chapter 4.1.2, Table 5
00288       { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
00289       { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
00290       { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
00291       { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
00292       { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
00293       { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
00294       { RTLIB::UO_F32,  "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
00295       { RTLIB::O_F32,   "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
00296 
00297       // Floating-point to integer conversions.
00298       // RTABI chapter 4.1.2, Table 6
00299       { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00300       { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00301       { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00302       { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00303       { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00304       { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00305       { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00306       { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00307 
00308       // Conversions between floating types.
00309       // RTABI chapter 4.1.2, Table 7
00310       { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00311       { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00312       { RTLIB::FPEXT_F32_F64,   "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00313 
00314       // Integer to floating-point conversions.
00315       // RTABI chapter 4.1.2, Table 8
00316       { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00317       { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00318       { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00319       { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00320       { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00321       { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00322       { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00323       { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00324 
00325       // Long long helper functions
00326       // RTABI chapter 4.2, Table 9
00327       { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00328       { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00329       { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00330       { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00331 
00332       // Integer division functions
00333       // RTABI chapter 4.3.1
00334       { RTLIB::SDIV_I8,  "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00335       { RTLIB::SDIV_I16, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00336       { RTLIB::SDIV_I32, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00337       { RTLIB::SDIV_I64, "__aeabi_ldivmod",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00338       { RTLIB::UDIV_I8,  "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00339       { RTLIB::UDIV_I16, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00340       { RTLIB::UDIV_I32, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00341       { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00342 
00343       // Memory operations
00344       // RTABI chapter 4.3.4
00345       { RTLIB::MEMCPY,  "__aeabi_memcpy",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00346       { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00347       { RTLIB::MEMSET,  "__aeabi_memset",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00348     };
00349 
00350     for (const auto &LC : LibraryCalls) {
00351       setLibcallName(LC.Op, LC.Name);
00352       setLibcallCallingConv(LC.Op, LC.CC);
00353       if (LC.Cond != ISD::SETCC_INVALID)
00354         setCmpLibcallCC(LC.Op, LC.Cond);
00355     }
00356   }
00357 
00358   if (Subtarget->isTargetWindows()) {
00359     static const struct {
00360       const RTLIB::Libcall Op;
00361       const char * const Name;
00362       const CallingConv::ID CC;
00363     } LibraryCalls[] = {
00364       { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
00365       { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
00366       { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
00367       { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
00368       { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
00369       { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
00370       { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
00371       { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
00372     };
00373 
00374     for (const auto &LC : LibraryCalls) {
00375       setLibcallName(LC.Op, LC.Name);
00376       setLibcallCallingConv(LC.Op, LC.CC);
00377     }
00378   }
00379 
00380   // Use divmod compiler-rt calls for iOS 5.0 and later.
00381   if (Subtarget->getTargetTriple().isiOS() &&
00382       !Subtarget->getTargetTriple().isOSVersionLT(5, 0)) {
00383     setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
00384     setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
00385   }
00386 
00387   // The half <-> float conversion functions are always soft-float, but are
00388   // needed for some targets which use a hard-float calling convention by
00389   // default.
00390   if (Subtarget->isAAPCS_ABI()) {
00391     setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
00392     setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
00393     setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
00394   } else {
00395     setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
00396     setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
00397     setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
00398   }
00399 
00400   if (Subtarget->isThumb1Only())
00401     addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
00402   else
00403     addRegisterClass(MVT::i32, &ARM::GPRRegClass);
00404   if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
00405       !Subtarget->isThumb1Only()) {
00406     addRegisterClass(MVT::f32, &ARM::SPRRegClass);
00407     addRegisterClass(MVT::f64, &ARM::DPRRegClass);
00408   }
00409 
00410   for (MVT VT : MVT::vector_valuetypes()) {
00411     for (MVT InnerVT : MVT::vector_valuetypes()) {
00412       setTruncStoreAction(VT, InnerVT, Expand);
00413       setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
00414       setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
00415       setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
00416     }
00417 
00418     setOperationAction(ISD::MULHS, VT, Expand);
00419     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
00420     setOperationAction(ISD::MULHU, VT, Expand);
00421     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
00422 
00423     setOperationAction(ISD::BSWAP, VT, Expand);
00424   }
00425 
00426   setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
00427   setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
00428 
00429   if (Subtarget->hasNEON()) {
00430     addDRTypeForNEON(MVT::v2f32);
00431     addDRTypeForNEON(MVT::v8i8);
00432     addDRTypeForNEON(MVT::v4i16);
00433     addDRTypeForNEON(MVT::v2i32);
00434     addDRTypeForNEON(MVT::v1i64);
00435 
00436     addQRTypeForNEON(MVT::v4f32);
00437     addQRTypeForNEON(MVT::v2f64);
00438     addQRTypeForNEON(MVT::v16i8);
00439     addQRTypeForNEON(MVT::v8i16);
00440     addQRTypeForNEON(MVT::v4i32);
00441     addQRTypeForNEON(MVT::v2i64);
00442 
00443     // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
00444     // neither Neon nor VFP support any arithmetic operations on it.
00445     // The same with v4f32. But keep in mind that vadd, vsub, vmul are natively
00446     // supported for v4f32.
00447     setOperationAction(ISD::FADD, MVT::v2f64, Expand);
00448     setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
00449     setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
00450     // FIXME: Code duplication: FDIV and FREM are expanded always, see
00451     // ARMTargetLowering::addTypeForNEON method for details.
00452     setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
00453     setOperationAction(ISD::FREM, MVT::v2f64, Expand);
00454     // FIXME: Create unittest.
00455     // In other words, find a way to test when "copysign" appears in the
00456     // DAG with vector operands.
00457     setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
00458     // FIXME: Code duplication: SETCC has custom operation action, see
00459     // ARMTargetLowering::addTypeForNEON method for details.
00460     setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
00461     // FIXME: Create unittest for FNEG and for FABS.
00462     setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
00463     setOperationAction(ISD::FABS, MVT::v2f64, Expand);
00464     setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
00465     setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
00466     setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
00467     setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
00468     setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
00469     setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
00470     setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
00471     setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
00472     setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
00473     setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
00474     // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
00475     setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
00476     setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
00477     setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
00478     setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
00479     setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
00480     setOperationAction(ISD::FMA, MVT::v2f64, Expand);
00481 
00482     setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
00483     setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
00484     setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
00485     setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
00486     setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
00487     setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
00488     setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
00489     setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
00490     setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
00491     setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
00492     setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
00493     setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
00494     setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
00495     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
00496     setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);
00497 
00498     // Mark v2f32 intrinsics.
00499     setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
00500     setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
00501     setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
00502     setOperationAction(ISD::FPOWI, MVT::v2f32, Expand);
00503     setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
00504     setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
00505     setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
00506     setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
00507     setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
00508     setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
00509     setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
00510     setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
00511     setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
00512     setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
00513     setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);
00514 
00515     // Neon does not support some operations on v1i64 and v2i64 types.
00516     setOperationAction(ISD::MUL, MVT::v1i64, Expand);
00517     // Custom handling for some quad-vector types to detect VMULL.
00518     setOperationAction(ISD::MUL, MVT::v8i16, Custom);
00519     setOperationAction(ISD::MUL, MVT::v4i32, Custom);
00520     setOperationAction(ISD::MUL, MVT::v2i64, Custom);
00521     // Custom handling for some vector types to avoid expensive expansions
00522     setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
00523     setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
00524     setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
00525     setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
00526     setOperationAction(ISD::SETCC, MVT::v1i64, Expand);
00527     setOperationAction(ISD::SETCC, MVT::v2i64, Expand);
00528     // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
00529     // a destination type that is wider than the source, and nor does
00530     // it have a FP_TO_[SU]INT instruction with a narrower destination than
00531     // source.
00532     setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
00533     setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
00534     setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
00535     setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
00536 
00537     setOperationAction(ISD::FP_ROUND,   MVT::v2f32, Expand);
00538     setOperationAction(ISD::FP_EXTEND,  MVT::v2f64, Expand);
00539 
00540     // NEON does not have single instruction CTPOP for vectors with element
00541     // types wider than 8-bits.  However, custom lowering can leverage the
00542     // v8i8/v16i8 vcnt instruction.
00543     setOperationAction(ISD::CTPOP,      MVT::v2i32, Custom);
00544     setOperationAction(ISD::CTPOP,      MVT::v4i32, Custom);
00545     setOperationAction(ISD::CTPOP,      MVT::v4i16, Custom);
00546     setOperationAction(ISD::CTPOP,      MVT::v8i16, Custom);
00547 
00548     // NEON only has FMA instructions as of VFP4.
00549     if (!Subtarget->hasVFP4()) {
00550       setOperationAction(ISD::FMA, MVT::v2f32, Expand);
00551       setOperationAction(ISD::FMA, MVT::v4f32, Expand);
00552     }
00553 
00554     setTargetDAGCombine(ISD::INTRINSIC_VOID);
00555     setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
00556     setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
00557     setTargetDAGCombine(ISD::SHL);
00558     setTargetDAGCombine(ISD::SRL);
00559     setTargetDAGCombine(ISD::SRA);
00560     setTargetDAGCombine(ISD::SIGN_EXTEND);
00561     setTargetDAGCombine(ISD::ZERO_EXTEND);
00562     setTargetDAGCombine(ISD::ANY_EXTEND);
00563     setTargetDAGCombine(ISD::SELECT_CC);
00564     setTargetDAGCombine(ISD::BUILD_VECTOR);
00565     setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
00566     setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
00567     setTargetDAGCombine(ISD::STORE);
00568     setTargetDAGCombine(ISD::FP_TO_SINT);
00569     setTargetDAGCombine(ISD::FP_TO_UINT);
00570     setTargetDAGCombine(ISD::FDIV);
00571     setTargetDAGCombine(ISD::LOAD);
00572 
00573     // It is legal to extload from v4i8 to v4i16 or v4i32.
00574     for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
00575                    MVT::v2i32}) {
00576       for (MVT VT : MVT::integer_vector_valuetypes()) {
00577         setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
00578         setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
00579         setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
00580       }
00581     }
00582   }
00583 
00584   // ARM and Thumb2 support UMLAL/SMLAL.
00585   if (!Subtarget->isThumb1Only())
00586     setTargetDAGCombine(ISD::ADDC);
00587 
00588   if (Subtarget->isFPOnlySP()) {
00589     // When targeting a floating-point unit with only single-precision
00590     // operations, f64 is legal for the few double-precision instructions which
00591     // are present. However, no double-precision operations other than moves,
00592     // loads and stores are provided by the hardware.
00593     setOperationAction(ISD::FADD,       MVT::f64, Expand);
00594     setOperationAction(ISD::FSUB,       MVT::f64, Expand);
00595     setOperationAction(ISD::FMUL,       MVT::f64, Expand);
00596     setOperationAction(ISD::FMA,        MVT::f64, Expand);
00597     setOperationAction(ISD::FDIV,       MVT::f64, Expand);
00598     setOperationAction(ISD::FREM,       MVT::f64, Expand);
00599     setOperationAction(ISD::FCOPYSIGN,  MVT::f64, Expand);
00600     setOperationAction(ISD::FGETSIGN,   MVT::f64, Expand);
00601     setOperationAction(ISD::FNEG,       MVT::f64, Expand);
00602     setOperationAction(ISD::FABS,       MVT::f64, Expand);
00603     setOperationAction(ISD::FSQRT,      MVT::f64, Expand);
00604     setOperationAction(ISD::FSIN,       MVT::f64, Expand);
00605     setOperationAction(ISD::FCOS,       MVT::f64, Expand);
00606     setOperationAction(ISD::FPOWI,      MVT::f64, Expand);
00607     setOperationAction(ISD::FPOW,       MVT::f64, Expand);
00608     setOperationAction(ISD::FLOG,       MVT::f64, Expand);
00609     setOperationAction(ISD::FLOG2,      MVT::f64, Expand);
00610     setOperationAction(ISD::FLOG10,     MVT::f64, Expand);
00611     setOperationAction(ISD::FEXP,       MVT::f64, Expand);
00612     setOperationAction(ISD::FEXP2,      MVT::f64, Expand);
00613     setOperationAction(ISD::FCEIL,      MVT::f64, Expand);
00614     setOperationAction(ISD::FTRUNC,     MVT::f64, Expand);
00615     setOperationAction(ISD::FRINT,      MVT::f64, Expand);
00616     setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
00617     setOperationAction(ISD::FFLOOR,     MVT::f64, Expand);
00618     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
00619     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
00620     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
00621     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
00622     setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
00623     setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
00624     setOperationAction(ISD::FP_ROUND,   MVT::f32, Custom);
00625     setOperationAction(ISD::FP_EXTEND,  MVT::f64, Custom);
00626   }
00627 
00628   computeRegisterProperties(Subtarget->getRegisterInfo());
00629 
00630   // ARM does not have floating-point extending loads.
00631   for (MVT VT : MVT::fp_valuetypes()) {
00632     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
00633     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
00634   }
00635 
00636   // ... or truncating stores
00637   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
00638   setTruncStoreAction(MVT::f32, MVT::f16, Expand);
00639   setTruncStoreAction(MVT::f64, MVT::f16, Expand);
00640 
00641   // ARM does not have i1 sign extending load.
00642   for (MVT VT : MVT::integer_valuetypes())
00643     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
00644 
00645   // ARM supports all 4 flavors of integer indexed load / store.
00646   if (!Subtarget->isThumb1Only()) {
00647     for (unsigned im = (unsigned)ISD::PRE_INC;
00648          im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
00649       setIndexedLoadAction(im,  MVT::i1,  Legal);
00650       setIndexedLoadAction(im,  MVT::i8,  Legal);
00651       setIndexedLoadAction(im,  MVT::i16, Legal);
00652       setIndexedLoadAction(im,  MVT::i32, Legal);
00653       setIndexedStoreAction(im, MVT::i1,  Legal);
00654       setIndexedStoreAction(im, MVT::i8,  Legal);
00655       setIndexedStoreAction(im, MVT::i16, Legal);
00656       setIndexedStoreAction(im, MVT::i32, Legal);
00657     }
00658   }
00659 
00660   setOperationAction(ISD::SADDO, MVT::i32, Custom);
00661   setOperationAction(ISD::UADDO, MVT::i32, Custom);
00662   setOperationAction(ISD::SSUBO, MVT::i32, Custom);
00663   setOperationAction(ISD::USUBO, MVT::i32, Custom);
00664 
00665   // i64 operation support.
00666   setOperationAction(ISD::MUL,     MVT::i64, Expand);
00667   setOperationAction(ISD::MULHU,   MVT::i32, Expand);
00668   if (Subtarget->isThumb1Only()) {
00669     setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
00670     setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
00671   }
00672   if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
00673       || (Subtarget->isThumb2() && !Subtarget->hasThumb2DSP()))
00674     setOperationAction(ISD::MULHS, MVT::i32, Expand);
00675 
00676   setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
00677   setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
00678   setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
00679   setOperationAction(ISD::SRL,       MVT::i64, Custom);
00680   setOperationAction(ISD::SRA,       MVT::i64, Custom);
00681 
00682   if (!Subtarget->isThumb1Only()) {
00683     // FIXME: We should do this for Thumb1 as well.
00684     setOperationAction(ISD::ADDC,    MVT::i32, Custom);
00685     setOperationAction(ISD::ADDE,    MVT::i32, Custom);
00686     setOperationAction(ISD::SUBC,    MVT::i32, Custom);
00687     setOperationAction(ISD::SUBE,    MVT::i32, Custom);
00688   }
00689 
00690   // ARM does not have ROTL.
00691   setOperationAction(ISD::ROTL,  MVT::i32, Expand);
00692   setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
00693   setOperationAction(ISD::CTPOP, MVT::i32, Expand);
00694   if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
00695     setOperationAction(ISD::CTLZ, MVT::i32, Expand);
00696 
00697   // These just redirect to CTTZ and CTLZ on ARM.
00698   setOperationAction(ISD::CTTZ_ZERO_UNDEF  , MVT::i32  , Expand);
00699   setOperationAction(ISD::CTLZ_ZERO_UNDEF  , MVT::i32  , Expand);
00700 
00701   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
00702 
00703   // Only ARMv6 has BSWAP.
00704   if (!Subtarget->hasV6Ops())
00705     setOperationAction(ISD::BSWAP, MVT::i32, Expand);
00706 
00707   if (!(Subtarget->hasDivide() && Subtarget->isThumb2()) &&
00708       !(Subtarget->hasDivideInARMMode() && !Subtarget->isThumb())) {
00709     // These are expanded into libcalls if the cpu doesn't have HW divider.
00710     setOperationAction(ISD::SDIV,  MVT::i32, Expand);
00711     setOperationAction(ISD::UDIV,  MVT::i32, Expand);
00712   }
00713 
00714   // FIXME: Also set divmod for SREM on EABI
00715   setOperationAction(ISD::SREM,  MVT::i32, Expand);
00716   setOperationAction(ISD::UREM,  MVT::i32, Expand);
00717   // Register based DivRem for AEABI (RTABI 4.2)
00718   if (Subtarget->isTargetAEABI()) {
00719     setLibcallName(RTLIB::SDIVREM_I8,  "__aeabi_idivmod");
00720     setLibcallName(RTLIB::SDIVREM_I16, "__aeabi_idivmod");
00721     setLibcallName(RTLIB::SDIVREM_I32, "__aeabi_idivmod");
00722     setLibcallName(RTLIB::SDIVREM_I64, "__aeabi_ldivmod");
00723     setLibcallName(RTLIB::UDIVREM_I8,  "__aeabi_uidivmod");
00724     setLibcallName(RTLIB::UDIVREM_I16, "__aeabi_uidivmod");
00725     setLibcallName(RTLIB::UDIVREM_I32, "__aeabi_uidivmod");
00726     setLibcallName(RTLIB::UDIVREM_I64, "__aeabi_uldivmod");
00727 
00728     setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::ARM_AAPCS);
00729     setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::ARM_AAPCS);
00730     setLibcallCallingConv(RTLIB::SDIVREM_I32, CallingConv::ARM_AAPCS);
00731     setLibcallCallingConv(RTLIB::SDIVREM_I64, CallingConv::ARM_AAPCS);
00732     setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::ARM_AAPCS);
00733     setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::ARM_AAPCS);
00734     setLibcallCallingConv(RTLIB::UDIVREM_I32, CallingConv::ARM_AAPCS);
00735     setLibcallCallingConv(RTLIB::UDIVREM_I64, CallingConv::ARM_AAPCS);
00736 
00737     setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
00738     setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
00739   } else {
00740     setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
00741     setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
00742   }
00743 
00744   setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
00745   setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
00746   setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
00747   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
00748   setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
00749 
00750   setOperationAction(ISD::TRAP, MVT::Other, Legal);
00751 
00752   // Use the default implementation.
00753   setOperationAction(ISD::VASTART,            MVT::Other, Custom);
00754   setOperationAction(ISD::VAARG,              MVT::Other, Expand);
00755   setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
00756   setOperationAction(ISD::VAEND,              MVT::Other, Expand);
00757   setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
00758   setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
00759 
00760   if (!Subtarget->isTargetMachO()) {
00761     // Non-MachO platforms may return values in these registers via the
00762     // personality function.
00763     setExceptionPointerRegister(ARM::R0);
00764     setExceptionSelectorRegister(ARM::R1);
00765   }
00766 
00767   if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment())
00768     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
00769   else
00770     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
00771 
00772   // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
00773   // the default expansion. If we are targeting a single threaded system,
00774   // then set them all for expand so we can lower them later into their
00775   // non-atomic form.
00776   if (TM.Options.ThreadModel == ThreadModel::Single)
00777     setOperationAction(ISD::ATOMIC_FENCE,   MVT::Other, Expand);
00778   else if (Subtarget->hasAnyDataBarrier() && !Subtarget->isThumb1Only()) {
00779     // ATOMIC_FENCE needs custom lowering; the others should have been expanded
00780     // to ldrex/strex loops already.
00781     setOperationAction(ISD::ATOMIC_FENCE,     MVT::Other, Custom);
00782 
00783     // On v8, we have particularly efficient implementations of atomic fences
00784     // if they can be combined with nearby atomic loads and stores.
00785     if (!Subtarget->hasV8Ops()) {
00786       // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
00787       setInsertFencesForAtomic(true);
00788     }
00789   } else {
00790     // If there's anything we can use as a barrier, go through custom lowering
00791     // for ATOMIC_FENCE.
00792     setOperationAction(ISD::ATOMIC_FENCE,   MVT::Other,
00793                        Subtarget->hasAnyDataBarrier() ? Custom : Expand);
00794 
00795     // Set them all for expansion, which will force libcalls.
00796     setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Expand);
00797     setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Expand);
00798     setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Expand);
00799     setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Expand);
00800     setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Expand);
00801     setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Expand);
00802     setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Expand);
00803     setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
00804     setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
00805     setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
00806     setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
00807     setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
00808     // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
00809     // Unordered/Monotonic case.
00810     setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
00811     setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
00812   }
00813 
00814   setOperationAction(ISD::PREFETCH,         MVT::Other, Custom);
00815 
00816   // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
00817   if (!Subtarget->hasV6Ops()) {
00818     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
00819     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
00820   }
00821   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
00822 
00823   if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
00824       !Subtarget->isThumb1Only()) {
00825     // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
00826     // iff target supports vfp2.
00827     setOperationAction(ISD::BITCAST, MVT::i64, Custom);
00828     setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
00829   }
00830 
00831   // We want to custom lower some of our intrinsics.
00832   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
00833   if (Subtarget->isTargetDarwin()) {
00834     setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
00835     setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
00836     setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
00837   }
00838 
00839   setOperationAction(ISD::SETCC,     MVT::i32, Expand);
00840   setOperationAction(ISD::SETCC,     MVT::f32, Expand);
00841   setOperationAction(ISD::SETCC,     MVT::f64, Expand);
00842   setOperationAction(ISD::SELECT,    MVT::i32, Custom);
00843   setOperationAction(ISD::SELECT,    MVT::f32, Custom);
00844   setOperationAction(ISD::SELECT,    MVT::f64, Custom);
00845   setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
00846   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
00847   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
00848 
00849   setOperationAction(ISD::BRCOND,    MVT::Other, Expand);
00850   setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
00851   setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
00852   setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
00853   setOperationAction(ISD::BR_JT,     MVT::Other, Custom);
00854 
00855   // We don't support sin/cos/fmod/copysign/pow
00856   setOperationAction(ISD::FSIN,      MVT::f64, Expand);
00857   setOperationAction(ISD::FSIN,      MVT::f32, Expand);
00858   setOperationAction(ISD::FCOS,      MVT::f32, Expand);
00859   setOperationAction(ISD::FCOS,      MVT::f64, Expand);
00860   setOperationAction(ISD::FSINCOS,   MVT::f64, Expand);
00861   setOperationAction(ISD::FSINCOS,   MVT::f32, Expand);
00862   setOperationAction(ISD::FREM,      MVT::f64, Expand);
00863   setOperationAction(ISD::FREM,      MVT::f32, Expand);
00864   if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
00865       !Subtarget->isThumb1Only()) {
00866     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
00867     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
00868   }
00869   setOperationAction(ISD::FPOW,      MVT::f64, Expand);
00870   setOperationAction(ISD::FPOW,      MVT::f32, Expand);
00871 
00872   if (!Subtarget->hasVFP4()) {
00873     setOperationAction(ISD::FMA, MVT::f64, Expand);
00874     setOperationAction(ISD::FMA, MVT::f32, Expand);
00875   }
00876 
00877   // Various VFP goodness
00878   if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) {
00879     // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded.
00880     if (!Subtarget->hasFPARMv8() || Subtarget->isFPOnlySP()) {
00881       setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
00882       setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
00883     }
00884 
00885     // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
00886     if (!Subtarget->hasFP16()) {
00887       setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
00888       setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
00889     }
00890   }
00891 
00892   // Combine sin / cos into one node or libcall if possible.
00893   if (Subtarget->hasSinCos()) {
00894     setLibcallName(RTLIB::SINCOS_F32, "sincosf");
00895     setLibcallName(RTLIB::SINCOS_F64, "sincos");
00896     if (Subtarget->getTargetTriple().isiOS()) {
      // For iOS, we don't want the normal expansion of a libcall to
      // sincos. We want to issue a libcall to __sincos_stret instead.
00899       setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
00900       setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
00901     }
00902   }
00903 
00904   // FP-ARMv8 implements a lot of rounding-like FP operations.
00905   if (Subtarget->hasFPARMv8()) {
00906     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
00907     setOperationAction(ISD::FCEIL, MVT::f32, Legal);
00908     setOperationAction(ISD::FROUND, MVT::f32, Legal);
00909     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
00910     setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
00911     setOperationAction(ISD::FRINT, MVT::f32, Legal);
00912     if (!Subtarget->isFPOnlySP()) {
00913       setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
00914       setOperationAction(ISD::FCEIL, MVT::f64, Legal);
00915       setOperationAction(ISD::FROUND, MVT::f64, Legal);
00916       setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
00917       setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
00918       setOperationAction(ISD::FRINT, MVT::f64, Legal);
00919     }
00920   }
00921   // We have target-specific dag combine patterns for the following nodes:
00922   // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
00923   setTargetDAGCombine(ISD::ADD);
00924   setTargetDAGCombine(ISD::SUB);
00925   setTargetDAGCombine(ISD::MUL);
00926   setTargetDAGCombine(ISD::AND);
00927   setTargetDAGCombine(ISD::OR);
00928   setTargetDAGCombine(ISD::XOR);
00929 
00930   if (Subtarget->hasV6Ops())
00931     setTargetDAGCombine(ISD::SRL);
00932 
00933   setStackPointerRegisterToSaveRestore(ARM::SP);
00934 
00935   if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||
00936       !Subtarget->hasVFP2())
00937     setSchedulingPreference(Sched::RegPressure);
00938   else
00939     setSchedulingPreference(Sched::Hybrid);
00940 
00941   //// temporary - rewrite interface to use type
00942   MaxStoresPerMemset = 8;
00943   MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
00944   MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
00945   MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 4 : 2;
00946   MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
00947   MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 4 : 2;
00948 
00949   // On ARM arguments smaller than 4 bytes are extended, so all arguments
00950   // are at least 4 bytes aligned.
00951   setMinStackArgumentAlignment(4);
00952 
00953   // Prefer likely predicted branches to selects on out-of-order cores.
00954   PredictableSelectIsExpensive = Subtarget->isLikeA9();
00955 
00956   setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
00957 }
00958 
/// Returns true when floating-point operations should be lowered to library
/// calls instead of hardware instructions, as configured on the subtarget.
bool ARMTargetLowering::useSoftFloat() const {
  return Subtarget->useSoftFloat();
}
00962 
// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
00973 std::pair<const TargetRegisterClass *, uint8_t>
00974 ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
00975                                            MVT VT) const {
00976   const TargetRegisterClass *RRC = nullptr;
00977   uint8_t Cost = 1;
00978   switch (VT.SimpleTy) {
00979   default:
00980     return TargetLowering::findRepresentativeClass(TRI, VT);
00981   // Use DPR as representative register class for all floating point
00982   // and vector types. Since there are 32 SPR registers and 32 DPR registers so
00983   // the cost is 1 for both f32 and f64.
00984   case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
00985   case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
00986     RRC = &ARM::DPRRegClass;
00987     // When NEON is used for SP, only half of the register file is available
00988     // because operations that define both SP and DP results will be constrained
00989     // to the VFP2 class (D0-D15). We currently model this constraint prior to
00990     // coalescing by double-counting the SP regs. See the FIXME above.
00991     if (Subtarget->useNEONForSinglePrecisionFP())
00992       Cost = 2;
00993     break;
00994   case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
00995   case MVT::v4f32: case MVT::v2f64:
00996     RRC = &ARM::DPRRegClass;
00997     Cost = 2;
00998     break;
00999   case MVT::v4i64:
01000     RRC = &ARM::DPRRegClass;
01001     Cost = 4;
01002     break;
01003   case MVT::v8i64:
01004     RRC = &ARM::DPRRegClass;
01005     Cost = 8;
01006     break;
01007   }
01008   return std::make_pair(RRC, Cost);
01009 }
01010 
01011 const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
01012   switch ((ARMISD::NodeType)Opcode) {
01013   case ARMISD::FIRST_NUMBER:  break;
01014   case ARMISD::Wrapper:       return "ARMISD::Wrapper";
01015   case ARMISD::WrapperPIC:    return "ARMISD::WrapperPIC";
01016   case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
01017   case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL";
01018   case ARMISD::CALL:          return "ARMISD::CALL";
01019   case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
01020   case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
01021   case ARMISD::tCALL:         return "ARMISD::tCALL";
01022   case ARMISD::BRCOND:        return "ARMISD::BRCOND";
01023   case ARMISD::BR_JT:         return "ARMISD::BR_JT";
01024   case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
01025   case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
01026   case ARMISD::INTRET_FLAG:   return "ARMISD::INTRET_FLAG";
01027   case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
01028   case ARMISD::CMP:           return "ARMISD::CMP";
01029   case ARMISD::CMN:           return "ARMISD::CMN";
01030   case ARMISD::CMPZ:          return "ARMISD::CMPZ";
01031   case ARMISD::CMPFP:         return "ARMISD::CMPFP";
01032   case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
01033   case ARMISD::BCC_i64:       return "ARMISD::BCC_i64";
01034   case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
01035 
01036   case ARMISD::CMOV:          return "ARMISD::CMOV";
01037 
01038   case ARMISD::RBIT:          return "ARMISD::RBIT";
01039 
01040   case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
01041   case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
01042   case ARMISD::RRX:           return "ARMISD::RRX";
01043 
01044   case ARMISD::ADDC:          return "ARMISD::ADDC";
01045   case ARMISD::ADDE:          return "ARMISD::ADDE";
01046   case ARMISD::SUBC:          return "ARMISD::SUBC";
01047   case ARMISD::SUBE:          return "ARMISD::SUBE";
01048 
01049   case ARMISD::VMOVRRD:       return "ARMISD::VMOVRRD";
01050   case ARMISD::VMOVDRR:       return "ARMISD::VMOVDRR";
01051 
01052   case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
01053   case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP";
01054 
01055   case ARMISD::TC_RETURN:     return "ARMISD::TC_RETURN";
01056 
01057   case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";
01058 
01059   case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";
01060 
01061   case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";
01062 
01063   case ARMISD::PRELOAD:       return "ARMISD::PRELOAD";
01064 
01065   case ARMISD::WIN__CHKSTK:   return "ARMISD:::WIN__CHKSTK";
01066 
01067   case ARMISD::VCEQ:          return "ARMISD::VCEQ";
01068   case ARMISD::VCEQZ:         return "ARMISD::VCEQZ";
01069   case ARMISD::VCGE:          return "ARMISD::VCGE";
01070   case ARMISD::VCGEZ:         return "ARMISD::VCGEZ";
01071   case ARMISD::VCLEZ:         return "ARMISD::VCLEZ";
01072   case ARMISD::VCGEU:         return "ARMISD::VCGEU";
01073   case ARMISD::VCGT:          return "ARMISD::VCGT";
01074   case ARMISD::VCGTZ:         return "ARMISD::VCGTZ";
01075   case ARMISD::VCLTZ:         return "ARMISD::VCLTZ";
01076   case ARMISD::VCGTU:         return "ARMISD::VCGTU";
01077   case ARMISD::VTST:          return "ARMISD::VTST";
01078 
01079   case ARMISD::VSHL:          return "ARMISD::VSHL";
01080   case ARMISD::VSHRs:         return "ARMISD::VSHRs";
01081   case ARMISD::VSHRu:         return "ARMISD::VSHRu";
01082   case ARMISD::VRSHRs:        return "ARMISD::VRSHRs";
01083   case ARMISD::VRSHRu:        return "ARMISD::VRSHRu";
01084   case ARMISD::VRSHRN:        return "ARMISD::VRSHRN";
01085   case ARMISD::VQSHLs:        return "ARMISD::VQSHLs";
01086   case ARMISD::VQSHLu:        return "ARMISD::VQSHLu";
01087   case ARMISD::VQSHLsu:       return "ARMISD::VQSHLsu";
01088   case ARMISD::VQSHRNs:       return "ARMISD::VQSHRNs";
01089   case ARMISD::VQSHRNu:       return "ARMISD::VQSHRNu";
01090   case ARMISD::VQSHRNsu:      return "ARMISD::VQSHRNsu";
01091   case ARMISD::VQRSHRNs:      return "ARMISD::VQRSHRNs";
01092   case ARMISD::VQRSHRNu:      return "ARMISD::VQRSHRNu";
01093   case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
01094   case ARMISD::VSLI:          return "ARMISD::VSLI";
01095   case ARMISD::VSRI:          return "ARMISD::VSRI";
01096   case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
01097   case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
01098   case ARMISD::VMOVIMM:       return "ARMISD::VMOVIMM";
01099   case ARMISD::VMVNIMM:       return "ARMISD::VMVNIMM";
01100   case ARMISD::VMOVFPIMM:     return "ARMISD::VMOVFPIMM";
01101   case ARMISD::VDUP:          return "ARMISD::VDUP";
01102   case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
01103   case ARMISD::VEXT:          return "ARMISD::VEXT";
01104   case ARMISD::VREV64:        return "ARMISD::VREV64";
01105   case ARMISD::VREV32:        return "ARMISD::VREV32";
01106   case ARMISD::VREV16:        return "ARMISD::VREV16";
01107   case ARMISD::VZIP:          return "ARMISD::VZIP";
01108   case ARMISD::VUZP:          return "ARMISD::VUZP";
01109   case ARMISD::VTRN:          return "ARMISD::VTRN";
01110   case ARMISD::VTBL1:         return "ARMISD::VTBL1";
01111   case ARMISD::VTBL2:         return "ARMISD::VTBL2";
01112   case ARMISD::VMULLs:        return "ARMISD::VMULLs";
01113   case ARMISD::VMULLu:        return "ARMISD::VMULLu";
01114   case ARMISD::UMLAL:         return "ARMISD::UMLAL";
01115   case ARMISD::SMLAL:         return "ARMISD::SMLAL";
01116   case ARMISD::BUILD_VECTOR:  return "ARMISD::BUILD_VECTOR";
01117   case ARMISD::FMAX:          return "ARMISD::FMAX";
01118   case ARMISD::FMIN:          return "ARMISD::FMIN";
01119   case ARMISD::VMAXNM:        return "ARMISD::VMAX";
01120   case ARMISD::VMINNM:        return "ARMISD::VMIN";
01121   case ARMISD::BFI:           return "ARMISD::BFI";
01122   case ARMISD::VORRIMM:       return "ARMISD::VORRIMM";
01123   case ARMISD::VBICIMM:       return "ARMISD::VBICIMM";
01124   case ARMISD::VBSL:          return "ARMISD::VBSL";
01125   case ARMISD::VLD2DUP:       return "ARMISD::VLD2DUP";
01126   case ARMISD::VLD3DUP:       return "ARMISD::VLD3DUP";
01127   case ARMISD::VLD4DUP:       return "ARMISD::VLD4DUP";
01128   case ARMISD::VLD1_UPD:      return "ARMISD::VLD1_UPD";
01129   case ARMISD::VLD2_UPD:      return "ARMISD::VLD2_UPD";
01130   case ARMISD::VLD3_UPD:      return "ARMISD::VLD3_UPD";
01131   case ARMISD::VLD4_UPD:      return "ARMISD::VLD4_UPD";
01132   case ARMISD::VLD2LN_UPD:    return "ARMISD::VLD2LN_UPD";
01133   case ARMISD::VLD3LN_UPD:    return "ARMISD::VLD3LN_UPD";
01134   case ARMISD::VLD4LN_UPD:    return "ARMISD::VLD4LN_UPD";
01135   case ARMISD::VLD2DUP_UPD:   return "ARMISD::VLD2DUP_UPD";
01136   case ARMISD::VLD3DUP_UPD:   return "ARMISD::VLD3DUP_UPD";
01137   case ARMISD::VLD4DUP_UPD:   return "ARMISD::VLD4DUP_UPD";
01138   case ARMISD::VST1_UPD:      return "ARMISD::VST1_UPD";
01139   case ARMISD::VST2_UPD:      return "ARMISD::VST2_UPD";
01140   case ARMISD::VST3_UPD:      return "ARMISD::VST3_UPD";
01141   case ARMISD::VST4_UPD:      return "ARMISD::VST4_UPD";
01142   case ARMISD::VST2LN_UPD:    return "ARMISD::VST2LN_UPD";
01143   case ARMISD::VST3LN_UPD:    return "ARMISD::VST3LN_UPD";
01144   case ARMISD::VST4LN_UPD:    return "ARMISD::VST4LN_UPD";
01145   }
01146   return nullptr;
01147 }
01148 
01149 EVT ARMTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
01150   if (!VT.isVector()) return getPointerTy();
01151   return VT.changeVectorElementTypeToInteger();
01152 }
01153 
01154 /// getRegClassFor - Return the register class that should be used for the
01155 /// specified value type.
01156 const TargetRegisterClass *ARMTargetLowering::getRegClassFor(MVT VT) const {
01157   // Map v4i64 to QQ registers but do not make the type legal. Similarly map
01158   // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
01159   // load / store 4 to 8 consecutive D registers.
01160   if (Subtarget->hasNEON()) {
01161     if (VT == MVT::v4i64)
01162       return &ARM::QQPRRegClass;
01163     if (VT == MVT::v8i64)
01164       return &ARM::QQQQPRRegClass;
01165   }
01166   return TargetLowering::getRegClassFor(VT);
01167 }
01168 
01169 // memcpy, and other memory intrinsics, typically tries to use LDM/STM if the
01170 // source/dest is aligned and the copy size is large enough. We therefore want
01171 // to align such objects passed to memory intrinsics.
01172 bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
01173                                                unsigned &PrefAlign) const {
01174   if (!isa<MemIntrinsic>(CI))
01175     return false;
01176   MinSize = 8;
01177   // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1
01178   // cycle faster than 4-byte aligned LDM.
01179   PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4);
01180   return true;
01181 }
01182 
// Create a fast isel object for this target by delegating to the
// ARM-specific factory.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                  const TargetLibraryInfo *libInfo) const {
  return ARM::createFastISel(funcInfo, libInfo);
}
01189 
01190 Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
01191   unsigned NumVals = N->getNumValues();
01192   if (!NumVals)
01193     return Sched::RegPressure;
01194 
01195   for (unsigned i = 0; i != NumVals; ++i) {
01196     EVT VT = N->getValueType(i);
01197     if (VT == MVT::Glue || VT == MVT::Other)
01198       continue;
01199     if (VT.isFloatingPoint() || VT.isVector())
01200       return Sched::ILP;
01201   }
01202 
01203   if (!N->isMachineOpcode())
01204     return Sched::RegPressure;
01205 
01206   // Load are scheduled for latency even if there instruction itinerary
01207   // is not available.
01208   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
01209   const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
01210 
01211   if (MCID.getNumDefs() == 0)
01212     return Sched::RegPressure;
01213   if (!Itins->isEmpty() &&
01214       Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
01215     return Sched::ILP;
01216 
01217   return Sched::RegPressure;
01218 }
01219 
01220 //===----------------------------------------------------------------------===//
01221 // Lowering Code
01222 //===----------------------------------------------------------------------===//
01223 
01224 /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
01225 static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
01226   switch (CC) {
01227   default: llvm_unreachable("Unknown condition code!");
01228   case ISD::SETNE:  return ARMCC::NE;
01229   case ISD::SETEQ:  return ARMCC::EQ;
01230   case ISD::SETGT:  return ARMCC::GT;
01231   case ISD::SETGE:  return ARMCC::GE;
01232   case ISD::SETLT:  return ARMCC::LT;
01233   case ISD::SETLE:  return ARMCC::LE;
01234   case ISD::SETUGT: return ARMCC::HI;
01235   case ISD::SETUGE: return ARMCC::HS;
01236   case ISD::SETULT: return ARMCC::LO;
01237   case ISD::SETULE: return ARMCC::LS;
01238   }
01239 }
01240 
/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
/// Most FP conditions map to a single ARM condition returned in \p CondCode.
/// The two conditions with no single-ARM-condition equivalent (SETONE and
/// SETUEQ) also set \p CondCode2; callers must then branch on either
/// condition. CondCode2 == ARMCC::AL means "no second condition".
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  // MI/LS/PL/VS/VC exploit how the FP compare sets NZCV (unordered sets V);
  // see the ARM ARM condition-code definitions.
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  // Ordered-and-not-equal needs two checks: "less" (MI) or "greater" (GT).
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  // Unordered-or-equal needs two checks: "equal" (EQ) or "unordered" (VS).
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}
01269 
01270 //===----------------------------------------------------------------------===//
01271 //                      Calling Convention Implementation
01272 //===----------------------------------------------------------------------===//
01273 
01274 #include "ARMGenCallingConv.inc"
01275 
01276 /// getEffectiveCallingConv - Get the effective calling convention, taking into
01277 /// account presence of floating point hardware and calling convention
01278 /// limitations, such as support for variadic functions.
01279 CallingConv::ID
01280 ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
01281                                            bool isVarArg) const {
01282   switch (CC) {
01283   default:
01284     llvm_unreachable("Unsupported calling convention");
01285   case CallingConv::ARM_AAPCS:
01286   case CallingConv::ARM_APCS:
01287   case CallingConv::GHC:
01288     return CC;
01289   case CallingConv::ARM_AAPCS_VFP:
01290     return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
01291   case CallingConv::C:
01292     if (!Subtarget->isAAPCS_ABI())
01293       return CallingConv::ARM_APCS;
01294     else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() &&
01295              getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
01296              !isVarArg)
01297       return CallingConv::ARM_AAPCS_VFP;
01298     else
01299       return CallingConv::ARM_AAPCS;
01300   case CallingConv::Fast:
01301     if (!Subtarget->isAAPCS_ABI()) {
01302       if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
01303         return CallingConv::Fast;
01304       return CallingConv::ARM_APCS;
01305     } else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
01306       return CallingConv::ARM_AAPCS_VFP;
01307     else
01308       return CallingConv::ARM_AAPCS;
01309   }
01310 }
01311 
01312 /// CCAssignFnForNode - Selects the correct CCAssignFn for the given
01313 /// CallingConvention.
01314 CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
01315                                                  bool Return,
01316                                                  bool isVarArg) const {
01317   switch (getEffectiveCallingConv(CC, isVarArg)) {
01318   default:
01319     llvm_unreachable("Unsupported calling convention");
01320   case CallingConv::ARM_APCS:
01321     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
01322   case CallingConv::ARM_AAPCS:
01323     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
01324   case CallingConv::ARM_AAPCS_VFP:
01325     return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
01326   case CallingConv::Fast:
01327     return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
01328   case CallingConv::GHC:
01329     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
01330   }
01331 }
01332 
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
///
/// \p InFlag is the glue value produced by the call node; it threads the
/// CopyFromReg nodes so they stay attached to the call.  When
/// \p isThisReturn is set, the first result is not copied from a register
/// at all: \p ThisVal (the original first argument) is pushed instead.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   SDLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals,
                                   bool isThisReturn, SDValue ThisVal) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext(), Call);
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    // Pass 'this' value directly from the argument to return value, to avoid
    // reg unit interference
    if (i == 0 && isThisReturn) {
      assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
             "unexpected return calling convention register assignment");
      InVals.push_back(ThisVal);
      continue;
    }

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      // The value arrives as a pair of i32 registers, consuming two (or, for
      // a full v2f64, four) consecutive RVLocs entries via the ++i below.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      // Big-endian subtargets carry the halves in the opposite order.
      if (!Subtarget->isLittle())
        std::swap (Lo, Hi);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        // First f64 becomes lane 0; fetch two more i32 halves for lane 1.
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, dl, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        if (!Subtarget->isLittle())
          std::swap (Lo, Hi);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, dl, MVT::i32));
      }
    } else {
      // Plain case: one physreg holds the whole (loc-typed) value.
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    // Undo any promotion performed on the location type.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}
01419 
01420 /// LowerMemOpCallTo - Store the argument to the stack.
01421 SDValue
01422 ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
01423                                     SDValue StackPtr, SDValue Arg,
01424                                     SDLoc dl, SelectionDAG &DAG,
01425                                     const CCValAssign &VA,
01426                                     ISD::ArgFlagsTy Flags) const {
01427   unsigned LocMemOffset = VA.getLocMemOffset();
01428   SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
01429   PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
01430   return DAG.getStore(Chain, dl, Arg, PtrOff,
01431                       MachinePointerInfo::getStack(LocMemOffset),
01432                       false, false, 0);
01433 }
01434 
01435 void ARMTargetLowering::PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
01436                                          SDValue Chain, SDValue &Arg,
01437                                          RegsToPassVector &RegsToPass,
01438                                          CCValAssign &VA, CCValAssign &NextVA,
01439                                          SDValue &StackPtr,
01440                                          SmallVectorImpl<SDValue> &MemOpChains,
01441                                          ISD::ArgFlagsTy Flags) const {
01442 
01443   SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
01444                               DAG.getVTList(MVT::i32, MVT::i32), Arg);
01445   unsigned id = Subtarget->isLittle() ? 0 : 1;
01446   RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));
01447 
01448   if (NextVA.isRegLoc())
01449     RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
01450   else {
01451     assert(NextVA.isMemLoc());
01452     if (!StackPtr.getNode())
01453       StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
01454 
01455     MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
01456                                            dl, DAG, NextVA,
01457                                            Flags));
01458   }
01459 }
01460 
/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
///
/// Detected sibcalls are emitted as ARMISD::TC_RETURN and skip the
/// CALLSEQ_START/CALLSEQ_END bracket entirely (NumBytes is forced to 0).
SDValue
ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG                     = CLI.DAG;
  SDLoc &dl                             = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
  SDValue Chain                         = CLI.Chain;
  SDValue Callee                        = CLI.Callee;
  bool &isTailCall                      = CLI.IsTailCall;
  CallingConv::ID CallConv              = CLI.CallConv;
  bool doesNotRet                       = CLI.DoesNotReturn;
  bool isVarArg                         = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  bool isStructRet    = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool isThisReturn   = false;
  bool isSibCall      = false;

  // Disable tail calls if they're not supported.
  if (!Subtarget->supportsTailCall() || MF.getTarget().Options.DisableTailCalls)
    isTailCall = false;

  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                    isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(),
                                                   Outs, OutVals, Ins, DAG);
    if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall())
      report_fatal_error("failed to perform tail call elimination on a call "
                         "site marked musttail");
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall) {
      ++NumTailCalls;
      isSibCall = true;
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                    *DAG.getContext(), Call);
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /* Return*/ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // For tail calls, memory operands are available in our caller's stack.
  if (isSibCall)
    NumBytes = 0;

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  if (!isSibCall)
    Chain = DAG.getCALLSEQ_START(Chain,
                                 DAG.getIntPtrConstant(NumBytes, dl, true), dl);

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.  In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    // (note the ++i expressions below consume the extra ArgLocs entries).
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, dl, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, dl, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      // A first i32 argument marked 'returned' lets us forward it straight
      // through as the call result (see LowerCallResult's isThisReturn path).
      if (realArgIdx == 0 && Flags.isReturned() && Outs[0].VT == MVT::i32) {
        assert(VA.getLocVT() == MVT::i32 &&
               "unexpected calling convention register assignment");
        assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
               "unexpected use of 'returned'");
        isThisReturn = true;
      }
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (isByVal) {
      assert(VA.isMemLoc());
      unsigned offset = 0;

      // True if this byval aggregate will be split between registers
      // and memory.
      unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
      unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();

      // Register-resident portion: load the aggregate word-by-word and pass
      // each word in its assigned register [RegBegin, RegEnd).
      if (CurByValIdx < ByValArgsCount) {

        unsigned RegBegin, RegEnd;
        CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);

        EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
        unsigned int i, j;
        for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
          SDValue Const = DAG.getConstant(4*i, dl, MVT::i32);
          SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
          SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
                                     MachinePointerInfo(),
                                     false, false, false,
                                     DAG.InferPtrAlignment(AddArg));
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(j, Load));
        }

        // If parameter size outsides register area, "offset" value
        // helps us to calculate stack slot for remained part properly.
        offset = RegEnd - RegBegin;

        CCInfo.nextInRegsParam();
      }

      // Any remainder beyond the register-resident words is copied to the
      // outgoing stack area via a COPY_STRUCT_BYVAL pseudo node.
      if (Flags.getByValSize() > 4*offset) {
        unsigned LocMemOffset = VA.getLocMemOffset();
        SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
        SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr,
                                  StkPtrOff);
        SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl);
        SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset);
        SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl,
                                           MVT::i32);
        SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), dl,
                                            MVT::i32);

        SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
        SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
        MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
                                          Ops));
      }
    } else if (!isSibCall) {
      assert(VA.isMemLoc());

      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  // Tail call byval lowering might overwrite argument registers so in case of
  // tail call optimization the copies to registers are lowered later.
  if (!isTailCall)
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }

  // For tail calls lower the arguments to the 'real' stack slot.
  if (isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.

    // Do not flag preceding copytoreg stuff together with the following stuff.
    InFlag = SDValue();
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }
    InFlag = SDValue();
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  bool isDirect = false;
  bool isARMFunc = false;
  bool isLocalARMFunc = false;
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Long calls: materialize the callee address from a constant-pool load
  // instead of encoding it in the branch.
  if (EnableARMLongCalls) {
    assert((Subtarget->isTargetWindows() ||
            getTargetMachine().getRelocationModel() == Reloc::Static) &&
           "long-calls with non-static relocation model!");
    // Handle a global address or an external symbol. If it's not one of
    // those, the target's already in a register, so we don't need to do
    // anything extra.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
      const GlobalValue *GV = G->getGlobal();
      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
        ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);

      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, false, 0);
    } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
      const char *Sym = S->getSymbol();

      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
        ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
                                      ARMPCLabelIndex, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, false, 0);
    }
  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    isDirect = true;
    bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
    bool isStub = (isExt && Subtarget->isTargetMachO()) &&
                   getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
    // ARM call to a local ARM function is predicable.
    isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking);
    // tBX takes a register source operand.
    if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
      Callee = DAG.getNode(ARMISD::WrapperPIC, dl, getPointerTy(),
                           DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
                                                      0, ARMII::MO_NONLAZY));
      Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
                           MachinePointerInfo::getGOT(), false, false, true, 0);
    } else if (Subtarget->isTargetCOFF()) {
      assert(Subtarget->isTargetWindows() &&
             "Windows is the only supported COFF target");
      unsigned TargetFlags = GV->hasDLLImportStorageClass()
                                 ? ARMII::MO_DLLIMPORT
                                 : ARMII::MO_NO_FLAG;
      Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), /*Offset=*/0,
                                          TargetFlags);
      if (GV->hasDLLImportStorageClass())
        Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                             DAG.getNode(ARMISD::Wrapper, dl, getPointerTy(),
                                         Callee), MachinePointerInfo::getGOT(),
                             false, false, false, 0);
    } else {
      // On ELF targets for PIC code, direct calls should go through the PLT
      unsigned OpFlags = 0;
      if (Subtarget->isTargetELF() &&
          getTargetMachine().getRelocationModel() == Reloc::PIC_)
        OpFlags = ARMII::MO_PLT;
      Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    isDirect = true;
    bool isStub = Subtarget->isTargetMachO() &&
                  getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
    // tBX takes a register source operand.
    const char *Sym = S->getSymbol();
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
        ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
                                      ARMPCLabelIndex, 4);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, false, 0);
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
                           getPointerTy(), Callee, PICLabel);
    } else {
      unsigned OpFlags = 0;
      // On ELF targets for PIC code, direct calls should go through the PLT
      if (Subtarget->isTargetELF() &&
                  getTargetMachine().getRelocationModel() == Reloc::PIC_)
        OpFlags = ARMII::MO_PLT;
      Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags);
    }
  }

  // FIXME: handle tail calls differently.
  // Select the call opcode based on ISA mode, interworking, and whether the
  // callee returns.
  unsigned CallOpc;
  bool HasMinSizeAttr = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
  if (Subtarget->isThumb()) {
    if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
      CallOpc = ARMISD::CALL_NOLINK;
    else
      CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
  } else {
    if (!isDirect && !Subtarget->hasV5TOps())
      CallOpc = ARMISD::CALL_NOLINK;
    else if (doesNotRet && isDirect && Subtarget->hasRAS() &&
               // Emit regular call when code size is the priority
               !HasMinSizeAttr)
      // "mov lr, pc; b _foo" to avoid confusing the RSP
      CallOpc = ARMISD::CALL_NOLINK;
    else
      CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
  }

  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  if (!isTailCall) {
    const uint32_t *Mask;
    const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
    if (isThisReturn) {
      // For 'this' returns, use the R0-preserving mask if applicable
      Mask = ARI->getThisReturnPreservedMask(MF, CallConv);
      if (!Mask) {
        // Set isThisReturn to false if the calling convention is not one that
        // allows 'returned' to be modeled in this way, so LowerCallResult does
        // not try to pass 'this' straight through
        isThisReturn = false;
        Mask = ARI->getCallPreservedMask(MF, CallConv);
      }
    } else
      Mask = ARI->getCallPreservedMask(MF, CallConv);

    assert(Mask && "Missing call preserved mask for calling convention");
    Ops.push_back(DAG.getRegisterMask(Mask));
  }

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (isTailCall) {
    MF.getFrameInfo()->setHasTailCall();
    return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
  }

  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, isThisReturn,
                         isThisReturn ? OutVals[0] : SDValue());
}
01874 
/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack.  Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to insure
/// this.
///
/// On return, \p Size has been reduced by the number of bytes that were
/// assigned to registers (clamped at zero), and the register range for the
/// byval is recorded in \p State via addInRegsParamInfo.
void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
                                    unsigned Align) const {
  assert((State->getCallOrPrologue() == Prologue ||
          State->getCallOrPrologue() == Call) &&
         "unhandled ParmContext");

  // Byval (as with any stack) slots are always at least 4 byte aligned.
  Align = std::max(Align, 4U);

  unsigned Reg = State->AllocateReg(GPRArgRegs);
  if (!Reg)
    return;

  // Burn registers until the first byval register satisfies the alignment,
  // measured in 4-byte register units.
  unsigned AlignInRegs = Align / 4;
  unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
  for (unsigned i = 0; i < Waste; ++i)
    Reg = State->AllocateReg(GPRArgRegs);

  if (!Reg)
    return;

  // Bytes still available in argument registers, i.e. the range [Reg, R4).
  // NOTE(review): this arithmetic relies on the GPR register enum values
  // being consecutive from R0 through R4 — confirm against the .td defs.
  unsigned Excess = 4 * (ARM::R4 - Reg);

  // Special case when NSAA != SP and parameter size greater than size of
  // all remained GPR regs. In that case we can't split parameter, we must
  // send it to stack. We also must set NCRN to R4, so waste all
  // remained registers.
  const unsigned NSAAOffset = State->getNextStackOffset();
  if (NSAAOffset != 0 && Size > Excess) {
    while (State->AllocateReg(GPRArgRegs))
      ;
    return;
  }

  // First register for byval parameter is the first register that wasn't
  // allocated before this method call, so it would be "reg".
  // If parameter is small enough to be saved in range [reg, r4), then
  // the end (first after last) register would be reg + param-size-in-regs,
  // else parameter would be splitted between registers and stack,
  // end register would be r4 in this case.
  unsigned ByValRegBegin = Reg;
  unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
  State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
  // Note, first register is allocated in the beginning of function already,
  // allocate remained amount of registers we need.
  for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)
    State->AllocateReg(GPRArgRegs);
  // A byval parameter that is split between registers and memory needs its
  // size truncated here.
  // In the case where the entire structure fits in registers, we set the
  // size in memory to zero.
  Size = std::max<int>(Size - Excess, 0);
}
01932 
01933 /// MatchingStackOffset - Return true if the given stack call argument is
01934 /// already available in the same position (relatively) of the caller's
01935 /// incoming argument stack.
01936 static
01937 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
01938                          MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
01939                          const TargetInstrInfo *TII) {
01940   unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
01941   int FI = INT_MAX;
01942   if (Arg.getOpcode() == ISD::CopyFromReg) {
01943     unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
01944     if (!TargetRegisterInfo::isVirtualRegister(VR))
01945       return false;
01946     MachineInstr *Def = MRI->getVRegDef(VR);
01947     if (!Def)
01948       return false;
01949     if (!Flags.isByVal()) {
01950       if (!TII->isLoadFromStackSlot(Def, FI))
01951         return false;
01952     } else {
01953       return false;
01954     }
01955   } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
01956     if (Flags.isByVal())
01957       // ByVal argument is passed in as a pointer but it's now being
01958       // dereferenced. e.g.
01959       // define @foo(%struct.X* %A) {
01960       //   tail call @bar(%struct.X* byval %A)
01961       // }
01962       return false;
01963     SDValue Ptr = Ld->getBasePtr();
01964     FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
01965     if (!FINode)
01966       return false;
01967     FI = FINode->getIndex();
01968   } else
01969     return false;
01970 
01971   assert(FI != INT_MAX);
01972   if (!MFI->isFixedObjectIndex(FI))
01973     return false;
01974   return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
01975 }
01976 
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
///
/// Rejects, in order: vararg calls passing arguments, interrupt handlers,
/// sret callers/callees, Thumb1, weak external callees on ELF/Mach-O,
/// incompatible return-value placement across calling conventions, callers
/// with split register/stack arguments, and argument mismatches against the
/// caller's fixed stack objects.
bool
ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     CallingConv::ID CalleeCC,
                                                     bool isVarArg,
                                                     bool isCalleeStructRet,
                                                     bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                                     SelectionDAG& DAG) const {
  const Function *CallerF = DAG.getMachineFunction().getFunction();
  CallingConv::ID CallerCC = CallerF->getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes. This is what gcc calls sibcall.

  // Do not sibcall optimize vararg calls unless the call site is not passing
  // any arguments.
  if (isVarArg && !Outs.empty())
    return false;

  // Exception-handling functions need a special set of instructions to indicate
  // a return to the hardware. Tail-calling another function would probably
  // break this.
  if (CallerF->hasFnAttribute("interrupt"))
    return false;

  // Also avoid sibcall optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // FIXME: Completely disable sibcall for Thumb1 since ThumbRegisterInfo::
  // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as
  // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation
  // support in the assembler and linker to be used. This would need to be
  // fixed to fully support tail calls in Thumb1.
  //
  // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take
  // LR.  This means if we need to reload LR, it takes an extra instructions,
  // which outweighs the value of the tail call; but here we don't know yet
  // whether LR is going to be used.  Probably the right approach is to
  // generate the tail call here and turn it back into CALL/RET in
  // emitEpilogue if LR is used.

  // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
  // but we need to make sure there are enough registers; the only valid
  // registers are the 4 used for parameters.  We don't currently do this
  // case.
  if (Subtarget->isThumb1Only())
    return false;

  // Externally-defined functions with weak linkage should not be
  // tail-called on ARM when the OS does not support dynamic
  // pre-emption of symbols, as the AAELF spec requires normal calls
  // to undefined weak functions to be replaced with a NOP or jump to the
  // next instruction. The behaviour of branch instructions in this
  // situation (as used for tail calls) is implementation-defined, so we
  // cannot rely on the linker replacing the tail call with a return.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    const Triple TT(getTargetMachine().getTargetTriple());
    if (GV->hasExternalWeakLinkage() &&
        (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
      return false;
  }

  // If the calling conventions do not match, then we'd better make sure the
  // results are returned in the same way as what the caller expects.
  // Run the return-value analysis under both conventions and require the
  // resulting locations to agree pairwise.
  if (!CCMatch) {
    SmallVector<CCValAssign, 16> RVLocs1;
    ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
                       *DAG.getContext(), Call);
    CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));

    SmallVector<CCValAssign, 16> RVLocs2;
    ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
                       *DAG.getContext(), Call);
    CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));

    if (RVLocs1.size() != RVLocs2.size())
      return false;
    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
        return false;
      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
        return false;
      if (RVLocs1[i].isRegLoc()) {
        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
          return false;
      } else {
        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
          return false;
      }
    }
  }

  // If Caller's vararg or byval argument has been split between registers and
  // stack, do not perform tail call, since part of the argument is in caller's
  // local frame.
  const ARMFunctionInfo *AFI_Caller = DAG.getMachineFunction().
                                      getInfo<ARMFunctionInfo>();
  if (AFI_Caller->getArgRegsSaveSize())
    return false;

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed. For now, do not do this if any
    // argument is passed on the stack.
    SmallVector<CCValAssign, 16> ArgLocs;
    ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
                      *DAG.getContext(), Call);
    CCInfo.AnalyzeCallOperands(Outs,
                               CCAssignFnForNode(CalleeCC, false, isVarArg));
    if (CCInfo.getNextStackOffset()) {
      MachineFunction &MF = DAG.getMachineFunction();

      // Check if the arguments are already laid out in the right way as
      // the caller's fixed stack objects.
      MachineFrameInfo *MFI = MF.getFrameInfo();
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const TargetInstrInfo *TII = Subtarget->getInstrInfo();
      for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
           i != e;
           ++i, ++realArgIdx) {
        CCValAssign &VA = ArgLocs[i];
        EVT RegVT = VA.getLocVT();
        SDValue Arg = OutVals[realArgIdx];
        ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
        if (VA.getLocInfo() == CCValAssign::Indirect)
          return false;
        if (VA.needsCustom()) {
          // f64 and vector types are split into multiple registers or
          // register/stack-slot combinations.  The types will not match
          // the registers; give up on memory f64 refs until we figure
          // out what to do about this.
          // Note: each ++i below consumes an extra location produced by the
          // split; one i advance maps to one realArgIdx here on purpose.
          if (!VA.isRegLoc())
            return false;
          if (!ArgLocs[++i].isRegLoc())
            return false;
          if (RegVT == MVT::v2f64) {
            if (!ArgLocs[++i].isRegLoc())
              return false;
            if (!ArgLocs[++i].isRegLoc())
              return false;
          }
        } else if (!VA.isRegLoc()) {
          // Stack-passed argument: it must already sit in the caller's
          // matching incoming-argument slot.
          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
                                   MFI, MRI, TII))
            return false;
        }
      }
    }
  }

  return true;
}
02139 
02140 bool
02141 ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
02142                                   MachineFunction &MF, bool isVarArg,
02143                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
02144                                   LLVMContext &Context) const {
02145   SmallVector<CCValAssign, 16> RVLocs;
02146   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
02147   return CCInfo.CheckReturn(Outs, CCAssignFnForNode(CallConv, /*Return=*/true,
02148                                                     isVarArg));
02149 }
02150 
02151 static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
02152                                     SDLoc DL, SelectionDAG &DAG) {
02153   const MachineFunction &MF = DAG.getMachineFunction();
02154   const Function *F = MF.getFunction();
02155 
02156   StringRef IntKind = F->getFnAttribute("interrupt").getValueAsString();
02157 
02158   // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
02159   // version of the "preferred return address". These offsets affect the return
02160   // instruction if this is a return from PL1 without hypervisor extensions.
02161   //    IRQ/FIQ: +4     "subs pc, lr, #4"
02162   //    SWI:     0      "subs pc, lr, #0"
02163   //    ABORT:   +4     "subs pc, lr, #4"
02164   //    UNDEF:   +4/+2  "subs pc, lr, #0"
02165   // UNDEF varies depending on where the exception came from ARM or Thumb
02166   // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.
02167 
02168   int64_t LROffset;
02169   if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
02170       IntKind == "ABORT")
02171     LROffset = 4;
02172   else if (IntKind == "SWI" || IntKind == "UNDEF")
02173     LROffset = 0;
02174   else
02175     report_fatal_error("Unsupported interrupt attribute. If present, value "
02176                        "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
02177 
02178   RetOps.insert(RetOps.begin() + 1,
02179                 DAG.getConstant(LROffset, DL, MVT::i32, false));
02180 
02181   return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
02182 }
02183 
/// LowerReturn - Lower an IR-level return into ARMISD::RET_FLAG (or the
/// interrupt-return variant), copying each return value into the register
/// assigned to it by the calling convention. f64 values are legalized into
/// pairs of i32 GPR copies via VMOVRRD; v2f64 into four.
SDValue
ARMTargetLowering::LowerReturn(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               SDLoc dl, SelectionDAG &DAG) const {

  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext(), Call);

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true,
                                               isVarArg));

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
  // Endianness decides which half of a GPR pair receives the low word.
  bool isLittleEndian = Subtarget->isLittle();

  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  AFI->setReturnRegsCount(RVLocs.size());

  // Copy the result values into the output registers.
  // Note: i indexes RVLocs (which may contain several locations per value
  // for split types), while realRVLocIdx indexes OutVals.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        // Extract the first half and return it in two registers.
        SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                   DAG.getConstant(0, dl, MVT::i32));
        SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
                                       DAG.getVTList(MVT::i32, MVT::i32), Half);

        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                                 HalfGPRs.getValue(isLittleEndian ? 0 : 1),
                                 Flag);
        Flag = Chain.getValue(1);
        RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
        VA = RVLocs[++i]; // skip ahead to next loc
        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                                 HalfGPRs.getValue(isLittleEndian ? 1 : 0),
                                 Flag);
        Flag = Chain.getValue(1);
        RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
        VA = RVLocs[++i]; // skip ahead to next loc

        // Extract the 2nd half and fall through to handle it as an f64 value.
        Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                          DAG.getConstant(1, dl, MVT::i32));
      }
      // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
      // available.
      SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                                  DAG.getVTList(MVT::i32, MVT::i32), Arg);
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                               fmrrd.getValue(isLittleEndian ? 0 : 1),
                               Flag);
      Flag = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                               fmrrd.getValue(isLittleEndian ? 1 : 0),
                               Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);

    // Guarantee that all emitted copies are
    // stuck together, avoiding something bad.
    // (The glue value threads every CopyToReg so the scheduler cannot
    // separate them from the return.)
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Flag.getNode())
    RetOps.push_back(Flag);

  // CPUs which aren't M-class use a special sequence to return from
  // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
  // though we use "subs pc, lr, #N").
  //
  // M-class CPUs actually use a normal return sequence with a special
  // (hardware-provided) value in LR, so the normal code path works.
  if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt") &&
      !Subtarget->isMClass()) {
    if (Subtarget->isThumb1Only())
      report_fatal_error("interrupt attribute is not supported in Thumb1");
    return LowerInterruptReturn(RetOps, dl, DAG);
  }

  return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps);
}
02295 
/// isUsedByReturnOnly - Return true if the node N (a call result) is only
/// consumed by a return, so the call may be turned into a tail call.
/// Recognizes the three copy shapes this backend emits before a return:
/// a direct CopyToReg, a VMOVRRD feeding two CopyToRegs (f64 in a GPR
/// pair), and a BITCAST feeding one CopyToReg (f32 in a GPR). On success,
/// Chain is updated to the chain feeding the copies.
bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  // The node must produce exactly one value with exactly one use.
  if (N->getNumValues() != 1)
    return false;
  if (!N->hasNUsesOfValue(1, 0))
    return false;

  SDValue TCChain = Chain;
  SDNode *Copy = *N->use_begin();
  if (Copy->getOpcode() == ISD::CopyToReg) {
    // If the copy has a glue operand, we conservatively assume it isn't safe to
    // perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
    SDNode *VMov = Copy;
    // f64 returned in a pair of GPRs.
    // First pass: every user of the VMOVRRD must be a CopyToReg, and there
    // may be at most two of them.
    SmallPtrSet<SDNode*, 2> Copies;
    for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
         UI != UE; ++UI) {
      if (UI->getOpcode() != ISD::CopyToReg)
        return false;
      Copies.insert(*UI);
    }
    if (Copies.size() > 2)
      return false;

    // Second pass: the two copies are chained; the one whose incoming chain
    // is NOT the other copy is the first of the pair, and its chain is the
    // tail-call chain.
    for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
         UI != UE; ++UI) {
      SDValue UseChain = UI->getOperand(0);
      if (Copies.count(UseChain.getNode()))
        // Second CopyToReg
        Copy = *UI;
      else {
        // We are at the top of this chain.
        // If the copy has a glue operand, we conservatively assume it
        // isn't safe to perform a tail call.
        if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue)
          return false;
        // First CopyToReg
        TCChain = UseChain;
      }
    }
  } else if (Copy->getOpcode() == ISD::BITCAST) {
    // f32 returned in a single GPR.
    if (!Copy->hasOneUse())
      return false;
    Copy = *Copy->use_begin();
    if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
      return false;
    // If the copy has a glue operand, we conservatively assume it isn't safe to
    // perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else {
    return false;
  }

  // Every user of the final copy must be a (possibly interrupt) return.
  bool HasRet = false;
  for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
       UI != UE; ++UI) {
    if (UI->getOpcode() != ARMISD::RET_FLAG &&
        UI->getOpcode() != ARMISD::INTRET_FLAG)
      return false;
    HasRet = true;
  }

  if (!HasRet)
    return false;

  Chain = TCChain;
  return true;
}
02370 
02371 bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
02372   if (!Subtarget->supportsTailCall())
02373     return false;
02374 
02375   if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
02376     return false;
02377 
02378   return !Subtarget->isThumb1Only();
02379 }
02380 
02381 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
02382 // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
02383 // one of the above mentioned nodes. It has to be wrapped because otherwise
02384 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
02385 // be used to form addressing mode. These wrapped nodes will be selected
02386 // into MOVi.
02387 static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
02388   EVT PtrVT = Op.getValueType();
02389   // FIXME there is no actual debug info here
02390   SDLoc dl(Op);
02391   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
02392   SDValue Res;
02393   if (CP->isMachineConstantPoolEntry())
02394     Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
02395                                     CP->getAlignment());
02396   else
02397     Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
02398                                     CP->getAlignment());
02399   return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
02400 }
02401 
/// getJumpTableEncoding - ARM uses the inline jump-table encoding
/// (MachineJumpTableInfo::EK_Inline); entries are emitted in-line rather
/// than in a separate jump-table section.
unsigned ARMTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}
02405 
02406 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
02407                                              SelectionDAG &DAG) const {
02408   MachineFunction &MF = DAG.getMachineFunction();
02409   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02410   unsigned ARMPCLabelIndex = 0;
02411   SDLoc DL(Op);
02412   EVT PtrVT = getPointerTy();
02413   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
02414   Reloc::Model RelocM = getTargetMachine().getRelocationModel();
02415   SDValue CPAddr;
02416   if (RelocM == Reloc::Static) {
02417     CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
02418   } else {
02419     unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
02420     ARMPCLabelIndex = AFI->createPICLabelUId();
02421     ARMConstantPoolValue *CPV =
02422       ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
02423                                       ARMCP::CPBlockAddress, PCAdj);
02424     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02425   }
02426   CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
02427   SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr,
02428                                MachinePointerInfo::getConstantPool(),
02429                                false, false, false, 0);
02430   if (RelocM == Reloc::Static)
02431     return Result;
02432   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32);
02433   return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
02434 }
02435 
02436 // Lower ISD::GlobalTLSAddress using the "general dynamic" model
02437 SDValue
02438 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
02439                                                  SelectionDAG &DAG) const {
02440   SDLoc dl(GA);
02441   EVT PtrVT = getPointerTy();
02442   unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
02443   MachineFunction &MF = DAG.getMachineFunction();
02444   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02445   unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
02446   ARMConstantPoolValue *CPV =
02447     ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
02448                                     ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
02449   SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02450   Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
02451   Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument,
02452                          MachinePointerInfo::getConstantPool(),
02453                          false, false, false, 0);
02454   SDValue Chain = Argument.getValue(1);
02455 
02456   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
02457   Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
02458 
02459   // call __tls_get_addr.
02460   ArgListTy Args;
02461   ArgListEntry Entry;
02462   Entry.Node = Argument;
02463   Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
02464   Args.push_back(Entry);
02465 
02466   // FIXME: is there useful debug info available here?
02467   TargetLowering::CallLoweringInfo CLI(DAG);
02468   CLI.setDebugLoc(dl).setChain(Chain)
02469     .setCallee(CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
02470                DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args),
02471                0);
02472 
02473   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
02474   return CallResult.first;
02475 }
02476 
// Lower ISD::GlobalTLSAddress using the "initial exec" or
// "local exec" model.
//
// In both models the result is thread-pointer + offset; they differ in how
// the offset is obtained: initial-exec loads a GOT-relative TP offset
// (GOTTPOFF, with a PIC fixup and an extra dereference), local-exec loads
// the TP offset (TPOFF) directly from the constant pool.
SDValue
ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG,
                                        TLSModel::Model model) const {
  const GlobalValue *GV = GA->getGlobal();
  SDLoc dl(GA);
  SDValue Offset;
  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy();
  // Get the Thread Pointer
  SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);

  if (model == TLSModel::InitialExec) {
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
    // Initial exec model.
    unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
                                      ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
                                      true);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
                         MachinePointerInfo::getConstantPool(),
                         false, false, false, 0);
    Chain = Offset.getValue(1);

    // Apply the PIC fixup to the GOT slot address.
    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
    Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);

    // Second load: dereference the GOT slot to get the actual TP offset.
    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
                         MachinePointerInfo::getConstantPool(),
                         false, false, false, 0);
  } else {
    // local exec model
    assert(model == TLSModel::LocalExec);
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
                         MachinePointerInfo::getConstantPool(),
                         false, false, false, 0);
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}
02530 
02531 SDValue
02532 ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
02533   // TODO: implement the "local dynamic" model
02534   assert(Subtarget->isTargetELF() &&
02535          "TLS not implemented for non-ELF targets");
02536   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
02537 
02538   TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());
02539 
02540   switch (model) {
02541     case TLSModel::GeneralDynamic:
02542     case TLSModel::LocalDynamic:
02543       return LowerToTLSGeneralDynamicModel(GA, DAG);
02544     case TLSModel::InitialExec:
02545     case TLSModel::LocalExec:
02546       return LowerToTLSExecModels(GA, DAG, model);
02547   }
02548   llvm_unreachable("bogus TLS model");
02549 }
02550 
02551 SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
02552                                                  SelectionDAG &DAG) const {
02553   EVT PtrVT = getPointerTy();
02554   SDLoc dl(Op);
02555   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
02556   if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
02557     bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
02558     ARMConstantPoolValue *CPV =
02559       ARMConstantPoolConstant::Create(GV,
02560                                       UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);
02561     SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02562     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
02563     SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
02564                                  CPAddr,
02565                                  MachinePointerInfo::getConstantPool(),
02566                                  false, false, false, 0);
02567     SDValue Chain = Result.getValue(1);
02568     SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
02569     Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT);
02570     if (!UseGOTOFF)
02571       Result = DAG.getLoad(PtrVT, dl, Chain, Result,
02572                            MachinePointerInfo::getGOT(),
02573                            false, false, false, 0);
02574     return Result;
02575   }
02576 
02577   // If we have T2 ops, we can materialize the address directly via movt/movw
02578   // pair. This is always cheaper.
02579   if (Subtarget->useMovt(DAG.getMachineFunction())) {
02580     ++NumMovwMovt;
02581     // FIXME: Once remat is capable of dealing with instructions with register
02582     // operands, expand this into two nodes.
02583     return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
02584                        DAG.getTargetGlobalAddress(GV, dl, PtrVT));
02585   } else {
02586     SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
02587     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
02588     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
02589                        MachinePointerInfo::getConstantPool(),
02590                        false, false, false, 0);
02591   }
02592 }
02593 
02594 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
02595                                                     SelectionDAG &DAG) const {
02596   EVT PtrVT = getPointerTy();
02597   SDLoc dl(Op);
02598   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
02599   Reloc::Model RelocM = getTargetMachine().getRelocationModel();
02600 
02601   if (Subtarget->useMovt(DAG.getMachineFunction()))
02602     ++NumMovwMovt;
02603 
02604   // FIXME: Once remat is capable of dealing with instructions with register
02605   // operands, expand this into multiple nodes
02606   unsigned Wrapper =
02607       RelocM == Reloc::PIC_ ? ARMISD::WrapperPIC : ARMISD::Wrapper;
02608 
02609   SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
02610   SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);
02611 
02612   if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
02613     Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
02614                          MachinePointerInfo::getGOT(), false, false, false, 0);
02615   return Result;
02616 }
02617 
02618 SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
02619                                                      SelectionDAG &DAG) const {
02620   assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
02621   assert(Subtarget->useMovt(DAG.getMachineFunction()) &&
02622          "Windows on ARM expects to use movw/movt");
02623 
02624   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
02625   const ARMII::TOF TargetFlags =
02626     (GV->hasDLLImportStorageClass() ? ARMII::MO_DLLIMPORT : ARMII::MO_NO_FLAG);
02627   EVT PtrVT = getPointerTy();
02628   SDValue Result;
02629   SDLoc DL(Op);
02630 
02631   ++NumMovwMovt;
02632 
02633   // FIXME: Once remat is capable of dealing with instructions with register
02634   // operands, expand this into two nodes.
02635   Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
02636                        DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*Offset=*/0,
02637                                                   TargetFlags));
02638   if (GV->hasDLLImportStorageClass())
02639     Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
02640                          MachinePointerInfo::getGOT(), false, false, false, 0);
02641   return Result;
02642 }
02643 
02644 SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
02645                                                     SelectionDAG &DAG) const {
02646   assert(Subtarget->isTargetELF() &&
02647          "GLOBAL OFFSET TABLE not implemented for non-ELF targets");
02648   MachineFunction &MF = DAG.getMachineFunction();
02649   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02650   unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
02651   EVT PtrVT = getPointerTy();
02652   SDLoc dl(Op);
02653   unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
02654   ARMConstantPoolValue *CPV =
02655     ARMConstantPoolSymbol::Create(*DAG.getContext(), "_GLOBAL_OFFSET_TABLE_",
02656                                   ARMPCLabelIndex, PCAdj);
02657   SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02658   CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
02659   SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
02660                                MachinePointerInfo::getConstantPool(),
02661                                false, false, false, 0);
02662   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
02663   return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
02664 }
02665 
02666 SDValue
02667 ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
02668   SDLoc dl(Op);
02669   SDValue Val = DAG.getConstant(0, dl, MVT::i32);
02670   return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
02671                      DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
02672                      Op.getOperand(1), Val);
02673 }
02674 
02675 SDValue
02676 ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
02677   SDLoc dl(Op);
02678   return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
02679                      Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
02680 }
02681 
SDValue
ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                          const ARMSubtarget *Subtarget) const {
  // Lower the chain-free intrinsics that need ARM-specific DAG nodes; any
  // intrinsic not listed here is left to generic selection.
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::arm_rbit: {
    // Bit-reverse maps directly onto the ARMISD::RBIT node.
    assert(Op.getOperand(1).getValueType() == MVT::i32 &&
           "RBIT intrinsic must have i32 type!");
    return DAG.getNode(ARMISD::RBIT, dl, MVT::i32, Op.getOperand(1));
  }
  case Intrinsic::arm_thread_pointer: {
    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
    return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
  }
  case Intrinsic::eh_sjlj_lsda: {
    // Materialize the address of this function's LSDA through a
    // constant-pool entry; under PIC it needs a PC-relative fixup.
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
    EVT PtrVT = getPointerTy();
    Reloc::Model RelocM = getTargetMachine().getRelocationModel();
    SDValue CPAddr;
    // PC bias (4 in Thumb, 8 in ARM) is only applied for PIC.
    unsigned PCAdj = (RelocM != Reloc::PIC_)
      ? 0 : (Subtarget->isThumb() ? 4 : 8);
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex,
                                      ARMCP::CPLSDA, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    SDValue Result =
      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
                  MachinePointerInfo::getConstantPool(),
                  false, false, false, 0);

    if (RelocM == Reloc::PIC_) {
      // Add the PC to the PC-relative value loaded from the pool.
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
      Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
    }
    return Result;
  }
  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu: {
    // Long multiplies become target nodes so they can be pattern-matched
    // together with extends.
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
      ? ARMISD::VMULLs : ARMISD::VMULLu;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  }
}
02732 
02733 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
02734                                  const ARMSubtarget *Subtarget) {
02735   // FIXME: handle "fence singlethread" more efficiently.
02736   SDLoc dl(Op);
02737   if (!Subtarget->hasDataBarrier()) {
02738     // Some ARMv6 cpus can support data barriers with an mcr instruction.
02739     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
02740     // here.
02741     assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
02742            "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
02743     return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
02744                        DAG.getConstant(0, dl, MVT::i32));
02745   }
02746 
02747   ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
02748   AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
02749   ARM_MB::MemBOpt Domain = ARM_MB::ISH;
02750   if (Subtarget->isMClass()) {
02751     // Only a full system barrier exists in the M-class architectures.
02752     Domain = ARM_MB::SY;
02753   } else if (Subtarget->isSwift() && Ord == Release) {
02754     // Swift happens to implement ISHST barriers in a way that's compatible with
02755     // Release semantics but weaker than ISH so we'd be fools not to use
02756     // it. Beware: other processors probably don't!
02757     Domain = ARM_MB::ISHST;
02758   }
02759 
02760   return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
02761                      DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32),
02762                      DAG.getConstant(Domain, dl, MVT::i32));
02763 }
02764 
02765 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
02766                              const ARMSubtarget *Subtarget) {
02767   // ARM pre v5TE and Thumb1 does not have preload instructions.
02768   if (!(Subtarget->isThumb2() ||
02769         (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
02770     // Just preserve the chain.
02771     return Op.getOperand(0);
02772 
02773   SDLoc dl(Op);
02774   unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
02775   if (!isRead &&
02776       (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
02777     // ARMv7 with MP extension has PLDW.
02778     return Op.getOperand(0);
02779 
02780   unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
02781   if (Subtarget->isThumb()) {
02782     // Invert the bits.
02783     isRead = ~isRead & 1;
02784     isData = ~isData & 1;
02785   }
02786 
02787   return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
02788                      Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32),
02789                      DAG.getConstant(isData, dl, MVT::i32));
02790 }
02791 
02792 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
02793   MachineFunction &MF = DAG.getMachineFunction();
02794   ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
02795 
02796   // vastart just stores the address of the VarArgsFrameIndex slot into the
02797   // memory location argument.
02798   SDLoc dl(Op);
02799   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02800   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
02801   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
02802   return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
02803                       MachinePointerInfo(SV), false, false, 0);
02804 }
02805 
/// GetF64FormalArgument - Reassemble one f64 formal argument from the two
/// 32-bit locations (VA, NextVA) assigned by the calling convention: the
/// first half is in a register, the second half may be in a register or
/// spilled to the stack. The halves are combined with a VMOVDRR node.
SDValue
ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                        SDValue &Root, SelectionDAG &DAG,
                                        SDLoc dl) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Thumb1-only functions are restricted to the tGPR register class.
  const TargetRegisterClass *RC;
  if (AFI->isThumb1OnlyFunction())
    RC = &ARM::tGPRRegClass;
  else
    RC = &ARM::GPRRegClass;

  // Transform the arguments stored in physical registers into virtual ones.
  unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
  SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);

  SDValue ArgValue2;
  if (NextVA.isMemLoc()) {
    MachineFrameInfo *MFI = MF.getFrameInfo();
    int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true);

    // Create load node to retrieve arguments from the stack.
    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
    ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
                            MachinePointerInfo::getFixedStack(FI),
                            false, false, false, 0);
  } else {
    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
    ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
  }
  // On big-endian targets the two halves arrive in the opposite order.
  if (!Subtarget->isLittle())
    std::swap (ArgValue, ArgValue2);
  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
}
02841 
// The remaining GPRs hold either the beginning of variable-argument
// data, or the beginning of an aggregate passed by value (usually
// byval).  Either way, we allocate stack slots adjacent to the data
// provided by our caller, and store the unallocated registers there.
// If this is a variadic function, the va_list pointer will begin with
// these values; otherwise, this reassembles a (byval) structure that
// was split between registers and memory.
// Return: The frame index registers were stored into.
int
ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
                                  SDLoc dl, SDValue &Chain,
                                  const Value *OrigArg,
                                  unsigned InRegsParamRecordIdx,
                                  int ArgOffset,
                                  unsigned ArgSize) const {
  // Currently, two use-cases possible:
  // Case #1. Non-var-args function, and we meet first byval parameter.
  //          Setup first unallocated register as first byval register;
  //          eat all remained registers
  //          (these two actions are performed by HandleByVal method).
  //          Then, here, we initialize stack frame with
  //          "store-reg" instructions.
  // Case #2. Var-args function, that doesn't contain byval parameters.
  //          The same: eat all remained unallocated registers,
  //          initialize stack frame.

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  // Determine the half-open register range [RBegin, REnd) to be stored.
  unsigned RBegin, REnd;
  if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
    // Case #1: use the range recorded for this byval parameter.
    CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
  } else {
    // Case #2: everything from the first unallocated GPR up to (not
    // including) R4; an empty range if all four arg regs were allocated.
    unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
    RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx];
    REnd = ARM::R4;
  }

  // If any registers are being saved, they go immediately below the
  // incoming stack arguments: 4 bytes per register up to R4.
  if (REnd != RBegin)
    ArgOffset = -4 * (ARM::R4 - RBegin);

  int FrameIndex = MFI->CreateFixedObject(ArgSize, ArgOffset, false);
  SDValue FIN = DAG.getFrameIndex(FrameIndex, getPointerTy());

  SmallVector<SDValue, 4> MemOps;
  const TargetRegisterClass *RC =
      AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass;

  // Copy each live-in argument register into a vreg and store it into
  // consecutive 4-byte slots of the fixed object.
  for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) {
    unsigned VReg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
    SDValue Store =
        DAG.getStore(Val.getValue(1), dl, Val, FIN,
                     MachinePointerInfo(OrigArg, 4 * i), false, false, 0);
    MemOps.push_back(Store);
    FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
                      DAG.getConstant(4, dl, getPointerTy()));
  }

  // Tie all the stores into the chain so they are not dead-code eliminated.
  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  return FrameIndex;
}
02905 
02906 // Setup stack frame, the va_list pointer will start from.
02907 void
02908 ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
02909                                         SDLoc dl, SDValue &Chain,
02910                                         unsigned ArgOffset,
02911                                         unsigned TotalArgRegsSaveSize,
02912                                         bool ForceMutable) const {
02913   MachineFunction &MF = DAG.getMachineFunction();
02914   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02915 
02916   // Try to store any remaining integer argument regs
02917   // to their spots on the stack so that they may be loaded by deferencing
02918   // the result of va_next.
02919   // If there is no regs to be stored, just point address after last
02920   // argument passed via stack.
02921   int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
02922                                   CCInfo.getInRegsParamsCount(),
02923                                   CCInfo.getNextStackOffset(), 4);
02924   AFI->setVarArgsFrameIndex(FrameIndex);
02925 }
02926 
/// LowerFormalArguments - Lower the incoming (formal) arguments of the
/// current function: run the calling convention to assign each argument a
/// register or stack slot, materialize register args with CopyFromReg and
/// stack args with loads, reassemble split f64/v2f64 values, copy byval
/// aggregates' register portions to the stack, and save the remaining
/// argument registers for varargs functions.
SDValue
ARMTargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv, bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg>
                                          &Ins,
                                        SDLoc dl, SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals)
                                          const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                    *DAG.getContext(), Prologue);
  CCInfo.AnalyzeFormalArguments(Ins,
                                CCAssignFnForNode(CallConv, /* Return*/ false,
                                                  isVarArg));

  SmallVector<SDValue, 16> ArgValues;
  SDValue ArgValue;
  Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
  unsigned CurArgIdx = 0;

  // Initially ArgRegsSaveSize is zero.
  // Then we increase this value each time we meet byval parameter.
  // We also increase this value in case of varargs function.
  AFI->setArgRegsSaveSize(0);

  // Calculate the amount of stack space that we need to allocate to store
  // byval and variadic arguments that are passed in registers.
  // We need to know this before we allocate the first byval or variadic
  // argument, as they will be allocated a stack slot below the CFA (Canonical
  // Frame Address, the stack pointer at entry to the function).
  unsigned ArgRegBegin = ARM::R4;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount())
      break;

    CCValAssign &VA = ArgLocs[i];
    unsigned Index = VA.getValNo();
    ISD::ArgFlagsTy Flags = Ins[Index].Flags;
    if (!Flags.isByVal())
      continue;

    assert(VA.isMemLoc() && "unexpected byval pointer in reg");
    unsigned RBegin, REnd;
    CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd);
    ArgRegBegin = std::min(ArgRegBegin, RBegin);

    CCInfo.nextInRegsParam();
  }
  // Reset the byval bookkeeping so the main lowering loop below can walk
  // the same records again.
  CCInfo.rewindByValRegsInfo();

  int lastInsIndex = -1;
  if (isVarArg && MFI->hasVAStart()) {
    unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
    if (RegIdx != array_lengthof(GPRArgRegs))
      ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]);
  }

  // 4 bytes for each register in [ArgRegBegin, R4) that must be saved.
  unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin);
  AFI->setArgRegsSaveSize(TotalArgRegsSaveSize);

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (Ins[VA.getValNo()].isOrigArg()) {
      std::advance(CurOrigArg,
                   Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
    }
    // Arguments stored in registers.
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();

      if (VA.needsCustom()) {
        // f64 and vector types are split up into multiple registers or
        // combinations of registers and stack slots.
        if (VA.getLocVT() == MVT::v2f64) {
          // v2f64 arrives as two f64 halves; each half may itself be split
          // between registers and the stack. Note the ++i: each half
          // consumes additional ArgLocs entries.
          SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
                                                   Chain, DAG, dl);
          VA = ArgLocs[++i]; // skip ahead to next loc
          SDValue ArgValue2;
          if (VA.isMemLoc()) {
            int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true);
            SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
            ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
                                    MachinePointerInfo::getFixedStack(FI),
                                    false, false, false, 0);
          } else {
            ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
                                             Chain, DAG, dl);
          }
          // Rebuild the v2f64 by inserting the two f64 halves into an undef
          // vector.
          ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
                                 ArgValue, ArgValue1,
                                 DAG.getIntPtrConstant(0, dl));
          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
                                 ArgValue, ArgValue2,
                                 DAG.getIntPtrConstant(1, dl));
        } else
          ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);

      } else {
        // Plain register argument: pick the register class for the type.
        const TargetRegisterClass *RC;

        if (RegVT == MVT::f32)
          RC = &ARM::SPRRegClass;
        else if (RegVT == MVT::f64)
          RC = &ARM::DPRRegClass;
        else if (RegVT == MVT::v2f64)
          RC = &ARM::QPRRegClass;
        else if (RegVT == MVT::i32)
          RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass
                                           : &ARM::GPRRegClass;
        else
          llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");

        // Transform the arguments in physical registers into virtual ones.
        unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
        ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
      }

      // If this is an 8 or 16-bit value, it is really passed promoted
      // to 32 bits.  Insert an assert[sz]ext to capture this, then
      // truncate to the right size.
      switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::BCvt:
        ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::SExt:
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::ZExt:
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      }

      InVals.push_back(ArgValue);

    } else { // VA.isRegLoc()

      // sanity check
      assert(VA.isMemLoc());
      assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");

      int index = VA.getValNo();

      // Some Ins[] entries become multiple ArgLoc[] entries.
      // Process them only once.
      if (index != lastInsIndex)
        {
          ISD::ArgFlagsTy Flags = Ins[index].Flags;
          // FIXME: For now, all byval parameter objects are marked mutable.
          // This can be changed with more analysis.
          // In case of tail call optimization mark all arguments mutable.
          // Since they could be overwritten by lowering of arguments in case of
          // a tail call.
          if (Flags.isByVal()) {
            assert(Ins[index].isOrigArg() &&
                   "Byval arguments cannot be implicit");
            unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();

            // Spill the register-resident portion of the byval aggregate to
            // the stack and hand the caller a frame-index pointer.
            int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, CurOrigArg,
                                            CurByValIndex, VA.getLocMemOffset(),
                                            Flags.getByValSize());
            InVals.push_back(DAG.getFrameIndex(FrameIndex, getPointerTy()));
            CCInfo.nextInRegsParam();
          } else {
            unsigned FIOffset = VA.getLocMemOffset();
            int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
                                            FIOffset, true);

            // Create load nodes to retrieve arguments from the stack.
            SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
            InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
                                         MachinePointerInfo::getFixedStack(FI),
                                         false, false, false, 0));
          }
          lastInsIndex = index;
        }
    }
  }

  // varargs
  if (isVarArg && MFI->hasVAStart())
    VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
                         CCInfo.getNextStackOffset(),
                         TotalArgRegsSaveSize);

  AFI->setArgumentStackSize(CCInfo.getNextStackOffset());

  return Chain;
}
03129 
03130 /// isFloatingPointZero - Return true if this is +0.0.
03131 static bool isFloatingPointZero(SDValue Op) {
03132   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
03133     return CFP->getValueAPF().isPosZero();
03134   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
03135     // Maybe this has already been legalized into the constant pool?
03136     if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
03137       SDValue WrapperOp = Op.getOperand(1).getOperand(0);
03138       if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
03139         if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
03140           return CFP->getValueAPF().isPosZero();
03141     }
03142   } else if (Op->getOpcode() == ISD::BITCAST &&
03143              Op->getValueType(0) == MVT::f64) {
03144     // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64)
03145     // created by LowerConstantFP().
03146     SDValue BitcastOp = Op->getOperand(0);
03147     if (BitcastOp->getOpcode() == ARMISD::VMOVIMM) {
03148       SDValue MoveOp = BitcastOp->getOperand(0);
03149       if (MoveOp->getOpcode() == ISD::TargetConstant &&
03150           cast<ConstantSDNode>(MoveOp)->getZExtValue() == 0) {
03151         return true;
03152       }
03153     }
03154   }
03155   return false;
03156 }
03157 
03158 /// Returns appropriate ARM CMP (cmp) and corresponding condition code for
03159 /// the given operands.
03160 SDValue
03161 ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
03162                              SDValue &ARMcc, SelectionDAG &DAG,
03163                              SDLoc dl) const {
03164   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
03165     unsigned C = RHSC->getZExtValue();
03166     if (!isLegalICmpImmediate(C)) {
03167       // Constant does not fit, try adjusting it by one?
03168       switch (CC) {
03169       default: break;
03170       case ISD::SETLT:
03171       case ISD::SETGE:
03172         if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
03173           CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
03174           RHS = DAG.getConstant(C - 1, dl, MVT::i32);
03175         }
03176         break;
03177       case ISD::SETULT:
03178       case ISD::SETUGE:
03179         if (C != 0 && isLegalICmpImmediate(C-1)) {
03180           CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
03181           RHS = DAG.getConstant(C - 1, dl, MVT::i32);
03182         }
03183         break;
03184       case ISD::SETLE:
03185       case ISD::SETGT:
03186         if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
03187           CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
03188           RHS = DAG.getConstant(C + 1, dl, MVT::i32);
03189         }
03190         break;
03191       case ISD::SETULE:
03192       case ISD::SETUGT:
03193         if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
03194           CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
03195           RHS = DAG.getConstant(C + 1, dl, MVT::i32);
03196         }
03197         break;
03198       }
03199     }
03200   }
03201 
03202   ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
03203   ARMISD::NodeType CompareType;
03204   switch (CondCode) {
03205   default:
03206     CompareType = ARMISD::CMP;
03207     break;
03208   case ARMCC::EQ:
03209   case ARMCC::NE:
03210     // Uses only Z Flag
03211     CompareType = ARMISD::CMPZ;
03212     break;
03213   }
03214   ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
03215   return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
03216 }
03217 
03218 /// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
03219 SDValue
03220 ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
03221                              SDLoc dl) const {
03222   assert(!Subtarget->isFPOnlySP() || RHS.getValueType() != MVT::f64);
03223   SDValue Cmp;
03224   if (!isFloatingPointZero(RHS))
03225     Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS);
03226   else
03227     Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS);
03228   return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
03229 }
03230 
03231 /// duplicateCmp - Glue values can have only one use, so this function
03232 /// duplicates a comparison node.
03233 SDValue
03234 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
03235   unsigned Opc = Cmp.getOpcode();
03236   SDLoc DL(Cmp);
03237   if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
03238     return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
03239 
03240   assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
03241   Cmp = Cmp.getOperand(0);
03242   Opc = Cmp.getOpcode();
03243   if (Opc == ARMISD::CMPFP)
03244     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
03245   else {
03246     assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
03247     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0));
03248   }
03249   return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
03250 }
03251 
03252 std::pair<SDValue, SDValue>
03253 ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
03254                                  SDValue &ARMcc) const {
03255   assert(Op.getValueType() == MVT::i32 &&  "Unsupported value type");
03256 
03257   SDValue Value, OverflowCmp;
03258   SDValue LHS = Op.getOperand(0);
03259   SDValue RHS = Op.getOperand(1);
03260   SDLoc dl(Op);
03261 
03262   // FIXME: We are currently always generating CMPs because we don't support
03263   // generating CMN through the backend. This is not as good as the natural
03264   // CMP case because it causes a register dependency and cannot be folded
03265   // later.
03266 
03267   switch (Op.getOpcode()) {
03268   default:
03269     llvm_unreachable("Unknown overflow instruction!");
03270   case ISD::SADDO:
03271     ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
03272     Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
03273     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
03274     break;
03275   case ISD::UADDO:
03276     ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
03277     Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
03278     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
03279     break;
03280   case ISD::SSUBO:
03281     ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
03282     Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
03283     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
03284     break;
03285   case ISD::USUBO:
03286     ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
03287     Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
03288     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
03289     break;
03290   } // switch (...)
03291 
03292   return std::make_pair(Value, OverflowCmp);
03293 }
03294 
03295 
03296 SDValue
03297 ARMTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
03298   // Let legalize expand this if it isn't a legal type yet.
03299   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
03300     return SDValue();
03301 
03302   SDValue Value, OverflowCmp;
03303   SDValue ARMcc;
03304   std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
03305   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03306   SDLoc dl(Op);
03307   // We use 0 and 1 as false and true values.
03308   SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
03309   SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
03310   EVT VT = Op.getValueType();
03311 
03312   SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal,
03313                                  ARMcc, CCR, OverflowCmp);
03314 
03315   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
03316   return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
03317 }
03318 
03319 
/// LowerSELECT - Lower an ISD::SELECT node, producing a direct ARMISD::CMOV
/// when the condition is itself a flag-producing node.
SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue Cond = Op.getOperand(0);
  SDValue SelectTrue = Op.getOperand(1);
  SDValue SelectFalse = Op.getOperand(2);
  SDLoc dl(Op);
  unsigned Opc = Cond.getOpcode();

  // If the condition is the overflow result (ResNo == 1) of an overflow op,
  // re-emit the arithmetic via getARMXALUOOp and select on its flags
  // directly instead of materializing a boolean first.
  if (Cond.getResNo() == 1 &&
      (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
       Opc == ISD::USUBO)) {
    if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
      return SDValue();

    SDValue Value, OverflowCmp;
    SDValue ARMcc;
    std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    EVT VT = Op.getValueType();

    return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR,
                   OverflowCmp, DAG);
  }

  // Convert:
  //
  //   (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
  //   (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
  //
  if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
    const ConstantSDNode *CMOVTrue =
      dyn_cast<ConstantSDNode>(Cond.getOperand(0));
    const ConstantSDNode *CMOVFalse =
      dyn_cast<ConstantSDNode>(Cond.getOperand(1));

    if (CMOVTrue && CMOVFalse) {
      unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
      unsigned CMOVFalseVal = CMOVFalse->getZExtValue();

      SDValue True;
      SDValue False;
      if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
        True = SelectTrue;
        False = SelectFalse;
      } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
        // Inverted boolean: swap the select arms rather than inverting the
        // condition.
        True = SelectFalse;
        False = SelectTrue;
      }

      if (True.getNode() && False.getNode()) {
        EVT VT = Op.getValueType();
        SDValue ARMcc = Cond.getOperand(2);
        SDValue CCR = Cond.getOperand(3);
        // The inner CMOV's glued compare can have only one use, so make a
        // fresh copy of it for the new CMOV.
        SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
        assert(True.getValueType() == VT);
        return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
      }
    }
  }

  // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
  // undefined bits before doing a full-word comparison with zero.
  Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond,
                     DAG.getConstant(1, dl, Cond.getValueType()));

  return DAG.getSelectCC(dl, Cond,
                         DAG.getConstant(0, dl, Cond.getValueType()),
                         SelectTrue, SelectFalse, ISD::SETNE);
}
03388 
/// checkVSELConstraints - Map an ISD condition code onto one of the four
/// condition codes VSEL can encode (GE, GT, VS, EQ), recording whether the
/// compare operands (swpCmpOps) and/or the VSEL operands (swpVselOps) need to
/// be swapped to preserve semantics. CondCode is left unchanged for condition
/// codes this function does not handle. NOTE: the tests below are ordered and
/// cumulative — several of them can fire for the same CC.
static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                                 bool &swpCmpOps, bool &swpVselOps) {
  // Start by selecting the GE condition code for opcodes that return true for
  // 'equality'
  if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE ||
      CC == ISD::SETULE)
    CondCode = ARMCC::GE;

  // and GT for opcodes that return false for 'equality'.
  else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT ||
           CC == ISD::SETULT)
    CondCode = ARMCC::GT;

  // Since we are constrained to GE/GT, if the opcode contains 'less', we need
  // to swap the compare operands.
  if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT ||
      CC == ISD::SETULT)
    swpCmpOps = true;

  // Both GT and GE are ordered comparisons, and return false for 'unordered'.
  // If we have an unordered opcode, we need to swap the operands to the VSEL
  // instruction (effectively negating the condition).
  //
  // This also has the effect of swapping which one of 'less' or 'greater'
  // returns true, so we also swap the compare operands. It also switches
  // whether we return true for 'equality', so we compensate by picking the
  // opposite condition code to our original choice.
  if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE ||
      CC == ISD::SETUGT) {
    swpCmpOps = !swpCmpOps;
    swpVselOps = !swpVselOps;
    CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT;
  }

  // 'ordered' is 'anything but unordered', so use the VS condition code and
  // swap the VSEL operands.
  if (CC == ISD::SETO) {
    CondCode = ARMCC::VS;
    swpVselOps = true;
  }

  // 'unordered or not equal' is 'anything but equal', so use the EQ condition
  // code and swap the VSEL operands.
  if (CC == ISD::SETUNE) {
    CondCode = ARMCC::EQ;
    swpVselOps = true;
  }
}
03437 
03438 SDValue ARMTargetLowering::getCMOV(SDLoc dl, EVT VT, SDValue FalseVal,
03439                                    SDValue TrueVal, SDValue ARMcc, SDValue CCR,
03440                                    SDValue Cmp, SelectionDAG &DAG) const {
03441   if (Subtarget->isFPOnlySP() && VT == MVT::f64) {
03442     FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl,
03443                            DAG.getVTList(MVT::i32, MVT::i32), FalseVal);
03444     TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl,
03445                           DAG.getVTList(MVT::i32, MVT::i32), TrueVal);
03446 
03447     SDValue TrueLow = TrueVal.getValue(0);
03448     SDValue TrueHigh = TrueVal.getValue(1);
03449     SDValue FalseLow = FalseVal.getValue(0);
03450     SDValue FalseHigh = FalseVal.getValue(1);
03451 
03452     SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow,
03453                               ARMcc, CCR, Cmp);
03454     SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh,
03455                                ARMcc, CCR, duplicateCmp(Cmp, DAG));
03456 
03457     return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High);
03458   } else {
03459     return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,
03460                        Cmp);
03461   }
03462 }
03463 
/// LowerSELECT_CC - Lower an ISD::SELECT_CC node to a compare plus CMOV,
/// trying VSEL and VMAXNM/VMINNM on ARMv8 FP targets first.
SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  SDLoc dl(Op);

  // Without double-precision hardware, soften the f64 compare to a libcall;
  // this turns LHS/RHS into the integer comparison of the call results.
  if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
    DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
                                                    dl);

    // If softenSetCCOperands only returned one value, we should compare it to
    // zero.
    if (!RHS.getNode()) {
      RHS = DAG.getConstant(0, dl, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }

  if (LHS.getValueType() == MVT::i32) {
    // Try to generate VSEL on ARMv8.
    // The VSEL instruction can't use all the usual ARM condition
    // codes: it only has two bits to select the condition code, so it's
    // constrained to use only GE, GT, VS and EQ.
    //
    // To implement all the various ISD::SETXXX opcodes, we sometimes need to
    // swap the operands of the previous compare instruction (effectively
    // inverting the compare condition, swapping 'less' and 'greater') and
    // sometimes need to swap the operands to the VSEL (which inverts the
    // condition in the sense of firing whenever the previous condition didn't)
    if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
                                    TrueVal.getValueType() == MVT::f64)) {
      ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
      if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
          CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
        // Condition not directly encodable by VSEL: invert it and swap the
        // select arms to compensate.
        CC = ISD::getSetCCInverse(CC, true);
        std::swap(TrueVal, FalseVal);
      }
    }

    SDValue ARMcc;
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
    return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
  }

  ARMCC::CondCodes CondCode, CondCode2;
  FPCCToARMCC(CC, CondCode, CondCode2);

  // Try to generate VMAXNM/VMINNM on ARMv8.
  if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
                                  TrueVal.getValueType() == MVT::f64)) {
    // We can use VMAXNM/VMINNM for a compare followed by a select with the
    // same operands, as follows:
    //   c = fcmp [?gt, ?ge, ?lt, ?le] a, b
    //   select c, a, b
    // In NoNaNsFPMath the CC will have been changed from, e.g., 'ogt' to 'gt'.
    bool swapSides = false;
    if (!getTargetMachine().Options.NoNaNsFPMath) {
      // transformability may depend on which way around we compare
      switch (CC) {
      default:
        break;
      case ISD::SETOGT:
      case ISD::SETOGE:
      case ISD::SETOLT:
      case ISD::SETOLE:
        // the non-NaN should be RHS
        swapSides = DAG.isKnownNeverNaN(LHS) && !DAG.isKnownNeverNaN(RHS);
        break;
      case ISD::SETUGT:
      case ISD::SETUGE:
      case ISD::SETULT:
      case ISD::SETULE:
        // the non-NaN should be LHS
        swapSides = DAG.isKnownNeverNaN(RHS) && !DAG.isKnownNeverNaN(LHS);
        break;
      }
    }
    // Also swap if the select arms are the compare operands reversed.
    swapSides = swapSides || (LHS == FalseVal && RHS == TrueVal);
    if (swapSides) {
      CC = ISD::getSetCCSwappedOperands(CC);
      std::swap(LHS, RHS);
    }
    if (LHS == TrueVal && RHS == FalseVal) {
      bool canTransform = true;
      // The transform is unsafe when it could change which of +0/-0 is
      // returned, unless one operand is a zero constant of the right sign
      // (or unsafe-fp-math is on, or a side is known non-zero).
      // FIXME: FastMathFlags::noSignedZeros() doesn't appear reachable from here
      if (!getTargetMachine().Options.UnsafeFPMath &&
          !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
        const ConstantFPSDNode *Zero;
        switch (CC) {
        default:
          break;
        case ISD::SETOGT:
        case ISD::SETUGT:
        case ISD::SETGT:
          // RHS must not be -0
          canTransform = (Zero = dyn_cast<ConstantFPSDNode>(RHS)) &&
                         !Zero->isNegative();
          break;
        case ISD::SETOGE:
        case ISD::SETUGE:
        case ISD::SETGE:
          // LHS must not be -0
          canTransform = (Zero = dyn_cast<ConstantFPSDNode>(LHS)) &&
                         !Zero->isNegative();
          break;
        case ISD::SETOLT:
        case ISD::SETULT:
        case ISD::SETLT:
          // RHS must not be +0
          canTransform = (Zero = dyn_cast<ConstantFPSDNode>(RHS)) &&
                          Zero->isNegative();
          break;
        case ISD::SETOLE:
        case ISD::SETULE:
        case ISD::SETLE:
          // LHS must not be +0
          canTransform = (Zero = dyn_cast<ConstantFPSDNode>(LHS)) &&
                          Zero->isNegative();
          break;
        }
      }
      if (canTransform) {
        // Note: If one of the elements in a pair is a number and the other
        // element is NaN, the corresponding result element is the number.
        // This is consistent with the IEEE 754-2008 standard.
        // Therefore, a > b ? a : b <=> vmax(a,b), if b is constant and a is NaN
        switch (CC) {
        default:
          break;
        case ISD::SETOGT:
        case ISD::SETOGE:
          if (!DAG.isKnownNeverNaN(RHS))
            break;
          return DAG.getNode(ARMISD::VMAXNM, dl, VT, LHS, RHS);
        case ISD::SETUGT:
        case ISD::SETUGE:
          if (!DAG.isKnownNeverNaN(LHS))
            break;
          // FALLTHROUGH: unordered-greater with non-NaN LHS behaves like
          // the plain greater cases below.
        case ISD::SETGT:
        case ISD::SETGE:
          return DAG.getNode(ARMISD::VMAXNM, dl, VT, LHS, RHS);
        case ISD::SETOLT:
        case ISD::SETOLE:
          if (!DAG.isKnownNeverNaN(RHS))
            break;
          return DAG.getNode(ARMISD::VMINNM, dl, VT, LHS, RHS);
        case ISD::SETULT:
        case ISD::SETULE:
          if (!DAG.isKnownNeverNaN(LHS))
            break;
          // FALLTHROUGH: same reasoning as the unordered-greater cases.
        case ISD::SETLT:
        case ISD::SETLE:
          return DAG.getNode(ARMISD::VMINNM, dl, VT, LHS, RHS);
        }
      }
    }

    bool swpCmpOps = false;
    bool swpVselOps = false;
    checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps);

    // Only apply the swaps if the condition landed on a VSEL-encodable code.
    if (CondCode == ARMCC::GT || CondCode == ARMCC::GE ||
        CondCode == ARMCC::VS || CondCode == ARMCC::EQ) {
      if (swpCmpOps)
        std::swap(LHS, RHS);
      if (swpVselOps)
        std::swap(TrueVal, FalseVal);
    }
  }

  SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
  // Some FP conditions require two ARM condition codes; chain a second CMOV
  // off the first for the second condition.
  if (CondCode2 != ARMCC::AL) {
    SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32);
    // FIXME: Needs another CMP because flag can have but one use.
    SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
    Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
  }
  return Result;
}
03650 
03651 /// canChangeToInt - Given the fp compare operand, return true if it is suitable
03652 /// to morph to an integer compare sequence.
03653 static bool canChangeToInt(SDValue Op, bool &SeenZero,
03654                            const ARMSubtarget *Subtarget) {
03655   SDNode *N = Op.getNode();
03656   if (!N->hasOneUse())
03657     // Otherwise it requires moving the value from fp to integer registers.
03658     return false;
03659   if (!N->getNumValues())
03660     return false;
03661   EVT VT = Op.getValueType();
03662   if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
03663     // f32 case is generally profitable. f64 case only makes sense when vcmpe +
03664     // vmrs are very slow, e.g. cortex-a8.
03665     return false;
03666 
03667   if (isFloatingPointZero(Op)) {
03668     SeenZero = true;
03669     return true;
03670   }
03671   return ISD::isNormalLoad(N);
03672 }
03673 
03674 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
03675   if (isFloatingPointZero(Op))
03676     return DAG.getConstant(0, SDLoc(Op), MVT::i32);
03677 
03678   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
03679     return DAG.getLoad(MVT::i32, SDLoc(Op),
03680                        Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
03681                        Ld->isVolatile(), Ld->isNonTemporal(),
03682                        Ld->isInvariant(), Ld->getAlignment());
03683 
03684   llvm_unreachable("Unknown VFP cmp argument!");
03685 }
03686 
03687 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
03688                            SDValue &RetVal1, SDValue &RetVal2) {
03689   SDLoc dl(Op);
03690 
03691   if (isFloatingPointZero(Op)) {
03692     RetVal1 = DAG.getConstant(0, dl, MVT::i32);
03693     RetVal2 = DAG.getConstant(0, dl, MVT::i32);
03694     return;
03695   }
03696 
03697   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
03698     SDValue Ptr = Ld->getBasePtr();
03699     RetVal1 = DAG.getLoad(MVT::i32, dl,
03700                           Ld->getChain(), Ptr,
03701                           Ld->getPointerInfo(),
03702                           Ld->isVolatile(), Ld->isNonTemporal(),
03703                           Ld->isInvariant(), Ld->getAlignment());
03704 
03705     EVT PtrType = Ptr.getValueType();
03706     unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
03707     SDValue NewPtr = DAG.getNode(ISD::ADD, dl,
03708                                  PtrType, Ptr, DAG.getConstant(4, dl, PtrType));
03709     RetVal2 = DAG.getLoad(MVT::i32, dl,
03710                           Ld->getChain(), NewPtr,
03711                           Ld->getPointerInfo().getWithOffset(4),
03712                           Ld->isVolatile(), Ld->isNonTemporal(),
03713                           Ld->isInvariant(), NewAlign);
03714     return;
03715   }
03716 
03717   llvm_unreachable("Unknown VFP cmp argument!");
03718 }
03719 
/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
/// f32 and even f64 comparisons to integer ones.
SDValue
ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  // Both operands must be morphable (single-use zero or load), and at least
  // one side must be a floating-point zero for the bit trick below to work.
  bool LHSSeenZero = false;
  bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
  bool RHSSeenZero = false;
  bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget);
  if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
    // If unsafe fp math optimization is enabled and there are no other uses of
    // the CMP operands, and the condition code is EQ or NE, we can optimize it
    // to an integer comparison.
    if (CC == ISD::SETOEQ)
      CC = ISD::SETEQ;
    else if (CC == ISD::SETUNE)
      CC = ISD::SETNE;

    // Mask off the sign bit so -0.0 and +0.0 compare as equal.
    SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32);
    SDValue ARMcc;
    if (LHS.getValueType() == MVT::f32) {
      LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
                        bitcastf32Toi32(LHS, DAG), Mask);
      RHS = DAG.getNode(ISD::AND, dl, MVT::i32,
                        bitcastf32Toi32(RHS, DAG), Mask);
      SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
      SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
      return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
                         Chain, Dest, ARMcc, CCR, Cmp);
    }

    // f64: expand each side into its two i32 words, mask the sign bit off
    // the high words, and emit the 64-bit compare-and-branch pseudo.
    SDValue LHS1, LHS2;
    SDValue RHS1, RHS2;
    expandf64Toi32(LHS, DAG, LHS1, LHS2);
    expandf64Toi32(RHS, DAG, RHS1, RHS2);
    LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
    RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
    ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
    ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
    SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
    return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
  }

  return SDValue();
}
03772 
/// LowerBR_CC - Lower an ISD::BR_CC (compare and conditionally branch) node.
SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  // Without double-precision hardware, soften the f64 compare to a libcall.
  if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
    DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
                                                    dl);

    // If softenSetCCOperands only returned one value, we should compare it to
    // zero.
    if (!RHS.getNode()) {
      RHS = DAG.getConstant(0, dl, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }

  // Integer compare-and-branch: a CMP followed by a single BRCOND.
  if (LHS.getValueType() == MVT::i32) {
    SDValue ARMcc;
    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
                       Chain, Dest, ARMcc, CCR, Cmp);
  }

  assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);

  // Under unsafe-fp-math, (in)equality FP compares may be morphed into
  // integer compares; see OptimizeVFPBrcond.
  if (getTargetMachine().Options.UnsafeFPMath &&
      (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
       CC == ISD::SETNE || CC == ISD::SETUNE)) {
    SDValue Result = OptimizeVFPBrcond(Op, DAG);
    if (Result.getNode())
      return Result;
  }

  ARMCC::CondCodes CondCode, CondCode2;
  FPCCToARMCC(CC, CondCode, CondCode2);

  SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
  SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
  // Some FP condition codes need two ARM conditions; chain a second
  // conditional branch off the first, reusing its glue result.
  if (CondCode2 != ARMCC::AL) {
    ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
    SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
    Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
  }
  return Res;
}
03827 
03828 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
03829   SDValue Chain = Op.getOperand(0);
03830   SDValue Table = Op.getOperand(1);
03831   SDValue Index = Op.getOperand(2);
03832   SDLoc dl(Op);
03833 
03834   EVT PTy = getPointerTy();
03835   JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
03836   SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
03837   Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI);
03838   Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy));
03839   SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
03840   if (Subtarget->isThumb2()) {
03841     // Thumb2 uses a two-level jump. That is, it jumps into the jump table
03842     // which does another jump to the destination. This also makes it easier
03843     // to translate it to TBB / TBH later.
03844     // FIXME: This might not work if the function is extremely large.
03845     return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
03846                        Addr, Op.getOperand(2), JTI);
03847   }
03848   if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
03849     Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
03850                        MachinePointerInfo::getJumpTable(),
03851                        false, false, false, 0);
03852     Chain = Addr.getValue(1);
03853     Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
03854     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
03855   } else {
03856     Addr = DAG.getLoad(PTy, dl, Chain, Addr,
03857                        MachinePointerInfo::getJumpTable(),
03858                        false, false, false, 0);
03859     Chain = Addr.getValue(1);
03860     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
03861   }
03862 }
03863 
03864 static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
03865   EVT VT = Op.getValueType();
03866   SDLoc dl(Op);
03867 
03868   if (Op.getValueType().getVectorElementType() == MVT::i32) {
03869     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
03870       return Op;
03871     return DAG.UnrollVectorOp(Op.getNode());
03872   }
03873 
03874   assert(Op.getOperand(0).getValueType() == MVT::v4f32 &&
03875          "Invalid type for custom lowering!");
03876   if (VT != MVT::v4i16)
03877     return DAG.UnrollVectorOp(Op.getNode());
03878 
03879   Op = DAG.getNode(Op.getOpcode(), dl, MVT::v4i32, Op.getOperand(0));
03880   return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
03881 }
03882 
03883 SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
03884   EVT VT = Op.getValueType();
03885   if (VT.isVector())
03886     return LowerVectorFP_TO_INT(Op, DAG);
03887   if (Subtarget->isFPOnlySP() && Op.getOperand(0).getValueType() == MVT::f64) {
03888     RTLIB::Libcall LC;
03889     if (Op.getOpcode() == ISD::FP_TO_SINT)
03890       LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(),
03891                               Op.getValueType());
03892     else
03893       LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(),
03894                               Op.getValueType());
03895     return makeLibCall(DAG, LC, Op.getValueType(), &Op.getOperand(0), 1,
03896                        /*isSigned*/ false, SDLoc(Op)).first;
03897   }
03898 
03899   return Op;
03900 }
03901 
03902 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
03903   EVT VT = Op.getValueType();
03904   SDLoc dl(Op);
03905 
03906   if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
03907     if (VT.getVectorElementType() == MVT::f32)
03908       return Op;
03909     return DAG.UnrollVectorOp(Op.getNode());
03910   }
03911 
03912   assert(Op.getOperand(0).getValueType() == MVT::v4i16 &&
03913          "Invalid type for custom lowering!");
03914   if (VT != MVT::v4f32)
03915     return DAG.UnrollVectorOp(Op.getNode());
03916 
03917   unsigned CastOpc;
03918   unsigned Opc;
03919   switch (Op.getOpcode()) {
03920   default: llvm_unreachable("Invalid opcode!");
03921   case ISD::SINT_TO_FP:
03922     CastOpc = ISD::SIGN_EXTEND;
03923     Opc = ISD::SINT_TO_FP;
03924     break;
03925   case ISD::UINT_TO_FP:
03926     CastOpc = ISD::ZERO_EXTEND;
03927     Opc = ISD::UINT_TO_FP;
03928     break;
03929   }
03930 
03931   Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0));
03932   return DAG.getNode(Opc, dl, VT, Op);
03933 }
03934 
03935 SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const {
03936   EVT VT = Op.getValueType();
03937   if (VT.isVector())
03938     return LowerVectorINT_TO_FP(Op, DAG);
03939   if (Subtarget->isFPOnlySP() && Op.getValueType() == MVT::f64) {
03940     RTLIB::Libcall LC;
03941     if (Op.getOpcode() == ISD::SINT_TO_FP)
03942       LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(),
03943                               Op.getValueType());
03944     else
03945       LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(),
03946                               Op.getValueType());
03947     return makeLibCall(DAG, LC, Op.getValueType(), &Op.getOperand(0), 1,
03948                        /*isSigned*/ false, SDLoc(Op)).first;
03949   }
03950 
03951   return Op;
03952 }
03953 
/// LowerFCOPYSIGN - Lower ISD::FCOPYSIGN: build a value with the magnitude of
/// operand 0 and the sign of operand 1, either with NEON bit-select-style
/// masking or with plain integer bit operations.
SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
  // Implement fcopysign with a fabs and a conditional fneg.
  SDValue Tmp0 = Op.getOperand(0);
  SDValue Tmp1 = Op.getOperand(1);
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  EVT SrcVT = Tmp1.getValueType();
  // If the magnitude already lives in core registers (it arrived via a
  // bitcast or VMOVDRR), prefer the integer path below over moving into NEON.
  bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
    Tmp0.getOpcode() == ARMISD::VMOVDRR;
  bool UseNEON = !InGPR && Subtarget->hasNEON();

  if (UseNEON) {
    // Use VBSL to copy the sign bit.
    // NOTE(review): assumes the (0x6, 0x80) modified-immediate encodes a
    // 0x80000000 per-32-bit-lane sign mask — confirm against
    // ARM_AM::createNEONModImm / decodeNEONModImm.
    unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80);
    SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
                               DAG.getTargetConstant(EncodedVal, dl, MVT::i32));
    EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
    if (VT == MVT::f64)
      // Shift the 32-bit mask into the high word, where an f64's sign lives.
      Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT,
                         DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
                         DAG.getConstant(32, dl, MVT::i32));
    else /*if (VT == MVT::f32)*/
      Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
    if (SrcVT == MVT::f32) {
      Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
      if (VT == MVT::f64)
        // f32 sign source, f64 result: move the sign bit up to bit 63.
        Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT,
                           DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
                           DAG.getConstant(32, dl, MVT::i32));
    } else if (VT == MVT::f32)
      // f64 sign source, f32 result: move the sign bit down to bit 31.
      Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64,
                         DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
                         DAG.getConstant(32, dl, MVT::i32));
    Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
    Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);

    // Build the all-ones vector (modified-immediate 0xe/0xff) to form ~Mask.
    SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff),
                                            dl, MVT::i32);
    AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
    SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
                                  DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));

    // Res = (Tmp1 & Mask) | (Tmp0 & ~Mask): sign from Tmp1, everything else
    // from Tmp0 — the bit-select (VBSL) pattern.
    SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
                              DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
                              DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
    if (VT == MVT::f32) {
      Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
      Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
                        DAG.getConstant(0, dl, MVT::i32));
    } else {
      Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
    }

    return Res;
  }

  // Bitcast operand 1 to i32.
  if (SrcVT == MVT::f64)
    // Only the high word (which carries the sign bit) is needed.
    Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
                       Tmp1).getValue(1);
  Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);

  // Or in the signbit with integer operations.
  SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32);
  SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32);
  Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
  if (VT == MVT::f32) {
    Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
                       DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
    return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
                       DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
  }

  // f64: Or the high part with signbit and then combine two parts.
  Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
                     Tmp0);
  SDValue Lo = Tmp0.getValue(0);
  SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
  Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
}
04035 
04036 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
04037   MachineFunction &MF = DAG.getMachineFunction();
04038   MachineFrameInfo *MFI = MF.getFrameInfo();
04039   MFI->setReturnAddressIsTaken(true);
04040 
04041   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
04042     return SDValue();
04043 
04044   EVT VT = Op.getValueType();
04045   SDLoc dl(Op);
04046   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
04047   if (Depth) {
04048     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
04049     SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
04050     return DAG.getLoad(VT, dl, DAG.getEntryNode(),
04051                        DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
04052                        MachinePointerInfo(), false, false, false, 0);
04053   }
04054 
04055   // Return LR, which contains the return address. Mark it an implicit live-in.
04056   unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
04057   return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
04058 }
04059 
04060 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
04061   const ARMBaseRegisterInfo &ARI =
04062     *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
04063   MachineFunction &MF = DAG.getMachineFunction();
04064   MachineFrameInfo *MFI = MF.getFrameInfo();
04065   MFI->setFrameAddressIsTaken(true);
04066 
04067   EVT VT = Op.getValueType();
04068   SDLoc dl(Op);  // FIXME probably not meaningful
04069   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
04070   unsigned FrameReg = ARI.getFrameRegister(MF);
04071   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
04072   while (Depth--)
04073     FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
04074                             MachinePointerInfo(),
04075                             false, false, false, 0);
04076   return FrameAddr;
04077 }
04078 
04079 // FIXME? Maybe this could be a TableGen attribute on some registers and
04080 // this table could be generated automatically from RegInfo.
04081 unsigned ARMTargetLowering::getRegisterByName(const char* RegName,
04082                                               EVT VT) const {
04083   unsigned Reg = StringSwitch<unsigned>(RegName)
04084                        .Case("sp", ARM::SP)
04085                        .Default(0);
04086   if (Reg)
04087     return Reg;
04088   report_fatal_error("Invalid register name global variable");
04089 }
04090 
04091 /// ExpandBITCAST - If the target supports VFP, this function is called to
04092 /// expand a bit convert where either the source or destination type is i64 to
04093 /// use a VMOVDRR or VMOVRRD node.  This should not be done when the non-i64
04094 /// operand type is illegal (e.g., v2f32 for a target that doesn't support
04095 /// vectors), since the legalizer won't know what to do with that.
04096 static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
04097   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
04098   SDLoc dl(N);
04099   SDValue Op = N->getOperand(0);
04100 
04101   // This function is only supposed to be called for i64 types, either as the
04102   // source or destination of the bit convert.
04103   EVT SrcVT = Op.getValueType();
04104   EVT DstVT = N->getValueType(0);
04105   assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
04106          "ExpandBITCAST called for non-i64 type");
04107 
04108   // Turn i64->f64 into VMOVDRR.
04109   if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
04110     SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
04111                              DAG.getConstant(0, dl, MVT::i32));
04112     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
04113                              DAG.getConstant(1, dl, MVT::i32));
04114     return DAG.getNode(ISD::BITCAST, dl, DstVT,
04115                        DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
04116   }
04117 
04118   // Turn f64->i64 into VMOVRRD.
04119   if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
04120     SDValue Cvt;
04121     if (TLI.isBigEndian() && SrcVT.isVector() &&
04122         SrcVT.getVectorNumElements() > 1)
04123       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
04124                         DAG.getVTList(MVT::i32, MVT::i32),
04125                         DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
04126     else
04127       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
04128                         DAG.getVTList(MVT::i32, MVT::i32), Op);
04129     // Merge the pieces into a single i64 value.
04130     return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
04131   }
04132 
04133   return SDValue();
04134 }
04135 
04136 /// getZeroVector - Returns a vector of specified type with all zero elements.
04137 /// Zero vectors are used to represent vector negation and in those cases
04138 /// will be implemented with the NEON VNEG instruction.  However, VNEG does
04139 /// not support i64 elements, so sometimes the zero vectors will need to be
04140 /// explicitly constructed.  Regardless, use a canonical VMOV to create the
04141 /// zero vector.
04142 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, SDLoc dl) {
04143   assert(VT.isVector() && "Expected a vector type");
04144   // The canonical modified immediate encoding of a zero vector is....0!
04145   SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32);
04146   EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
04147   SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
04148   return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
04149 }
04150 
04151 /// LowerShiftRightParts - Lower SRA_PARTS, which returns two
04152 /// i32 values and take a 2 x i32 value to shift plus a shift amount.
04153 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
04154                                                 SelectionDAG &DAG) const {
04155   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
04156   EVT VT = Op.getValueType();
04157   unsigned VTBits = VT.getSizeInBits();
04158   SDLoc dl(Op);
04159   SDValue ShOpLo = Op.getOperand(0);
04160   SDValue ShOpHi = Op.getOperand(1);
04161   SDValue ShAmt  = Op.getOperand(2);
04162   SDValue ARMcc;
04163   unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
04164 
04165   assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
04166 
04167   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
04168                                  DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
04169   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
04170   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
04171                                    DAG.getConstant(VTBits, dl, MVT::i32));
04172   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
04173   SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
04174   SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
04175 
04176   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
04177   SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
04178                           ISD::SETGE, ARMcc, DAG, dl);
04179   SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
04180   SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc,
04181                            CCR, Cmp);
04182 
04183   SDValue Ops[2] = { Lo, Hi };
04184   return DAG.getMergeValues(Ops, dl);
04185 }
04186 
04187 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
04188 /// i32 values and take a 2 x i32 value to shift plus a shift amount.
04189 SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
04190                                                SelectionDAG &DAG) const {
04191   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
04192   EVT VT = Op.getValueType();
04193   unsigned VTBits = VT.getSizeInBits();
04194   SDLoc dl(Op);
04195   SDValue ShOpLo = Op.getOperand(0);
04196   SDValue ShOpHi = Op.getOperand(1);
04197   SDValue ShAmt  = Op.getOperand(2);
04198   SDValue ARMcc;
04199 
04200   assert(Op.getOpcode() == ISD::SHL_PARTS);
04201   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
04202                                  DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
04203   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
04204   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
04205                                    DAG.getConstant(VTBits, dl, MVT::i32));
04206   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
04207   SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
04208 
04209   SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
04210   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
04211   SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
04212                           ISD::SETGE, ARMcc, DAG, dl);
04213   SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
04214   SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc,
04215                            CCR, Cmp);
04216 
04217   SDValue Ops[2] = { Lo, Hi };
04218   return DAG.getMergeValues(Ops, dl);
04219 }
04220 
04221 SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
04222                                             SelectionDAG &DAG) const {
04223   // The rounding mode is in bits 23:22 of the FPSCR.
04224   // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
04225   // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3)
04226   // so that the shift + and get folded into a bitfield extract.
04227   SDLoc dl(Op);
04228   SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
04229                               DAG.getConstant(Intrinsic::arm_get_fpscr, dl,
04230                                               MVT::i32));
04231   SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
04232                                   DAG.getConstant(1U << 22, dl, MVT::i32));
04233   SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
04234                               DAG.getConstant(22, dl, MVT::i32));
04235   return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
04236                      DAG.getConstant(3, dl, MVT::i32));
04237 }
04238 
04239 static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
04240                          const ARMSubtarget *ST) {
04241   EVT VT = N->getValueType(0);
04242   SDLoc dl(N);
04243 
04244   if (!ST->hasV6T2Ops())
04245     return SDValue();
04246 
04247   SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0));
04248   return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
04249 }
04250 
04251 /// getCTPOP16BitCounts - Returns a v8i8/v16i8 vector containing the bit-count
04252 /// for each 16-bit element from operand, repeated.  The basic idea is to
04253 /// leverage vcnt to get the 8-bit counts, gather and add the results.
04254 ///
04255 /// Trace for v4i16:
04256 /// input    = [v0    v1    v2    v3   ] (vi 16-bit element)
04257 /// cast: N0 = [w0 w1 w2 w3 w4 w5 w6 w7] (v0 = [w0 w1], wi 8-bit element)
04258 /// vcnt: N1 = [b0 b1 b2 b3 b4 b5 b6 b7] (bi = bit-count of 8-bit element wi)
04259