LLVM API Documentation

ARMISelLowering.cpp
00001 //===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
00002 //
00003 //                     The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 //
00010 // This file defines the interfaces that ARM uses to lower LLVM code into a
00011 // selection DAG.
00012 //
00013 //===----------------------------------------------------------------------===//
00014 
00015 #include "ARMISelLowering.h"
00016 #include "ARMCallingConv.h"
00017 #include "ARMConstantPoolValue.h"
00018 #include "ARMMachineFunctionInfo.h"
00019 #include "ARMPerfectShuffle.h"
00020 #include "ARMSubtarget.h"
00021 #include "ARMTargetMachine.h"
00022 #include "ARMTargetObjectFile.h"
00023 #include "MCTargetDesc/ARMAddressingModes.h"
00024 #include "llvm/ADT/Statistic.h"
00025 #include "llvm/ADT/StringExtras.h"
00026 #include "llvm/CodeGen/CallingConvLower.h"
00027 #include "llvm/CodeGen/IntrinsicLowering.h"
00028 #include "llvm/CodeGen/MachineBasicBlock.h"
00029 #include "llvm/CodeGen/MachineFrameInfo.h"
00030 #include "llvm/CodeGen/MachineFunction.h"
00031 #include "llvm/CodeGen/MachineInstrBuilder.h"
00032 #include "llvm/CodeGen/MachineJumpTableInfo.h"
00033 #include "llvm/CodeGen/MachineModuleInfo.h"
00034 #include "llvm/CodeGen/MachineRegisterInfo.h"
00035 #include "llvm/CodeGen/SelectionDAG.h"
00036 #include "llvm/IR/CallingConv.h"
00037 #include "llvm/IR/Constants.h"
00038 #include "llvm/IR/Function.h"
00039 #include "llvm/IR/GlobalValue.h"
00040 #include "llvm/IR/IRBuilder.h"
00041 #include "llvm/IR/Instruction.h"
00042 #include "llvm/IR/Instructions.h"
00043 #include "llvm/IR/Intrinsics.h"
00044 #include "llvm/IR/Type.h"
00045 #include "llvm/MC/MCSectionMachO.h"
00046 #include "llvm/Support/CommandLine.h"
00047 #include "llvm/Support/Debug.h"
00048 #include "llvm/Support/ErrorHandling.h"
00049 #include "llvm/Support/MathExtras.h"
00050 #include "llvm/Target/TargetOptions.h"
00051 #include <utility>
00052 using namespace llvm;
00053 
00054 #define DEBUG_TYPE "arm-isel"
00055 
00056 STATISTIC(NumTailCalls, "Number of tail calls");
00057 STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
00058 STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
00059 
00060 cl::opt<bool>
00061 EnableARMLongCalls("arm-long-calls", cl::Hidden,
00062   cl::desc("Generate calls via indirect call instructions"),
00063   cl::init(false));
00064 
00065 static cl::opt<bool>
00066 ARMInterworking("arm-interworking", cl::Hidden,
00067   cl::desc("Enable / disable ARM interworking (for debugging only)"),
00068   cl::init(true));
00069 
00070 namespace {
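  // ARMCCState is a thin wrapper around CCState that additionally records
  // whether the arguments being analyzed belong to a call site or to the
  // function prologue (ParmContext), so later lowering code can tell the two
  // situations apart.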
00071   class ARMCCState : public CCState {
00072   public:
00073     ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
00074                SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
00075                ParmContext PC)
00076         : CCState(CC, isVarArg, MF, locs, C) {
00077       assert(((PC == Call) || (PC == Prologue)) &&
00078              "ARMCCState users must specify whether their context is call "
00079              "or prologue generation.");
00080       CallOrPrologue = PC;
00081     }
00082   };
00083 }
00084 
00085 // The APCS parameter registers.
00086 static const MCPhysReg GPRArgRegs[] = {
00087   ARM::R0, ARM::R1, ARM::R2, ARM::R3
00088 };
00089 
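// addTypeForNEON - Set the operation actions common to every NEON vector
// type: promote loads/stores and bitwise ops to the given wider types,
// custom-lower lane insert/extract, shuffles and integer shifts, and expand
// the operations NEON cannot do natively (divide, remainder, select, etc.).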
00090 void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
00091                                        MVT PromotedBitwiseVT) {
00092   if (VT != PromotedLdStVT) {
00093     setOperationAction(ISD::LOAD, VT, Promote);
00094     AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);
00095 
00096     setOperationAction(ISD::STORE, VT, Promote);
00097     AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
00098   }
00099 
00100   MVT ElemTy = VT.getVectorElementType();
00101   if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
00102     setOperationAction(ISD::SETCC, VT, Custom);
00103   setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
00104   setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
00105   if (ElemTy == MVT::i32) {
00106     setOperationAction(ISD::SINT_TO_FP, VT, Custom);
00107     setOperationAction(ISD::UINT_TO_FP, VT, Custom);
00108     setOperationAction(ISD::FP_TO_SINT, VT, Custom);
00109     setOperationAction(ISD::FP_TO_UINT, VT, Custom);
00110   } else {
00111     setOperationAction(ISD::SINT_TO_FP, VT, Expand);
00112     setOperationAction(ISD::UINT_TO_FP, VT, Expand);
00113     setOperationAction(ISD::FP_TO_SINT, VT, Expand);
00114     setOperationAction(ISD::FP_TO_UINT, VT, Expand);
00115   }
00116   setOperationAction(ISD::BUILD_VECTOR,      VT, Custom);
00117   setOperationAction(ISD::VECTOR_SHUFFLE,    VT, Custom);
00118   setOperationAction(ISD::CONCAT_VECTORS,    VT, Legal);
00119   setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
00120   setOperationAction(ISD::SELECT,            VT, Expand);
00121   setOperationAction(ISD::SELECT_CC,         VT, Expand);
00122   setOperationAction(ISD::VSELECT,           VT, Expand);
00123   setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
00124   if (VT.isInteger()) {
00125     setOperationAction(ISD::SHL, VT, Custom);
00126     setOperationAction(ISD::SRA, VT, Custom);
00127     setOperationAction(ISD::SRL, VT, Custom);
00128   }
00129 
00130   // Promote all bit-wise operations.
00131   if (VT.isInteger() && VT != PromotedBitwiseVT) {
00132     setOperationAction(ISD::AND, VT, Promote);
00133     AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
00134     setOperationAction(ISD::OR,  VT, Promote);
00135     AddPromotedToType (ISD::OR,  VT, PromotedBitwiseVT);
00136     setOperationAction(ISD::XOR, VT, Promote);
00137     AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
00138   }
00139 
00140   // Neon does not support vector divide/remainder operations.
00141   setOperationAction(ISD::SDIV, VT, Expand);
00142   setOperationAction(ISD::UDIV, VT, Expand);
00143   setOperationAction(ISD::FDIV, VT, Expand);
00144   setOperationAction(ISD::SREM, VT, Expand);
00145   setOperationAction(ISD::UREM, VT, Expand);
00146   setOperationAction(ISD::FREM, VT, Expand);
00147 }
00148 
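// addDRTypeForNEON / addQRTypeForNEON - Register a 64-bit (D-register) or
// 128-bit (Q-register, modeled as a D-register pair) NEON vector type and
// apply the common NEON operation actions to it.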
00149 void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
00150   addRegisterClass(VT, &ARM::DPRRegClass);
00151   addTypeForNEON(VT, MVT::f64, MVT::v2i32);
00152 }
00153 
00154 void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
00155   addRegisterClass(VT, &ARM::DPairRegClass);
00156   addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
00157 }
00158 
00159 ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM)
00160     : TargetLowering(TM) {
00161   Subtarget = &TM.getSubtarget<ARMSubtarget>();
00162   RegInfo = TM.getSubtargetImpl()->getRegisterInfo();
00163   Itins = TM.getSubtargetImpl()->getInstrItineraryData();
00164 
00165   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
00166 
00167   if (Subtarget->isTargetMachO()) {
00168     // Use VFP for Thumb libfuncs if available.
00169     if (Subtarget->isThumb() && Subtarget->hasVFP2() &&
00170         Subtarget->hasARMOps() && !TM.Options.UseSoftFloat) {
00171       // Single-precision floating-point arithmetic.
00172       setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
00173       setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
00174       setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
00175       setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");
00176 
00177       // Double-precision floating-point arithmetic.
00178       setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
00179       setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
00180       setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
00181       setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");
00182 
00183       // Single-precision comparisons.
00184       setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
00185       setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
00186       setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
00187       setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
00188       setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
00189       setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
00190       setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
00191       setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");
00192 
00193       setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
00194       setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
00195       setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
00196       setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
00197       setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
00198       setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
00199       setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
00200       setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);
00201 
00202       // Double-precision comparisons.
00203       setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
00204       setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
00205       setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
00206       setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
00207       setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
00208       setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
00209       setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
00210       setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");
00211 
00212       setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
00213       setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
00214       setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
00215       setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
00216       setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
00217       setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
00218       setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
00219       setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);
00220 
00221       // Floating-point to integer conversions.
00222       // i64 conversions are done via library routines even when generating VFP
00223       // instructions, so use the same ones.
00224       setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
00225       setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
00226       setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
00227       setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");
00228 
00229       // Conversions between floating types.
00230       setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
00231       setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");
00232 
00233       // Integer to floating-point conversions.
00234       // i64 conversions are done via library routines even when generating VFP
00235       // instructions, so use the same ones.
00236       // FIXME: There appears to be some naming inconsistency in ARM libgcc:
00237       // e.g., __floatunsidf vs. __floatunssidfvfp.
00238       setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
00239       setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
00240       setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
00241       setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
00242     }
00243   }
00244 
00245   // These libcalls are not available in 32-bit.
00246   setLibcallName(RTLIB::SHL_I128, nullptr);
00247   setLibcallName(RTLIB::SRL_I128, nullptr);
00248   setLibcallName(RTLIB::SRA_I128, nullptr);
00249 
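  // For AAPCS targets other than MachO and Windows, route the soft-float,
  // integer-division and memory helpers through the __aeabi_* runtime entry
  // points defined by the ARM RTABI, using the AAPCS calling convention.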
00250   if (Subtarget->isAAPCS_ABI() && !Subtarget->isTargetMachO() &&
00251       !Subtarget->isTargetWindows()) {
00252     static const struct {
00253       const RTLIB::Libcall Op;
00254       const char * const Name;
00255       const CallingConv::ID CC;
00256       const ISD::CondCode Cond;
00257     } LibraryCalls[] = {
00258       // Double-precision floating-point arithmetic helper functions
00259       // RTABI chapter 4.1.2, Table 2
00260       { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00261       { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00262       { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00263       { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00264 
00265       // Double-precision floating-point comparison helper functions
00266       // RTABI chapter 4.1.2, Table 3
00267       { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
00268       { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
00269       { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
00270       { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
00271       { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
00272       { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
00273       { RTLIB::UO_F64,  "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
00274       { RTLIB::O_F64,   "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
00275 
00276       // Single-precision floating-point arithmetic helper functions
00277       // RTABI chapter 4.1.2, Table 4
00278       { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00279       { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00280       { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00281       { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00282 
00283       // Single-precision floating-point comparison helper functions
00284       // RTABI chapter 4.1.2, Table 5
00285       { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
00286       { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
00287       { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
00288       { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
00289       { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
00290       { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
00291       { RTLIB::UO_F32,  "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
00292       { RTLIB::O_F32,   "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
00293 
00294       // Floating-point to integer conversions.
00295       // RTABI chapter 4.1.2, Table 6
00296       { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00297       { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00298       { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00299       { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00300       { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00301       { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00302       { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00303       { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00304 
00305       // Conversions between floating types.
00306       // RTABI chapter 4.1.2, Table 7
00307       { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00308       { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00309       { RTLIB::FPEXT_F32_F64,   "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00310 
00311       // Integer to floating-point conversions.
00312       // RTABI chapter 4.1.2, Table 8
00313       { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00314       { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00315       { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00316       { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00317       { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00318       { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00319       { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00320       { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00321 
00322       // Long long helper functions
00323       // RTABI chapter 4.2, Table 9
00324       { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00325       { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00326       { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00327       { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00328 
00329       // Integer division functions
00330       // RTABI chapter 4.3.1
00331       { RTLIB::SDIV_I8,  "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00332       { RTLIB::SDIV_I16, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00333       { RTLIB::SDIV_I32, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00334       { RTLIB::SDIV_I64, "__aeabi_ldivmod",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00335       { RTLIB::UDIV_I8,  "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00336       { RTLIB::UDIV_I16, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00337       { RTLIB::UDIV_I32, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00338       { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00339 
00340       // Memory operations
00341       // RTABI chapter 4.3.4
00342       { RTLIB::MEMCPY,  "__aeabi_memcpy",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00343       { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00344       { RTLIB::MEMSET,  "__aeabi_memset",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
00345     };
00346 
00347     for (const auto &LC : LibraryCalls) {
00348       setLibcallName(LC.Op, LC.Name);
00349       setLibcallCallingConv(LC.Op, LC.CC);
00350       if (LC.Cond != ISD::SETCC_INVALID)
00351         setCmpLibcallCC(LC.Op, LC.Cond);
00352     }
00353   }
00354 
00355   if (Subtarget->isTargetWindows()) {
00356     static const struct {
00357       const RTLIB::Libcall Op;
00358       const char * const Name;
00359       const CallingConv::ID CC;
00360     } LibraryCalls[] = {
00361       { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
00362       { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
00363       { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
00364       { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
00365       { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
00366       { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
00367       { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
00368       { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
00369     };
00370 
00371     for (const auto &LC : LibraryCalls) {
00372       setLibcallName(LC.Op, LC.Name);
00373       setLibcallCallingConv(LC.Op, LC.CC);
00374     }
00375   }
00376 
00377   // Use divmod compiler-rt calls for iOS 5.0 and later.
00378   if (Subtarget->getTargetTriple().isiOS() &&
00379       !Subtarget->getTargetTriple().isOSVersionLT(5, 0)) {
00380     setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
00381     setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
00382   }
00383 
00384   // The half <-> float conversion functions are always soft-float, but are
00385   // needed for some targets which use a hard-float calling convention by
00386   // default.
00387   if (Subtarget->isAAPCS_ABI()) {
00388     setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
00389     setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
00390     setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
00391   } else {
00392     setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
00393     setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
00394     setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
00395   }
00396 
00397   if (Subtarget->isThumb1Only())
00398     addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
00399   else
00400     addRegisterClass(MVT::i32, &ARM::GPRRegClass);
00401   if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
00402       !Subtarget->isThumb1Only()) {
00403     addRegisterClass(MVT::f32, &ARM::SPRRegClass);
00404     addRegisterClass(MVT::f64, &ARM::DPRRegClass);
00405   }
00406 
00407   for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
00408        VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
00409     for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
00410          InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
00411       setTruncStoreAction((MVT::SimpleValueType)VT,
00412                           (MVT::SimpleValueType)InnerVT, Expand);
00413     setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
00414     setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
00415     setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand);
00416 
00417     setOperationAction(ISD::MULHS, (MVT::SimpleValueType)VT, Expand);
00418     setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
00419     setOperationAction(ISD::MULHU, (MVT::SimpleValueType)VT, Expand);
00420     setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
00421 
00422     setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand);
00423   }
00424 
00425   setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
00426   setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
00427 
00428   if (Subtarget->hasNEON()) {
00429     addDRTypeForNEON(MVT::v2f32);
00430     addDRTypeForNEON(MVT::v8i8);
00431     addDRTypeForNEON(MVT::v4i16);
00432     addDRTypeForNEON(MVT::v2i32);
00433     addDRTypeForNEON(MVT::v1i64);
00434 
00435     addQRTypeForNEON(MVT::v4f32);
00436     addQRTypeForNEON(MVT::v2f64);
00437     addQRTypeForNEON(MVT::v16i8);
00438     addQRTypeForNEON(MVT::v8i16);
00439     addQRTypeForNEON(MVT::v4i32);
00440     addQRTypeForNEON(MVT::v2i64);
00441 
00442     // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
00443     // neither Neon nor VFP supports any arithmetic operations on it.
00444     // The same applies to v4f32, but keep in mind that vadd, vsub and vmul are
00445     // natively supported for v4f32.
00446     setOperationAction(ISD::FADD, MVT::v2f64, Expand);
00447     setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
00448     setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
00449     // FIXME: Code duplication: FDIV and FREM are expanded always, see
00450     // ARMTargetLowering::addTypeForNEON method for details.
00451     setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
00452     setOperationAction(ISD::FREM, MVT::v2f64, Expand);
00453     // FIXME: Create unittest.
00454     // In other words, find a case where "copysign" appears in the DAG with vector
00455     // operands.
00456     setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
00457     // FIXME: Code duplication: SETCC has custom operation action, see
00458     // ARMTargetLowering::addTypeForNEON method for details.
00459     setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
00460     // FIXME: Create unittest for FNEG and for FABS.
00461     setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
00462     setOperationAction(ISD::FABS, MVT::v2f64, Expand);
00463     setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
00464     setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
00465     setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
00466     setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
00467     setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
00468     setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
00469     setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
00470     setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
00471     setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
00472     setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
00473     // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
00474     setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
00475     setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
00476     setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
00477     setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
00478     setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
00479     setOperationAction(ISD::FMA, MVT::v2f64, Expand);
00480 
00481     setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
00482     setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
00483     setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
00484     setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
00485     setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
00486     setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
00487     setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
00488     setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
00489     setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
00490     setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
00491     setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
00492     setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
00493     setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
00494     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
00495     setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);
00496 
00497     // Mark v2f32 intrinsics.
00498     setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
00499     setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
00500     setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
00501     setOperationAction(ISD::FPOWI, MVT::v2f32, Expand);
00502     setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
00503     setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
00504     setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
00505     setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
00506     setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
00507     setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
00508     setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
00509     setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
00510     setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
00511     setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
00512     setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);
00513 
00514     // Neon does not support some operations on v1i64 and v2i64 types.
00515     setOperationAction(ISD::MUL, MVT::v1i64, Expand);
00516     // Custom handling for some quad-vector types to detect VMULL.
00517     setOperationAction(ISD::MUL, MVT::v8i16, Custom);
00518     setOperationAction(ISD::MUL, MVT::v4i32, Custom);
00519     setOperationAction(ISD::MUL, MVT::v2i64, Custom);
00520     // Custom handling for some vector types to avoid expensive expansions
00521     setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
00522     setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
00523     setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
00524     setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
00525     setOperationAction(ISD::SETCC, MVT::v1i64, Expand);
00526     setOperationAction(ISD::SETCC, MVT::v2i64, Expand);
00527     // Neon does not have single-instruction SINT_TO_FP and UINT_TO_FP with
00528     // a destination type that is wider than the source, nor does
00529     // it have an FP_TO_[SU]INT instruction with a narrower destination than
00530     // its source.
00531     setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
00532     setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
00533     setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
00534     setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
00535 
00536     setOperationAction(ISD::FP_ROUND,   MVT::v2f32, Expand);
00537     setOperationAction(ISD::FP_EXTEND,  MVT::v2f64, Expand);
00538 
00539     // NEON does not have a single-instruction CTPOP for vectors with element
00540     // types wider than 8 bits.  However, custom lowering can leverage the
00541     // v8i8/v16i8 vcnt instruction.
00542     setOperationAction(ISD::CTPOP,      MVT::v2i32, Custom);
00543     setOperationAction(ISD::CTPOP,      MVT::v4i32, Custom);
00544     setOperationAction(ISD::CTPOP,      MVT::v4i16, Custom);
00545     setOperationAction(ISD::CTPOP,      MVT::v8i16, Custom);
00546 
00547     // NEON only has FMA instructions as of VFP4.
00548     if (!Subtarget->hasVFP4()) {
00549       setOperationAction(ISD::FMA, MVT::v2f32, Expand);
00550       setOperationAction(ISD::FMA, MVT::v4f32, Expand);
00551     }
00552 
00553     setTargetDAGCombine(ISD::INTRINSIC_VOID);
00554     setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
00555     setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
00556     setTargetDAGCombine(ISD::SHL);
00557     setTargetDAGCombine(ISD::SRL);
00558     setTargetDAGCombine(ISD::SRA);
00559     setTargetDAGCombine(ISD::SIGN_EXTEND);
00560     setTargetDAGCombine(ISD::ZERO_EXTEND);
00561     setTargetDAGCombine(ISD::ANY_EXTEND);
00562     setTargetDAGCombine(ISD::SELECT_CC);
00563     setTargetDAGCombine(ISD::BUILD_VECTOR);
00564     setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
00565     setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
00566     setTargetDAGCombine(ISD::STORE);
00567     setTargetDAGCombine(ISD::FP_TO_SINT);
00568     setTargetDAGCombine(ISD::FP_TO_UINT);
00569     setTargetDAGCombine(ISD::FDIV);
00570 
00571     // It is legal to extload from v4i8 to v4i16 or v4i32.
00572     MVT Tys[6] = {MVT::v8i8, MVT::v4i8, MVT::v2i8,
00573                   MVT::v4i16, MVT::v2i16,
00574                   MVT::v2i32};
00575     for (unsigned i = 0; i < 6; ++i) {
00576       setLoadExtAction(ISD::EXTLOAD, Tys[i], Legal);
00577       setLoadExtAction(ISD::ZEXTLOAD, Tys[i], Legal);
00578       setLoadExtAction(ISD::SEXTLOAD, Tys[i], Legal);
00579     }
00580   }
00581 
00582   // ARM and Thumb2 support UMLAL/SMLAL.
00583   if (!Subtarget->isThumb1Only())
00584     setTargetDAGCombine(ISD::ADDC);
00585 
00586   if (Subtarget->isFPOnlySP()) {
00587     // When targeting a floating-point unit with only single-precision
00588     // operations, f64 is legal for the few double-precision instructions which
00589     // are present. However, no double-precision operations other than moves,
00590     // loads and stores are provided by the hardware.
00591     setOperationAction(ISD::FADD,       MVT::f64, Expand);
00592     setOperationAction(ISD::FSUB,       MVT::f64, Expand);
00593     setOperationAction(ISD::FMUL,       MVT::f64, Expand);
00594     setOperationAction(ISD::FMA,        MVT::f64, Expand);
00595     setOperationAction(ISD::FDIV,       MVT::f64, Expand);
00596     setOperationAction(ISD::FREM,       MVT::f64, Expand);
00597     setOperationAction(ISD::FCOPYSIGN,  MVT::f64, Expand);
00598     setOperationAction(ISD::FGETSIGN,   MVT::f64, Expand);
00599     setOperationAction(ISD::FNEG,       MVT::f64, Expand);
00600     setOperationAction(ISD::FABS,       MVT::f64, Expand);
00601     setOperationAction(ISD::FSQRT,      MVT::f64, Expand);
00602     setOperationAction(ISD::FSIN,       MVT::f64, Expand);
00603     setOperationAction(ISD::FCOS,       MVT::f64, Expand);
00604     setOperationAction(ISD::FPOWI,      MVT::f64, Expand);
00605     setOperationAction(ISD::FPOW,       MVT::f64, Expand);
00606     setOperationAction(ISD::FLOG,       MVT::f64, Expand);
00607     setOperationAction(ISD::FLOG2,      MVT::f64, Expand);
00608     setOperationAction(ISD::FLOG10,     MVT::f64, Expand);
00609     setOperationAction(ISD::FEXP,       MVT::f64, Expand);
00610     setOperationAction(ISD::FEXP2,      MVT::f64, Expand);
00611     setOperationAction(ISD::FCEIL,      MVT::f64, Expand);
00612     setOperationAction(ISD::FTRUNC,     MVT::f64, Expand);
00613     setOperationAction(ISD::FRINT,      MVT::f64, Expand);
00614     setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
00615     setOperationAction(ISD::FFLOOR,     MVT::f64, Expand);
00616     setOperationAction(ISD::FP_ROUND,   MVT::f32, Custom);
00617     setOperationAction(ISD::FP_EXTEND,  MVT::f64, Custom);
00618   }
00619 
00620   computeRegisterProperties();
00621 
00622   // ARM does not have floating-point extending loads.
00623   setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
00624   setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);
00625 
00626   // ... or truncating stores
00627   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
00628   setTruncStoreAction(MVT::f32, MVT::f16, Expand);
00629   setTruncStoreAction(MVT::f64, MVT::f16, Expand);
00630 
00631   // ARM does not have i1 sign extending load.
00632   setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
00633 
00634   // ARM supports all 4 flavors of integer indexed load / store.
00635   if (!Subtarget->isThumb1Only()) {
00636     for (unsigned im = (unsigned)ISD::PRE_INC;
00637          im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
00638       setIndexedLoadAction(im,  MVT::i1,  Legal);
00639       setIndexedLoadAction(im,  MVT::i8,  Legal);
00640       setIndexedLoadAction(im,  MVT::i16, Legal);
00641       setIndexedLoadAction(im,  MVT::i32, Legal);
00642       setIndexedStoreAction(im, MVT::i1,  Legal);
00643       setIndexedStoreAction(im, MVT::i8,  Legal);
00644       setIndexedStoreAction(im, MVT::i16, Legal);
00645       setIndexedStoreAction(im, MVT::i32, Legal);
00646     }
00647   }
00648 
00649   setOperationAction(ISD::SADDO, MVT::i32, Custom);
00650   setOperationAction(ISD::UADDO, MVT::i32, Custom);
00651   setOperationAction(ISD::SSUBO, MVT::i32, Custom);
00652   setOperationAction(ISD::USUBO, MVT::i32, Custom);
00653 
00654   // i64 operation support.
00655   setOperationAction(ISD::MUL,     MVT::i64, Expand);
00656   setOperationAction(ISD::MULHU,   MVT::i32, Expand);
00657   if (Subtarget->isThumb1Only()) {
00658     setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
00659     setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
00660   }
00661   if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
00662       || (Subtarget->isThumb2() && !Subtarget->hasThumb2DSP()))
00663     setOperationAction(ISD::MULHS, MVT::i32, Expand);
00664 
00665   setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
00666   setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
00667   setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
00668   setOperationAction(ISD::SRL,       MVT::i64, Custom);
00669   setOperationAction(ISD::SRA,       MVT::i64, Custom);
00670 
00671   if (!Subtarget->isThumb1Only()) {
00672     // FIXME: We should do this for Thumb1 as well.
00673     setOperationAction(ISD::ADDC,    MVT::i32, Custom);
00674     setOperationAction(ISD::ADDE,    MVT::i32, Custom);
00675     setOperationAction(ISD::SUBC,    MVT::i32, Custom);
00676     setOperationAction(ISD::SUBE,    MVT::i32, Custom);
00677   }
00678 
00679   // ARM does not have ROTL.
00680   setOperationAction(ISD::ROTL,  MVT::i32, Expand);
00681   setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
00682   setOperationAction(ISD::CTPOP, MVT::i32, Expand);
00683   if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
00684     setOperationAction(ISD::CTLZ, MVT::i32, Expand);
00685 
00686   // These just redirect to CTTZ and CTLZ on ARM.
00687   setOperationAction(ISD::CTTZ_ZERO_UNDEF  , MVT::i32  , Expand);
00688   setOperationAction(ISD::CTLZ_ZERO_UNDEF  , MVT::i32  , Expand);
00689 
00690   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
00691 
00692   // Only ARMv6 and above have a native BSWAP.
00693   if (!Subtarget->hasV6Ops())
00694     setOperationAction(ISD::BSWAP, MVT::i32, Expand);
00695 
00696   if (!(Subtarget->hasDivide() && Subtarget->isThumb2()) &&
00697       !(Subtarget->hasDivideInARMMode() && !Subtarget->isThumb())) {
00698     // These are expanded into libcalls if the CPU doesn't have a hardware divider.
00699     setOperationAction(ISD::SDIV,  MVT::i32, Expand);
00700     setOperationAction(ISD::UDIV,  MVT::i32, Expand);
00701   }
00702 
00703   // FIXME: Also set divmod for SREM on EABI
00704   setOperationAction(ISD::SREM,  MVT::i32, Expand);
00705   setOperationAction(ISD::UREM,  MVT::i32, Expand);
00706   // Register based DivRem for AEABI (RTABI 4.2)
00707   if (Subtarget->isTargetAEABI()) {
00708     setLibcallName(RTLIB::SDIVREM_I8,  "__aeabi_idivmod");
00709     setLibcallName(RTLIB::SDIVREM_I16, "__aeabi_idivmod");
00710     setLibcallName(RTLIB::SDIVREM_I32, "__aeabi_idivmod");
00711     setLibcallName(RTLIB::SDIVREM_I64, "__aeabi_ldivmod");
00712     setLibcallName(RTLIB::UDIVREM_I8,  "__aeabi_uidivmod");
00713     setLibcallName(RTLIB::UDIVREM_I16, "__aeabi_uidivmod");
00714     setLibcallName(RTLIB::UDIVREM_I32, "__aeabi_uidivmod");
00715     setLibcallName(RTLIB::UDIVREM_I64, "__aeabi_uldivmod");
00716 
00717     setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::ARM_AAPCS);
00718     setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::ARM_AAPCS);
00719     setLibcallCallingConv(RTLIB::SDIVREM_I32, CallingConv::ARM_AAPCS);
00720     setLibcallCallingConv(RTLIB::SDIVREM_I64, CallingConv::ARM_AAPCS);
00721     setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::ARM_AAPCS);
00722     setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::ARM_AAPCS);
00723     setLibcallCallingConv(RTLIB::UDIVREM_I32, CallingConv::ARM_AAPCS);
00724     setLibcallCallingConv(RTLIB::UDIVREM_I64, CallingConv::ARM_AAPCS);
00725 
00726     setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
00727     setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
00728   } else {
00729     setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
00730     setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
00731   }
00732 
00733   setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
00734   setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
00735   setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
00736   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
00737   setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
00738 
00739   setOperationAction(ISD::TRAP, MVT::Other, Legal);
00740 
00741   // Use the default implementation.
00742   setOperationAction(ISD::VASTART,            MVT::Other, Custom);
00743   setOperationAction(ISD::VAARG,              MVT::Other, Expand);
00744   setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
00745   setOperationAction(ISD::VAEND,              MVT::Other, Expand);
00746   setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
00747   setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
00748 
00749   if (!Subtarget->isTargetMachO()) {
00750     // Non-MachO platforms may return values in these registers via the
00751     // personality function.
00752     setExceptionPointerRegister(ARM::R0);
00753     setExceptionSelectorRegister(ARM::R1);
00754   }
00755 
00756   if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment())
00757     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
00758   else
00759     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
00760 
00761   // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
00762   // the default expansion. If we are targeting a single-threaded system,
00763   // then set them all to Expand so we can lower them later into their
00764   // non-atomic form.
00765   if (TM.Options.ThreadModel == ThreadModel::Single)
00766     setOperationAction(ISD::ATOMIC_FENCE,   MVT::Other, Expand);
00767   else if (Subtarget->hasAnyDataBarrier() && !Subtarget->isThumb1Only()) {
00768     // ATOMIC_FENCE needs custom lowering; the others should have been expanded
00769     // to ldrex/strex loops already.
00770     setOperationAction(ISD::ATOMIC_FENCE,     MVT::Other, Custom);
00771 
00772     // On v8, we have particularly efficient implementations of atomic fences
00773     // if they can be combined with nearby atomic loads and stores.
00774     if (!Subtarget->hasV8Ops()) {
00775       // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
00776       setInsertFencesForAtomic(true);
00777     }
00778   } else {
00779     // If there's anything we can use as a barrier, go through custom lowering
00780     // for ATOMIC_FENCE.
00781     setOperationAction(ISD::ATOMIC_FENCE,   MVT::Other,
00782                        Subtarget->hasAnyDataBarrier() ? Custom : Expand);
00783 
00784     // Set them all for expansion, which will force libcalls.
00785     setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Expand);
00786     setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Expand);
00787     setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Expand);
00788     setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Expand);
00789     setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Expand);
00790     setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Expand);
00791     setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Expand);
00792     setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
00793     setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
00794     setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
00795     setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
00796     setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
00797     // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
00798     // Unordered/Monotonic case.
00799     setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
00800     setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
00801   }
00802 
00803   setOperationAction(ISD::PREFETCH,         MVT::Other, Custom);
00804 
00805   // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
00806   if (!Subtarget->hasV6Ops()) {
00807     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
00808     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
00809   }
00810   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
00811 
00812   if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
00813       !Subtarget->isThumb1Only()) {
00814     // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
00815     // iff target supports vfp2.
00816     setOperationAction(ISD::BITCAST, MVT::i64, Custom);
00817     setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
00818   }
00819 
00820   // We want to custom lower some of our intrinsics.
00821   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
00822   if (Subtarget->isTargetDarwin()) {
00823     setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
00824     setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
00825     setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
00826   }
00827 
00828   setOperationAction(ISD::SETCC,     MVT::i32, Expand);
00829   setOperationAction(ISD::SETCC,     MVT::f32, Expand);
00830   setOperationAction(ISD::SETCC,     MVT::f64, Expand);
00831   setOperationAction(ISD::SELECT,    MVT::i32, Custom);
00832   setOperationAction(ISD::SELECT,    MVT::f32, Custom);
00833   setOperationAction(ISD::SELECT,    MVT::f64, Custom);
00834   setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
00835   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
00836   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
00837 
00838   setOperationAction(ISD::BRCOND,    MVT::Other, Expand);
00839   setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
00840   setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
00841   setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
00842   setOperationAction(ISD::BR_JT,     MVT::Other, Custom);
00843 
00844   // We don't support sin/cos/fmod/copysign/pow
00845   setOperationAction(ISD::FSIN,      MVT::f64, Expand);
00846   setOperationAction(ISD::FSIN,      MVT::f32, Expand);
00847   setOperationAction(ISD::FCOS,      MVT::f32, Expand);
00848   setOperationAction(ISD::FCOS,      MVT::f64, Expand);
00849   setOperationAction(ISD::FSINCOS,   MVT::f64, Expand);
00850   setOperationAction(ISD::FSINCOS,   MVT::f32, Expand);
00851   setOperationAction(ISD::FREM,      MVT::f64, Expand);
00852   setOperationAction(ISD::FREM,      MVT::f32, Expand);
00853   if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
00854       !Subtarget->isThumb1Only()) {
00855     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
00856     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
00857   }
00858   setOperationAction(ISD::FPOW,      MVT::f64, Expand);
00859   setOperationAction(ISD::FPOW,      MVT::f32, Expand);
00860 
00861   if (!Subtarget->hasVFP4()) {
00862     setOperationAction(ISD::FMA, MVT::f64, Expand);
00863     setOperationAction(ISD::FMA, MVT::f32, Expand);
00864   }
00865 
00866   // Various VFP goodness
00867   if (!TM.Options.UseSoftFloat && !Subtarget->isThumb1Only()) {
00868     // int <-> fp are custom expanded into bit_convert + ARMISD ops.
00869     if (Subtarget->hasVFP2()) {
00870       setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
00871       setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
00872       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
00873       setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
00874     }
00875 
00876     // FP-ARMv8 adds f64 <-> f16 conversion; on earlier architectures it must be expanded.
00877     if (!Subtarget->hasFPARMv8() || Subtarget->isFPOnlySP()) {
00878       setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
00879       setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
00880     }
00881 
00882     // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
00883     if (!Subtarget->hasFP16()) {
00884       setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
00885       setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
00886     }
00887   }
00888 
00889   // Combine sin / cos into one node or libcall if possible.
00890   if (Subtarget->hasSinCos()) {
00891     setLibcallName(RTLIB::SINCOS_F32, "sincosf");
00892     setLibcallName(RTLIB::SINCOS_F64, "sincos");
00893     if (Subtarget->getTargetTriple().isiOS()) {
00894       // For iOS, we don't want the normal expansion of a libcall to
00895       // sincos. We want to issue a libcall to __sincos_stret.
00896       setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
00897       setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
00898     }
00899   }
00900 
00901   // FP-ARMv8 implements a lot of rounding-like FP operations.
00902   if (Subtarget->hasFPARMv8()) {
00903     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
00904     setOperationAction(ISD::FCEIL, MVT::f32, Legal);
00905     setOperationAction(ISD::FROUND, MVT::f32, Legal);
00906     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
00907     setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
00908     setOperationAction(ISD::FRINT, MVT::f32, Legal);
00909     if (!Subtarget->isFPOnlySP()) {
00910       setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
00911       setOperationAction(ISD::FCEIL, MVT::f64, Legal);
00912       setOperationAction(ISD::FROUND, MVT::f64, Legal);
00913       setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
00914       setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
00915       setOperationAction(ISD::FRINT, MVT::f64, Legal);
00916     }
00917   }
00918   // We have target-specific dag combine patterns for the following nodes:
00919   // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
00920   setTargetDAGCombine(ISD::ADD);
00921   setTargetDAGCombine(ISD::SUB);
00922   setTargetDAGCombine(ISD::MUL);
00923   setTargetDAGCombine(ISD::AND);
00924   setTargetDAGCombine(ISD::OR);
00925   setTargetDAGCombine(ISD::XOR);
00926 
00927   if (Subtarget->hasV6Ops())
00928     setTargetDAGCombine(ISD::SRL);
00929 
00930   setStackPointerRegisterToSaveRestore(ARM::SP);
00931 
00932   if (TM.Options.UseSoftFloat || Subtarget->isThumb1Only() ||
00933       !Subtarget->hasVFP2())
00934     setSchedulingPreference(Sched::RegPressure);
00935   else
00936     setSchedulingPreference(Sched::Hybrid);
00937 
00938   // Temporary: rewrite interface to use type.
00939   MaxStoresPerMemset = 8;
00940   MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
00941   MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
00942   MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 4 : 2;
00943   MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
00944   MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 4 : 2;
00945 
00946   // On ARM, arguments smaller than 4 bytes are extended, so all arguments
00947   // are at least 4-byte aligned.
00948   setMinStackArgumentAlignment(4);
00949 
00950   // Prefer likely predicted branches to selects on out-of-order cores.
00951   PredictableSelectIsExpensive = Subtarget->isLikeA9();
00952 
00953   setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
00954 }
00955 
00956 // FIXME: It might make sense to define the representative register class as the
00957 // nearest super-register that has a non-null superset. For example, DPR_VFP2 is
00958 // a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
00959 // SPR's representative would be DPR_VFP2. This should work well if register
00960 // pressure tracking were modified such that a register use would increment the
00961 // pressure of the register class's representative and all of its super
00962 // classes' representatives transitively. We have not implemented this because
00963 // of the difficulty prior to coalescing of modeling operand register classes
00964 // due to the common occurrence of cross class copies and subregister insertions
00965 // and extractions.
00966 std::pair<const TargetRegisterClass*, uint8_t>
00967 ARMTargetLowering::findRepresentativeClass(MVT VT) const{
00968   const TargetRegisterClass *RRC = nullptr;
00969   uint8_t Cost = 1;
00970   switch (VT.SimpleTy) {
00971   default:
00972     return TargetLowering::findRepresentativeClass(VT);
00973   // Use DPR as the representative register class for all floating-point
00974   // and vector types. Since there are 32 SPR registers and 32 DPR registers,
00975   // the cost is 1 for both f32 and f64.
00976   case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
00977   case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
00978     RRC = &ARM::DPRRegClass;
00979     // When NEON is used for SP, only half of the register file is available
00980     // because operations that define both SP and DP results will be constrained
00981     // to the VFP2 class (D0-D15). We currently model this constraint prior to
00982     // coalescing by double-counting the SP regs. See the FIXME above.
00983     if (Subtarget->useNEONForSinglePrecisionFP())
00984       Cost = 2;
00985     break;
00986   case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
00987   case MVT::v4f32: case MVT::v2f64:
00988     RRC = &ARM::DPRRegClass;
00989     Cost = 2;
00990     break;
00991   case MVT::v4i64:
00992     RRC = &ARM::DPRRegClass;
00993     Cost = 4;
00994     break;
00995   case MVT::v8i64:
00996     RRC = &ARM::DPRRegClass;
00997     Cost = 8;
00998     break;
00999   }
01000   return std::make_pair(RRC, Cost);
01001 }
01002 
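/// getTargetNodeName - Return a human-readable name for the given
/// ARM-specific SelectionDAG node opcode, mainly for DAG dumps and debugging.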
01003 const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
01004   switch (Opcode) {
01005   default: return nullptr;
01006   case ARMISD::Wrapper:       return "ARMISD::Wrapper";
01007   case ARMISD::WrapperPIC:    return "ARMISD::WrapperPIC";
01008   case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
01009   case ARMISD::CALL:          return "ARMISD::CALL";
01010   case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
01011   case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
01012   case ARMISD::tCALL:         return "ARMISD::tCALL";
01013   case ARMISD::BRCOND:        return "ARMISD::BRCOND";
01014   case ARMISD::BR_JT:         return "ARMISD::BR_JT";
01015   case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
01016   case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
01017   case ARMISD::INTRET_FLAG:   return "ARMISD::INTRET_FLAG";
01018   case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
01019   case ARMISD::CMP:           return "ARMISD::CMP";
01020   case ARMISD::CMN:           return "ARMISD::CMN";
01021   case ARMISD::CMPZ:          return "ARMISD::CMPZ";
01022   case ARMISD::CMPFP:         return "ARMISD::CMPFP";
01023   case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
01024   case ARMISD::BCC_i64:       return "ARMISD::BCC_i64";
01025   case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
01026 
01027   case ARMISD::CMOV:          return "ARMISD::CMOV";
01028 
01029   case ARMISD::RBIT:          return "ARMISD::RBIT";
01030 
01031   case ARMISD::FTOSI:         return "ARMISD::FTOSI";
01032   case ARMISD::FTOUI:         return "ARMISD::FTOUI";
01033   case ARMISD::SITOF:         return "ARMISD::SITOF";
01034   case ARMISD::UITOF:         return "ARMISD::UITOF";
01035 
01036   case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
01037   case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
01038   case ARMISD::RRX:           return "ARMISD::RRX";
01039 
01040   case ARMISD::ADDC:          return "ARMISD::ADDC";
01041   case ARMISD::ADDE:          return "ARMISD::ADDE";
01042   case ARMISD::SUBC:          return "ARMISD::SUBC";
01043   case ARMISD::SUBE:          return "ARMISD::SUBE";
01044 
01045   case ARMISD::VMOVRRD:       return "ARMISD::VMOVRRD";
01046   case ARMISD::VMOVDRR:       return "ARMISD::VMOVDRR";
01047 
01048   case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
01049   case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP";
01050 
01051   case ARMISD::TC_RETURN:     return "ARMISD::TC_RETURN";
01052 
01053   case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";
01054 
01055   case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";
01056 
01057   case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";
01058 
01059   case ARMISD::PRELOAD:       return "ARMISD::PRELOAD";
01060 
01061   case ARMISD::WIN__CHKSTK:   return "ARMISD::WIN__CHKSTK";
01062 
01063   case ARMISD::VCEQ:          return "ARMISD::VCEQ";
01064   case ARMISD::VCEQZ:         return "ARMISD::VCEQZ";
01065   case ARMISD::VCGE:          return "ARMISD::VCGE";
01066   case ARMISD::VCGEZ:         return "ARMISD::VCGEZ";
01067   case ARMISD::VCLEZ:         return "ARMISD::VCLEZ";
01068   case ARMISD::VCGEU:         return "ARMISD::VCGEU";
01069   case ARMISD::VCGT:          return "ARMISD::VCGT";
01070   case ARMISD::VCGTZ:         return "ARMISD::VCGTZ";
01071   case ARMISD::VCLTZ:         return "ARMISD::VCLTZ";
01072   case ARMISD::VCGTU:         return "ARMISD::VCGTU";
01073   case ARMISD::VTST:          return "ARMISD::VTST";
01074 
01075   case ARMISD::VSHL:          return "ARMISD::VSHL";
01076   case ARMISD::VSHRs:         return "ARMISD::VSHRs";
01077   case ARMISD::VSHRu:         return "ARMISD::VSHRu";
01078   case ARMISD::VRSHRs:        return "ARMISD::VRSHRs";
01079   case ARMISD::VRSHRu:        return "ARMISD::VRSHRu";
01080   case ARMISD::VRSHRN:        return "ARMISD::VRSHRN";
01081   case ARMISD::VQSHLs:        return "ARMISD::VQSHLs";
01082   case ARMISD::VQSHLu:        return "ARMISD::VQSHLu";
01083   case ARMISD::VQSHLsu:       return "ARMISD::VQSHLsu";
01084   case ARMISD::VQSHRNs:       return "ARMISD::VQSHRNs";
01085   case ARMISD::VQSHRNu:       return "ARMISD::VQSHRNu";
01086   case ARMISD::VQSHRNsu:      return "ARMISD::VQSHRNsu";
01087   case ARMISD::VQRSHRNs:      return "ARMISD::VQRSHRNs";
01088   case ARMISD::VQRSHRNu:      return "ARMISD::VQRSHRNu";
01089   case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
01090   case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
01091   case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
01092   case ARMISD::VMOVIMM:       return "ARMISD::VMOVIMM";
01093   case ARMISD::VMVNIMM:       return "ARMISD::VMVNIMM";
01094   case ARMISD::VMOVFPIMM:     return "ARMISD::VMOVFPIMM";
01095   case ARMISD::VDUP:          return "ARMISD::VDUP";
01096   case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
01097   case ARMISD::VEXT:          return "ARMISD::VEXT";
01098   case ARMISD::VREV64:        return "ARMISD::VREV64";
01099   case ARMISD::VREV32:        return "ARMISD::VREV32";
01100   case ARMISD::VREV16:        return "ARMISD::VREV16";
01101   case ARMISD::VZIP:          return "ARMISD::VZIP";
01102   case ARMISD::VUZP:          return "ARMISD::VUZP";
01103   case ARMISD::VTRN:          return "ARMISD::VTRN";
01104   case ARMISD::VTBL1:         return "ARMISD::VTBL1";
01105   case ARMISD::VTBL2:         return "ARMISD::VTBL2";
01106   case ARMISD::VMULLs:        return "ARMISD::VMULLs";
01107   case ARMISD::VMULLu:        return "ARMISD::VMULLu";
01108   case ARMISD::UMLAL:         return "ARMISD::UMLAL";
01109   case ARMISD::SMLAL:         return "ARMISD::SMLAL";
01110   case ARMISD::BUILD_VECTOR:  return "ARMISD::BUILD_VECTOR";
01111   case ARMISD::FMAX:          return "ARMISD::FMAX";
01112   case ARMISD::FMIN:          return "ARMISD::FMIN";
01113   case ARMISD::VMAXNM:        return "ARMISD::VMAXNM";
01114   case ARMISD::VMINNM:        return "ARMISD::VMINNM";
01115   case ARMISD::BFI:           return "ARMISD::BFI";
01116   case ARMISD::VORRIMM:       return "ARMISD::VORRIMM";
01117   case ARMISD::VBICIMM:       return "ARMISD::VBICIMM";
01118   case ARMISD::VBSL:          return "ARMISD::VBSL";
01119   case ARMISD::VLD2DUP:       return "ARMISD::VLD2DUP";
01120   case ARMISD::VLD3DUP:       return "ARMISD::VLD3DUP";
01121   case ARMISD::VLD4DUP:       return "ARMISD::VLD4DUP";
01122   case ARMISD::VLD1_UPD:      return "ARMISD::VLD1_UPD";
01123   case ARMISD::VLD2_UPD:      return "ARMISD::VLD2_UPD";
01124   case ARMISD::VLD3_UPD:      return "ARMISD::VLD3_UPD";
01125   case ARMISD::VLD4_UPD:      return "ARMISD::VLD4_UPD";
01126   case ARMISD::VLD2LN_UPD:    return "ARMISD::VLD2LN_UPD";
01127   case ARMISD::VLD3LN_UPD:    return "ARMISD::VLD3LN_UPD";
01128   case ARMISD::VLD4LN_UPD:    return "ARMISD::VLD4LN_UPD";
01129   case ARMISD::VLD2DUP_UPD:   return "ARMISD::VLD2DUP_UPD";
01130   case ARMISD::VLD3DUP_UPD:   return "ARMISD::VLD3DUP_UPD";
01131   case ARMISD::VLD4DUP_UPD:   return "ARMISD::VLD4DUP_UPD";
01132   case ARMISD::VST1_UPD:      return "ARMISD::VST1_UPD";
01133   case ARMISD::VST2_UPD:      return "ARMISD::VST2_UPD";
01134   case ARMISD::VST3_UPD:      return "ARMISD::VST3_UPD";
01135   case ARMISD::VST4_UPD:      return "ARMISD::VST4_UPD";
01136   case ARMISD::VST2LN_UPD:    return "ARMISD::VST2LN_UPD";
01137   case ARMISD::VST3LN_UPD:    return "ARMISD::VST3LN_UPD";
01138   case ARMISD::VST4LN_UPD:    return "ARMISD::VST4LN_UPD";
01139   }
01140 }
01141 
01142 EVT ARMTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
01143   if (!VT.isVector()) return getPointerTy();
01144   return VT.changeVectorElementTypeToInteger();
01145 }
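// For example, a scalar f32 compare gets an i32 result here (the pointer
// type), while a compare of two v4f32 operands keeps the vector shape and is
// given a v4i32 result.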
01146 
01147 /// getRegClassFor - Return the register class that should be used for the
01148 /// specified value type.
01149 const TargetRegisterClass *ARMTargetLowering::getRegClassFor(MVT VT) const {
01150   // Map v4i64 to QQ registers but do not make the type legal. Similarly map
01151   // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
01152   // load / store 4 to 8 consecutive D registers.
01153   if (Subtarget->hasNEON()) {
01154     if (VT == MVT::v4i64)
01155       return &ARM::QQPRRegClass;
01156     if (VT == MVT::v8i64)
01157       return &ARM::QQQQPRRegClass;
01158   }
01159   return TargetLowering::getRegClassFor(VT);
01160 }
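// In practice this means a REG_SEQUENCE of four consecutive D registers is
// modeled as a v4i64 value in a QQPR register, and one of eight consecutive
// D registers as a v8i64 value in a QQQQPR register, even though neither type
// is marked legal.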
01161 
01162 // Create a fast isel object.
01163 FastISel *
01164 ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
01165                                   const TargetLibraryInfo *libInfo) const {
01166   return ARM::createFastISel(funcInfo, libInfo);
01167 }
01168 
01169 /// getMaximalGlobalOffset - Returns the maximal possible offset which can
01170 /// be used for loads / stores from the global.
01171 unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
01172   return (Subtarget->isThumb1Only() ? 127 : 4095);
01173 }
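// These bounds roughly follow the immediate offset ranges of the load/store
// encodings: ARM and Thumb2 can fold a 12-bit offset (up to 4095 bytes) into
// a single LDR/STR, whereas Thumb1 offsets are far more limited, hence the
// conservative 127-byte bound.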
01174 
01175 Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
01176   unsigned NumVals = N->getNumValues();
01177   if (!NumVals)
01178     return Sched::RegPressure;
01179 
01180   for (unsigned i = 0; i != NumVals; ++i) {
01181     EVT VT = N->getValueType(i);
01182     if (VT == MVT::Glue || VT == MVT::Other)
01183       continue;
01184     if (VT.isFloatingPoint() || VT.isVector())
01185       return Sched::ILP;
01186   }
01187 
01188   if (!N->isMachineOpcode())
01189     return Sched::RegPressure;
01190 
01191   // Loads are scheduled for latency even if the instruction itinerary
01192   // is not available.
01193   const TargetInstrInfo *TII =
01194       getTargetMachine().getSubtargetImpl()->getInstrInfo();
01195   const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
01196 
01197   if (MCID.getNumDefs() == 0)
01198     return Sched::RegPressure;
01199   if (!Itins->isEmpty() &&
01200       Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
01201     return Sched::ILP;
01202 
01203   return Sched::RegPressure;
01204 }
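// In short: nodes producing floating-point or vector values are scheduled
// for ILP, as are machine nodes whose first result has an operand latency
// greater than two cycles (when an itinerary is available); everything else
// is scheduled to minimize register pressure.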
01205 
01206 //===----------------------------------------------------------------------===//
01207 // Lowering Code
01208 //===----------------------------------------------------------------------===//
01209 
01210 /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
01211 static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
01212   switch (CC) {
01213   default: llvm_unreachable("Unknown condition code!");
01214   case ISD::SETNE:  return ARMCC::NE;
01215   case ISD::SETEQ:  return ARMCC::EQ;
01216   case ISD::SETGT:  return ARMCC::GT;
01217   case ISD::SETGE:  return ARMCC::GE;
01218   case ISD::SETLT:  return ARMCC::LT;
01219   case ISD::SETLE:  return ARMCC::LE;
01220   case ISD::SETUGT: return ARMCC::HI;
01221   case ISD::SETUGE: return ARMCC::HS;
01222   case ISD::SETULT: return ARMCC::LO;
01223   case ISD::SETULE: return ARMCC::LS;
01224   }
01225 }
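// For example, an unsigned comparison (setcc x, y, setult) ends up using the
// LO ("unsigned lower") condition on the flags set by the compare, whereas
// the signed setlt form uses the LT condition instead.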
01226 
01227 /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
01228 static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
01229                         ARMCC::CondCodes &CondCode2) {
01230   CondCode2 = ARMCC::AL;
01231   switch (CC) {
01232   default: llvm_unreachable("Unknown FP condition!");
01233   case ISD::SETEQ:
01234   case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
01235   case ISD::SETGT:
01236   case ISD::SETOGT: CondCode = ARMCC::GT; break;
01237   case ISD::SETGE:
01238   case ISD::SETOGE: CondCode = ARMCC::GE; break;
01239   case ISD::SETOLT: CondCode = ARMCC::MI; break;
01240   case ISD::SETOLE: CondCode = ARMCC::LS; break;
01241   case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
01242   case ISD::SETO:   CondCode = ARMCC::VC; break;
01243   case ISD::SETUO:  CondCode = ARMCC::VS; break;
01244   case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
01245   case ISD::SETUGT: CondCode = ARMCC::HI; break;
01246   case ISD::SETUGE: CondCode = ARMCC::PL; break;
01247   case ISD::SETLT:
01248   case ISD::SETULT: CondCode = ARMCC::LT; break;
01249   case ISD::SETLE:
01250   case ISD::SETULE: CondCode = ARMCC::LE; break;
01251   case ISD::SETNE:
01252   case ISD::SETUNE: CondCode = ARMCC::NE; break;
01253   }
01254 }
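// Some floating-point predicates need two ARM conditions: SETONE ("ordered
// and not equal") is handled as MI plus a second check on GT, and SETUEQ as
// EQ plus a second check on VS. Callers can compare CondCode2 against
// ARMCC::AL to decide whether a second conditional instruction is needed.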
01255 
01256 //===----------------------------------------------------------------------===//
01257 //                      Calling Convention Implementation
01258 //===----------------------------------------------------------------------===//
01259 
01260 #include "ARMGenCallingConv.inc"
01261 
01262 /// getEffectiveCallingConv - Get the effective calling convention, taking into
01263 /// account presence of floating point hardware and calling convention
01264 /// limitations, such as support for variadic functions.
01265 CallingConv::ID
01266 ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
01267                                            bool isVarArg) const {
01268   switch (CC) {
01269   default:
01270     llvm_unreachable("Unsupported calling convention");
01271   case CallingConv::ARM_AAPCS:
01272   case CallingConv::ARM_APCS:
01273   case CallingConv::GHC:
01274     return CC;
01275   case CallingConv::ARM_AAPCS_VFP:
01276     return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
01277   case CallingConv::C:
01278     if (!Subtarget->isAAPCS_ABI())
01279       return CallingConv::ARM_APCS;
01280     else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() &&
01281              getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
01282              !isVarArg)
01283       return CallingConv::ARM_AAPCS_VFP;
01284     else
01285       return CallingConv::ARM_AAPCS;
01286   case CallingConv::Fast:
01287     if (!Subtarget->isAAPCS_ABI()) {
01288       if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
01289         return CallingConv::Fast;
01290       return CallingConv::ARM_APCS;
01291     } else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
01292       return CallingConv::ARM_AAPCS_VFP;
01293     else
01294       return CallingConv::ARM_AAPCS;
01295   }
01296 }
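// For instance, a plain C call on an AAPCS target with VFP2, a hard-float
// ABI, and a non-variadic callee is promoted to ARM_AAPCS_VFP so FP values
// are passed in VFP registers, while the same call made variadic falls back
// to the base ARM_AAPCS convention.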
01297 
01298 /// CCAssignFnForNode - Selects the correct CCAssignFn for the given
01299 /// CallingConvention.
01300 CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
01301                                                  bool Return,
01302                                                  bool isVarArg) const {
01303   switch (getEffectiveCallingConv(CC, isVarArg)) {
01304   default:
01305     llvm_unreachable("Unsupported calling convention");
01306   case CallingConv::ARM_APCS:
01307     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
01308   case CallingConv::ARM_AAPCS:
01309     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
01310   case CallingConv::ARM_AAPCS_VFP:
01311     return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
01312   case CallingConv::Fast:
01313     return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
01314   case CallingConv::GHC:
01315     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
01316   }
01317 }
01318 
01319 /// LowerCallResult - Lower the result values of a call into the
01320 /// appropriate copies out of appropriate physical registers.
01321 SDValue
01322 ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
01323                                    CallingConv::ID CallConv, bool isVarArg,
01324                                    const SmallVectorImpl<ISD::InputArg> &Ins,
01325                                    SDLoc dl, SelectionDAG &DAG,
01326                                    SmallVectorImpl<SDValue> &InVals,
01327                                    bool isThisReturn, SDValue ThisVal) const {
01328 
01329   // Assign locations to each value returned by this call.
01330   SmallVector<CCValAssign, 16> RVLocs;
01331   ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
01332                     *DAG.getContext(), Call);
01333   CCInfo.AnalyzeCallResult(Ins,
01334                            CCAssignFnForNode(CallConv, /* Return*/ true,
01335                                              isVarArg));
01336 
01337   // Copy all of the result registers out of their specified physreg.
01338   for (unsigned i = 0; i != RVLocs.size(); ++i) {
01339     CCValAssign VA = RVLocs[i];
01340 
01341     // Pass 'this' value directly from the argument to return value, to avoid
01342     // reg unit interference
01343     if (i == 0 && isThisReturn) {
01344       assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
01345              "unexpected return calling convention register assignment");
01346       InVals.push_back(ThisVal);
01347       continue;
01348     }
01349 
01350     SDValue Val;
01351     if (VA.needsCustom()) {
01352       // Handle f64 or half of a v2f64.
01353       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
01354                                       InFlag);
01355       Chain = Lo.getValue(1);
01356       InFlag = Lo.getValue(2);
01357       VA = RVLocs[++i]; // skip ahead to next loc
01358       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
01359                                       InFlag);
01360       Chain = Hi.getValue(1);
01361       InFlag = Hi.getValue(2);
01362       if (!Subtarget->isLittle())
01363         std::swap (Lo, Hi);
01364       Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
01365 
01366       if (VA.getLocVT() == MVT::v2f64) {
01367         SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
01368         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
01369                           DAG.getConstant(0, MVT::i32));
01370 
01371         VA = RVLocs[++i]; // skip ahead to next loc
01372         Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
01373         Chain = Lo.getValue(1);
01374         InFlag = Lo.getValue(2);
01375         VA = RVLocs[++i]; // skip ahead to next loc
01376         Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
01377         Chain = Hi.getValue(1);
01378         InFlag = Hi.getValue(2);
01379         if (!Subtarget->isLittle())
01380           std::swap (Lo, Hi);
01381         Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
01382         Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
01383                           DAG.getConstant(1, MVT::i32));
01384       }
01385     } else {
01386       Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
01387                                InFlag);
01388       Chain = Val.getValue(1);
01389       InFlag = Val.getValue(2);
01390     }
01391 
01392     switch (VA.getLocInfo()) {
01393     default: llvm_unreachable("Unknown loc info!");
01394     case CCValAssign::Full: break;
01395     case CCValAssign::BCvt:
01396       Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
01397       break;
01398     }
01399 
01400     InVals.push_back(Val);
01401   }
01402 
01403   return Chain;
01404 }
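// The custom-lowered cases mirror PassF64ArgInRegs on the caller side: an
// f64 result arrives as two i32 register copies that are recombined with
// ARMISD::VMOVDRR (with the halves swapped on big-endian targets), and a
// v2f64 result is rebuilt by inserting two such f64 values into an undef
// vector.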
01405 
01406 /// LowerMemOpCallTo - Store the argument to the stack.
01407 SDValue
01408 ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
01409                                     SDValue StackPtr, SDValue Arg,
01410                                     SDLoc dl, SelectionDAG &DAG,
01411                                     const CCValAssign &VA,
01412                                     ISD::ArgFlagsTy Flags) const {
01413   unsigned LocMemOffset = VA.getLocMemOffset();
01414   SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
01415   PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
01416   return DAG.getStore(Chain, dl, Arg, PtrOff,
01417                       MachinePointerInfo::getStack(LocMemOffset),
01418                       false, false, 0);
01419 }
01420 
01421 void ARMTargetLowering::PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
01422                                          SDValue Chain, SDValue &Arg,
01423                                          RegsToPassVector &RegsToPass,
01424                                          CCValAssign &VA, CCValAssign &NextVA,
01425                                          SDValue &StackPtr,
01426                                          SmallVectorImpl<SDValue> &MemOpChains,
01427                                          ISD::ArgFlagsTy Flags) const {
01428 
01429   SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
01430                               DAG.getVTList(MVT::i32, MVT::i32), Arg);
01431   unsigned id = Subtarget->isLittle() ? 0 : 1;
01432   RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));
01433 
01434   if (NextVA.isRegLoc())
01435     RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
01436   else {
01437     assert(NextVA.isMemLoc());
01438     if (!StackPtr.getNode())
01439       StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
01440 
01441     MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
01442                                            dl, DAG, NextVA,
01443                                            Flags));
01444   }
01445 }
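// An f64 argument is split into two i32 halves via ARMISD::VMOVRRD; result 0
// is the low word and result 1 the high word, so the isLittle() check above
// selects which half goes into the first (lower-numbered) register. If the
// second half has no register assigned, it is stored to the outgoing argument
// area through LowerMemOpCallTo instead.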
01446 
01447 /// LowerCall - Lower a call into a callseq_start <-
01448 /// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
01449 /// nodes.
01450 SDValue
01451 ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
01452                              SmallVectorImpl<SDValue> &InVals) const {
01453   SelectionDAG &DAG                     = CLI.DAG;
01454   SDLoc &dl                          = CLI.DL;
01455   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
01456   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
01457   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
01458   SDValue Chain                         = CLI.Chain;
01459   SDValue Callee                        = CLI.Callee;
01460   bool &isTailCall                      = CLI.IsTailCall;
01461   CallingConv::ID CallConv              = CLI.CallConv;
01462   bool doesNotRet                       = CLI.DoesNotReturn;
01463   bool isVarArg                         = CLI.IsVarArg;
01464 
01465   MachineFunction &MF = DAG.getMachineFunction();
01466   bool isStructRet    = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
01467   bool isThisReturn   = false;
01468   bool isSibCall      = false;
01469 
01470   // Disable tail calls if they're not supported.
01471   if (!Subtarget->supportsTailCall() || MF.getTarget().Options.DisableTailCalls)
01472     isTailCall = false;
01473 
01474   if (isTailCall) {
01475     // Check if it's really possible to do a tail call.
01476     isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
01477                     isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(),
01478                                                    Outs, OutVals, Ins, DAG);
01479     if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall())
01480       report_fatal_error("failed to perform tail call elimination on a call "
01481                          "site marked musttail");
01482     // We don't support GuaranteedTailCallOpt for ARM, only automatically
01483     // detected sibcalls.
01484     if (isTailCall) {
01485       ++NumTailCalls;
01486       isSibCall = true;
01487     }
01488   }
01489 
01490   // Analyze operands of the call, assigning locations to each operand.
01491   SmallVector<CCValAssign, 16> ArgLocs;
01492   ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
01493                     *DAG.getContext(), Call);
01494   CCInfo.AnalyzeCallOperands(Outs,
01495                              CCAssignFnForNode(CallConv, /* Return*/ false,
01496                                                isVarArg));
01497 
01498   // Get a count of how many bytes are to be pushed on the stack.
01499   unsigned NumBytes = CCInfo.getNextStackOffset();
01500 
01501   // For tail calls, memory operands are available in our caller's stack.
01502   if (isSibCall)
01503     NumBytes = 0;
01504 
01505   // Adjust the stack pointer for the new arguments...
01506   // These operations are automatically eliminated by the prolog/epilog pass
01507   if (!isSibCall)
01508     Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
01509                                  dl);
01510 
01511   SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
01512 
01513   RegsToPassVector RegsToPass;
01514   SmallVector<SDValue, 8> MemOpChains;
01515 
01516   // Walk the register/memloc assignments, inserting copies/loads.  In the case
01517   // of tail call optimization, arguments are handled later.
01518   for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
01519        i != e;
01520        ++i, ++realArgIdx) {
01521     CCValAssign &VA = ArgLocs[i];
01522     SDValue Arg = OutVals[realArgIdx];
01523     ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
01524     bool isByVal = Flags.isByVal();
01525 
01526     // Promote the value if needed.
01527     switch (VA.getLocInfo()) {
01528     default: llvm_unreachable("Unknown loc info!");
01529     case CCValAssign::Full: break;
01530     case CCValAssign::SExt:
01531       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
01532       break;
01533     case CCValAssign::ZExt:
01534       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
01535       break;
01536     case CCValAssign::AExt:
01537       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
01538       break;
01539     case CCValAssign::BCvt:
01540       Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
01541       break;
01542     }
01543 
01544     // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
01545     if (VA.needsCustom()) {
01546       if (VA.getLocVT() == MVT::v2f64) {
01547         SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
01548                                   DAG.getConstant(0, MVT::i32));
01549         SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
01550                                   DAG.getConstant(1, MVT::i32));
01551 
01552         PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
01553                          VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
01554 
01555         VA = ArgLocs[++i]; // skip ahead to next loc
01556         if (VA.isRegLoc()) {
01557           PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
01558                            VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
01559         } else {
01560           assert(VA.isMemLoc());
01561 
01562           MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
01563                                                  dl, DAG, VA, Flags));
01564         }
01565       } else {
01566         PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
01567                          StackPtr, MemOpChains, Flags);
01568       }
01569     } else if (VA.isRegLoc()) {
01570       if (realArgIdx == 0 && Flags.isReturned() && Outs[0].VT == MVT::i32) {
01571         assert(VA.getLocVT() == MVT::i32 &&
01572                "unexpected calling convention register assignment");
01573         assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
01574                "unexpected use of 'returned'");
01575         isThisReturn = true;
01576       }
01577       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
01578     } else if (isByVal) {
01579       assert(VA.isMemLoc());
01580       unsigned offset = 0;
01581 
01582       // If part of this byval aggregate was assigned to registers, load
01583       // that portion into the argument registers here.
01584       unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
01585       unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();
01586 
01587       if (CurByValIdx < ByValArgsCount) {
01588 
01589         unsigned RegBegin, RegEnd;
01590         CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
01591 
01592         EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
01593         unsigned int i, j;
01594         for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
01595           SDValue Const = DAG.getConstant(4*i, MVT::i32);
01596           SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
01597           SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
01598                                      MachinePointerInfo(),
01599                                      false, false, false,
01600                                      DAG.InferPtrAlignment(AddArg));
01601           MemOpChains.push_back(Load.getValue(1));
01602           RegsToPass.push_back(std::make_pair(j, Load));
01603         }
01604 
01605         // If the parameter size exceeds the register area, the "offset"
01606         // value lets us compute the stack slot for the remaining part.
01607         offset = RegEnd - RegBegin;
01608 
01609         CCInfo.nextInRegsParam();
01610       }
01611 
01612       if (Flags.getByValSize() > 4*offset) {
01613         unsigned LocMemOffset = VA.getLocMemOffset();
01614         SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset);
01615         SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr,
01616                                   StkPtrOff);
01617         SDValue SrcOffset = DAG.getIntPtrConstant(4*offset);
01618         SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset);
01619         SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset,
01620                                            MVT::i32);
01621         SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), MVT::i32);
01622 
01623         SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
01624         SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
01625         MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
01626                                           Ops));
01627       }
01628     } else if (!isSibCall) {
01629       assert(VA.isMemLoc());
01630 
01631       MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
01632                                              dl, DAG, VA, Flags));
01633     }
01634   }
01635 
01636   if (!MemOpChains.empty())
01637     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
01638 
01639   // Build a sequence of copy-to-reg nodes chained together with token chain
01640   // and flag operands which copy the outgoing args into the appropriate regs.
01641   SDValue InFlag;
01642   // Tail call byval lowering might overwrite argument registers so in case of
01643   // tail call optimization the copies to registers are lowered later.
01644   if (!isTailCall)
01645     for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
01646       Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
01647                                RegsToPass[i].second, InFlag);
01648       InFlag = Chain.getValue(1);
01649     }
01650 
01651   // For tail calls lower the arguments to the 'real' stack slot.
01652   if (isTailCall) {
01653     // Force all the incoming stack arguments to be loaded from the stack
01654     // before any new outgoing arguments are stored to the stack, because the
01655     // outgoing stack slots may alias the incoming argument stack slots, and
01656     // the alias isn't otherwise explicit. This is slightly more conservative
01657     // than necessary, because it means that each store effectively depends
01658     // on every argument instead of just those arguments it would clobber.
01659 
01660     // Do not flag preceding copytoreg stuff together with the following stuff.
01661     InFlag = SDValue();
01662     for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
01663       Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
01664                                RegsToPass[i].second, InFlag);
01665       InFlag = Chain.getValue(1);
01666     }
01667     InFlag = SDValue();
01668   }
01669 
01670   // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
01671   // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
01672   // node so that legalize doesn't hack it.
01673   bool isDirect = false;
01674   bool isARMFunc = false;
01675   bool isLocalARMFunc = false;
01676   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
01677 
01678   if (EnableARMLongCalls) {
01679     assert((Subtarget->isTargetWindows() ||
01680             getTargetMachine().getRelocationModel() == Reloc::Static) &&
01681            "long-calls with non-static relocation model!");
01682     // Handle a global address or an external symbol. If it's not one of
01683     // those, the target's already in a register, so we don't need to do
01684     // anything extra.
01685     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
01686       const GlobalValue *GV = G->getGlobal();
01687       // Create a constant pool entry for the callee address
01688       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
01689       ARMConstantPoolValue *CPV =
01690         ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);
01691 
01692       // Get the address of the callee into a register
01693       SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
01694       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
01695       Callee = DAG.getLoad(getPointerTy(), dl,
01696                            DAG.getEntryNode(), CPAddr,
01697                            MachinePointerInfo::getConstantPool(),
01698                            false, false, false, 0);
01699     } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
01700       const char *Sym = S->getSymbol();
01701 
01702       // Create a constant pool entry for the callee address
01703       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
01704       ARMConstantPoolValue *CPV =
01705         ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
01706                                       ARMPCLabelIndex, 0);
01707       // Get the address of the callee into a register
01708       SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
01709       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
01710       Callee = DAG.getLoad(getPointerTy(), dl,
01711                            DAG.getEntryNode(), CPAddr,
01712                            MachinePointerInfo::getConstantPool(),
01713                            false, false, false, 0);
01714     }
01715   } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
01716     const GlobalValue *GV = G->getGlobal();
01717     isDirect = true;
01718     bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
01719     bool isStub = (isExt && Subtarget->isTargetMachO()) &&
01720                    getTargetMachine().getRelocationModel() != Reloc::Static;
01721     isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
01722     // ARM call to a local ARM function is predicable.
01723     isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking);
01724     // tBX takes a register source operand.
01725     if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
01726       assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
01727       Callee = DAG.getNode(ARMISD::WrapperPIC, dl, getPointerTy(),
01728                            DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
01729                                                       0, ARMII::MO_NONLAZY));
01730       Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
01731                            MachinePointerInfo::getGOT(), false, false, true, 0);
01732     } else if (Subtarget->isTargetCOFF()) {
01733       assert(Subtarget->isTargetWindows() &&
01734              "Windows is the only supported COFF target");
01735       unsigned TargetFlags = GV->hasDLLImportStorageClass()
01736                                  ? ARMII::MO_DLLIMPORT
01737                                  : ARMII::MO_NO_FLAG;
01738       Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), /*Offset=*/0,
01739                                           TargetFlags);
01740       if (GV->hasDLLImportStorageClass())
01741         Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
01742                              DAG.getNode(ARMISD::Wrapper, dl, getPointerTy(),
01743                                          Callee), MachinePointerInfo::getGOT(),
01744                              false, false, false, 0);
01745     } else {
01746       // On ELF targets for PIC code, direct calls should go through the PLT
01747       unsigned OpFlags = 0;
01748       if (Subtarget->isTargetELF() &&
01749           getTargetMachine().getRelocationModel() == Reloc::PIC_)
01750         OpFlags = ARMII::MO_PLT;
01751       Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
01752     }
01753   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
01754     isDirect = true;
01755     bool isStub = Subtarget->isTargetMachO() &&
01756                   getTargetMachine().getRelocationModel() != Reloc::Static;
01757     isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
01758     // tBX takes a register source operand.
01759     const char *Sym = S->getSymbol();
01760     if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
01761       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
01762       ARMConstantPoolValue *CPV =
01763         ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
01764                                       ARMPCLabelIndex, 4);
01765       SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
01766       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
01767       Callee = DAG.getLoad(getPointerTy(), dl,
01768                            DAG.getEntryNode(), CPAddr,
01769                            MachinePointerInfo::getConstantPool(),
01770                            false, false, false, 0);
01771       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
01772       Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
01773                            getPointerTy(), Callee, PICLabel);
01774     } else {
01775       unsigned OpFlags = 0;
01776       // On ELF targets for PIC code, direct calls should go through the PLT
01777       if (Subtarget->isTargetELF() &&
01778                   getTargetMachine().getRelocationModel() == Reloc::PIC_)
01779         OpFlags = ARMII::MO_PLT;
01780       Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags);
01781     }
01782   }
01783 
01784   // FIXME: handle tail calls differently.
01785   unsigned CallOpc;
01786   bool HasMinSizeAttr = MF.getFunction()->getAttributes().hasAttribute(
01787       AttributeSet::FunctionIndex, Attribute::MinSize);
01788   if (Subtarget->isThumb()) {
01789     if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
01790       CallOpc = ARMISD::CALL_NOLINK;
01791     else
01792       CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
01793   } else {
01794     if (!isDirect && !Subtarget->hasV5TOps())
01795       CallOpc = ARMISD::CALL_NOLINK;
01796     else if (doesNotRet && isDirect && Subtarget->hasRAS() &&
01797                // Emit regular call when code size is the priority
01798                !HasMinSizeAttr)
01799       // "mov lr, pc; b _foo" to avoid confusing the RSP
01800       CallOpc = ARMISD::CALL_NOLINK;
01801     else
01802       CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
01803   }
01804 
01805   std::vector<SDValue> Ops;
01806   Ops.push_back(Chain);
01807   Ops.push_back(Callee);
01808 
01809   // Add argument registers to the end of the list so that they are known live
01810   // into the call.
01811   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
01812     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
01813                                   RegsToPass[i].second.getValueType()));
01814 
01815   // Add a register mask operand representing the call-preserved registers.
01816   if (!isTailCall) {
01817     const uint32_t *Mask;
01818     const TargetRegisterInfo *TRI =
01819         getTargetMachine().getSubtargetImpl()->getRegisterInfo();
01820     const ARMBaseRegisterInfo *ARI = static_cast<const ARMBaseRegisterInfo*>(TRI);
01821     if (isThisReturn) {
01822       // For 'this' returns, use the R0-preserving mask if applicable
01823       Mask = ARI->getThisReturnPreservedMask(CallConv);
01824       if (!Mask) {
01825         // Set isThisReturn to false if the calling convention is not one that
01826         // allows 'returned' to be modeled in this way, so LowerCallResult does
01827         // not try to pass 'this' straight through
01828         isThisReturn = false;
01829         Mask = ARI->getCallPreservedMask(CallConv);
01830       }
01831     } else
01832       Mask = ARI->getCallPreservedMask(CallConv);
01833 
01834     assert(Mask && "Missing call preserved mask for calling convention");
01835     Ops.push_back(DAG.getRegisterMask(Mask));
01836   }
01837 
01838   if (InFlag.getNode())
01839     Ops.push_back(InFlag);
01840 
01841   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
01842   if (isTailCall)
01843     return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
01844 
01845   // Returns a chain and a flag for retval copy to use.
01846   Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
01847   InFlag = Chain.getValue(1);
01848 
01849   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
01850                              DAG.getIntPtrConstant(0, true), InFlag, dl);
01851   if (!Ins.empty())
01852     InFlag = Chain.getValue(1);
01853 
01854   // Handle result values, copying them out of physregs into vregs that we
01855   // return.
01856   return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
01857                          InVals, isThisReturn,
01858                          isThisReturn ? OutVals[0] : SDValue());
01859 }
01860 
01861 /// HandleByVal - Every parameter *after* a byval parameter is passed
01862 /// on the stack.  Remember the next parameter register to allocate,
01863 /// and then confiscate the rest of the parameter registers to ensure
01864 /// this.
01865 void
01866 ARMTargetLowering::HandleByVal(
01867     CCState *State, unsigned &size, unsigned Align) const {
01868   unsigned reg = State->AllocateReg(GPRArgRegs, 4);
01869   assert((State->getCallOrPrologue() == Prologue ||
01870           State->getCallOrPrologue() == Call) &&
01871          "unhandled ParmContext");
01872 
01873   if ((ARM::R0 <= reg) && (reg <= ARM::R3)) {
01874     if (Subtarget->isAAPCS_ABI() && Align > 4) {
01875       unsigned AlignInRegs = Align / 4;
01876       unsigned Waste = (ARM::R4 - reg) % AlignInRegs;
01877       for (unsigned i = 0; i < Waste; ++i)
01878         reg = State->AllocateReg(GPRArgRegs, 4);
01879     }
01880     if (reg != 0) {
01881       unsigned excess = 4 * (ARM::R4 - reg);
01882 
01883       // Special case when NSAA != SP and the parameter size is greater than
01884       // the size of all remaining GPR regs. In that case we can't split the
01885       // parameter; we must send it to the stack. We also must set NCRN to R4,
01886       // so waste all remaining registers.
01887       const unsigned NSAAOffset = State->getNextStackOffset();
01888       if (Subtarget->isAAPCS_ABI() && NSAAOffset != 0 && size > excess) {
01889         while (State->AllocateReg(GPRArgRegs, 4))
01890           ;
01891         return;
01892       }
01893 
01894       // The first register for the byval parameter is the first register that
01895       // wasn't allocated before this method call, so it is "reg".
01896       // If the parameter is small enough to fit in the range [reg, r4), the
01897       // end (one past the last) register is reg + param-size-in-regs;
01898       // otherwise the parameter is split between registers and the stack, and
01899       // the end register is r4.
01900       unsigned ByValRegBegin = reg;
01901       unsigned ByValRegEnd = (size < excess) ? reg + size/4 : (unsigned)ARM::R4;
01902       State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
01903       // Note that the first register was already allocated at the beginning of
01904       // this method; allocate the remaining registers we need.
01905       for (unsigned i = reg+1; i != ByValRegEnd; ++i)
01906         State->AllocateReg(GPRArgRegs, 4);
01907       // A byval parameter that is split between registers and memory needs its
01908       // size truncated here.
01909       // In the case where the entire structure fits in registers, we set the
01910       // size in memory to zero.
01911       if (size < excess)
01912         size = 0;
01913       else
01914         size -= excess;
01915     }
01916   }
01917 }
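// Worked example (assuming r1 is the next free GPR and no arguments have been
// placed on the stack yet): for a 24-byte byval parameter, excess is
// 4 * (r4 - r1) = 12 bytes, so r1-r3 are claimed for the first 12 bytes,
// addInRegsParamInfo records the range [r1, r4), and "size" is reduced to the
// 12 bytes that still go on the stack.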
01918 
01919 /// MatchingStackOffset - Return true if the given stack call argument is
01920 /// already available in the same position (relatively) of the caller's
01921 /// incoming argument stack.
01922 static
01923 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
01924                          MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
01925                          const TargetInstrInfo *TII) {
01926   unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
01927   int FI = INT_MAX;
01928   if (Arg.getOpcode() == ISD::CopyFromReg) {
01929     unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
01930     if (!TargetRegisterInfo::isVirtualRegister(VR))
01931       return false;
01932     MachineInstr *Def = MRI->getVRegDef(VR);
01933     if (!Def)
01934       return false;
01935     if (!Flags.isByVal()) {
01936       if (!TII->isLoadFromStackSlot(Def, FI))
01937         return false;
01938     } else {
01939       return false;
01940     }
01941   } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
01942     if (Flags.isByVal())
01943       // ByVal argument is passed in as a pointer but it's now being
01944       // dereferenced. e.g.
01945       // define @foo(%struct.X* %A) {
01946       //   tail call @bar(%struct.X* byval %A)
01947       // }
01948       return false;
01949     SDValue Ptr = Ld->getBasePtr();
01950     FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
01951     if (!FINode)
01952       return false;
01953     FI = FINode->getIndex();
01954   } else
01955     return false;
01956 
01957   assert(FI != INT_MAX);
01958   if (!MFI->isFixedObjectIndex(FI))
01959     return false;
01960   return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
01961 }
01962 
01963 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
01964 /// for tail call optimization. Targets which want to do tail call
01965 /// optimization should implement this function.
01966 bool
01967 ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
01968                                                      CallingConv::ID CalleeCC,
01969                                                      bool isVarArg,
01970                                                      bool isCalleeStructRet,
01971                                                      bool isCallerStructRet,
01972                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
01973                                     const SmallVectorImpl<SDValue> &OutVals,
01974                                     const SmallVectorImpl<ISD::InputArg> &Ins,
01975                                                      SelectionDAG& DAG) const {
01976   const Function *CallerF = DAG.getMachineFunction().getFunction();
01977   CallingConv::ID CallerCC = CallerF->getCallingConv();
01978   bool CCMatch = CallerCC == CalleeCC;
01979 
01980   // Look for obvious safe cases to perform tail call optimization that do not
01981   // require ABI changes. This is what gcc calls sibcall.
01982 
01983   // Do not sibcall optimize vararg calls unless the call site is not passing
01984   // any arguments.
01985   if (isVarArg && !Outs.empty())
01986     return false;
01987 
01988   // Exception-handling functions need a special set of instructions to indicate
01989   // a return to the hardware. Tail-calling another function would probably
01990   // break this.
01991   if (CallerF->hasFnAttribute("interrupt"))
01992     return false;
01993 
01994   // Also avoid sibcall optimization if either caller or callee uses struct
01995   // return semantics.
01996   if (isCalleeStructRet || isCallerStructRet)
01997     return false;
01998 
01999   // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo::
02000   // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as
02001   // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation
02002   // support in the assembler and linker to be used. This would need to be
02003   // fixed to fully support tail calls in Thumb1.
02004   //
02005   // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take
02006   // LR.  This means if we need to reload LR, it takes extra instructions,
02007   // which outweighs the value of the tail call; but here we don't know yet
02008   // whether LR is going to be used.  Probably the right approach is to
02009   // generate the tail call here and turn it back into CALL/RET in
02010   // emitEpilogue if LR is used.
02011 
02012   // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
02013   // but we need to make sure there are enough registers; the only valid
02014   // registers are the 4 used for parameters.  We don't currently do this
02015   // case.
02016   if (Subtarget->isThumb1Only())
02017     return false;
02018 
02019   // Externally-defined functions with weak linkage should not be
02020   // tail-called on ARM when the OS does not support dynamic
02021   // pre-emption of symbols, as the AAELF spec requires normal calls
02022   // to undefined weak functions to be replaced with a NOP or jump to the
02023   // next instruction. The behaviour of branch instructions in this
02024   // situation (as used for tail calls) is implementation-defined, so we
02025   // cannot rely on the linker replacing the tail call with a return.
02026   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
02027     const GlobalValue *GV = G->getGlobal();
02028     if (GV->hasExternalWeakLinkage())
02029       return false;
02030   }
02031 
02032   // If the calling conventions do not match, then we'd better make sure the
02033   // results are returned in the same way as what the caller expects.
02034   if (!CCMatch) {
02035     SmallVector<CCValAssign, 16> RVLocs1;
02036     ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
02037                        *DAG.getContext(), Call);
02038     CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));
02039 
02040     SmallVector<CCValAssign, 16> RVLocs2;
02041     ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
02042                        *DAG.getContext(), Call);
02043     CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));
02044 
02045     if (RVLocs1.size() != RVLocs2.size())
02046       return false;
02047     for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
02048       if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
02049         return false;
02050       if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
02051         return false;
02052       if (RVLocs1[i].isRegLoc()) {
02053         if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
02054           return false;
02055       } else {
02056         if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
02057           return false;
02058       }
02059     }
02060   }
02061 
02062   // If Caller's vararg or byval argument has been split between registers and
02063   // stack, do not perform tail call, since part of the argument is in caller's
02064   // local frame.
02065   const ARMFunctionInfo *AFI_Caller = DAG.getMachineFunction().
02066                                       getInfo<ARMFunctionInfo>();
02067   if (AFI_Caller->getArgRegsSaveSize())
02068     return false;
02069 
02070   // If the callee takes no arguments then go on to check the results of the
02071   // call.
02072   if (!Outs.empty()) {
02073     // Check if stack adjustment is needed. For now, do not do this if any
02074     // argument is passed on the stack.
02075     SmallVector<CCValAssign, 16> ArgLocs;
02076     ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
02077                       *DAG.getContext(), Call);
02078     CCInfo.AnalyzeCallOperands(Outs,
02079                                CCAssignFnForNode(CalleeCC, false, isVarArg));
02080     if (CCInfo.getNextStackOffset()) {
02081       MachineFunction &MF = DAG.getMachineFunction();
02082 
02083       // Check if the arguments are already laid out in the right way as
02084       // the caller's fixed stack objects.
02085       MachineFrameInfo *MFI = MF.getFrameInfo();
02086       const MachineRegisterInfo *MRI = &MF.getRegInfo();
02087       const TargetInstrInfo *TII =
02088           getTargetMachine().getSubtargetImpl()->getInstrInfo();
02089       for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
02090            i != e;
02091            ++i, ++realArgIdx) {
02092         CCValAssign &VA = ArgLocs[i];
02093         EVT RegVT = VA.getLocVT();
02094         SDValue Arg = OutVals[realArgIdx];
02095         ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
02096         if (VA.getLocInfo() == CCValAssign::Indirect)
02097           return false;
02098         if (VA.needsCustom()) {
02099           // f64 and vector types are split into multiple registers or
02100           // register/stack-slot combinations.  The types will not match
02101           // the registers; give up on memory f64 refs until we figure
02102           // out what to do about this.
02103           if (!VA.isRegLoc())
02104             return false;
02105           if (!ArgLocs[++i].isRegLoc())
02106             return false;
02107           if (RegVT == MVT::v2f64) {
02108             if (!ArgLocs[++i].isRegLoc())
02109               return false;
02110             if (!ArgLocs[++i].isRegLoc())
02111               return false;
02112           }
02113         } else if (!VA.isRegLoc()) {
02114           if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
02115                                    MFI, MRI, TII))
02116             return false;
02117         }
02118       }
02119     }
02120   }
02121 
02122   return true;
02123 }
02124 
02125 bool
02126 ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
02127                                   MachineFunction &MF, bool isVarArg,
02128                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
02129                                   LLVMContext &Context) const {
02130   SmallVector<CCValAssign, 16> RVLocs;
02131   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
02132   return CCInfo.CheckReturn(Outs, CCAssignFnForNode(CallConv, /*Return=*/true,
02133                                                     isVarArg));
02134 }
02135 
02136 static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
02137                                     SDLoc DL, SelectionDAG &DAG) {
02138   const MachineFunction &MF = DAG.getMachineFunction();
02139   const Function *F = MF.getFunction();
02140 
02141   StringRef IntKind = F->getFnAttribute("interrupt").getValueAsString();
02142 
02143   // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
02144   // version of the "preferred return address". These offsets affect the return
02145   // instruction if this is a return from PL1 without hypervisor extensions.
02146   //    IRQ/FIQ: +4     "subs pc, lr, #4"
02147   //    SWI:     0      "subs pc, lr, #0"
02148   //    ABORT:   +4     "subs pc, lr, #4"
02149   //    UNDEF:   +4/+2  "subs pc, lr, #0"
02150   // UNDEF varies depending on whether the exception came from ARM or Thumb
02151   // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.
02152 
02153   int64_t LROffset;
02154   if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
02155       IntKind == "ABORT")
02156     LROffset = 4;
02157   else if (IntKind == "SWI" || IntKind == "UNDEF")
02158     LROffset = 0;
02159   else
02160     report_fatal_error("Unsupported interrupt attribute. If present, value "
02161                        "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
02162 
02163   RetOps.insert(RetOps.begin() + 1, DAG.getConstant(LROffset, MVT::i32, false));
02164 
02165   return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
02166 }
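// For example, a handler carrying the "interrupt" function attribute with
// value "IRQ" returns via "subs pc, lr, #4", while one with value "SWI"
// returns via "subs pc, lr, #0"; the constant inserted above becomes the
// immediate of that instruction.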
02167 
02168 SDValue
02169 ARMTargetLowering::LowerReturn(SDValue Chain,
02170                                CallingConv::ID CallConv, bool isVarArg,
02171                                const SmallVectorImpl<ISD::OutputArg> &Outs,
02172                                const SmallVectorImpl<SDValue> &OutVals,
02173                                SDLoc dl, SelectionDAG &DAG) const {
02174 
02175   // CCValAssign - represent the assignment of the return value to a location.
02176   SmallVector<CCValAssign, 16> RVLocs;
02177 
02178   // CCState - Info about the registers and stack slots.
02179   ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
02180                     *DAG.getContext(), Call);
02181 
02182   // Analyze outgoing return values.
02183   CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true,
02184                                                isVarArg));
02185 
02186   SDValue Flag;
02187   SmallVector<SDValue, 4> RetOps;
02188   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
02189   bool isLittleEndian = Subtarget->isLittle();
02190 
02191   MachineFunction &MF = DAG.getMachineFunction();
02192   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02193   AFI->setReturnRegsCount(RVLocs.size());
02194 
02195   // Copy the result values into the output registers.
02196   for (unsigned i = 0, realRVLocIdx = 0;
02197        i != RVLocs.size();
02198        ++i, ++realRVLocIdx) {
02199     CCValAssign &VA = RVLocs[i];
02200     assert(VA.isRegLoc() && "Can only return in registers!");
02201 
02202     SDValue Arg = OutVals[realRVLocIdx];
02203 
02204     switch (VA.getLocInfo()) {
02205     default: llvm_unreachable("Unknown loc info!");
02206     case CCValAssign::Full: break;
02207     case CCValAssign::BCvt:
02208       Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
02209       break;
02210     }
02211 
02212     if (VA.needsCustom()) {
02213       if (VA.getLocVT() == MVT::v2f64) {
02214         // Extract the first half and return it in two registers.
02215         SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
02216                                    DAG.getConstant(0, MVT::i32));
02217         SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
02218                                        DAG.getVTList(MVT::i32, MVT::i32), Half);
02219 
02220         Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
02221                                  HalfGPRs.getValue(isLittleEndian ? 0 : 1),
02222                                  Flag);
02223         Flag = Chain.getValue(1);
02224         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
02225         VA = RVLocs[++i]; // skip ahead to next loc
02226         Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
02227                                  HalfGPRs.getValue(isLittleEndian ? 1 : 0),
02228                                  Flag);
02229         Flag = Chain.getValue(1);
02230         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
02231         VA = RVLocs[++i]; // skip ahead to next loc
02232 
02233         // Extract the 2nd half and fall through to handle it as an f64 value.
02234         Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
02235                           DAG.getConstant(1, MVT::i32));
02236       }
02237       // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
02238       // available.
02239       SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
02240                                   DAG.getVTList(MVT::i32, MVT::i32), Arg);
02241       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
02242                                fmrrd.getValue(isLittleEndian ? 0 : 1),
02243                                Flag);
02244       Flag = Chain.getValue(1);
02245       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
02246       VA = RVLocs[++i]; // skip ahead to next loc
02247       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
02248                                fmrrd.getValue(isLittleEndian ? 1 : 0),
02249                                Flag);
02250     } else
02251       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
02252 
02253     // Guarantee that all emitted copies are stuck together by glue,
02254     // so they are not scheduled apart or separated by other instructions.
02255     Flag = Chain.getValue(1);
02256     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
02257   }
02258 
02259   // Update chain and glue.
02260   RetOps[0] = Chain;
02261   if (Flag.getNode())
02262     RetOps.push_back(Flag);
02263 
02264   // CPUs which aren't M-class use a special sequence to return from
02265   // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
02266   // though we use "subs pc, lr, #N").
02267   //
02268   // M-class CPUs actually use a normal return sequence with a special
02269   // (hardware-provided) value in LR, so the normal code path works.
02270   if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt") &&
02271       !Subtarget->isMClass()) {
02272     if (Subtarget->isThumb1Only())
02273       report_fatal_error("interrupt attribute is not supported in Thumb1");
02274     return LowerInterruptReturn(RetOps, dl, DAG);
02275   }
02276 
02277   return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps);
02278 }
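
// Illustrative sketch (not part of the original source): for a non-M-class
// handler such as
//
//   define void @irq_handler() "interrupt"="IRQ" { ... ret void }
//
// the RetOps assembled above are handed to LowerInterruptReturn, and the
// final return is expected to be an exception return along the lines of
// "subs pc, lr, #4" rather than the usual "bx lr"; the exact immediate
// depends on the kind of interrupt being handled.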
02279 
02280 bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
02281   if (N->getNumValues() != 1)
02282     return false;
02283   if (!N->hasNUsesOfValue(1, 0))
02284     return false;
02285 
02286   SDValue TCChain = Chain;
02287   SDNode *Copy = *N->use_begin();
02288   if (Copy->getOpcode() == ISD::CopyToReg) {
02289     // If the copy has a glue operand, we conservatively assume it isn't safe to
02290     // perform a tail call.
02291     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
02292       return false;
02293     TCChain = Copy->getOperand(0);
02294   } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
02295     SDNode *VMov = Copy;
02296     // f64 returned in a pair of GPRs.
02297     SmallPtrSet<SDNode*, 2> Copies;
02298     for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
02299          UI != UE; ++UI) {
02300       if (UI->getOpcode() != ISD::CopyToReg)
02301         return false;
02302       Copies.insert(*UI);
02303     }
02304     if (Copies.size() > 2)
02305       return false;
02306 
02307     for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
02308          UI != UE; ++UI) {
02309       SDValue UseChain = UI->getOperand(0);
02310       if (Copies.count(UseChain.getNode()))
02311         // Second CopyToReg
02312         Copy = *UI;
02313       else {
02314         // We are at the top of this chain.
02315         // If the copy has a glue operand, we conservatively assume it
02316         // isn't safe to perform a tail call.
02317         if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue)
02318           return false;
02319         // First CopyToReg
02320         TCChain = UseChain;
02321       }
02322     }
02323   } else if (Copy->getOpcode() == ISD::BITCAST) {
02324     // f32 returned in a single GPR.
02325     if (!Copy->hasOneUse())
02326       return false;
02327     Copy = *Copy->use_begin();
02328     if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
02329       return false;
02330     // If the copy has a glue operand, we conservatively assume it isn't safe to
02331     // perform a tail call.
02332     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
02333       return false;
02334     TCChain = Copy->getOperand(0);
02335   } else {
02336     return false;
02337   }
02338 
02339   bool HasRet = false;
02340   for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
02341        UI != UE; ++UI) {
02342     if (UI->getOpcode() != ARMISD::RET_FLAG &&
02343         UI->getOpcode() != ARMISD::INTRET_FLAG)
02344       return false;
02345     HasRet = true;
02346   }
02347 
02348   if (!HasRet)
02349     return false;
02350 
02351   Chain = TCChain;
02352   return true;
02353 }
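
// Illustrative sketch (shapes inferred from the checks above): the DAG
// patterns accepted are, roughly,
//
//   N -> CopyToReg -> RET_FLAG                   (value returned in a GPR)
//   N -> VMOVRRD -> CopyToReg x2 -> RET_FLAG     (f64 split into a GPR pair)
//   N -> BITCAST -> CopyToReg -> RET_FLAG        (f32 returned in a GPR)
//
// A glue operand on any CopyToReg, an extra user, or a non-return user makes
// the function return false.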
02354 
02355 bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
02356   if (!Subtarget->supportsTailCall())
02357     return false;
02358 
02359   if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
02360     return false;
02361 
02362   return !Subtarget->isThumb1Only();
02363 }
02364 
02365 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
02366 // their target counterparts wrapped in the ARMISD::Wrapper node. Suppose N is
02367 // one of the above-mentioned nodes. It has to be wrapped because otherwise
02368 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
02369 // be used to form an addressing mode. These wrapped nodes will be selected
02370 // into MOVi.
02371 static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
02372   EVT PtrVT = Op.getValueType();
02373   // FIXME there is no actual debug info here
02374   SDLoc dl(Op);
02375   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
02376   SDValue Res;
02377   if (CP->isMachineConstantPoolEntry())
02378     Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
02379                                     CP->getAlignment());
02380   else
02381     Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
02382                                     CP->getAlignment());
02383   return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
02384 }
02385 
02386 unsigned ARMTargetLowering::getJumpTableEncoding() const {
02387   return MachineJumpTableInfo::EK_Inline;
02388 }
02389 
02390 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
02391                                              SelectionDAG &DAG) const {
02392   MachineFunction &MF = DAG.getMachineFunction();
02393   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02394   unsigned ARMPCLabelIndex = 0;
02395   SDLoc DL(Op);
02396   EVT PtrVT = getPointerTy();
02397   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
02398   Reloc::Model RelocM = getTargetMachine().getRelocationModel();
02399   SDValue CPAddr;
02400   if (RelocM == Reloc::Static) {
02401     CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
02402   } else {
02403     unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
02404     ARMPCLabelIndex = AFI->createPICLabelUId();
02405     ARMConstantPoolValue *CPV =
02406       ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
02407                                       ARMCP::CPBlockAddress, PCAdj);
02408     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02409   }
02410   CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
02411   SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr,
02412                                MachinePointerInfo::getConstantPool(),
02413                                false, false, false, 0);
02414   if (RelocM == Reloc::Static)
02415     return Result;
02416   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02417   return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
02418 }
02419 
02420 // Lower ISD::GlobalTLSAddress using the "general dynamic" model
02421 SDValue
02422 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
02423                                                  SelectionDAG &DAG) const {
02424   SDLoc dl(GA);
02425   EVT PtrVT = getPointerTy();
02426   unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
02427   MachineFunction &MF = DAG.getMachineFunction();
02428   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02429   unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
02430   ARMConstantPoolValue *CPV =
02431     ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
02432                                     ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
02433   SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02434   Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
02435   Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument,
02436                          MachinePointerInfo::getConstantPool(),
02437                          false, false, false, 0);
02438   SDValue Chain = Argument.getValue(1);
02439 
02440   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02441   Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
02442 
02443   // call __tls_get_addr.
02444   ArgListTy Args;
02445   ArgListEntry Entry;
02446   Entry.Node = Argument;
02447   Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
02448   Args.push_back(Entry);
02449 
02450   // FIXME: is there useful debug info available here?
02451   TargetLowering::CallLoweringInfo CLI(DAG);
02452   CLI.setDebugLoc(dl).setChain(Chain)
02453     .setCallee(CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
02454                DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args),
02455                0);
02456 
02457   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
02458   return CallResult.first;
02459 }
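
// Illustrative sketch (assumed, PIC ELF): for
//
//   @x = external thread_local global i32
//
// the general-dynamic path above builds a PC-relative constant-pool entry
// describing x with a TLSGD relocation, applies the PIC label via PIC_ADD,
// and then calls __tls_get_addr with that address as the single argument;
// the call's result is the address of x for the current thread.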
02460 
02461 // Lower ISD::GlobalTLSAddress using the "initial exec" or
02462 // "local exec" model.
02463 SDValue
02464 ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
02465                                         SelectionDAG &DAG,
02466                                         TLSModel::Model model) const {
02467   const GlobalValue *GV = GA->getGlobal();
02468   SDLoc dl(GA);
02469   SDValue Offset;
02470   SDValue Chain = DAG.getEntryNode();
02471   EVT PtrVT = getPointerTy();
02472   // Get the Thread Pointer
02473   SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
02474 
02475   if (model == TLSModel::InitialExec) {
02476     MachineFunction &MF = DAG.getMachineFunction();
02477     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02478     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
02479     // Initial exec model.
02480     unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
02481     ARMConstantPoolValue *CPV =
02482       ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
02483                                       ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
02484                                       true);
02485     Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02486     Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
02487     Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
02488                          MachinePointerInfo::getConstantPool(),
02489                          false, false, false, 0);
02490     Chain = Offset.getValue(1);
02491 
02492     SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02493     Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
02494 
02495     Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
02496                          MachinePointerInfo::getConstantPool(),
02497                          false, false, false, 0);
02498   } else {
02499     // local exec model
02500     assert(model == TLSModel::LocalExec);
02501     ARMConstantPoolValue *CPV =
02502       ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
02503     Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02504     Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
02505     Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
02506                          MachinePointerInfo::getConstantPool(),
02507                          false, false, false, 0);
02508   }
02509 
02510   // The address of the thread-local variable is the sum of the thread
02511   // pointer and the offset of the variable.
02512   return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
02513 }
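
// Illustrative sketch (assumed lowering, not taken from this file): for
//
//   @y = thread_local(localexec) global i32 0
//
// the local-exec path above reduces to roughly
//
//   mrc p15, 0, r0, c13, c0, 3    @ ARMISD::THREAD_POINTER
//   ldr r1, .LCPI0_0              @ constant pool: y(TPOFF)
//   add r0, r0, r1                @ ISD::ADD of thread pointer and offset
//
// while the initial-exec path performs one extra PIC-relative load to fetch
// the GOT-resident offset before the final add.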
02514 
02515 SDValue
02516 ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
02517   // TODO: implement the "local dynamic" model
02518   assert(Subtarget->isTargetELF() &&
02519          "TLS not implemented for non-ELF targets");
02520   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
02521 
02522   TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());
02523 
02524   switch (model) {
02525     case TLSModel::GeneralDynamic:
02526     case TLSModel::LocalDynamic:
02527       return LowerToTLSGeneralDynamicModel(GA, DAG);
02528     case TLSModel::InitialExec:
02529     case TLSModel::LocalExec:
02530       return LowerToTLSExecModels(GA, DAG, model);
02531   }
02532   llvm_unreachable("bogus TLS model");
02533 }
02534 
02535 SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
02536                                                  SelectionDAG &DAG) const {
02537   EVT PtrVT = getPointerTy();
02538   SDLoc dl(Op);
02539   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
02540   if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
02541     bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
02542     ARMConstantPoolValue *CPV =
02543       ARMConstantPoolConstant::Create(GV,
02544                                       UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);
02545     SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02546     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
02547     SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
02548                                  CPAddr,
02549                                  MachinePointerInfo::getConstantPool(),
02550                                  false, false, false, 0);
02551     SDValue Chain = Result.getValue(1);
02552     SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
02553     Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT);
02554     if (!UseGOTOFF)
02555       Result = DAG.getLoad(PtrVT, dl, Chain, Result,
02556                            MachinePointerInfo::getGOT(),
02557                            false, false, false, 0);
02558     return Result;
02559   }
02560 
02561   // If we have T2 ops, we can materialize the address directly via a
02562   // movt/movw pair. This is always cheaper.
02563   if (Subtarget->useMovt(DAG.getMachineFunction())) {
02564     ++NumMovwMovt;
02565     // FIXME: Once remat is capable of dealing with instructions with register
02566     // operands, expand this into two nodes.
02567     return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
02568                        DAG.getTargetGlobalAddress(GV, dl, PtrVT));
02569   } else {
02570     SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
02571     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
02572     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
02573                        MachinePointerInfo::getConstantPool(),
02574                        false, false, false, 0);
02575   }
02576 }
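
// Illustrative sketch (assumed): in a non-PIC build on a subtarget where
// useMovt() is true, the wrapped TargetGlobalAddress above is eventually
// selected into a movw/movt pair, roughly
//
//   movw r0, :lower16:g
//   movt r0, :upper16:g
//
// Without movw/movt the address is instead loaded from the constant pool,
// and the PIC path above goes through a GOT or GOTOFF entry plus the global
// offset table base.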
02577 
02578 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
02579                                                     SelectionDAG &DAG) const {
02580   EVT PtrVT = getPointerTy();
02581   SDLoc dl(Op);
02582   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
02583   Reloc::Model RelocM = getTargetMachine().getRelocationModel();
02584 
02585   if (Subtarget->useMovt(DAG.getMachineFunction()))
02586     ++NumMovwMovt;
02587 
02588   // FIXME: Once remat is capable of dealing with instructions with register
02589   // operands, expand this into multiple nodes
02590   unsigned Wrapper =
02591       RelocM == Reloc::PIC_ ? ARMISD::WrapperPIC : ARMISD::Wrapper;
02592 
02593   SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
02594   SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);
02595 
02596   if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
02597     Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
02598                          MachinePointerInfo::getGOT(), false, false, false, 0);
02599   return Result;
02600 }
02601 
02602 SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
02603                                                      SelectionDAG &DAG) const {
02604   assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
02605   assert(Subtarget->useMovt(DAG.getMachineFunction()) &&
02606          "Windows on ARM expects to use movw/movt");
02607 
02608   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
02609   const ARMII::TOF TargetFlags =
02610     (GV->hasDLLImportStorageClass() ? ARMII::MO_DLLIMPORT : ARMII::MO_NO_FLAG);
02611   EVT PtrVT = getPointerTy();
02612   SDValue Result;
02613   SDLoc DL(Op);
02614 
02615   ++NumMovwMovt;
02616 
02617   // FIXME: Once remat is capable of dealing with instructions with register
02618   // operands, expand this into two nodes.
02619   Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
02620                        DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*Offset=*/0,
02621                                                   TargetFlags));
02622   if (GV->hasDLLImportStorageClass())
02623     Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
02624                          MachinePointerInfo::getGOT(), false, false, false, 0);
02625   return Result;
02626 }
02627 
02628 SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
02629                                                     SelectionDAG &DAG) const {
02630   assert(Subtarget->isTargetELF() &&
02631          "GLOBAL OFFSET TABLE not implemented for non-ELF targets");
02632   MachineFunction &MF = DAG.getMachineFunction();
02633   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02634   unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
02635   EVT PtrVT = getPointerTy();
02636   SDLoc dl(Op);
02637   unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
02638   ARMConstantPoolValue *CPV =
02639     ARMConstantPoolSymbol::Create(*DAG.getContext(), "_GLOBAL_OFFSET_TABLE_",
02640                                   ARMPCLabelIndex, PCAdj);
02641   SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02642   CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
02643   SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
02644                                MachinePointerInfo::getConstantPool(),
02645                                false, false, false, 0);
02646   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02647   return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
02648 }
02649 
02650 SDValue
02651 ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
02652   SDLoc dl(Op);
02653   SDValue Val = DAG.getConstant(0, MVT::i32);
02654   return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
02655                      DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
02656                      Op.getOperand(1), Val);
02657 }
02658 
02659 SDValue
02660 ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
02661   SDLoc dl(Op);
02662   return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
02663                      Op.getOperand(1), DAG.getConstant(0, MVT::i32));
02664 }
02665 
02666 SDValue
02667 ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
02668                                           const ARMSubtarget *Subtarget) const {
02669   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
02670   SDLoc dl(Op);
02671   switch (IntNo) {
02672   default: return SDValue();    // Don't custom lower most intrinsics.
02673   case Intrinsic::arm_rbit: {
02674     assert(Op.getOperand(1).getValueType() == MVT::i32 &&
02675            "RBIT intrinsic must have i32 type!");
02676     return DAG.getNode(ARMISD::RBIT, dl, MVT::i32, Op.getOperand(1));
02677   }
02678   case Intrinsic::arm_thread_pointer: {
02679     EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02680     return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
02681   }
02682   case Intrinsic::eh_sjlj_lsda: {
02683     MachineFunction &MF = DAG.getMachineFunction();
02684     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02685     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
02686     EVT PtrVT = getPointerTy();
02687     Reloc::Model RelocM = getTargetMachine().getRelocationModel();
02688     SDValue CPAddr;
02689     unsigned PCAdj = (RelocM != Reloc::PIC_)
02690       ? 0 : (Subtarget->isThumb() ? 4 : 8);
02691     ARMConstantPoolValue *CPV =
02692       ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex,
02693                                       ARMCP::CPLSDA, PCAdj);
02694     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
02695     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
02696     SDValue Result =
02697       DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
02698                   MachinePointerInfo::getConstantPool(),
02699                   false, false, false, 0);
02700 
02701     if (RelocM == Reloc::PIC_) {
02702       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
02703       Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
02704     }
02705     return Result;
02706   }
02707   case Intrinsic::arm_neon_vmulls:
02708   case Intrinsic::arm_neon_vmullu: {
02709     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
02710       ? ARMISD::VMULLs : ARMISD::VMULLu;
02711     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
02712                        Op.getOperand(1), Op.getOperand(2));
02713   }
02714   }
02715 }
02716 
02717 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
02718                                  const ARMSubtarget *Subtarget) {
02719   // FIXME: handle "fence singlethread" more efficiently.
02720   SDLoc dl(Op);
02721   if (!Subtarget->hasDataBarrier()) {
02722     // Some ARMv6 cpus can support data barriers with an mcr instruction.
02723     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
02724     // here.
02725     assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
02726            "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
02727     return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
02728                        DAG.getConstant(0, MVT::i32));
02729   }
02730 
02731   ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
02732   AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
02733   ARM_MB::MemBOpt Domain = ARM_MB::ISH;
02734   if (Subtarget->isMClass()) {
02735     // Only a full system barrier exists in the M-class architectures.
02736     Domain = ARM_MB::SY;
02737   } else if (Subtarget->isSwift() && Ord == Release) {
02738     // Swift happens to implement ISHST barriers in a way that's compatible with
02739     // Release semantics but weaker than ISH so we'd be fools not to use
02740     // it. Beware: other processors probably don't!
02741     Domain = ARM_MB::ISHST;
02742   }
02743 
02744   return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
02745                      DAG.getConstant(Intrinsic::arm_dmb, MVT::i32),
02746                      DAG.getConstant(Domain, MVT::i32));
02747 }
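
// Illustrative sketch (assumed): on an A-class ARMv7 core,
//
//   fence seq_cst
//
// takes the hasDataBarrier() path above and becomes "dmb ish"; on an M-class
// core the same fence becomes "dmb sy" (only the full-system domain exists
// there), and on Swift a release-only fence may be weakened to "dmb ishst"
// as described in the comment above.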
02748 
02749 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
02750                              const ARMSubtarget *Subtarget) {
02751   // ARM pre-v5TE and Thumb1 do not have preload instructions.
02752   if (!(Subtarget->isThumb2() ||
02753         (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
02754     // Just preserve the chain.
02755     return Op.getOperand(0);
02756 
02757   SDLoc dl(Op);
02758   unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
02759   if (!isRead &&
02760       (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
02761     // ARMv7 with MP extension has PLDW.
02762     return Op.getOperand(0);
02763 
02764   unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
02765   if (Subtarget->isThumb()) {
02766     // Invert the bits.
02767     isRead = ~isRead & 1;
02768     isData = ~isData & 1;
02769   }
02770 
02771   return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
02772                      Op.getOperand(1), DAG.getConstant(isRead, MVT::i32),
02773                      DAG.getConstant(isData, MVT::i32));
02774 }
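
// Illustrative sketch (assumed): on ARMv7,
//
//   call void @llvm.prefetch(i8* %p, i32 0, i32 3, i32 1)   ; read, data
//
// becomes "pld [rN]". A write prefetch (second operand i32 1) becomes
// "pldw [rN]" only when the MP extension is present; otherwise the node is
// dropped and just the chain is preserved, as above.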
02775 
02776 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
02777   MachineFunction &MF = DAG.getMachineFunction();
02778   ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
02779 
02780   // vastart just stores the address of the VarArgsFrameIndex slot into the
02781   // memory location argument.
02782   SDLoc dl(Op);
02783   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
02784   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
02785   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
02786   return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
02787                       MachinePointerInfo(SV), false, false, 0);
02788 }
02789 
02790 SDValue
02791 ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
02792                                         SDValue &Root, SelectionDAG &DAG,
02793                                         SDLoc dl) const {
02794   MachineFunction &MF = DAG.getMachineFunction();
02795   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02796 
02797   const TargetRegisterClass *RC;
02798   if (AFI->isThumb1OnlyFunction())
02799     RC = &ARM::tGPRRegClass;
02800   else
02801     RC = &ARM::GPRRegClass;
02802 
02803   // Transform the arguments stored in physical registers into virtual ones.
02804   unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
02805   SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
02806 
02807   SDValue ArgValue2;
02808   if (NextVA.isMemLoc()) {
02809     MachineFrameInfo *MFI = MF.getFrameInfo();
02810     int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true);
02811 
02812     // Create load node to retrieve arguments from the stack.
02813     SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
02814     ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
02815                             MachinePointerInfo::getFixedStack(FI),
02816                             false, false, false, 0);
02817   } else {
02818     Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
02819     ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
02820   }
02821   if (!Subtarget->isLittle())
02822     std::swap (ArgValue, ArgValue2);
02823   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
02824 }
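
// Illustrative sketch (assumed AAPCS soft-float argument passing): a double
// argument whose halves arrive in R0 and R1 is reassembled above as
//
//   lo = CopyFromReg R0            ; first live-in GPR
//   hi = CopyFromReg R1            ; second GPR, or a 4-byte stack load
//   d  = ARMISD::VMOVDRR lo, hi
//
// with the two halves swapped first on big-endian subtargets.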
02825 
02826 void
02827 ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
02828                                   unsigned InRegsParamRecordIdx,
02829                                   unsigned ArgSize,
02830                                   unsigned &ArgRegsSize,
02831                                   unsigned &ArgRegsSaveSize)
02832   const {
02833   unsigned NumGPRs;
02834   if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
02835     unsigned RBegin, REnd;
02836     CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
02837     NumGPRs = REnd - RBegin;
02838   } else {
02839     unsigned int firstUnalloced;
02840     firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs,
02841                                                 sizeof(GPRArgRegs) /
02842                                                 sizeof(GPRArgRegs[0]));
02843     NumGPRs = (firstUnalloced <= 3) ? (4 - firstUnalloced) : 0;
02844   }
02845 
02846   unsigned Align = MF.getTarget()
02847                        .getSubtargetImpl()
02848                        ->getFrameLowering()
02849                        ->getStackAlignment();
02850   ArgRegsSize = NumGPRs * 4;
02851 
02852   // If parameter is split between stack and GPRs...
02853   if (NumGPRs && Align > 4 &&
02854       (ArgRegsSize < ArgSize ||
02855         InRegsParamRecordIdx >= CCInfo.getInRegsParamsCount())) {
02856     // Add padding for the part of the parameter recovered from GPRs.  For
02857     // example, if Align == 8, its last byte must be at address K*8 - 1.
02858     // This is needed because the remaining (stack) part of the parameter
02859     // is stack-aligned, and the "GPRs head" must be attached to it
02860     // without gaps:
02861     // Stack:
02862     // |---- 8 bytes block ----| |---- 8 bytes block ----| |---- 8 bytes...
02863     // [ [padding] [GPRs head] ] [        Tail passed via stack       ....
02864     //
02865     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02866     unsigned Padding =
02867         OffsetToAlignment(ArgRegsSize + AFI->getArgRegsSaveSize(), Align);
02868     ArgRegsSaveSize = ArgRegsSize + Padding;
02869   } else
02870     // We don't need to extend the regs save size for byval parameters if
02871     // they are passed entirely in GPRs.
02872     ArgRegsSaveSize = ArgRegsSize;
02873 }
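
// Worked example (illustrative, assuming an 8-byte stack alignment and no
// previously saved argument registers): a byval parameter split so that
// NumGPRs == 3 gives ArgRegsSize = 3 * 4 = 12. OffsetToAlignment(12, 8) == 4,
// so 4 bytes of padding are added and ArgRegsSaveSize becomes 16, keeping the
// "GPRs head" flush against the 8-byte-aligned stack tail of the parameter.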
02874 
02875 // The remaining GPRs hold either the beginning of variable-argument
02876 // data, or the beginning of an aggregate passed by value (usually
02877 // byval).  Either way, we allocate stack slots adjacent to the data
02878 // provided by our caller, and store the unallocated registers there.
02879 // If this is a variadic function, the va_list pointer will begin with
02880 // these values; otherwise, this reassembles a (byval) structure that
02881 // was split between registers and memory.
02882 // Return: the frame index that the registers were stored into.
02883 int
02884 ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
02885                                   SDLoc dl, SDValue &Chain,
02886                                   const Value *OrigArg,
02887                                   unsigned InRegsParamRecordIdx,
02888                                   unsigned OffsetFromOrigArg,
02889                                   unsigned ArgOffset,
02890                                   unsigned ArgSize,
02891                                   bool ForceMutable,
02892                                   unsigned ByValStoreOffset,
02893                                   unsigned TotalArgRegsSaveSize) const {
02894 
02895   // Currently, two use cases are possible:
02896   // Case #1. A non-vararg function where we meet the first byval parameter.
02897   //          Set up the first unallocated register as the first byval
02898   //          register and consume all remaining registers
02899   //          (these two actions are performed by the HandleByVal method).
02900   //          Then, here, we initialize the stack frame with
02901   //          "store-reg" instructions.
02902   // Case #2. A vararg function that doesn't contain byval parameters.
02903   //          The same: consume all remaining unallocated registers and
02904   //          initialize the stack frame.
02905 
02906   MachineFunction &MF = DAG.getMachineFunction();
02907   MachineFrameInfo *MFI = MF.getFrameInfo();
02908   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02909   unsigned firstRegToSaveIndex, lastRegToSaveIndex;
02910   unsigned RBegin, REnd;
02911   if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
02912     CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
02913     firstRegToSaveIndex = RBegin - ARM::R0;
02914     lastRegToSaveIndex = REnd - ARM::R0;
02915   } else {
02916     firstRegToSaveIndex = CCInfo.getFirstUnallocated
02917       (GPRArgRegs, array_lengthof(GPRArgRegs));
02918     lastRegToSaveIndex = 4;
02919   }
02920 
02921   unsigned ArgRegsSize, ArgRegsSaveSize;
02922   computeRegArea(CCInfo, MF, InRegsParamRecordIdx, ArgSize,
02923                  ArgRegsSize, ArgRegsSaveSize);
02924 
02925   // Store any byval regs to their spots on the stack so that they may be
02926   // loaded by dereferencing the result of the formal parameter pointer or
02927   // va_next. Note: once the stack area for byval/varargs registers has
02928   // been initialized, it can't be initialized again.
02929   if (ArgRegsSaveSize) {
02930     unsigned Padding = ArgRegsSaveSize - ArgRegsSize;
02931 
02932     if (Padding) {
02933       assert(AFI->getStoredByValParamsPadding() == 0 &&
02934              "The only parameter may be padded.");
02935       AFI->setStoredByValParamsPadding(Padding);
02936     }
02937 
02938     int FrameIndex = MFI->CreateFixedObject(ArgRegsSaveSize,
02939                                             Padding +
02940                                               ByValStoreOffset -
02941                                               (int64_t)TotalArgRegsSaveSize,
02942                                             false);
02943     SDValue FIN = DAG.getFrameIndex(FrameIndex, getPointerTy());
02944     if (Padding) {
02945        MFI->CreateFixedObject(Padding,
02946                               ArgOffset + ByValStoreOffset -
02947                                 (int64_t)ArgRegsSaveSize,
02948                               false);
02949     }
02950 
02951     SmallVector<SDValue, 4> MemOps;
02952     for (unsigned i = 0; firstRegToSaveIndex < lastRegToSaveIndex;
02953          ++firstRegToSaveIndex, ++i) {
02954       const TargetRegisterClass *RC;
02955       if (AFI->isThumb1OnlyFunction())
02956         RC = &ARM::tGPRRegClass;
02957       else
02958         RC = &ARM::GPRRegClass;
02959 
02960       unsigned VReg = MF.addLiveIn(GPRArgRegs[firstRegToSaveIndex], RC);
02961       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
02962       SDValue Store =
02963         DAG.getStore(Val.getValue(1), dl, Val, FIN,
02964                      MachinePointerInfo(OrigArg, OffsetFromOrigArg + 4*i),
02965                      false, false, 0);
02966       MemOps.push_back(Store);
02967       FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
02968                         DAG.getConstant(4, getPointerTy()));
02969     }
02970 
02971     AFI->setArgRegsSaveSize(ArgRegsSaveSize + AFI->getArgRegsSaveSize());
02972 
02973     if (!MemOps.empty())
02974       Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
02975     return FrameIndex;
02976   } else {
02977     if (ArgSize == 0) {
02978       // We cannot allocate a zero-byte object for the first variadic argument,
02979       // so just make up a size.
02980       ArgSize = 4;
02981     }
02982     // This will point to the next argument passed via stack.
02983     return MFI->CreateFixedObject(
02984       ArgSize, ArgOffset, !ForceMutable);
02985   }
02986 }
02987 
02988 // Set up the stack frame that the va_list pointer will start from.
02989 void
02990 ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
02991                                         SDLoc dl, SDValue &Chain,
02992                                         unsigned ArgOffset,
02993                                         unsigned TotalArgRegsSaveSize,
02994                                         bool ForceMutable) const {
02995   MachineFunction &MF = DAG.getMachineFunction();
02996   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
02997 
02998   // Try to store any remaining integer argument regs
02999   // to their spots on the stack so that they may be loaded by dereferencing
03000   // the result of va_next.
03001   // If there are no regs to be stored, just point past the last
03002   // argument passed via the stack.
03003   int FrameIndex =
03004     StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
03005                    CCInfo.getInRegsParamsCount(), 0, ArgOffset, 0, ForceMutable,
03006                    0, TotalArgRegsSaveSize);
03007 
03008   AFI->setVarArgsFrameIndex(FrameIndex);
03009 }
03010 
03011 SDValue
03012 ARMTargetLowering::LowerFormalArguments(SDValue Chain,
03013                                         CallingConv::ID CallConv, bool isVarArg,
03014                                         const SmallVectorImpl<ISD::InputArg>
03015                                           &Ins,
03016                                         SDLoc dl, SelectionDAG &DAG,
03017                                         SmallVectorImpl<SDValue> &InVals)
03018                                           const {
03019   MachineFunction &MF = DAG.getMachineFunction();
03020   MachineFrameInfo *MFI = MF.getFrameInfo();
03021 
03022   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
03023 
03024   // Assign locations to all of the incoming arguments.
03025   SmallVector<CCValAssign, 16> ArgLocs;
03026   ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
03027                     *DAG.getContext(), Prologue);
03028   CCInfo.AnalyzeFormalArguments(Ins,
03029                                 CCAssignFnForNode(CallConv, /* Return*/ false,
03030                                                   isVarArg));
03031 
03032   SmallVector<SDValue, 16> ArgValues;
03033   int lastInsIndex = -1;
03034   SDValue ArgValue;
03035   Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
03036   unsigned CurArgIdx = 0;
03037 
03038   // Initially ArgRegsSaveSize is zero.
03039   // Then we increase this value each time we meet a byval parameter.
03040   // We also increase this value in the case of a varargs function.
03041   AFI->setArgRegsSaveSize(0);
03042 
03043   unsigned ByValStoreOffset = 0;
03044   unsigned TotalArgRegsSaveSize = 0;
03045   unsigned ArgRegsSaveSizeMaxAlign = 4;
03046 
03047   // Calculate the amount of stack space that we need to allocate to store
03048   // byval and variadic arguments that are passed in registers.
03049   // We need to know this before we allocate the first byval or variadic
03050   // argument, as they will be allocated a stack slot below the CFA (Canonical
03051   // Frame Address, the stack pointer at entry to the function).
03052   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
03053     CCValAssign &VA = ArgLocs[i];
03054     if (VA.isMemLoc()) {
03055       int index = VA.getValNo();
03056       if (index != lastInsIndex) {
03057         ISD::ArgFlagsTy Flags = Ins[index].Flags;
03058         if (Flags.isByVal()) {
03059           unsigned ExtraArgRegsSize;
03060           unsigned ExtraArgRegsSaveSize;
03061           computeRegArea(CCInfo, MF, CCInfo.getInRegsParamsProcessed(),
03062                          Flags.getByValSize(),
03063                          ExtraArgRegsSize, ExtraArgRegsSaveSize);
03064 
03065           TotalArgRegsSaveSize += ExtraArgRegsSaveSize;
03066           if (Flags.getByValAlign() > ArgRegsSaveSizeMaxAlign)
03067               ArgRegsSaveSizeMaxAlign = Flags.getByValAlign();
03068           CCInfo.nextInRegsParam();
03069         }
03070         lastInsIndex = index;
03071       }
03072     }
03073   }
03074   CCInfo.rewindByValRegsInfo();
03075   lastInsIndex = -1;
03076   if (isVarArg && MFI->hasVAStart()) {
03077     unsigned ExtraArgRegsSize;
03078     unsigned ExtraArgRegsSaveSize;
03079     computeRegArea(CCInfo, MF, CCInfo.getInRegsParamsCount(), 0,
03080                    ExtraArgRegsSize, ExtraArgRegsSaveSize);
03081     TotalArgRegsSaveSize += ExtraArgRegsSaveSize;
03082   }
03083   // If the arg regs save area contains N-byte aligned values, the
03084   // bottom of it must be at least N-byte aligned.
03085   TotalArgRegsSaveSize = RoundUpToAlignment(TotalArgRegsSaveSize, ArgRegsSaveSizeMaxAlign);
03086   TotalArgRegsSaveSize = std::min(TotalArgRegsSaveSize, 16U);
03087 
03088   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
03089     CCValAssign &VA = ArgLocs[i];
03090     std::advance(CurOrigArg, Ins[VA.getValNo()].OrigArgIndex - CurArgIdx);
03091     CurArgIdx = Ins[VA.getValNo()].OrigArgIndex;
03092     // Arguments stored in registers.
03093     if (VA.isRegLoc()) {
03094       EVT RegVT = VA.getLocVT();
03095 
03096       if (VA.needsCustom()) {
03097         // f64 and vector types are split up into multiple registers or
03098         // combinations of registers and stack slots.
03099         if (VA.getLocVT() == MVT::v2f64) {
03100           SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
03101                                                    Chain, DAG, dl);
03102           VA = ArgLocs[++i]; // skip ahead to next loc
03103           SDValue ArgValue2;
03104           if (VA.isMemLoc()) {
03105             int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true);
03106             SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
03107             ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
03108                                     MachinePointerInfo::getFixedStack(FI),
03109                                     false, false, false, 0);
03110           } else {
03111             ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
03112                                              Chain, DAG, dl);
03113           }
03114           ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
03115           ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
03116                                  ArgValue, ArgValue1, DAG.getIntPtrConstant(0));
03117           ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
03118                                  ArgValue, ArgValue2, DAG.getIntPtrConstant(1));
03119         } else
03120           ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
03121 
03122       } else {
03123         const TargetRegisterClass *RC;
03124 
03125         if (RegVT == MVT::f32)
03126           RC = &ARM::SPRRegClass;
03127         else if (RegVT == MVT::f64)
03128           RC = &ARM::DPRRegClass;
03129         else if (RegVT == MVT::v2f64)
03130           RC = &ARM::QPRRegClass;
03131         else if (RegVT == MVT::i32)
03132           RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass
03133                                            : &ARM::GPRRegClass;
03134         else
03135           llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
03136 
03137         // Transform the arguments in physical registers into virtual ones.
03138         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
03139         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
03140       }
03141 
03142       // If this is an 8 or 16-bit value, it is really passed promoted
03143       // to 32 bits.  Insert an assert[sz]ext to capture this, then
03144       // truncate to the right size.
03145       switch (VA.getLocInfo()) {
03146       default: llvm_unreachable("Unknown loc info!");
03147       case CCValAssign::Full: break;
03148       case CCValAssign::BCvt:
03149         ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
03150         break;
03151       case CCValAssign::SExt:
03152         ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
03153                                DAG.getValueType(VA.getValVT()));
03154         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
03155         break;
03156       case CCValAssign::ZExt:
03157         ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
03158                                DAG.getValueType(VA.getValVT()));
03159         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
03160         break;
03161       }
03162 
03163       InVals.push_back(ArgValue);
03164 
03165     } else { // VA.isRegLoc()
03166 
03167       // sanity check
03168       assert(VA.isMemLoc());
03169       assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
03170 
03171       int index = ArgLocs[i].getValNo();
03172 
03173       // Some Ins[] entries become multiple ArgLoc[] entries.
03174       // Process them only once.
03175       if (index != lastInsIndex)
03176         {
03177           ISD::ArgFlagsTy Flags = Ins[index].Flags;
03178           // FIXME: For now, all byval parameter objects are marked mutable.
03179           // This can be changed with more analysis.
03180           // In case of tail call optimization, mark all arguments mutable,
03181           // since they could be overwritten by the lowering of arguments of
03182           // a tail call.
03183           if (Flags.isByVal()) {
03184             unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();
03185 
03186             ByValStoreOffset = RoundUpToAlignment(ByValStoreOffset, Flags.getByValAlign());
03187             int FrameIndex = StoreByValRegs(
03188                 CCInfo, DAG, dl, Chain, CurOrigArg,
03189                 CurByValIndex,
03190                 Ins[VA.getValNo()].PartOffset,
03191                 VA.getLocMemOffset(),
03192                 Flags.getByValSize(),
03193                 true /*force mutable frames*/,
03194                 ByValStoreOffset,
03195                 TotalArgRegsSaveSize);
03196             ByValStoreOffset += Flags.getByValSize();
03197             ByValStoreOffset = std::min(ByValStoreOffset, 16U);
03198             InVals.push_back(DAG.getFrameIndex(FrameIndex, getPointerTy()));
03199             CCInfo.nextInRegsParam();
03200           } else {
03201             unsigned FIOffset = VA.getLocMemOffset();
03202             int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
03203                                             FIOffset, true);
03204 
03205             // Create load nodes to retrieve arguments from the stack.
03206             SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
03207             InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
03208                                          MachinePointerInfo::getFixedStack(FI),
03209                                          false, false, false, 0));
03210           }
03211           lastInsIndex = index;
03212         }
03213     }
03214   }
03215 
03216   // varargs
03217   if (isVarArg && MFI->hasVAStart())
03218     VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
03219                          CCInfo.getNextStackOffset(),
03220                          TotalArgRegsSaveSize);
03221 
03222   AFI->setArgumentStackSize(CCInfo.getNextStackOffset());
03223 
03224   return Chain;
03225 }
03226 
03227 /// isFloatingPointZero - Return true if this is +0.0.
03228 static bool isFloatingPointZero(SDValue Op) {
03229   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
03230     return CFP->getValueAPF().isPosZero();
03231   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
03232     // Maybe this has already been legalized into the constant pool?
03233     if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
03234       SDValue WrapperOp = Op.getOperand(1).getOperand(0);
03235       if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
03236         if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
03237           return CFP->getValueAPF().isPosZero();
03238     }
03239   } else if (Op->getOpcode() == ISD::BITCAST &&
03240              Op->getValueType(0) == MVT::f64) {
03241     // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64)
03242     // created by LowerConstantFP().
03243     SDValue BitcastOp = Op->getOperand(0);
03244     if (BitcastOp->getOpcode() == ARMISD::VMOVIMM) {
03245       SDValue MoveOp = BitcastOp->getOperand(0);
03246       if (MoveOp->getOpcode() == ISD::TargetConstant &&
03247           cast<ConstantSDNode>(MoveOp)->getZExtValue() == 0) {
03248         return true;
03249       }
03250     }
03251   }
03252   return false;
03253 }
03254 
03255 /// Returns the appropriate ARM CMP (cmp) and the corresponding condition code
03256 /// for the given operands.
03257 SDValue
03258 ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
03259                              SDValue &ARMcc, SelectionDAG &DAG,
03260                              SDLoc dl) const {
03261   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
03262     unsigned C = RHSC->getZExtValue();
03263     if (!isLegalICmpImmediate(C)) {
03264       // Constant does not fit, try adjusting it by one?
03265       switch (CC) {
03266       default: break;
03267       case ISD::SETLT:
03268       case ISD::SETGE:
03269         if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
03270           CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
03271           RHS = DAG.getConstant(C-1, MVT::i32);
03272         }
03273         break;
03274       case ISD::SETULT:
03275       case ISD::SETUGE:
03276         if (C != 0 && isLegalICmpImmediate(C-1)) {
03277           CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
03278           RHS = DAG.getConstant(C-1, MVT::i32);
03279         }
03280         break;
03281       case ISD::SETLE:
03282       case ISD::SETGT:
03283         if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
03284           CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
03285           RHS = DAG.getConstant(C+1, MVT::i32);
03286         }
03287         break;
03288       case ISD::SETULE:
03289       case ISD::SETUGT:
03290         if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
03291           CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
03292           RHS = DAG.getConstant(C+1, MVT::i32);
03293         }
03294         break;
03295       }
03296     }
03297   }
03298 
03299   ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
03300   ARMISD::NodeType CompareType;
03301   switch (CondCode) {
03302   default:
03303     CompareType = ARMISD::CMP;
03304     break;
03305   case ARMCC::EQ:
03306   case ARMCC::NE:
03307     // Uses only Z Flag
03308     CompareType = ARMISD::CMPZ;
03309     break;
03310   }
03311   ARMcc = DAG.getConstant(CondCode, MVT::i32);
03312   return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
03313 }
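
// Worked example (illustrative): "icmp slt i32 %x, 257" arrives with C = 257,
// which is not a legal ARM compare immediate. The SETLT case above rewrites
// it as SETLE with C - 1 = 256 (encodable), so the selected code can use
// "cmp rN, #256" with an LE condition instead of an unencodable "#257".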
03314 
03315 /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
03316 SDValue
03317 ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
03318                              SDLoc dl) const {
03319   assert(!Subtarget->isFPOnlySP() || RHS.getValueType() != MVT::f64);
03320   SDValue Cmp;
03321   if (!isFloatingPointZero(RHS))
03322     Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS);
03323   else
03324     Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS);
03325   return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
03326 }
03327 
03328 /// duplicateCmp - Glue values can have only one use, so this function
03329 /// duplicates a comparison node.
03330 SDValue
03331 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
03332   unsigned Opc = Cmp.getOpcode();
03333   SDLoc DL(Cmp);
03334   if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
03335     return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
03336 
03337   assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
03338   Cmp = Cmp.getOperand(0);
03339   Opc = Cmp.getOpcode();
03340   if (Opc == ARMISD::CMPFP)
03341     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
03342   else {
03343     assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
03344     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0));
03345   }
03346   return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
03347 }
03348 
03349 std::pair<SDValue, SDValue>
03350 ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
03351                                  SDValue &ARMcc) const {
03352   assert(Op.getValueType() == MVT::i32 &&  "Unsupported value type");
03353 
03354   SDValue Value, OverflowCmp;
03355   SDValue LHS = Op.getOperand(0);
03356   SDValue RHS = Op.getOperand(1);
03357 
03358 
03359   // FIXME: We are currently always generating CMPs because we don't support
03360   // generating CMN through the backend. This is not as good as the natural
03361   // CMP case because it causes a register dependency and cannot be folded
03362   // later.
03363 
03364   switch (Op.getOpcode()) {
03365   default:
03366     llvm_unreachable("Unknown overflow instruction!");
03367   case ISD::SADDO:
03368     ARMcc = DAG.getConstant(ARMCC::VC, MVT::i32);
03369     Value = DAG.getNode(ISD::ADD, SDLoc(Op), Op.getValueType(), LHS, RHS);
03370     OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, Value, LHS);
03371     break;
03372   case ISD::UADDO:
03373     ARMcc = DAG.getConstant(ARMCC::HS, MVT::i32);
03374     Value = DAG.getNode(ISD::ADD, SDLoc(Op), Op.getValueType(), LHS, RHS);
03375     OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, Value, LHS);
03376     break;
03377   case ISD::SSUBO:
03378     ARMcc = DAG.getConstant(ARMCC::VC, MVT::i32);
03379     Value = DAG.getNode(ISD::SUB, SDLoc(Op), Op.getValueType(), LHS, RHS);
03380     OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, LHS, RHS);
03381     break;
03382   case ISD::USUBO:
03383     ARMcc = DAG.getConstant(ARMCC::HS, MVT::i32);
03384     Value = DAG.getNode(ISD::SUB, SDLoc(Op), Op.getValueType(), LHS, RHS);
03385     OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, LHS, RHS);
03386     break;
03387   } // switch (...)
03388 
03389   return std::make_pair(Value, OverflowCmp);
03390 }
03391 
03392 
03393 SDValue
03394 ARMTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
03395   // Let legalize expand this if it isn't a legal type yet.
03396   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
03397     return SDValue();
03398 
03399   SDValue Value, OverflowCmp;
03400   SDValue ARMcc;
03401   std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
03402   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03403   // We use 0 and 1 as false and true values.
03404   SDValue TVal = DAG.getConstant(1, MVT::i32);
03405   SDValue FVal = DAG.getConstant(0, MVT::i32);
03406   EVT VT = Op.getValueType();
03407 
03408   SDValue Overflow = DAG.getNode(ARMISD::CMOV, SDLoc(Op), VT, TVal, FVal,
03409                                  ARMcc, CCR, OverflowCmp);
03410 
03411   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
03412   return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), VTs, Value, Overflow);
03413 }
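
// Illustrative sketch (assumed): "llvm.sadd.with.overflow.i32(%a, %b)" is
// lowered by the code above into roughly
//
//   Value       = ISD::ADD %a, %b
//   OverflowCmp = ARMISD::CMP Value, %a            ; feeds CPSR
//   Overflow    = ARMISD::CMOV 1, 0, VC, CPSR, OverflowCmp
//
// i.e. the overflow bit is materialized as a conditional move keyed on the
// V flag, and the two results are joined with ISD::MERGE_VALUES.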
03414 
03415 
03416 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
03417   SDValue Cond = Op.getOperand(0);
03418   SDValue SelectTrue = Op.getOperand(1);
03419   SDValue SelectFalse = Op.getOperand(2);
03420   SDLoc dl(Op);
03421   unsigned Opc = Cond.getOpcode();
03422 
03423   if (Cond.getResNo() == 1 &&
03424       (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
03425        Opc == ISD::USUBO)) {
03426     if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
03427       return SDValue();
03428 
03429     SDValue Value, OverflowCmp;
03430     SDValue ARMcc;
03431     std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
03432     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03433     EVT VT = Op.getValueType();
03434 
03435     return getCMOV(SDLoc(Op), VT, SelectTrue, SelectFalse, ARMcc, CCR,
03436                    OverflowCmp, DAG);
03437   }
03438 
03439   // Convert:
03440   //
03441   //   (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
03442   //   (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
03443   //
03444   if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
03445     const ConstantSDNode *CMOVTrue =
03446       dyn_cast<ConstantSDNode>(Cond.getOperand(0));
03447     const ConstantSDNode *CMOVFalse =
03448       dyn_cast<ConstantSDNode>(Cond.getOperand(1));
03449 
03450     if (CMOVTrue && CMOVFalse) {
03451       unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
03452       unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
03453 
03454       SDValue True;
03455       SDValue False;
03456       if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
03457         True = SelectTrue;
03458         False = SelectFalse;
03459       } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
03460         True = SelectFalse;
03461         False = SelectTrue;
03462       }
03463 
03464       if (True.getNode() && False.getNode()) {
03465         EVT VT = Op.getValueType();
03466         SDValue ARMcc = Cond.getOperand(2);
03467         SDValue CCR = Cond.getOperand(3);
03468         SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
03469         assert(True.getValueType() == VT);
03470         return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
03471       }
03472     }
03473   }
03474 
03475   // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
03476   // undefined bits before doing a full-word comparison with zero.
03477   Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond,
03478                      DAG.getConstant(1, Cond.getValueType()));
03479 
03480   return DAG.getSelectCC(dl, Cond,
03481                          DAG.getConstant(0, Cond.getValueType()),
03482                          SelectTrue, SelectFalse, ISD::SETNE);
03483 }
03484 
03485 static ISD::CondCode getInverseCCForVSEL(ISD::CondCode CC) {
03486   if (CC == ISD::SETNE)
03487     return ISD::SETEQ;
03488   return ISD::getSetCCInverse(CC, true);
03489 }
03490 
03491 static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
03492                                  bool &swpCmpOps, bool &swpVselOps) {
03493   // Start by selecting the GE condition code for opcodes that return true for
03494   // 'equality'
03495   if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE ||
03496       CC == ISD::SETULE)
03497     CondCode = ARMCC::GE;
03498 
03499   // and GT for opcodes that return false for 'equality'.
03500   else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT ||
03501            CC == ISD::SETULT)
03502     CondCode = ARMCC::GT;
03503 
03504   // Since we are constrained to GE/GT, if the opcode contains 'less', we need
03505   // to swap the compare operands.
03506   if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT ||
03507       CC == ISD::SETULT)
03508     swpCmpOps = true;
03509 
03510   // Both GT and GE are ordered comparisons, and return false for 'unordered'.
03511   // If we have an unordered opcode, we need to swap the operands to the VSEL
03512   // instruction (effectively negating the condition).
03513   //
03514   // This also has the effect of swapping which one of 'less' or 'greater'
03515   // returns true, so we also swap the compare operands. It also switches
03516   // whether we return true for 'equality', so we compensate by picking the
03517   // opposite condition code to our original choice.
03518   if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE ||
03519       CC == ISD::SETUGT) {
03520     swpCmpOps = !swpCmpOps;
03521     swpVselOps = !swpVselOps;
03522     CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT;
03523   }
03524 
03525   // 'ordered' is 'anything but unordered', so use the VS condition code and
03526   // swap the VSEL operands.
03527   if (CC == ISD::SETO) {
03528     CondCode = ARMCC::VS;
03529     swpVselOps = true;
03530   }
03531 
03532   // 'unordered or not equal' is 'anything but equal', so use the EQ condition
03533   // code and swap the VSEL operands.
03534   if (CC == ISD::SETUNE) {
03535     CondCode = ARMCC::EQ;
03536     swpVselOps = true;
03537   }
03538 }
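      // A worked example of the rules above: for CC == ISD::SETULT the first two
      // blocks pick GT (a 'less' that is false on equality), the third sets
      // swpCmpOps, and the fourth, because ULT is unordered, toggles both flags
      // and turns GT into GE.  The net result is CondCode == GE, compare operands
      // left alone, and the select operands swapped -- i.e. the caller emits
      // "(a >= b, ordered) ? f : t", which is exactly "ult(a, b) ? t : f".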
03539 
03540 SDValue ARMTargetLowering::getCMOV(SDLoc dl, EVT VT, SDValue FalseVal,
03541                                    SDValue TrueVal, SDValue ARMcc, SDValue CCR,
03542                                    SDValue Cmp, SelectionDAG &DAG) const {
03543   if (Subtarget->isFPOnlySP() && VT == MVT::f64) {
03544     FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl,
03545                            DAG.getVTList(MVT::i32, MVT::i32), FalseVal);
03546     TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl,
03547                           DAG.getVTList(MVT::i32, MVT::i32), TrueVal);
03548 
03549     SDValue TrueLow = TrueVal.getValue(0);
03550     SDValue TrueHigh = TrueVal.getValue(1);
03551     SDValue FalseLow = FalseVal.getValue(0);
03552     SDValue FalseHigh = FalseVal.getValue(1);
03553 
03554     SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow,
03555                               ARMcc, CCR, Cmp);
03556     SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh,
03557                                ARMcc, CCR, duplicateCmp(Cmp, DAG));
03558 
03559     return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High);
03560   } else {
03561     return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,
03562                        Cmp);
03563   }
03564 }
03565 
03566 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
03567   EVT VT = Op.getValueType();
03568   SDValue LHS = Op.getOperand(0);
03569   SDValue RHS = Op.getOperand(1);
03570   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
03571   SDValue TrueVal = Op.getOperand(2);
03572   SDValue FalseVal = Op.getOperand(3);
03573   SDLoc dl(Op);
03574 
03575   if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
03576     DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
03577                                                     dl);
03578 
03579     // If softenSetCCOperands only returned one value, we should compare it to
03580     // zero.
03581     if (!RHS.getNode()) {
03582       RHS = DAG.getConstant(0, LHS.getValueType());
03583       CC = ISD::SETNE;
03584     }
03585   }
03586 
03587   if (LHS.getValueType() == MVT::i32) {
03588     // Try to generate VSEL on ARMv8.
03589     // The VSEL instruction can't use all the usual ARM condition
03590     // codes: it only has two bits to select the condition code, so it's
03591     // constrained to use only GE, GT, VS and EQ.
03592     //
03593     // To implement all the various ISD::SETXXX opcodes, we sometimes need to
03594     // swap the operands of the previous compare instruction (effectively
03595     // inverting the compare condition, swapping 'less' and 'greater') and
03596     // sometimes need to swap the operands to the VSEL (which inverts the
03597     // condition in the sense of firing whenever the previous condition didn't)
03598     if (getSubtarget()->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
03599                                       TrueVal.getValueType() == MVT::f64)) {
03600       ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
03601       if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
03602           CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
03603         CC = getInverseCCForVSEL(CC);
03604         std::swap(TrueVal, FalseVal);
03605       }
03606     }
03607 
03608     SDValue ARMcc;
03609     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03610     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
03611     return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
03612   }
03613 
03614   ARMCC::CondCodes CondCode, CondCode2;
03615   FPCCToARMCC(CC, CondCode, CondCode2);
03616 
03617   // Try to generate VSEL on ARMv8.
03618   if (getSubtarget()->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
03619                                     TrueVal.getValueType() == MVT::f64)) {
03620     // We can select VMAXNM/VMINNM from a compare followed by a select with the
03621     // same operands, as follows:
03622     //   c = fcmp [ogt, olt, ugt, ult] a, b
03623     //   select c, a, b
03624     // We only do this in unsafe-fp-math, because signed zeros and NaNs are
03625     // handled differently than the original code sequence.
03626     if (getTargetMachine().Options.UnsafeFPMath) {
03627       if (LHS == TrueVal && RHS == FalseVal) {
03628         if (CC == ISD::SETOGT || CC == ISD::SETUGT)
03629           return DAG.getNode(ARMISD::VMAXNM, dl, VT, TrueVal, FalseVal);
03630         if (CC == ISD::SETOLT || CC == ISD::SETULT)
03631           return DAG.getNode(ARMISD::VMINNM, dl, VT, TrueVal, FalseVal);
03632       } else if (LHS == FalseVal && RHS == TrueVal) {
03633         if (CC == ISD::SETOLT || CC == ISD::SETULT)
03634           return DAG.getNode(ARMISD::VMAXNM, dl, VT, TrueVal, FalseVal);
03635         if (CC == ISD::SETOGT || CC == ISD::SETUGT)
03636           return DAG.getNode(ARMISD::VMINNM, dl, VT, TrueVal, FalseVal);
03637       }
03638     }
03639 
03640     bool swpCmpOps = false;
03641     bool swpVselOps = false;
03642     checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps);
03643 
03644     if (CondCode == ARMCC::GT || CondCode == ARMCC::GE ||
03645         CondCode == ARMCC::VS || CondCode == ARMCC::EQ) {
03646       if (swpCmpOps)
03647         std::swap(LHS, RHS);
03648       if (swpVselOps)
03649         std::swap(TrueVal, FalseVal);
03650     }
03651   }
03652 
03653   SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
03654   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
03655   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03656   SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
03657   if (CondCode2 != ARMCC::AL) {
03658     SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32);
03659     // FIXME: Needs another CMP because flag can have but one use.
03660     SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
03661     Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
03662   }
03663   return Result;
03664 }
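      // For instance, under unsafe-fp-math on an FP-ARMv8 subtarget the pair
      //
      //   %c = fcmp ogt float %a, %b
      //   %r = select i1 %c, float %a, float %b
      //
      // can collapse to a single vmaxnm.f32, while anything that misses the
      // special cases funnels into getCMOV, with a second conditional move only
      // when the FP condition needs two ARM condition codes.  This is a sketch of
      // possible outputs, not a guarantee.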
03665 
03666 /// canChangeToInt - Given the fp compare operand, return true if it is suitable
03667 /// to morph to an integer compare sequence.
03668 static bool canChangeToInt(SDValue Op, bool &SeenZero,
03669                            const ARMSubtarget *Subtarget) {
03670   SDNode *N = Op.getNode();
03671   if (!N->hasOneUse())
03672     // Otherwise it requires moving the value from fp to integer registers.
03673     return false;
03674   if (!N->getNumValues())
03675     return false;
03676   EVT VT = Op.getValueType();
03677   if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
03678     // f32 case is generally profitable. f64 case only makes sense when vcmpe +
03679     // vmrs are very slow, e.g. cortex-a8.
03680     return false;
03681 
03682   if (isFloatingPointZero(Op)) {
03683     SeenZero = true;
03684     return true;
03685   }
03686   return ISD::isNormalLoad(N);
03687 }
03688 
03689 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
03690   if (isFloatingPointZero(Op))
03691     return DAG.getConstant(0, MVT::i32);
03692 
03693   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
03694     return DAG.getLoad(MVT::i32, SDLoc(Op),
03695                        Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
03696                        Ld->isVolatile(), Ld->isNonTemporal(),
03697                        Ld->isInvariant(), Ld->getAlignment());
03698 
03699   llvm_unreachable("Unknown VFP cmp argument!");
03700 }
03701 
03702 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
03703                            SDValue &RetVal1, SDValue &RetVal2) {
03704   if (isFloatingPointZero(Op)) {
03705     RetVal1 = DAG.getConstant(0, MVT::i32);
03706     RetVal2 = DAG.getConstant(0, MVT::i32);
03707     return;
03708   }
03709 
03710   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
03711     SDValue Ptr = Ld->getBasePtr();
03712     RetVal1 = DAG.getLoad(MVT::i32, SDLoc(Op),
03713                           Ld->getChain(), Ptr,
03714                           Ld->getPointerInfo(),
03715                           Ld->isVolatile(), Ld->isNonTemporal(),
03716                           Ld->isInvariant(), Ld->getAlignment());
03717 
03718     EVT PtrType = Ptr.getValueType();
03719     unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
03720     SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(Op),
03721                                  PtrType, Ptr, DAG.getConstant(4, PtrType));
03722     RetVal2 = DAG.getLoad(MVT::i32, SDLoc(Op),
03723                           Ld->getChain(), NewPtr,
03724                           Ld->getPointerInfo().getWithOffset(4),
03725                           Ld->isVolatile(), Ld->isNonTemporal(),
03726                           Ld->isInvariant(), NewAlign);
03727     return;
03728   }
03729 
03730   llvm_unreachable("Unknown VFP cmp argument!");
03731 }
03732 
03733 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
03734 /// f32 and even f64 comparisons to integer ones.
03735 SDValue
03736 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
03737   SDValue Chain = Op.getOperand(0);
03738   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
03739   SDValue LHS = Op.getOperand(2);
03740   SDValue RHS = Op.getOperand(3);
03741   SDValue Dest = Op.getOperand(4);
03742   SDLoc dl(Op);
03743 
03744   bool LHSSeenZero = false;
03745   bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
03746   bool RHSSeenZero = false;
03747   bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget);
03748   if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
03749     // If unsafe fp math optimization is enabled and there are no other uses of
03750     // the CMP operands, and the condition code is EQ or NE, we can optimize it
03751     // to an integer comparison.
03752     if (CC == ISD::SETOEQ)
03753       CC = ISD::SETEQ;
03754     else if (CC == ISD::SETUNE)
03755       CC = ISD::SETNE;
03756 
03757     SDValue Mask = DAG.getConstant(0x7fffffff, MVT::i32);
03758     SDValue ARMcc;
03759     if (LHS.getValueType() == MVT::f32) {
03760       LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
03761                         bitcastf32Toi32(LHS, DAG), Mask);
03762       RHS = DAG.getNode(ISD::AND, dl, MVT::i32,
03763                         bitcastf32Toi32(RHS, DAG), Mask);
03764       SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
03765       SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03766       return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
03767                          Chain, Dest, ARMcc, CCR, Cmp);
03768     }
03769 
03770     SDValue LHS1, LHS2;
03771     SDValue RHS1, RHS2;
03772     expandf64Toi32(LHS, DAG, LHS1, LHS2);
03773     expandf64Toi32(RHS, DAG, RHS1, RHS2);
03774     LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
03775     RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
03776     ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
03777     ARMcc = DAG.getConstant(CondCode, MVT::i32);
03778     SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
03779     SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
03780     return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
03781   }
03782 
03783   return SDValue();
03784 }
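      // A rough sketch of the intended effect for the f32 case, assuming one
      // operand is an ordinary load and the other is the literal 0.0 (registers
      // and addressing are illustrative):
      //
      //   before:  vldr s0, [addr] ; vcmp.f32 s0, #0.0 ; vmrs APSR_nzcv, fpscr ; beq L
      //   after:   ldr r0, [addr] ; bic r0, r0, #0x80000000 ; cmp r0, #0 ; beq L
      //
      // Clearing the sign bit makes -0.0 compare equal to +0.0, and the whole
      // test stays in the integer pipeline; the exact instructions depend on the
      // subtarget and register allocation.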
03785 
03786 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
03787   SDValue Chain = Op.getOperand(0);
03788   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
03789   SDValue LHS = Op.getOperand(2);
03790   SDValue RHS = Op.getOperand(3);
03791   SDValue Dest = Op.getOperand(4);
03792   SDLoc dl(Op);
03793 
03794   if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
03795     DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
03796                                                     dl);
03797 
03798     // If softenSetCCOperands only returned one value, we should compare it to
03799     // zero.
03800     if (!RHS.getNode()) {
03801       RHS = DAG.getConstant(0, LHS.getValueType());
03802       CC = ISD::SETNE;
03803     }
03804   }
03805 
03806   if (LHS.getValueType() == MVT::i32) {
03807     SDValue ARMcc;
03808     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
03809     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03810     return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
03811                        Chain, Dest, ARMcc, CCR, Cmp);
03812   }
03813 
03814   assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
03815 
03816   if (getTargetMachine().Options.UnsafeFPMath &&
03817       (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
03818        CC == ISD::SETNE || CC == ISD::SETUNE)) {
03819     SDValue Result = OptimizeVFPBrcond(Op, DAG);
03820     if (Result.getNode())
03821       return Result;
03822   }
03823 
03824   ARMCC::CondCodes CondCode, CondCode2;
03825   FPCCToARMCC(CC, CondCode, CondCode2);
03826 
03827   SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
03828   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
03829   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
03830   SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
03831   SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
03832   SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
03833   if (CondCode2 != ARMCC::AL) {
03834     ARMcc = DAG.getConstant(CondCode2, MVT::i32);
03835     SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
03836     Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
03837   }
03838   return Res;
03839 }
03840 
03841 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
03842   SDValue Chain = Op.getOperand(0);
03843   SDValue Table = Op.getOperand(1);
03844   SDValue Index = Op.getOperand(2);
03845   SDLoc dl(Op);
03846 
03847   EVT PTy = getPointerTy();
03848   JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
03849   ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
03850   SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
03851   SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
03852   Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId);
03853   Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy));
03854   SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
03855   if (Subtarget->isThumb2()) {
03856     // Thumb2 uses a two-level jump. That is, it jumps into the jump table
03857     // which does another jump to the destination. This also makes it easier
03858     // to translate it to TBB / TBH later.
03859     // FIXME: This might not work if the function is extremely large.
03860     return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
03861                        Addr, Op.getOperand(2), JTI, UId);
03862   }
03863   if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
03864     Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
03865                        MachinePointerInfo::getJumpTable(),
03866                        false, false, false, 0);
03867     Chain = Addr.getValue(1);
03868     Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
03869     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
03870   } else {
03871     Addr = DAG.getLoad(PTy, dl, Chain, Addr,
03872                        MachinePointerInfo::getJumpTable(),
03873                        false, false, false, 0);
03874     Chain = Addr.getValue(1);
03875     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
03876   }
03877 }
03878 
03879 static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
03880   EVT VT = Op.getValueType();
03881   SDLoc dl(Op);
03882 
03883   if (Op.getValueType().getVectorElementType() == MVT::i32) {
03884     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
03885       return Op;
03886     return DAG.UnrollVectorOp(Op.getNode());
03887   }
03888 
03889   assert(Op.getOperand(0).getValueType() == MVT::v4f32 &&
03890          "Invalid type for custom lowering!");
03891   if (VT != MVT::v4i16)
03892     return DAG.UnrollVectorOp(Op.getNode());
03893 
03894   Op = DAG.getNode(Op.getOpcode(), dl, MVT::v4i32, Op.getOperand(0));
03895   return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
03896 }
03897 
03898 SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
03899   EVT VT = Op.getValueType();
03900   if (VT.isVector())
03901     return LowerVectorFP_TO_INT(Op, DAG);
03902 
03903   if (Subtarget->isFPOnlySP() && Op.getOperand(0).getValueType() == MVT::f64) {
03904     RTLIB::Libcall LC;
03905     if (Op.getOpcode() == ISD::FP_TO_SINT)
03906       LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(),
03907                               Op.getValueType());
03908     else
03909       LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(),
03910                               Op.getValueType());
03911     return makeLibCall(DAG, LC, Op.getValueType(), &Op.getOperand(0), 1,
03912                        /*isSigned*/ false, SDLoc(Op)).first;
03913   }
03914 
03915   SDLoc dl(Op);
03916   unsigned Opc;
03917 
03918   switch (Op.getOpcode()) {
03919   default: llvm_unreachable("Invalid opcode!");
03920   case ISD::FP_TO_SINT:
03921     Opc = ARMISD::FTOSI;
03922     break;
03923   case ISD::FP_TO_UINT:
03924     Opc = ARMISD::FTOUI;
03925     break;
03926   }
03927   Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0));
03928   return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
03929 }
03930 
03931 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
03932   EVT VT = Op.getValueType();
03933   SDLoc dl(Op);
03934 
03935   if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
03936     if (VT.getVectorElementType() == MVT::f32)
03937       return Op;
03938     return DAG.UnrollVectorOp(Op.getNode());
03939   }
03940 
03941   assert(Op.getOperand(0).getValueType() == MVT::v4i16 &&
03942          "Invalid type for custom lowering!");
03943   if (VT != MVT::v4f32)
03944     return DAG.UnrollVectorOp(Op.getNode());
03945 
03946   unsigned CastOpc;
03947   unsigned Opc;
03948   switch (Op.getOpcode()) {
03949   default: llvm_unreachable("Invalid opcode!");
03950   case ISD::SINT_TO_FP:
03951     CastOpc = ISD::SIGN_EXTEND;
03952     Opc = ISD::SINT_TO_FP;
03953     break;
03954   case ISD::UINT_TO_FP:
03955     CastOpc = ISD::ZERO_EXTEND;
03956     Opc = ISD::UINT_TO_FP;
03957     break;
03958   }
03959 
03960   Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0));
03961   return DAG.getNode(Opc, dl, VT, Op);
03962 }
03963 
03964 SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const {
03965   EVT VT = Op.getValueType();
03966   if (VT.isVector())
03967     return LowerVectorINT_TO_FP(Op, DAG);
03968 
03969   if (Subtarget->isFPOnlySP() && Op.getValueType() == MVT::f64) {
03970     RTLIB::Libcall LC;
03971     if (Op.getOpcode() == ISD::SINT_TO_FP)
03972       LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(),
03973                               Op.getValueType());
03974     else
03975       LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(),
03976                               Op.getValueType());
03977     return makeLibCall(DAG, LC, Op.getValueType(), &Op.getOperand(0), 1,
03978                        /*isSigned*/ false, SDLoc(Op)).first;
03979   }
03980 
03981   SDLoc dl(Op);
03982   unsigned Opc;
03983 
03984   switch (Op.getOpcode()) {
03985   default: llvm_unreachable("Invalid opcode!");
03986   case ISD::SINT_TO_FP:
03987     Opc = ARMISD::SITOF;
03988     break;
03989   case ISD::UINT_TO_FP:
03990     Opc = ARMISD::UITOF;
03991     break;
03992   }
03993 
03994   Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0));
03995   return DAG.getNode(Opc, dl, VT, Op);
03996 }
03997 
03998 SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
03999   // Implement fcopysign with a fabs and a conditional fneg.
04000   SDValue Tmp0 = Op.getOperand(0);
04001   SDValue Tmp1 = Op.getOperand(1);
04002   SDLoc dl(Op);
04003   EVT VT = Op.getValueType();
04004   EVT SrcVT = Tmp1.getValueType();
04005   bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
04006     Tmp0.getOpcode() == ARMISD::VMOVDRR;
04007   bool UseNEON = !InGPR && Subtarget->hasNEON();
04008 
04009   if (UseNEON) {
04010     // Use VBSL to copy the sign bit.
04011     unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80);
04012     SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
04013                                DAG.getTargetConstant(EncodedVal, MVT::i32));
04014     EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
04015     if (VT == MVT::f64)
04016       Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT,
04017                          DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
04018                          DAG.getConstant(32, MVT::i32));
04019     else /*if (VT == MVT::f32)*/
04020       Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
04021     if (SrcVT == MVT::f32) {
04022       Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
04023       if (VT == MVT::f64)
04024         Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT,
04025                            DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
04026                            DAG.getConstant(32, MVT::i32));
04027     } else if (VT == MVT::f32)
04028       Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64,
04029                          DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
04030                          DAG.getConstant(32, MVT::i32));
04031     Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
04032     Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);
04033 
04034     SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff),
04035                                             MVT::i32);
04036     AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
04037     SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
04038                                   DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));
04039 
04040     SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
04041                               DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
04042                               DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
04043     if (VT == MVT::f32) {
04044       Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
04045       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
04046                         DAG.getConstant(0, MVT::i32));
04047     } else {
04048       Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
04049     }
04050 
04051     return Res;
04052   }
04053 
04054   // Bitcast operand 1 to i32.
04055   if (SrcVT == MVT::f64)
04056     Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
04057                        Tmp1).getValue(1);
04058   Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);
04059 
04060   // Or in the signbit with integer operations.
04061   SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32);
04062   SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32);
04063   Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
04064   if (VT == MVT::f32) {
04065     Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
04066                        DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
04067     return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
04068                        DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
04069   }
04070 
04071   // f64: Or the high part with signbit and then combine two parts.
04072   Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
04073                      Tmp0);
04074   SDValue Lo = Tmp0.getValue(0);
04075   SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
04076   Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
04077   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
04078 }
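      // The integer path above is the classic bit-twiddling form of copysign; for
      // f32 it computes (a sketch of the math, not of the final schedule)
      //
      //   bits(result) = (bits(mag) & 0x7fffffff) | (bits(sign) & 0x80000000)
      //
      // and for f64 the same OR is applied only to the high word of the VMOVRRD
      // pair before the halves are glued back together with VMOVDRR.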
04079 
04080 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
04081   MachineFunction &MF = DAG.getMachineFunction();
04082   MachineFrameInfo *MFI = MF.getFrameInfo();
04083   MFI->setReturnAddressIsTaken(true);
04084 
04085   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
04086     return SDValue();
04087 
04088   EVT VT = Op.getValueType();
04089   SDLoc dl(Op);
04090   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
04091   if (Depth) {
04092     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
04093     SDValue Offset = DAG.getConstant(4, MVT::i32);
04094     return DAG.getLoad(VT, dl, DAG.getEntryNode(),
04095                        DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
04096                        MachinePointerInfo(), false, false, false, 0);
04097   }
04098 
04099   // Return LR, which contains the return address. Mark it an implicit live-in.
04100   unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
04101   return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
04102 }
04103 
04104 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
04105   const ARMBaseRegisterInfo &ARI =
04106     *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
04107   MachineFunction &MF = DAG.getMachineFunction();
04108   MachineFrameInfo *MFI = MF.getFrameInfo();
04109   MFI->setFrameAddressIsTaken(true);
04110 
04111   EVT VT = Op.getValueType();
04112   SDLoc dl(Op);  // FIXME probably not meaningful
04113   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
04114   unsigned FrameReg = ARI.getFrameRegister(MF);
04115   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
04116   while (Depth--)
04117     FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
04118                             MachinePointerInfo(),
04119                             false, false, false, 0);
04120   return FrameAddr;
04121 }
04122 
04123 // FIXME? Maybe this could be a TableGen attribute on some registers and
04124 // this table could be generated automatically from RegInfo.
04125 unsigned ARMTargetLowering::getRegisterByName(const char* RegName,
04126                                               EVT VT) const {
04127   unsigned Reg = StringSwitch<unsigned>(RegName)
04128                        .Case("sp", ARM::SP)
04129                        .Default(0);
04130   if (Reg)
04131     return Reg;
04132   report_fatal_error("Invalid register name global variable");
04133 }
04134 
04135 /// ExpandBITCAST - If the target supports VFP, this function is called to
04136 /// expand a bit convert where either the source or destination type is i64 to
04137 /// use a VMOVDRR or VMOVRRD node.  This should not be done when the non-i64
04138 /// operand type is illegal (e.g., v2f32 for a target that doesn't support
04139 /// vectors), since the legalizer won't know what to do with that.
04140 static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
04141   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
04142   SDLoc dl(N);
04143   SDValue Op = N->getOperand(0);
04144 
04145   // This function is only supposed to be called for i64 types, either as the
04146   // source or destination of the bit convert.
04147   EVT SrcVT = Op.getValueType();
04148   EVT DstVT = N->getValueType(0);
04149   assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
04150          "ExpandBITCAST called for non-i64 type");
04151 
04152   // Turn i64->f64 into VMOVDRR.
04153   if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
04154     SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
04155                              DAG.getConstant(0, MVT::i32));
04156     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
04157                              DAG.getConstant(1, MVT::i32));
04158     return DAG.getNode(ISD::BITCAST, dl, DstVT,
04159                        DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
04160   }
04161 
04162   // Turn f64->i64 into VMOVRRD.
04163   if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
04164     SDValue Cvt;
04165     if (TLI.isBigEndian() && SrcVT.isVector() &&
04166         SrcVT.getVectorNumElements() > 1)
04167       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
04168                         DAG.getVTList(MVT::i32, MVT::i32),
04169                         DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
04170     else
04171       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
04172                         DAG.getVTList(MVT::i32, MVT::i32), Op);
04173     // Merge the pieces into a single i64 value.
04174     return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
04175   }
04176 
04177   return SDValue();
04178 }
04179 
04180 /// getZeroVector - Returns a vector of specified type with all zero elements.
04181 /// Zero vectors are used to represent vector negation and in those cases
04182 /// will be implemented with the NEON VNEG instruction.  However, VNEG does
04183 /// not support i64 elements, so sometimes the zero vectors will need to be
04184 /// explicitly constructed.  Regardless, use a canonical VMOV to create the
04185 /// zero vector.
04186 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, SDLoc dl) {
04187   assert(VT.isVector() && "Expected a vector type");
04188   // The canonical modified immediate encoding of a zero vector is....0!
04189   SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32);
04190   EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
04191   SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
04192   return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
04193 }
04194 
04195 /// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
04196 /// i32 values and take a 2 x i32 value to shift plus a shift amount.
04197 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
04198                                                 SelectionDAG &DAG) const {
04199   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
04200   EVT VT = Op.getValueType();
04201   unsigned VTBits = VT.getSizeInBits();
04202   SDLoc dl(Op);
04203   SDValue ShOpLo = Op.getOperand(0);
04204   SDValue ShOpHi = Op.getOperand(1);
04205   SDValue ShAmt  = Op.getOperand(2);
04206   SDValue ARMcc;
04207   unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
04208 
04209   assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
04210 
04211   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
04212                                  DAG.getConstant(VTBits, MVT::i32), ShAmt);
04213   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
04214   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
04215                                    DAG.getConstant(VTBits, MVT::i32));
04216   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
04217   SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
04218   SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
04219 
04220   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
04221   SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
04222                           ARMcc, DAG, dl);
04223   SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
04224   SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc,
04225                            CCR, Cmp);
04226 
04227   SDValue Ops[2] = { Lo, Hi };
04228   return DAG.getMergeValues(Ops, dl);
04229 }
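      // Concretely, for a 64-bit logical shift right of {Hi,Lo} by Amt (a sketch
      // of the math rather than of the final instruction order):
      //
      //   Amt <  32:  Lo' = (Lo >> Amt) | (Hi << (32 - Amt)),  Hi' = Hi >> Amt
      //   Amt >= 32:  Lo' = Hi >> (Amt - 32),                  Hi' = 0
      //
      // The CMOV above selects between the two Lo' forms based on the sign of
      // Amt - 32; the arithmetic variant differs only in using SRA for the Hi
      // pieces.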
04230 
04231 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
04232 /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
04233 SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
04234                                                SelectionDAG &DAG) const {
04235   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
04236   EVT VT = Op.getValueType();
04237   unsigned VTBits = VT.getSizeInBits();
04238   SDLoc dl(Op);
04239   SDValue ShOpLo = Op.getOperand(0);
04240   SDValue ShOpHi = Op.getOperand(1);
04241   SDValue ShAmt  = Op.getOperand(2);
04242   SDValue ARMcc;
04243 
04244   assert(Op.getOpcode() == ISD::SHL_PARTS);
04245   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
04246                                  DAG.getConstant(VTBits, MVT::i32), ShAmt);
04247   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
04248   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
04249                                    DAG.getConstant(VTBits, MVT::i32));
04250   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
04251   SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
04252 
04253   SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
04254   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
04255   SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
04256                           ARMcc, DAG, dl);
04257   SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
04258   SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc,
04259                            CCR, Cmp);
04260 
04261   SDValue Ops[2] = { Lo, Hi };
04262   return DAG.getMergeValues(Ops, dl);
04263 }
04264 
04265 SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
04266                                             SelectionDAG &DAG) const {
04267   // The rounding mode is in bits 23:22 of the FPSCR.
04268   // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
04269   // The formula we use to implement this is (((FPSCR + (1 << 22)) >> 22) & 3)
04270   // so that the shift + and get folded into a bitfield extract.
04271   SDLoc dl(Op);
04272   SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
04273                               DAG.getConstant(Intrinsic::arm_get_fpscr,
04274                                               MVT::i32));
04275   SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
04276                                   DAG.getConstant(1U << 22, MVT::i32));
04277   SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
04278                               DAG.getConstant(22, MVT::i32));
04279   return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
04280                      DAG.getConstant(3, MVT::i32));
04281 }
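      // Spelling the formula out for each value of the FPSCR rounding-mode field
      // (bits 23:22), as a sanity check of the mapping above:
      //
      //   00 (nearest)      -> (0 + 1) & 3 = 1
      //   01 (towards +inf) -> (1 + 1) & 3 = 2
      //   10 (towards -inf) -> (2 + 1) & 3 = 3
      //   11 (towards zero) -> (3 + 1) & 3 = 0
      //
      // which is exactly the FLT_ROUNDS encoding described above.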
04282 
04283 static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
04284                          const ARMSubtarget *ST) {
04285   EVT VT = N->getValueType(0);
04286   SDLoc dl(N);
04287 
04288   if (!ST->hasV6T2Ops())
04289     return SDValue();
04290 
04291   SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0));
04292   return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
04293 }
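      // In other words cttz(x) == ctlz(rbit(x)) once RBIT is available (v6T2 and
      // later); e.g. for x = 0x18, rbit(x) = 0x18000000 and both sides give 3.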
04294 
04295 /// getCTPOP16BitCounts - Returns a v8i8/v16i8 vector containing the bit-count
04296 /// for each 16-bit element of the operand, repeated.  The basic idea is to
04297 /// leverage vcnt to get the 8-bit counts, gather and add the results.
04298 ///
04299 /// Trace for v4i16:
04300 /// input    = [v0    v1    v2    v3   ] (vi 16-bit element)
04301 /// cast: N0 = [w0 w1 w2 w3 w4 w5 w6 w7] (v0 = [w0 w1], wi 8-bit element)
04302 /// vcnt: N1 = [b0 b1 b2 b3 b4 b5 b6 b7] (bi = bit-count of 8-bit element wi)
04303 /// vrev: N2 = [b1 b0 b3 b2 b5 b4 b7 b6]
04304 ///            [b0 b1 b2 b3 b4 b5 b6 b7]
04305 ///           +[b1 b0 b3 b2 b5 b4 b7 b6]
04306 /// N3=N1+N2 = [k0 k0 k1 k1 k2 k2 k3 k3] (k0 = b0+b1 = bit-count of 16-bit v0,
04307 /// vuzp:    = [k0 k1 k2 k3 k0 k1 k2 k3]  each ki is 8-bits)
04308 static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG) {
04309   EVT VT = N->getValueType(0);
04310   SDLoc DL(N);
04311 
04312   EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
04313   SDValue N0 = DAG.getNode(ISD::BITCAST, DL, VT8Bit, N->getOperand(0));
04314   SDValue N1 = DAG.getNode(ISD::CTPOP, DL, VT8Bit, N0);
04315   SDValue N2 = DAG.getNode(ARMISD::VREV16, DL, VT8Bit, N1);
04316   SDValue N3 = DAG.getNode(ISD::ADD, DL, VT8Bit, N1, N2);
04317   return DAG.getNode(ARMISD::VUZP, DL, VT8Bit, N3, N3);
04318 }
04319 
04320 /// lowerCTPOP16BitElements - Returns a v4i16/v8i16 vector containing the
04321 /// bit-count for each 16-bit element from the operand.  We need slightly
04322 /// different sequencing for v4i16 and v8i16 to stay within NEON's available
04323 /// 64/128-bit registers.
04324 ///
04325 /// Trace for v4i16:
04326 /// input           = [v0    v1    v2    v3    ] (vi 16-bit element)
04327 /// v8i8: BitCounts = [k0 k1 k2 k3 k0 k1 k2 k3 ] (ki is the bit-count of vi)
04328 /// v8i16:Extended  = [k0    k1    k2    k3    k0    k1    k2    k3    ]
04329 /// v4i16:Extracted = [k0    k1    k2    k3    ]
04330 static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) {
04331   EVT VT = N->getValueType(0);
04332   SDLoc DL(N);
04333 
04334   SDValue BitCounts = getCTPOP16BitCounts(N, DAG);
04335   if (VT.is64BitVector()) {
04336     SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, BitCounts);
04337     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Extended,
04338                        DAG.getIntPtrConstant(0));
04339   } else {
04340     SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8,
04341                                     BitCounts, DAG.getIntPtrConstant(0));
04342     return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, Extracted);
04343   }
04344 }
04345 
04346 /// lowerCTPOP32BitElements - Returns a v2i32/v4i32 vector containing the
04347 /// bit-count for each 32-bit element from the operand.  The idea here is
04348 /// to split the vector into 16-bit elements, leverage the 16-bit count
04349 /// routine, and then combine the results.
04350 ///
04351 /// Trace for v2i32 (v4i32 similar with Extracted/Extended exchanged):
04352 /// input    = [v0    v1    ] (vi: 32-bit elements)
04353 /// Bitcast  = [w0 w1 w2 w3 ] (wi: 16-bit elements, v0 = [w0 w1])
04354 /// Counts16 = [k0 k1 k2 k3 ] (ki: 16-bit elements, bit-count of wi)
04355 /// vrev: N0 = [k1 k0 k3 k2 ]
04356 ///            [k0 k1 k2 k3 ]
04357 ///       N1 =+[k1 k0 k3 k2 ]
04358 ///            [k0 k2 k1 k3 ]
04359 ///       N2 =+[k1 k3 k0 k2 ]
04360 ///            [k0    k2    k1    k3    ]
04361 /// Extended =+[k1    k3    k0    k2    ]
04362 ///            [k0    k2    ]
04363 /// Extracted=+[k1    k3    ]
04364 ///
04365 static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG) {
04366   EVT VT = N->getValueType(0);
04367   SDLoc DL(N);
04368 
04369   EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16;
04370 
04371   SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, VT16Bit, N->getOperand(0));
04372   SDValue Counts16 = lowerCTPOP16BitElements(Bitcast.getNode(), DAG);
04373   SDValue N0 = DAG.getNode(ARMISD::VREV32, DL, VT16Bit, Counts16);
04374   SDValue N1 = DAG.getNode(ISD::ADD, DL, VT16Bit, Counts16, N0);
04375   SDValue N2 = DAG.getNode(ARMISD::VUZP, DL, VT16Bit, N1, N1);
04376 
04377   if (VT.is64BitVector()) {
04378     SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, N2);
04379     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Extended,
04380                        DAG.getIntPtrConstant(0));
04381   } else {
04382     SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, N2,
04383                                     DAG.getIntPtrConstant(0));
04384     return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, Extracted);
04385   }
04386 }
04387 
04388 static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
04389                           const ARMSubtarget *ST) {
04390   EVT VT = N->getValueType(0);
04391 
04392   assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
04393   assert((VT == MVT::v2i32 || VT == MVT::v4i32 ||
04394           VT == MVT::v4i16 || VT == MVT::v8i16) &&
04395          "Unexpected type for custom ctpop lowering");
04396 
04397   if (VT.getVectorElementType() == MVT::i32)
04398     return lowerCTPOP32BitElements(N, DAG);
04399   else
04400     return lowerCTPOP16BitElements(N, DAG);
04401 }
04402 
04403 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
04404                           const ARMSubtarget *ST) {
04405   EVT VT = N->getValueType(0);
04406   SDLoc dl(N);
04407 
04408   if (!VT.isVector())
04409     return SDValue();
04410 
04411   // Lower vector shifts on NEON to use VSHL.
04412   assert(ST->hasNEON() && "unexpected vector shift");
04413 
04414   // Left shifts translate directly to the vshiftu intrinsic.
04415   if (N->getOpcode() == ISD::SHL)
04416     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
04417                        DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
04418                        N->getOperand(0), N->getOperand(1));
04419 
04420   assert((N->getOpcode() == ISD::SRA ||
04421           N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
04422 
04423   // NEON uses the same intrinsics for both left and right shifts.  For
04424   // right shifts, the shift amounts are negative, so negate the vector of
04425   // shift amounts.
04426   EVT ShiftVT = N->getOperand(1).getValueType();
04427   SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
04428                                      getZeroVector(ShiftVT, DAG, dl),
04429                                      N->getOperand(1));
04430   Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
04431                              Intrinsic::arm_neon_vshifts :
04432                              Intrinsic::arm_neon_vshiftu);
04433   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
04434                      DAG.getConstant(vshiftInt, MVT::i32),
04435                      N->getOperand(0), NegatedCount);
04436 }
04437 
04438 static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
04439                                 const ARMSubtarget *ST) {
04440   EVT VT = N->getValueType(0);
04441   SDLoc dl(N);
04442 
04443   // We can get here for a node like i32 = ISD::SHL i32, i64
04444   if (VT != MVT::i64)
04445     return SDValue();
04446 
04447   assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
04448          "Unknown shift to lower!");
04449 
04450   // We only lower SRA and SRL by 1 here; all others use generic lowering.
04451   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
04452       cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
04453     return SDValue();
04454 
04455   // If we are in Thumb1 mode, we don't have RRX.
04456   if (ST->isThumb1Only()) return SDValue();
04457 
04458   // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
04459   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
04460                            DAG.getConstant(0, MVT::i32));
04461   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
04462                            DAG.getConstant(1, MVT::i32));
04463 
04464   // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
04465   // captures the shifted-out bit in the carry flag.
04466   unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
04467   Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi);
04468 
04469   // The low part is an ARMISD::RRX operand, which shifts the carry in.
04470   Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
04471 
04472   // Merge the pieces into a single i64 value.
04473   return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
04474 }
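      // For a 64-bit logical shift right by one this boils down to something like
      //
      //   lsrs r1, r1, #1   ; high word; the shifted-out bit lands in the carry
      //   rrx  r0, r0       ; low word; the carry rotates in at the top
      //
      // (register assignments are illustrative only); asrs instead of lsrs gives
      // the arithmetic case.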
04475 
04476 static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
04477   SDValue TmpOp0, TmpOp1;
04478   bool Invert = false;
04479   bool Swap = false;
04480   unsigned Opc = 0;
04481 
04482   SDValue Op0 = Op.getOperand(0);
04483   SDValue Op1 = Op.getOperand(1);
04484   SDValue CC = Op.getOperand(2);
04485   EVT VT = Op.getValueType();
04486   ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
04487   SDLoc dl(Op);
04488 
04489   if (Op1.getValueType().isFloatingPoint()) {
04490     switch (SetCCOpcode) {
04491     default: llvm_unreachable("Illegal FP comparison");
04492     case ISD::SETUNE:
04493     case ISD::SETNE:  Invert = true; // Fallthrough
04494     case ISD::SETOEQ:
04495     case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
04496     case ISD::SETOLT:
04497     case ISD::SETLT: Swap = true; // Fallthrough
04498     case ISD::SETOGT:
04499     case ISD::SETGT:  Opc = ARMISD::VCGT; break;
04500     case ISD::SETOLE:
04501     case ISD::SETLE:  Swap = true; // Fallthrough
04502     case ISD::SETOGE:
04503     case ISD::SETGE: Opc = ARMISD::VCGE; break;
04504     case ISD::SETUGE: Swap = true; // Fallthrough
04505     case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
04506     case ISD::SETUGT: Swap = true; // Fallthrough
04507     case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
04508     case ISD::SETUEQ: Invert = true; // Fallthrough
04509     case ISD::SETONE:
04510       // Expand this to (OLT | OGT).
04511       TmpOp0 = Op0;
04512       TmpOp1 = Op1;
04513       Opc = ISD::OR;
04514       Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
04515       Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1);
04516       break;
04517     case ISD::SETUO: Invert = true; // Fallthrough
04518     case ISD::SETO:
04519       // Expand this to (OLT | OGE).
04520       TmpOp0 = Op0;
04521       TmpOp1 = Op1;
04522       Opc = ISD::OR;
04523       Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
04524       Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1);
04525       break;
04526     }
04527   } else {
04528     // Integer comparisons.
04529     switch (SetCCOpcode) {
04530     default: llvm_unreachable("Illegal integer comparison");
04531     case ISD::SETNE:  Invert = true;
04532     case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
04533     case ISD::SETLT:  Swap = true;
04534     case ISD::SETGT:  Opc = ARMISD::VCGT; break;
04535     case ISD::SETLE:  Swap = true;
04536     case ISD::SETGE:  Opc = ARMISD::VCGE; break;
04537     case ISD::SETULT: Swap = true;
04538     case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
04539     case ISD::SETULE: Swap = true;
04540     case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
04541     }
04542 
04543     // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
04544     if (Opc == ARMISD::VCEQ) {
04545 
04546       SDValue AndOp;
04547       if (ISD::isBuildVectorAllZeros(Op1.getNode()))
04548         AndOp = Op0;
04549       else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
04550         AndOp = Op1;
04551 
04552       // Ignore bitconvert.
04553       if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
04554         AndOp = AndOp.getOperand(0);
04555 
04556       if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
04557         Opc = ARMISD::VTST;
04558         Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0));
04559         Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1));
04560         Invert = !Invert;
04561       }
04562     }
04563   }
04564 
04565   if (Swap)
04566     std::swap(Op0, Op1);
04567 
04568   // If one of the operands is a constant vector zero, attempt to fold the
04569   // comparison to a specialized compare-against-zero form.
04570   SDValue SingleOp;
04571   if (ISD::isBuildVectorAllZeros(Op1.getNode()))
04572     SingleOp = Op0;
04573   else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
04574     if (Opc == ARMISD::VCGE)
04575       Opc = ARMISD::VCLEZ;
04576     else if (Opc == ARMISD::VCGT)
04577       Opc = ARMISD::VCLTZ;
04578     SingleOp = Op1;
04579   }
04580 
04581   SDValue Result;
04582   if (SingleOp.getNode()) {
04583     switch (Opc) {
04584     case ARMISD::VCEQ:
04585       Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break;
04586     case ARMISD::VCGE:
04587       Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break;
04588     case ARMISD::VCLEZ:
04589       Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break;
04590     case ARMISD::VCGT:
04591       Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break;
04592     case ARMISD::VCLTZ:
04593       Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break;
04594     default:
04595       Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
04596     }
04597   } else {
04598     Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
04599   }
04600 
04601   if (Invert)
04602     Result = DAG.getNOT(dl, Result, VT);
04603 
04604   return Result;
04605 }
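      // One of the less obvious cases above is the VTST fold: an integer
      // comparison of the form
      //
      //   setcc ne (and %a, %b), <all-zeros>
      //
      // becomes a single ARMISD::VTST of %a and %b (with the pending Invert
      // cancelled), since VTST already computes "any common bits set" per lane.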
04606 
04607 /// isNEONModifiedImm - Check if the specified splat value corresponds to a
04608 /// valid vector constant for a NEON instruction with a "modified immediate"
04609 /// operand (e.g., VMOV).  If so, return the encoded value.
04610 static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
04611                                  unsigned SplatBitSize, SelectionDAG &DAG,
04612                                  EVT &VT, bool is128Bits, NEONModImmType type) {
04613   unsigned OpCmode, Imm;
04614 
04615   // SplatBitSize is set to the smallest size that splats the vector, so a
04616   // zero vector will always have SplatBitSize == 8.  However, NEON modified
04617   // immediate instructions other than VMOV do not support the 8-bit encoding
04618   // of a zero vector, and the default encoding of zero is supposed to be the
04619   // 32-bit version.
04620   if (SplatBits == 0)
04621     SplatBitSize = 32;
04622 
04623   switch (SplatBitSize) {
04624   case 8:
04625     if (type != VMOVModImm)
04626       return SDValue();
04627     // Any 1-byte value is OK.  Op=0, Cmode=1110.
04628     assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
04629     OpCmode = 0xe;
04630     Imm = SplatBits;
04631     VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
04632     break;
04633 
04634   case 16:
04635     // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
04636     VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
04637     if ((SplatBits & ~0xff) == 0) {
04638       // Value = 0x00nn: Op=x, Cmode=100x.
04639       OpCmode = 0x8;
04640       Imm = SplatBits;
04641       break;
04642     }
04643     if ((SplatBits & ~0xff00) == 0) {
04644       // Value = 0xnn00: Op=x, Cmode=101x.
04645       OpCmode = 0xa;
04646       Imm = SplatBits >> 8;
04647       break;
04648     }
04649     return SDValue();
04650 
04651   case 32:
04652     // NEON's 32-bit VMOV supports splat values where:
04653     // * only one byte is nonzero, or
04654     // * the least significant byte is 0xff and the second byte is nonzero, or
04655     // * the least significant 2 bytes are 0xff and the third is nonzero.
04656     VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
04657     if ((SplatBits & ~0xff) == 0) {
04658       // Value = 0x000000nn: Op=x, Cmode=000x.
04659       OpCmode = 0;
04660       Imm = SplatBits;
04661       break;
04662     }
04663     if ((SplatBits & ~0xff00) == 0) {
04664       // Value = 0x0000nn00: Op=x, Cmode=001x.
04665       OpCmode = 0x2;
04666       Imm = SplatBits >> 8;
04667       break;
04668     }
04669     if ((SplatBits & ~0xff0000) == 0) {
04670       // Value = 0x00nn0000: Op=x, Cmode=010x.
04671       OpCmode = 0x4;
04672       Imm = SplatBits >> 16;
04673       break;
04674     }
04675     if ((SplatBits & ~0xff000000) == 0) {
04676       // Value = 0xnn000000: Op=x, Cmode=011x.
04677       OpCmode = 0x6;
04678       Imm = SplatBits >> 24;
04679       break;
04680     }
04681 
04682     // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
04683     if (type == OtherModImm) return SDValue();
04684 
04685     if ((SplatBits & ~0xffff) == 0 &&
04686         ((SplatBits | SplatUndef) & 0xff) == 0xff) {
04687       // Value = 0x0000nnff: Op=x, Cmode=1100.
04688       OpCmode = 0xc;
04689       Imm = SplatBits >> 8;
04690       break;
04691     }
04692 
04693     if ((SplatBits & ~0xffffff) == 0 &&
04694         ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
04695       // Value = 0x00nnffff: Op=x, Cmode=1101.
04696       OpCmode = 0xd;
04697       Imm = SplatBits >> 16;
04698       break;
04699     }
04700 
04701     // Note: there are a few 32-bit splat values (specifically: 00ffff00,
04702     // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
04703     // VMOV.I32.  A (very) minor optimization would be to replicate the value
04704     // and fall through here to test for a valid 64-bit splat.  But, then the
04705     // caller would also need to check and handle the change in size.
04706     return SDValue();
04707 
04708   case 64: {
04709     if (type != VMOVModImm)
04710       return SDValue();
04711     // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
04712     uint64_t BitMask = 0xff;
04713     uint64_t Val = 0;
04714     unsigned ImmMask = 1;
04715     Imm = 0;
04716     for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
04717       if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
04718         Val |= BitMask;
04719         Imm |= ImmMask;
04720       } else if ((SplatBits & BitMask) != 0) {
04721         return SDValue();
04722       }
04723       BitMask <<= 8;
04724       ImmMask <<= 1;
04725     }
04726 
04727     if (DAG.getTargetLoweringInfo().isBigEndian())
04728       // Swap the higher and lower 32-bit words.
04729       Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4);
04730 
04731     // Op=1, Cmode=1110.
04732     OpCmode = 0x1e;
04733     VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
04734     break;
04735   }
04736 
04737   default:
04738     llvm_unreachable("unexpected size for isNEONModifiedImm");
04739   }
04740 
04741   unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm);
04742   return DAG.getTargetConstant(EncodedVal, MVT::i32);
04743 }
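      // A worked example of the 32-bit case: a v4i32 splat of 0x0000ab00 matches
      // the "0x0000nn00" pattern, so OpCmode is 0x2 and Imm is 0xab, i.e. roughly
      // a "vmov.i32 q0, #0x0000ab00" once selected (register and syntax are
      // illustrative).  Splat values that fit none of the patterns are left to
      // the callers to expand some other way.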
04744 
04745 SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
04746                                            const ARMSubtarget *ST) const {
04747   if (!ST->hasVFP3())
04748     return SDValue();
04749 
04750   bool IsDouble = Op.getValueType() == MVT::f64;
04751   ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op);
04752 
04753   // Use the default (constant pool) lowering for double constants when we have
04754   // an SP-only FPU
04755   if (IsDouble && Subtarget->isFPOnlySP())
04756     return SDValue();
04757 
04758   // Try splatting with a VMOV.f32...
04759   APFloat FPVal = CFP->getValueAPF();
04760   int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal);
04761 
04762   if (ImmVal != -1) {
04763     if (IsDouble || !ST->useNEONForSinglePrecisionFP()) {
04764       // We have code in place to select a valid ConstantFP already, no need to
04765       // do any mangling.
04766       return Op;
04767     }
04768 
04769     // It's a float and we are trying to use NEON operations where
04770     // possible. Lower it to a splat followed by an extract.
04771     SDLoc DL(Op);
04772     SDValue NewVal = DAG.getTargetConstant(ImmVal, MVT::i32);
04773     SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32,
04774                                       NewVal);
04775     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant,
04776                        DAG.getConstant(0, MVT::i32));
04777   }
04778 
04779   // The rest of our options are NEON-only, so make sure that's allowed before
04780   // proceeding.
04781   if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP()))
04782     return SDValue();
04783 
04784   EVT VMovVT;
04785   uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();
04786 
04787   // It wouldn't really be worth bothering for doubles except for one very
04788   // important value, which does happen to match: 0.0. So make sure we don't do
04789   // anything stupid.
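  // (The check below only lets through doubles whose two 32-bit halves are
  // identical, so that a 32-bit VMOV/VMVN splat bitcast to f64 reproduces the
  // full 64-bit pattern; +0.0 is the case that matters.)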
04790   if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
04791     return SDValue();
04792 
04793   // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too).
04794   SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, VMovVT,
04795                                      false, VMOVModImm);
04796   if (NewVal != SDValue()) {
04797     SDLoc DL(Op);
04798     SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT,
04799                                       NewVal);
04800     if (IsDouble)
04801       return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
04802 
04803     // It's a float: cast and extract a vector element.
04804     SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
04805                                        VecConstant);
04806     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
04807                        DAG.getConstant(0, MVT::i32));
04808   }
04809 
04810   // Finally, try a VMVN.i32
04811   NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, VMovVT,
04812                              false, VMVNModImm);
04813   if (NewVal != SDValue()) {
04814     SDLoc DL(Op);
04815     SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal);
04816 
04817     if (IsDouble)
04818       return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
04819 
04820     // It's a float: cast and extract a vector element.
04821     SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
04822                                        VecConstant);
04823     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
04824                        DAG.getConstant(0, MVT::i32));
04825   }
04826 
04827   return SDValue();
04828 }
04829 
04830 // Check whether a VEXT instruction can handle the shuffle mask when the
04831 // vector sources of the shuffle are the same.
04832 static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
04833   unsigned NumElts = VT.getVectorNumElements();
04834 
04835   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
04836   if (M[0] < 0)
04837     return false;
04838 
04839   Imm = M[0];
04840 
04841   // If this is a VEXT shuffle, the immediate value is the index of the first
04842   // element.  The other shuffle indices must be the successive elements after
04843   // the first one.
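  // For example, for <8 x i8> the mask <3, 4, 5, 6, 7, 0, 1, 2> is a
  // single-source VEXT with Imm = 3; the indices may wrap back to 0 because
  // both shuffle sources are the same vector.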
04844   unsigned ExpectedElt = Imm;
04845   for (unsigned i = 1; i < NumElts; ++i) {
04846     // Increment the expected index.  If it wraps around, just follow it
04847     // back to index zero and keep going.
04848     ++ExpectedElt;
04849     if (ExpectedElt == NumElts)
04850       ExpectedElt = 0;
04851 
04852     if (M[i] < 0) continue; // ignore UNDEF indices
04853     if (ExpectedElt != static_cast<unsigned>(M[i]))
04854       return false;
04855   }
04856 
04857   return true;
04858 }
04859 
04860 
04861 static bool isVEXTMask(ArrayRef<int> M, EVT VT,
04862                        bool &ReverseVEXT, unsigned &Imm) {
04863   unsigned NumElts = VT.getVectorNumElements();
04864   ReverseVEXT = false;
04865 
04866   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
04867   if (M[0] < 0)
04868     return false;
04869 
04870   Imm = M[0];
04871 
04872   // If this is a VEXT shuffle, the immediate value is the index of the first
04873   // element.  The other shuffle indices must be the successive elements after
04874   // the first one.
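  // For example, for <8 x i8> the mask <2, 3, 4, 5, 6, 7, 8, 9> is a VEXT of
  // the two sources with Imm = 2.  A mask such as <14, 15, 0, 1, 2, 3, 4, 5>
  // wraps past 2*NumElts, so the sources must be swapped and Imm becomes 6.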
04875   unsigned ExpectedElt = Imm;
04876   for (unsigned i = 1; i < NumElts; ++i) {
04877     // Increment the expected index.  If it wraps around, it may still be
04878     // a VEXT but the source vectors must be swapped.
04879     ExpectedElt += 1;
04880     if (ExpectedElt == NumElts * 2) {
04881       ExpectedElt = 0;
04882       ReverseVEXT = true;
04883     }
04884 
04885     if (M[i] < 0) continue; // ignore UNDEF indices
04886     if (ExpectedElt != static_cast<unsigned>(M[i]))
04887       return false;
04888   }
04889 
04890   // Adjust the index value if the source operands will be swapped.
04891   if (ReverseVEXT)
04892     Imm -= NumElts;
04893 
04894   return true;
04895 }
04896 
04897 /// isVREVMask - Check if a vector shuffle corresponds to a VREV
04898 /// instruction with the specified blocksize.  (The order of the elements
04899 /// within each block of the vector is reversed.)
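/// For example, a VREV64.32 shuffle of <4 x i32> corresponds to the mask
/// <1, 0, 3, 2>, and a VREV32.8 shuffle of <8 x i8> to <3, 2, 1, 0, 7, 6, 5, 4>.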
04900 static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
04901   assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
04902          "Only possible block sizes for VREV are: 16, 32, 64");
04903 
04904   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04905   if (EltSz == 64)
04906     return false;
04907 
04908   unsigned NumElts = VT.getVectorNumElements();
04909   unsigned BlockElts = M[0] + 1;
04910   // If the first shuffle index is UNDEF, be optimistic.
04911   if (M[0] < 0)
04912     BlockElts = BlockSize / EltSz;
04913 
04914   if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
04915     return false;
04916 
04917   for (unsigned i = 0; i < NumElts; ++i) {
04918     if (M[i] < 0) continue; // ignore UNDEF indices
04919     if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
04920       return false;
04921   }
04922 
04923   return true;
04924 }
04925 
04926 static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
04927   // We can handle <8 x i8> vector shuffles. If the index in the mask is out of
04928   // range, then 0 is placed into the resulting vector. So pretty much any mask
04929   // of 8 elements can work here.
04930   return VT == MVT::v8i8 && M.size() == 8;
04931 }
04932 
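/// isVTRNMask - Check if a vector shuffle corresponds to a VTRN (vector
/// transpose) instruction.  For <4 x i32> the accepted masks are
/// <0, 4, 2, 6> (WhichResult = 0) and <1, 5, 3, 7> (WhichResult = 1).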
04933 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
04934   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04935   if (EltSz == 64)
04936     return false;
04937 
04938   unsigned NumElts = VT.getVectorNumElements();
04939   WhichResult = (M[0] == 0 ? 0 : 1);
04940   for (unsigned i = 0; i < NumElts; i += 2) {
04941     if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) ||
04942         (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult))
04943       return false;
04944   }
04945   return true;
04946 }
04947 
04948 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
04949 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
04950 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
04951 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
04952   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04953   if (EltSz == 64)
04954     return false;
04955 
04956   unsigned NumElts = VT.getVectorNumElements();
04957   WhichResult = (M[0] == 0 ? 0 : 1);
04958   for (unsigned i = 0; i < NumElts; i += 2) {
04959     if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) ||
04960         (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult))
04961       return false;
04962   }
04963   return true;
04964 }
04965 
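/// isVUZPMask - Check if a vector shuffle corresponds to a VUZP (vector
/// unzip) instruction.  For <4 x i32> the accepted masks are
/// <0, 2, 4, 6> (WhichResult = 0) and <1, 3, 5, 7> (WhichResult = 1).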
04966 static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
04967   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04968   if (EltSz == 64)
04969     return false;
04970 
04971   unsigned NumElts = VT.getVectorNumElements();
04972   WhichResult = (M[0] == 0 ? 0 : 1);
04973   for (unsigned i = 0; i != NumElts; ++i) {
04974     if (M[i] < 0) continue; // ignore UNDEF indices
04975     if ((unsigned) M[i] != 2 * i + WhichResult)
04976       return false;
04977   }
04978 
04979   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
04980   if (VT.is64BitVector() && EltSz == 32)
04981     return false;
04982 
04983   return true;
04984 }
04985 
04986 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
04987 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
04988 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
04989 static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
04990   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
04991   if (EltSz == 64)
04992     return false;
04993 
04994   unsigned Half = VT.getVectorNumElements() / 2;
04995   WhichResult = (M[0] == 0 ? 0 : 1);
04996   for (unsigned j = 0; j != 2; ++j) {
04997     unsigned Idx = WhichResult;
04998     for (unsigned i = 0; i != Half; ++i) {
04999       int MIdx = M[i + j * Half];
05000       if (MIdx >= 0 && (unsigned) MIdx != Idx)
05001         return false;
05002       Idx += 2;
05003     }
05004   }
05005 
05006   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
05007   if (VT.is64BitVector() && EltSz == 32)
05008     return false;
05009 
05010   return true;
05011 }
05012 
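/// isVZIPMask - Check if a vector shuffle corresponds to a VZIP (vector
/// zip/interleave) instruction.  For <4 x i32> the accepted masks are
/// <0, 4, 1, 5> (WhichResult = 0) and <2, 6, 3, 7> (WhichResult = 1).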
05013 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
05014   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
05015   if (EltSz == 64)
05016     return false;
05017 
05018   unsigned NumElts = VT.getVectorNumElements();
05019   WhichResult = (M[0] == 0 ? 0 : 1);
05020   unsigned Idx = WhichResult * NumElts / 2;
05021   for (unsigned i = 0; i != NumElts; i += 2) {
05022     if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
05023         (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts))
05024       return false;
05025     Idx += 1;
05026   }
05027 
05028   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
05029   if (VT.is64BitVector() && EltSz == 32)
05030     return false;
05031 
05032   return true;
05033 }
05034 
05035 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
05036 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
05037 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
05038 static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
05039   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
05040   if (EltSz == 64)
05041     return false;
05042 
05043   unsigned NumElts = VT.getVectorNumElements();
05044   WhichResult = (M[0] == 0 ? 0 : 1);
05045   unsigned Idx = WhichResult * NumElts / 2;
05046   for (unsigned i = 0; i != NumElts; i += 2) {
05047     if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
05048         (M[i+1] >= 0 && (unsigned) M[i+1] != Idx))
05049       return false;
05050     Idx += 1;
05051   }
05052 
05053   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
05054   if (VT.is64BitVector() && EltSz == 32)
05055     return false;
05056 
05057   return true;
05058 }
05059 
05060 /// \return true if this is a reverse operation on a vector.
05061 static bool isReverseMask(ArrayRef<int> M, EVT VT) {
05062   unsigned NumElts = VT.getVectorNumElements();
05063   // Make sure the mask has the right size.
05064   if (NumElts != M.size())
05065     return false;
05066 
05067   // Look for <15, ..., 3, -1, 1, 0>.
05068   for (unsigned i = 0; i != NumElts; ++i)
05069     if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
05070       return false;
05071 
05072   return true;
05073 }
05074 
05075 // If N is an integer constant that can be moved into a register in one
05076 // instruction, return an SDValue of such a constant (will become a MOV
05077 // instruction).  Otherwise return null.
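// On Thumb1 this means a plain 8-bit immediate or the complement of one; on
// other subtargets it means an ARM modified immediate (so_imm) or the
// complement of one, which MVN can materialize.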
05078 static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
05079                                      const ARMSubtarget *ST, SDLoc dl) {
05080   uint64_t Val;
05081   if (!isa<ConstantSDNode>(N))
05082     return SDValue();
05083   Val = cast<ConstantSDNode>(N)->getZExtValue();
05084 
05085   if (ST->isThumb1Only()) {
05086     if (Val <= 255 || ~Val <= 255)
05087       return DAG.getConstant(Val, MVT::i32);
05088   } else {
05089     if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
05090       return DAG.getConstant(Val, MVT::i32);
05091   }
05092   return SDValue();
05093 }
05094 
05095 // If this is a case we can't handle, return null and let the default
05096 // expansion code take care of it.
05097 SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
05098                                              const ARMSubtarget *ST) const {
05099   BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
05100   SDLoc dl(Op);
05101   EVT VT = Op.getValueType();
05102 
05103   APInt SplatBits, SplatUndef;
05104   unsigned SplatBitSize;
05105   bool HasAnyUndefs;
05106   if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
05107     if (SplatBitSize <= 64) {
05108       // Check if an immediate VMOV works.
05109       EVT VmovVT;
05110       SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
05111                                       SplatUndef.getZExtValue(), SplatBitSize,
05112                                       DAG, VmovVT, VT.is128BitVector(),
05113                                       VMOVModImm);
05114       if (Val.getNode()) {
05115         SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
05116         return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
05117       }
05118 
05119       // Try an immediate VMVN.
05120       uint64_t NegatedImm = (~SplatBits).getZExtValue();
05121       Val = isNEONModifiedImm(NegatedImm,
05122                                       SplatUndef.getZExtValue(), SplatBitSize,
05123                                       DAG, VmovVT, VT.is128BitVector(),
05124                                       VMVNModImm);
05125       if (Val.getNode()) {
05126         SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
05127         return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
05128       }
05129 
05130       // Use vmov.f32 to materialize other v2f32 and v4f32 splats.
05131       if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
05132         int ImmVal = ARM_AM::getFP32Imm(SplatBits);
05133         if (ImmVal != -1) {
05134           SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32);
05135           return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
05136         }
05137       }
05138     }
05139   }
05140 
05141   // Scan through the operands to see if only one value is used.
05142   //
05143   // As an optimisation, even if more than one value is used it may be more
05144   // profitable to splat with one value and then change some lanes.
05145   //
05146   // Heuristically we decide to do this if the vector has a "dominant" value,
05147   // defined as splatted to more than half of the lanes.
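  //
  // For example, for <a, a, b, a> (with 'a' and 'b' non-constant scalars) the
  // value 'a' is dominant, so the vector is lowered below as a VDUP of 'a'
  // followed by an INSERT_VECTOR_ELT of 'b' into lane 2.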
05148   unsigned NumElts = VT.getVectorNumElements();
05149   bool isOnlyLowElement = true;
05150   bool usesOnlyOneValue = true;
05151   bool hasDominantValue = false;
05152   bool isConstant = true;
05153 
05154   // Map of the number of times a particular SDValue appears in the
05155   // element list.
05156   DenseMap<SDValue, unsigned> ValueCounts;
05157   SDValue Value;
05158   for (unsigned i = 0; i < NumElts; ++i) {
05159     SDValue V = Op.getOperand(i);
05160     if (V.getOpcode() == ISD::UNDEF)
05161       continue;
05162     if (i > 0)
05163       isOnlyLowElement = false;
05164     if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
05165       isConstant = false;
05166 
05167     ValueCounts.insert(std::make_pair(V, 0));
05168     unsigned &Count = ValueCounts[V];
05169 
05170     // Is this value dominant? (takes up more than half of the lanes)
05171     if (++Count > (NumElts / 2)) {
05172       hasDominantValue = true;
05173       Value = V;
05174     }
05175   }
05176   if (ValueCounts.size() != 1)
05177     usesOnlyOneValue = false;
05178   if (!Value.getNode() && ValueCounts.size() > 0)
05179     Value = ValueCounts.begin()->first;
05180 
05181   if (ValueCounts.size() == 0)
05182     return DAG.getUNDEF(VT);
05183 
05184   // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR, so
05185   // keep going below rather than using SCALAR_TO_VECTOR when the only element is a load.
05186   if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
05187     return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
05188 
05189   unsigned EltSize = VT.getVectorElementType().getSizeInBits();
05190 
05191   // Use VDUP for non-constant splats.  For f32 constant splats, reduce to
05192   // i32 and try again.
05193   if (hasDominantValue && EltSize <= 32) {
05194     if (!isConstant) {
05195       SDValue N;
05196 
05197       // If we are VDUPing a value that comes directly from a vector, that will
05198       // cause an unnecessary move to and from a GPR, where instead we could
05199       // just use VDUPLANE. We can only do this if the lane being extracted
05200       // is at a constant index, as the VDUP from lane instructions only have
05201       // constant-index forms.
05202       if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
05203           isa<ConstantSDNode>(Value->getOperand(1))) {
05204         // We need to create a new undef vector to use for the VDUPLANE if the
05205         // size of the vector from which we get the value is different than the
05206         // size of the vector that we need to create. We will insert the element
05207         // such that the register coalescer will remove unnecessary copies.
05208         if (VT != Value->getOperand(0).getValueType()) {
05209           ConstantSDNode *constIndex;
05210           constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1));
05211           assert(constIndex && "The index is not a constant!");
05212           unsigned index = constIndex->getAPIntValue().getLimitedValue() %
05213                              VT.getVectorNumElements();
05214           N =  DAG.getNode(ARMISD::VDUPLANE, dl, VT,
05215                  DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT),
05216                         Value, DAG.getConstant(index, MVT::i32)),
05217                            DAG.getConstant(index, MVT::i32));
05218         } else
05219           N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
05220                         Value->getOperand(0), Value->getOperand(1));
05221       } else
05222         N = DAG.getNode(ARMISD::VDUP, dl, VT, Value);
05223 
05224       if (!usesOnlyOneValue) {
05225         // The dominant value was splatted as 'N', but we now have to insert
05226         // all differing elements.
05227         for (unsigned I = 0; I < NumElts; ++I) {
05228           if (Op.getOperand(I) == Value)
05229             continue;
05230           SmallVector<SDValue, 3> Ops;
05231           Ops.push_back(N);
05232           Ops.push_back(Op.getOperand(I));
05233           Ops.push_back(DAG.getConstant(I, MVT::i32));
05234           N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops);
05235         }
05236       }
05237       return N;
05238     }
05239     if (VT.getVectorElementType().isFloatingPoint()) {
05240       SmallVector<SDValue, 8> Ops;
05241       for (unsigned i = 0; i < NumElts; ++i)
05242         Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
05243                                   Op.getOperand(i)));
05244       EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
05245       SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, Ops);
05246       Val = LowerBUILD_VECTOR(Val, DAG, ST);
05247       if (Val.getNode())
05248         return DAG.getNode(ISD::BITCAST, dl, VT, Val);
05249     }
05250     if (usesOnlyOneValue) {
05251       SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
05252       if (isConstant && Val.getNode())
05253         return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
05254     }
05255   }
05256 
05257   // If all elements are constants and the case above didn't get hit, fall back
05258   // to the default expansion, which will generate a load from the constant
05259   // pool.
05260   if (isConstant)
05261     return SDValue();
05262 
05263   // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
05264   if (NumElts >= 4) {
05265     SDValue shuffle = ReconstructShuffle(Op, DAG);
05266     if (shuffle != SDValue())
05267       return shuffle;
05268   }
05269 
05270   // Vectors with 32- or 64-bit elements can be built by directly assigning
05271   // the subregisters.  Lower it to an ARMISD::BUILD_VECTOR so the operands
05272   // will be legalized.
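  // For example, a v2i64 BUILD_VECTOR becomes an ARMISD::BUILD_VECTOR of two
  // f64 operands (each element bitcast to f64), and the result is bitcast
  // back to v2i64.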
05273   if (EltSize >= 32) {
05274     // Do the expansion with floating-point types, since that is what the VFP
05275     // registers are defined to use, and since i64 is not legal.
05276     EVT EltVT = EVT::getFloatingPointVT(EltSize);
05277     EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
05278     SmallVector<SDValue, 8> Ops;
05279     for (unsigned i = 0; i < NumElts; ++i)
05280       Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
05281     SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
05282     return DAG.getNode(ISD::BITCAST, dl, VT, Val);
05283   }
05284 
05285   // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
05286   // know the default expansion would otherwise fall back on something even
05287   // worse. For a vector with one or two non-undef values, the default is
05288   // scalar_to_vector for the elements followed by a shuffle (provided the
05289   // shuffle is valid for the target); for everything else it is
05290   // materialization element by element on the stack followed by a load.
05291   if (!isConstant && !usesOnlyOneValue) {
05292     SDValue Vec = DAG.getUNDEF(VT);
05293     for (unsigned i = 0 ; i < NumElts; ++i) {
05294       SDValue V = Op.getOperand(i);
05295       if (V.getOpcode() == ISD::UNDEF)
05296         continue;
05297       SDValue LaneIdx = DAG.getConstant(i, MVT::i32);
05298       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
05299     }
05300     return Vec;
05301   }
05302 
05303   return SDValue();
05304 }
05305 
05306 // Gather data to see if the operation can be modelled as a
05307 // shuffle in combination with VEXTs.
05308 SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
05309                                               SelectionDAG &DAG) const {
05310   SDLoc dl(Op);
05311   EVT VT = Op.getValueType();
05312   unsigned NumElts = VT.getVectorNumElements();
05313 
05314   SmallVector<SDValue, 2> SourceVecs;
05315   SmallVector<unsigned, 2> MinElts;
05316   SmallVector<unsigned, 2> MaxElts;
05317 
05318   for (unsigned i = 0; i < NumElts; ++i) {
05319     SDValue V = Op.getOperand(i);
05320     if (V.getOpcode() == ISD::UNDEF)
05321       continue;
05322     else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
05323       // A shuffle can only come from building a vector from various
05324       // elements of other vectors.
05325       return SDValue();
05326     } else if (V.getOperand(0).getValueType().getVectorElementType() !=
05327                VT.getVectorElementType()) {
05328       // This code doesn't know how to handle shuffles where the vector
05329       // element types do not match (this happens because type legalization
05330       // promotes the return type of EXTRACT_VECTOR_ELT).
05331       // FIXME: It might be appropriate to extend this code to handle
05332       // mismatched types.
05333       return SDValue();
05334     }
05335 
05336     // Record this extraction against the appropriate vector if possible...
05337     SDValue SourceVec = V.getOperand(0);
05338     // If the element number isn't a constant, we can't effectively
05339     // analyze what's going on.
05340     if (!isa<ConstantSDNode>(V.getOperand(1)))
05341       return SDValue();
05342     unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
05343     bool FoundSource = false;
05344     for (unsigned j = 0; j < SourceVecs.size(); ++j) {
05345       if (SourceVecs[j] == SourceVec) {
05346         if (MinElts[j] > EltNo)
05347           MinElts[j] = EltNo;
05348         if (MaxElts[j] < EltNo)
05349           MaxElts[j] = EltNo;
05350         FoundSource = true;
05351         break;
05352       }
05353     }
05354 
05355     // Or record a new source if not...
05356     if (!FoundSource) {
05357       SourceVecs.push_back(SourceVec);
05358       MinElts.push_back(EltNo);
05359       MaxElts.push_back(EltNo);
05360     }
05361   }
05362 
05363   // Currently we only do something sane when at most two source vectors are
05364   // involved.
05365   if (SourceVecs.size() > 2)
05366     return SDValue();
05367 
05368   SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT) };
05369   int VEXTOffsets[2] = {0, 0};
05370 
05371   // This loop extracts the usage patterns of the source vectors
05372   // and prepares appropriate SDValues for a shuffle if possible.
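  // For example, if a <4 x i16> BUILD_VECTOR only uses elements 3..6 of an
  // <8 x i16> source, that source is narrowed with a VEXT of its two halves
  // at offset 3, and the mask indices are later rebased by that offset.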
05373   for (unsigned i = 0; i < SourceVecs.size(); ++i) {
05374     if (SourceVecs[i].getValueType() == VT) {
05375       // No VEXT necessary
05376       ShuffleSrcs[i] = SourceVecs[i];
05377       VEXTOffsets[i] = 0;
05378       continue;
05379     } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) {
05380       // It probably isn't worth padding out a smaller vector just to
05381       // break it down again in a shuffle.
05382       return SDValue();
05383     }
05384 
05385     // Since only 64-bit and 128-bit vectors are legal on ARM and
05386     // we've eliminated the other cases...
05387     assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts &&
05388            "unexpected vector sizes in ReconstructShuffle");
05389 
05390     if (MaxElts[i] - MinElts[i] >= NumElts) {
05391       // Span too large for a VEXT to cope with
05392       return SDValue();
05393     }
05394 
05395     if (MinElts[i] >= NumElts) {
05396       // The extraction can just take the second half
05397       VEXTOffsets[i] = NumElts;
05398       ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
05399                                    SourceVecs[i],
05400                                    DAG.getIntPtrConstant(NumElts));
05401     } else if (MaxElts[i] < NumElts) {
05402       // The extraction can just take the first half
05403       VEXTOffsets[i] = 0;
05404       ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
05405                                    SourceVecs[i],
05406                                    DAG.getIntPtrConstant(0));
05407     } else {
05408       // An actual VEXT is needed
05409       VEXTOffsets[i] = MinElts[i];
05410       SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
05411                                      SourceVecs[i],
05412                                      DAG.getIntPtrConstant(0));
05413       SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
05414                                      SourceVecs[i],
05415                                      DAG.getIntPtrConstant(NumElts));
05416       ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2,
05417                                    DAG.getConstant(VEXTOffsets[i], MVT::i32));
05418     }
05419   }
05420 
05421   SmallVector<int, 8> Mask;
05422 
05423   for (unsigned i = 0; i < NumElts; ++i) {
05424     SDValue Entry = Op.getOperand(i);
05425     if (Entry.getOpcode() == ISD::UNDEF) {
05426       Mask.push_back(-1);
05427       continue;
05428     }
05429 
05430     SDValue ExtractVec = Entry.getOperand(0);
05431     int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i)
05432                                           .getOperand(1))->getSExtValue();
05433     if (ExtractVec == SourceVecs[0]) {
05434       Mask.push_back(ExtractElt - VEXTOffsets[0]);
05435     } else {
05436       Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]);
05437     }
05438   }
05439 
05440   // Final check before we try to produce nonsense...
05441   if (